[ "float(np.min(var)) << float(np.average(var)) \\ << float(np.max(var)) << float(np.std(var)) \\ << map(float, np.percentile(var, [25.,50.,75.]))", "'+var_name], ['d']*7) var = mesh.get_point_arrays().get(var_name).as_array() table << float(np.min(var)) << float(np.average(var)) \\ << float(np.max(var))", "'+var_name, \\ 'max '+var_name, 'std '+var_name, 'low_q '+var_name, \\ 'med '+var_name, 'up_q '+var_name],", "%d\\n'%(rank)) req = teca_metadata(req_in) req['arrays'] = var_names return [req] return request def get_execute_callback(rank,", "MPI %d\\n'%(rank)) req = teca_metadata(req_in) req['arrays'] = var_names return [req] return request def", "<< mesh.get_time_step() << mesh.get_time() for var_name in var_names: table.declare_columns(['min '+var_name, 'avg '+var_name, \\", "mesh = as_teca_cartesian_mesh(data_in[0]) table = teca_table.New() table.declare_columns(['step','time'], ['ul','d']) table << mesh.get_time_step() << mesh.get_time()", "<< float(np.average(var)) \\ << float(np.max(var)) << float(np.std(var)) \\ << map(float, np.percentile(var, [25.,50.,75.])) return", "\\ << float(np.max(var)) << float(np.std(var)) \\ << map(float, np.percentile(var, [25.,50.,75.])) return table return", "req['arrays'] = var_names return [req] return request def get_execute_callback(rank, var_names): def execute(port, data_in,", "= teca_metadata(req_in) req['arrays'] = var_names return [req] return request def get_execute_callback(rank, var_names): def", "table << mesh.get_time_step() << mesh.get_time() for var_name in var_names: table.declare_columns(['min '+var_name, 'avg '+var_name,", "['d']*7) var = mesh.get_point_arrays().get(var_name).as_array() table << float(np.min(var)) << float(np.average(var)) \\ << float(np.max(var)) <<", "md_in, req_in): sys.stderr.write('descriptive_stats::request MPI %d\\n'%(rank)) req = teca_metadata(req_in) req['arrays'] = var_names return [req]", "req = teca_metadata(req_in) req['arrays'] = var_names return [req] return request def get_execute_callback(rank, var_names):", "def get_execute_callback(rank, var_names): def execute(port, data_in, req): sys.stderr.write('descriptive_stats::execute MPI %d\\n'%(rank)) mesh = as_teca_cartesian_mesh(data_in[0])", "table = teca_table.New() table.declare_columns(['step','time'], ['ul','d']) table << mesh.get_time_step() << mesh.get_time() for var_name in", "'std '+var_name, 'low_q '+var_name, \\ 'med '+var_name, 'up_q '+var_name], ['d']*7) var = mesh.get_point_arrays().get(var_name).as_array()", "get_request_callback(rank, var_names): def request(port, md_in, req_in): sys.stderr.write('descriptive_stats::request MPI %d\\n'%(rank)) req = teca_metadata(req_in) req['arrays']", "\\ 'med '+var_name, 'up_q '+var_name], ['d']*7) var = mesh.get_point_arrays().get(var_name).as_array() table << float(np.min(var)) <<", "= as_teca_cartesian_mesh(data_in[0]) table = teca_table.New() table.declare_columns(['step','time'], ['ul','d']) table << mesh.get_time_step() << mesh.get_time() for", "req_in): sys.stderr.write('descriptive_stats::request MPI %d\\n'%(rank)) req = teca_metadata(req_in) req['arrays'] = var_names return [req] return", "[req] return request def get_execute_callback(rank, var_names): def execute(port, data_in, req): sys.stderr.write('descriptive_stats::execute MPI %d\\n'%(rank))", "for var_name in var_names: table.declare_columns(['min '+var_name, 'avg '+var_name, \\ 'max '+var_name, 'std '+var_name,", "sys def get_request_callback(rank, var_names): def request(port, md_in, req_in): 
sys.stderr.write('descriptive_stats::request MPI %d\\n'%(rank)) req =", "var_names: table.declare_columns(['min '+var_name, 'avg '+var_name, \\ 'max '+var_name, 'std '+var_name, 'low_q '+var_name, \\", "as np import sys def get_request_callback(rank, var_names): def request(port, md_in, req_in): sys.stderr.write('descriptive_stats::request MPI", "sys.stderr.write('descriptive_stats::execute MPI %d\\n'%(rank)) mesh = as_teca_cartesian_mesh(data_in[0]) table = teca_table.New() table.declare_columns(['step','time'], ['ul','d']) table <<", "req): sys.stderr.write('descriptive_stats::execute MPI %d\\n'%(rank)) mesh = as_teca_cartesian_mesh(data_in[0]) table = teca_table.New() table.declare_columns(['step','time'], ['ul','d']) table", "table.declare_columns(['step','time'], ['ul','d']) table << mesh.get_time_step() << mesh.get_time() for var_name in var_names: table.declare_columns(['min '+var_name,", "return request def get_execute_callback(rank, var_names): def execute(port, data_in, req): sys.stderr.write('descriptive_stats::execute MPI %d\\n'%(rank)) mesh", "table.declare_columns(['min '+var_name, 'avg '+var_name, \\ 'max '+var_name, 'std '+var_name, 'low_q '+var_name, \\ 'med", "* import numpy as np import sys def get_request_callback(rank, var_names): def request(port, md_in,", "as_teca_cartesian_mesh(data_in[0]) table = teca_table.New() table.declare_columns(['step','time'], ['ul','d']) table << mesh.get_time_step() << mesh.get_time() for var_name", "\\ 'max '+var_name, 'std '+var_name, 'low_q '+var_name, \\ 'med '+var_name, 'up_q '+var_name], ['d']*7)", "'avg '+var_name, \\ 'max '+var_name, 'std '+var_name, 'low_q '+var_name, \\ 'med '+var_name, 'up_q", "def execute(port, data_in, req): sys.stderr.write('descriptive_stats::execute MPI %d\\n'%(rank)) mesh = as_teca_cartesian_mesh(data_in[0]) table = teca_table.New()", "%d\\n'%(rank)) mesh = as_teca_cartesian_mesh(data_in[0]) table = teca_table.New() table.declare_columns(['step','time'], ['ul','d']) table << mesh.get_time_step() <<", "var_name in var_names: table.declare_columns(['min '+var_name, 'avg '+var_name, \\ 'max '+var_name, 'std '+var_name, 'low_q", "teca_table.New() table.declare_columns(['step','time'], ['ul','d']) table << mesh.get_time_step() << mesh.get_time() for var_name in var_names: table.declare_columns(['min", "'+var_name, \\ 'med '+var_name, 'up_q '+var_name], ['d']*7) var = mesh.get_point_arrays().get(var_name).as_array() table << float(np.min(var))", "import sys def get_request_callback(rank, var_names): def request(port, md_in, req_in): sys.stderr.write('descriptive_stats::request MPI %d\\n'%(rank)) req", "def request(port, md_in, req_in): sys.stderr.write('descriptive_stats::request MPI %d\\n'%(rank)) req = teca_metadata(req_in) req['arrays'] = var_names", "return [req] return request def get_execute_callback(rank, var_names): def execute(port, data_in, req): sys.stderr.write('descriptive_stats::execute MPI", "var_names): def request(port, md_in, req_in): sys.stderr.write('descriptive_stats::request MPI %d\\n'%(rank)) req = teca_metadata(req_in) req['arrays'] =", "<< float(np.min(var)) << float(np.average(var)) \\ << float(np.max(var)) << float(np.std(var)) \\ << map(float, np.percentile(var,", "import * import numpy as np import sys def get_request_callback(rank, var_names): def request(port,", "mesh.get_time() for var_name in var_names: table.declare_columns(['min '+var_name, 'avg '+var_name, \\ 'max '+var_name, 'std", "var_names return [req] return request def get_execute_callback(rank, var_names): def 
execute(port, data_in, req): sys.stderr.write('descriptive_stats::execute", "'+var_name, 'avg '+var_name, \\ 'max '+var_name, 'std '+var_name, 'low_q '+var_name, \\ 'med '+var_name,", "'up_q '+var_name], ['d']*7) var = mesh.get_point_arrays().get(var_name).as_array() table << float(np.min(var)) << float(np.average(var)) \\ <<", "= mesh.get_point_arrays().get(var_name).as_array() table << float(np.min(var)) << float(np.average(var)) \\ << float(np.max(var)) << float(np.std(var)) \\", "request def get_execute_callback(rank, var_names): def execute(port, data_in, req): sys.stderr.write('descriptive_stats::execute MPI %d\\n'%(rank)) mesh =", "= var_names return [req] return request def get_execute_callback(rank, var_names): def execute(port, data_in, req):", "var_names): def execute(port, data_in, req): sys.stderr.write('descriptive_stats::execute MPI %d\\n'%(rank)) mesh = as_teca_cartesian_mesh(data_in[0]) table =", "mesh.get_point_arrays().get(var_name).as_array() table << float(np.min(var)) << float(np.average(var)) \\ << float(np.max(var)) << float(np.std(var)) \\ <<", "teca import * import numpy as np import sys def get_request_callback(rank, var_names): def", "def get_request_callback(rank, var_names): def request(port, md_in, req_in): sys.stderr.write('descriptive_stats::request MPI %d\\n'%(rank)) req = teca_metadata(req_in)", "<< float(np.max(var)) << float(np.std(var)) \\ << map(float, np.percentile(var, [25.,50.,75.])) return table return execute", "table << float(np.min(var)) << float(np.average(var)) \\ << float(np.max(var)) << float(np.std(var)) \\ << map(float,", "'max '+var_name, 'std '+var_name, 'low_q '+var_name, \\ 'med '+var_name, 'up_q '+var_name], ['d']*7) var", "from teca import * import numpy as np import sys def get_request_callback(rank, var_names):", "'+var_name, 'low_q '+var_name, \\ 'med '+var_name, 'up_q '+var_name], ['d']*7) var = mesh.get_point_arrays().get(var_name).as_array() table", "in var_names: table.declare_columns(['min '+var_name, 'avg '+var_name, \\ 'max '+var_name, 'std '+var_name, 'low_q '+var_name,", "request(port, md_in, req_in): sys.stderr.write('descriptive_stats::request MPI %d\\n'%(rank)) req = teca_metadata(req_in) req['arrays'] = var_names return", "numpy as np import sys def get_request_callback(rank, var_names): def request(port, md_in, req_in): sys.stderr.write('descriptive_stats::request", "execute(port, data_in, req): sys.stderr.write('descriptive_stats::execute MPI %d\\n'%(rank)) mesh = as_teca_cartesian_mesh(data_in[0]) table = teca_table.New() table.declare_columns(['step','time'],", "MPI %d\\n'%(rank)) mesh = as_teca_cartesian_mesh(data_in[0]) table = teca_table.New() table.declare_columns(['step','time'], ['ul','d']) table << mesh.get_time_step()", "import numpy as np import sys def get_request_callback(rank, var_names): def request(port, md_in, req_in):", "var = mesh.get_point_arrays().get(var_name).as_array() table << float(np.min(var)) << float(np.average(var)) \\ << float(np.max(var)) << float(np.std(var))", "np import sys def get_request_callback(rank, var_names): def request(port, md_in, req_in): sys.stderr.write('descriptive_stats::request MPI %d\\n'%(rank))", "data_in, req): sys.stderr.write('descriptive_stats::execute MPI %d\\n'%(rank)) mesh = as_teca_cartesian_mesh(data_in[0]) table = teca_table.New() table.declare_columns(['step','time'], ['ul','d'])", "mesh.get_time_step() << mesh.get_time() for var_name in var_names: table.declare_columns(['min '+var_name, 'avg '+var_name, \\ 'max", "'+var_name, 'std '+var_name, 'low_q 
'+var_name, \\ 'med '+var_name, 'up_q '+var_name], ['d']*7) var =", "teca_metadata(req_in) req['arrays'] = var_names return [req] return request def get_execute_callback(rank, var_names): def execute(port,", "get_execute_callback(rank, var_names): def execute(port, data_in, req): sys.stderr.write('descriptive_stats::execute MPI %d\\n'%(rank)) mesh = as_teca_cartesian_mesh(data_in[0]) table", "sys.stderr.write('descriptive_stats::request MPI %d\\n'%(rank)) req = teca_metadata(req_in) req['arrays'] = var_names return [req] return request", "['ul','d']) table << mesh.get_time_step() << mesh.get_time() for var_name in var_names: table.declare_columns(['min '+var_name, 'avg", "<< mesh.get_time() for var_name in var_names: table.declare_columns(['min '+var_name, 'avg '+var_name, \\ 'max '+var_name,", "float(np.average(var)) \\ << float(np.max(var)) << float(np.std(var)) \\ << map(float, np.percentile(var, [25.,50.,75.])) return table", "= teca_table.New() table.declare_columns(['step','time'], ['ul','d']) table << mesh.get_time_step() << mesh.get_time() for var_name in var_names:", "'low_q '+var_name, \\ 'med '+var_name, 'up_q '+var_name], ['d']*7) var = mesh.get_point_arrays().get(var_name).as_array() table <<", "'+var_name, 'up_q '+var_name], ['d']*7) var = mesh.get_point_arrays().get(var_name).as_array() table << float(np.min(var)) << float(np.average(var)) \\", "'med '+var_name, 'up_q '+var_name], ['d']*7) var = mesh.get_point_arrays().get(var_name).as_array() table << float(np.min(var)) << float(np.average(var))" ]
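The two factories above only return closures; by themselves they execute nothing. Below is a minimal sketch of how they might be attached to a pipeline, assuming the usual TECA Python API (a teca_cf_reader stage, a teca_programmable_algorithm with set_request_callback/set_execute_callback setters, and an update() call to drive the pipeline); the class and method names, the file regex, and the variable names are assumptions, not taken from this file, and should be checked against the installed TECA version.

# Hypothetical driver wiring the callbacks above into a TECA pipeline.
# Reader/algorithm class names are assumed from the typical TECA Python API.
from teca import *
from mpi4py import MPI

rank = MPI.COMM_WORLD.Get_rank()
var_names = ['TMQ', 'T200']                 # illustrative variable names

reader = teca_cf_reader.New()               # assumed NetCDF CF reader stage
reader.set_files_regex('cam5.*\\.nc$')      # placeholder input pattern

stats = teca_programmable_algorithm.New()   # assumed programmable stage
stats.set_request_callback(get_request_callback(rank, var_names))
stats.set_execute_callback(get_execute_callback(rank, var_names))
stats.set_input_connection(reader.get_output_port())

stats.update()                              # assumed to run the pipeline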
[ "für Theoretische Physik # Author: <NAME> <<EMAIL>> \"\"\" Test fixtures providing input for", "-*- coding: utf-8 -*- # © 2017-2019, ETH Zurich, Institut für Theoretische Physik", "# Author: <NAME> <<EMAIL>> \"\"\" Test fixtures providing input for the strain workchains.", "Zurich, Institut für Theoretische Physik # Author: <NAME> <<EMAIL>> \"\"\" Test fixtures providing", "\"\"\" Test fixtures providing input for the strain workchains. \"\"\" # pylint: disable=unused-argument,redefined-outer-name,missing-docstring", "<reponame>greschd/aiida_strain<gh_stars>0 # -*- coding: utf-8 -*- # © 2017-2019, ETH Zurich, Institut für", "providing input for the strain workchains. \"\"\" # pylint: disable=unused-argument,redefined-outer-name,missing-docstring import pytest __all__", "params=[ 'three_five.Biaxial001', 'three_five.Biaxial110', 'three_five.Biaxial111', 'three_five.Uniaxial110', ] ) def strain_kind(request): return request.param @pytest.fixture(params=[ 'InAs',", "] ) def strain_kind(request): return request.param @pytest.fixture(params=[ 'InAs', 'InSb', 'GaSb', ]) def strain_parameters(request):", "# © 2017-2019, ETH Zurich, Institut für Theoretische Physik # Author: <NAME> <<EMAIL>>", "'InAs', 'InSb', 'GaSb', ]) def strain_parameters(request): return request.param @pytest.fixture def strain_inputs(configure, strain_kind, strain_parameters,", "return request.param @pytest.fixture def strain_inputs(configure, strain_kind, strain_parameters, sample): import pymatgen from aiida import", "import pymatgen from aiida import orm structure = orm.StructureData() structure.set_pymatgen(pymatgen.Structure.from_file(sample('POSCAR'))) return dict( structure=structure,", "__all__ = ['strain_kind', 'strain_parameters', 'strain_inputs'] @pytest.fixture( params=[ 'three_five.Biaxial001', 'three_five.Biaxial110', 'three_five.Biaxial111', 'three_five.Uniaxial110', ] )", "input for the strain workchains. \"\"\" # pylint: disable=unused-argument,redefined-outer-name,missing-docstring import pytest __all__ =", "# pylint: disable=unused-argument,redefined-outer-name,missing-docstring import pytest __all__ = ['strain_kind', 'strain_parameters', 'strain_inputs'] @pytest.fixture( params=[ 'three_five.Biaxial001',", "sample): import pymatgen from aiida import orm structure = orm.StructureData() structure.set_pymatgen(pymatgen.Structure.from_file(sample('POSCAR'))) return dict(", "fixtures providing input for the strain workchains. \"\"\" # pylint: disable=unused-argument,redefined-outer-name,missing-docstring import pytest", "strain_kind(request): return request.param @pytest.fixture(params=[ 'InAs', 'InSb', 'GaSb', ]) def strain_parameters(request): return request.param @pytest.fixture", "'strain_inputs'] @pytest.fixture( params=[ 'three_five.Biaxial001', 'three_five.Biaxial110', 'three_five.Biaxial111', 'three_five.Uniaxial110', ] ) def strain_kind(request): return request.param", "strain workchains. 
\"\"\" # pylint: disable=unused-argument,redefined-outer-name,missing-docstring import pytest __all__ = ['strain_kind', 'strain_parameters', 'strain_inputs']", "-*- # © 2017-2019, ETH Zurich, Institut für Theoretische Physik # Author: <NAME>", "orm structure = orm.StructureData() structure.set_pymatgen(pymatgen.Structure.from_file(sample('POSCAR'))) return dict( structure=structure, strain_kind=orm.Str(strain_kind), strain_parameters=orm.Str(strain_parameters), strain_strengths=orm.List(list=[-0.2, -0.1, 0.,", "['strain_kind', 'strain_parameters', 'strain_inputs'] @pytest.fixture( params=[ 'three_five.Biaxial001', 'three_five.Biaxial110', 'three_five.Biaxial111', 'three_five.Uniaxial110', ] ) def strain_kind(request):", "strain_inputs(configure, strain_kind, strain_parameters, sample): import pymatgen from aiida import orm structure = orm.StructureData()", "disable=unused-argument,redefined-outer-name,missing-docstring import pytest __all__ = ['strain_kind', 'strain_parameters', 'strain_inputs'] @pytest.fixture( params=[ 'three_five.Biaxial001', 'three_five.Biaxial110', 'three_five.Biaxial111',", "Theoretische Physik # Author: <NAME> <<EMAIL>> \"\"\" Test fixtures providing input for the", "'three_five.Biaxial001', 'three_five.Biaxial110', 'three_five.Biaxial111', 'three_five.Uniaxial110', ] ) def strain_kind(request): return request.param @pytest.fixture(params=[ 'InAs', 'InSb',", "def strain_kind(request): return request.param @pytest.fixture(params=[ 'InAs', 'InSb', 'GaSb', ]) def strain_parameters(request): return request.param", "import pytest __all__ = ['strain_kind', 'strain_parameters', 'strain_inputs'] @pytest.fixture( params=[ 'three_five.Biaxial001', 'three_five.Biaxial110', 'three_five.Biaxial111', 'three_five.Uniaxial110',", "'InSb', 'GaSb', ]) def strain_parameters(request): return request.param @pytest.fixture def strain_inputs(configure, strain_kind, strain_parameters, sample):", "strain_parameters, sample): import pymatgen from aiida import orm structure = orm.StructureData() structure.set_pymatgen(pymatgen.Structure.from_file(sample('POSCAR'))) return", "the strain workchains. 
\"\"\" # pylint: disable=unused-argument,redefined-outer-name,missing-docstring import pytest __all__ = ['strain_kind', 'strain_parameters',", "def strain_inputs(configure, strain_kind, strain_parameters, sample): import pymatgen from aiida import orm structure =", "= orm.StructureData() structure.set_pymatgen(pymatgen.Structure.from_file(sample('POSCAR'))) return dict( structure=structure, strain_kind=orm.Str(strain_kind), strain_parameters=orm.Str(strain_parameters), strain_strengths=orm.List(list=[-0.2, -0.1, 0., 0.1, 0.2])", "aiida import orm structure = orm.StructureData() structure.set_pymatgen(pymatgen.Structure.from_file(sample('POSCAR'))) return dict( structure=structure, strain_kind=orm.Str(strain_kind), strain_parameters=orm.Str(strain_parameters), strain_strengths=orm.List(list=[-0.2,", "'GaSb', ]) def strain_parameters(request): return request.param @pytest.fixture def strain_inputs(configure, strain_kind, strain_parameters, sample): import", "\"\"\" # pylint: disable=unused-argument,redefined-outer-name,missing-docstring import pytest __all__ = ['strain_kind', 'strain_parameters', 'strain_inputs'] @pytest.fixture( params=[", "pymatgen from aiida import orm structure = orm.StructureData() structure.set_pymatgen(pymatgen.Structure.from_file(sample('POSCAR'))) return dict( structure=structure, strain_kind=orm.Str(strain_kind),", "'three_five.Biaxial111', 'three_five.Uniaxial110', ] ) def strain_kind(request): return request.param @pytest.fixture(params=[ 'InAs', 'InSb', 'GaSb', ])", "strain_kind, strain_parameters, sample): import pymatgen from aiida import orm structure = orm.StructureData() structure.set_pymatgen(pymatgen.Structure.from_file(sample('POSCAR')))", "= ['strain_kind', 'strain_parameters', 'strain_inputs'] @pytest.fixture( params=[ 'three_five.Biaxial001', 'three_five.Biaxial110', 'three_five.Biaxial111', 'three_five.Uniaxial110', ] ) def", "Institut für Theoretische Physik # Author: <NAME> <<EMAIL>> \"\"\" Test fixtures providing input", "strain_parameters(request): return request.param @pytest.fixture def strain_inputs(configure, strain_kind, strain_parameters, sample): import pymatgen from aiida", "import orm structure = orm.StructureData() structure.set_pymatgen(pymatgen.Structure.from_file(sample('POSCAR'))) return dict( structure=structure, strain_kind=orm.Str(strain_kind), strain_parameters=orm.Str(strain_parameters), strain_strengths=orm.List(list=[-0.2, -0.1,", "2017-2019, ETH Zurich, Institut für Theoretische Physik # Author: <NAME> <<EMAIL>> \"\"\" Test", "structure = orm.StructureData() structure.set_pymatgen(pymatgen.Structure.from_file(sample('POSCAR'))) return dict( structure=structure, strain_kind=orm.Str(strain_kind), strain_parameters=orm.Str(strain_parameters), strain_strengths=orm.List(list=[-0.2, -0.1, 0., 0.1,", "request.param @pytest.fixture def strain_inputs(configure, strain_kind, strain_parameters, sample): import pymatgen from aiida import orm", "'three_five.Biaxial110', 'three_five.Biaxial111', 'three_five.Uniaxial110', ] ) def strain_kind(request): return request.param @pytest.fixture(params=[ 'InAs', 'InSb', 'GaSb',", "def strain_parameters(request): return request.param @pytest.fixture def strain_inputs(configure, strain_kind, strain_parameters, sample): import pymatgen from", "<<EMAIL>> \"\"\" Test fixtures providing input for the strain workchains. 
\"\"\" # pylint:", "orm.StructureData() structure.set_pymatgen(pymatgen.Structure.from_file(sample('POSCAR'))) return dict( structure=structure, strain_kind=orm.Str(strain_kind), strain_parameters=orm.Str(strain_parameters), strain_strengths=orm.List(list=[-0.2, -0.1, 0., 0.1, 0.2]) )", "@pytest.fixture( params=[ 'three_five.Biaxial001', 'three_five.Biaxial110', 'three_five.Biaxial111', 'three_five.Uniaxial110', ] ) def strain_kind(request): return request.param @pytest.fixture(params=[", "Author: <NAME> <<EMAIL>> \"\"\" Test fixtures providing input for the strain workchains. \"\"\"", "from aiida import orm structure = orm.StructureData() structure.set_pymatgen(pymatgen.Structure.from_file(sample('POSCAR'))) return dict( structure=structure, strain_kind=orm.Str(strain_kind), strain_parameters=orm.Str(strain_parameters),", "pylint: disable=unused-argument,redefined-outer-name,missing-docstring import pytest __all__ = ['strain_kind', 'strain_parameters', 'strain_inputs'] @pytest.fixture( params=[ 'three_five.Biaxial001', 'three_five.Biaxial110',", "@pytest.fixture def strain_inputs(configure, strain_kind, strain_parameters, sample): import pymatgen from aiida import orm structure", "© 2017-2019, ETH Zurich, Institut für Theoretische Physik # Author: <NAME> <<EMAIL>> \"\"\"", "request.param @pytest.fixture(params=[ 'InAs', 'InSb', 'GaSb', ]) def strain_parameters(request): return request.param @pytest.fixture def strain_inputs(configure,", "@pytest.fixture(params=[ 'InAs', 'InSb', 'GaSb', ]) def strain_parameters(request): return request.param @pytest.fixture def strain_inputs(configure, strain_kind,", "'strain_parameters', 'strain_inputs'] @pytest.fixture( params=[ 'three_five.Biaxial001', 'three_five.Biaxial110', 'three_five.Biaxial111', 'three_five.Uniaxial110', ] ) def strain_kind(request): return", "Physik # Author: <NAME> <<EMAIL>> \"\"\" Test fixtures providing input for the strain", "utf-8 -*- # © 2017-2019, ETH Zurich, Institut für Theoretische Physik # Author:", "for the strain workchains. \"\"\" # pylint: disable=unused-argument,redefined-outer-name,missing-docstring import pytest __all__ = ['strain_kind',", "]) def strain_parameters(request): return request.param @pytest.fixture def strain_inputs(configure, strain_kind, strain_parameters, sample): import pymatgen", "pytest __all__ = ['strain_kind', 'strain_parameters', 'strain_inputs'] @pytest.fixture( params=[ 'three_five.Biaxial001', 'three_five.Biaxial110', 'three_five.Biaxial111', 'three_five.Uniaxial110', ]", "return request.param @pytest.fixture(params=[ 'InAs', 'InSb', 'GaSb', ]) def strain_parameters(request): return request.param @pytest.fixture def", "ETH Zurich, Institut für Theoretische Physik # Author: <NAME> <<EMAIL>> \"\"\" Test fixtures", "Test fixtures providing input for the strain workchains. \"\"\" # pylint: disable=unused-argument,redefined-outer-name,missing-docstring import", ") def strain_kind(request): return request.param @pytest.fixture(params=[ 'InAs', 'InSb', 'GaSb', ]) def strain_parameters(request): return", "<NAME> <<EMAIL>> \"\"\" Test fixtures providing input for the strain workchains. \"\"\" #", "workchains. 
\"\"\" # pylint: disable=unused-argument,redefined-outer-name,missing-docstring import pytest __all__ = ['strain_kind', 'strain_parameters', 'strain_inputs'] @pytest.fixture(", "'three_five.Uniaxial110', ] ) def strain_kind(request): return request.param @pytest.fixture(params=[ 'InAs', 'InSb', 'GaSb', ]) def", "# -*- coding: utf-8 -*- # © 2017-2019, ETH Zurich, Institut für Theoretische", "coding: utf-8 -*- # © 2017-2019, ETH Zurich, Institut für Theoretische Physik #" ]
[ "query.query return [r['tz_country'] for r in query] def get_most_popular_child_chunk_in(self, country): \"\"\" Get the", "batch_size=10000): most_recent_time = cls.objects.aggregate(most_recent_time=models.Max('created_at')) if most_recent_time['most_recent_time'] is None: return 0 most_recent_time = most_recent_time['most_recent_time']", "padding to add to counts for the concat/max/split trick count_padding = 10 #", "most_recent_time['most_recent_time'] time_cutoff = most_recent_time - settings.NODE_FREEZE_INTERVAL subset_query = \"\"\" SELECT map_treenode.id FROM map_treenode", "logger = logging.getLogger('map') class TreeNode(models.Model): class Meta: index_together = [ ['parent', 'word'], ['created_at',", "print splitquery cursor = connection.cursor() cursor.execute(splitquery, [self.id, country_limit]) return cursor.fetchall() def get_most_popular_child_chunk_by_country(self, country_limit=10):", "\"\"\" DELETE FROM map_treenode WHERE (parent_id IS NULL) AND NOT (id IN %s)", "else: country = country.country new_tweet_chunks.append(TweetChunk( node=node, twitter_id=tweet.tweet_id, tweet_text=tweet.text, created_at=tweet.created_at, tz_country=country)) parent = node", "LPAD(COUNT(DISTINCT map_tweetchunk.id), {padding}, '0'), '-', map_treenode.word ) as combo FROM map_tweetchunk -- USE", "provided for caching values across calls. \"\"\" if cache is not None and", "concat/max/split trick count_padding = 10 # Get the name of the stupid index", "= query.order_by('-chunk_count') # Aggregate fields query = query.annotate(chunk_count=models.Count('id')) # Limit query = query[:limit]", "analysis. 1. Extend the BaseTimeFrame class. 2. Indicate how often to run the", "def get_stream_memory_cutoff(cls): \"\"\"Need to override this because we have tons of data that", "Meta: index_together = [ ['parent', 'word'], ['created_at', 'parent'], ] ROOT_NODES = [1, 2,", "have no associated TweetChunks \"\"\" return cls.objects.filter(chunks=None).exclude(pk__in=cls.ROOT_NODES) @classmethod def follow_chunks(cls, prefix, query_chunks): \"\"\"Returns", "the given country, non-empty words # We'll do this later instead # query", "by tz_country query = TweetChunk.objects.values('tz_country') # Only non-empty tz_countries, words query = query.exclude(tz_country='')", "was most commonly used - For the top result from each country, return", "every country # up to the limit (plus 1 to allow for the", "it is new. A dictionary can optionally be provided for caching values across", "it impossible for it to be deleted for a brief period. node.created_at =", "any fields you need to calculate. 
You can also store data on separate", "roots = TreeNode.objects.filter(parent=1) user_tz_map = dict((r.user_time_zone, r) for r in tzcountries) user_tz_map[None] =", "our children query = query.filter(node__parent=self) # Aggregate fields query = query.annotate(count=models.Count('id')) return query", "ORDER BY max_count DESC LIMIT {limit} \"\"\".format(subquery=subquery.query, limit=country_limit + 1) # Template for", "( {subset_query} ) subset ON map_treenode.id = subset.id SET map_treenode.parent_id = NULL \"\"\".format(subset_query=subset_query)", "empty tree nodes...\") batch_orphaned = cls.orphan_empty_nodes(batch_size=batch_size) total_orphaned = batch_orphaned while batch_orphaned == batch_size:", "propagated = cls.propagate_orphanage() while propagated > 0: logger.info(\" ...orphaned %d new nodes (should", "= cls.objects.filter(tz_country=country_name, node=node) count = chunks.count() chunk = chunks[random.randint(0, count - 1)] return", "transaction.atomic(): # Get or create a node with parent and word node, created", "# Only non-empty tz_countries, words query = query.exclude(tz_country='') query = query.exclude(node__word='') # Only", "we create its TweetChunk. node, created = self.get_tree_node(parent=parent, word=chunk, cache=node_cache) country = user_tz_map.get(tweet.user_time_zone,", "= cls.propagate_orphanage() while propagated > 0: logger.info(\" ...orphaned %d new nodes (should be", "How much padding to add to counts for the concat/max/split trick count_padding =", "a tree node for the parent and word, and whether or not it", "obsolete tree nodes TreeNode.cleanup_empty() else: logger.info(\"Skipping empty treenode cleanup on this frame\") #", "query.exclude(tz_country='') # Only chunks belonging to our children query = query.filter(node__parent=self) # Aggregate", "fields query = query.annotate(count=models.Count('id')) return query def get_most_popular_child_chunk_by_country2(self, country_limit=10): \"\"\" Get tuples of", "Template for the overall query # It finds the actual chunk for each", "- settings.KEEP_DATA_FOR logger.info(\"Cleaning chunks from before %s...\", trailing_edge_date) batch_deleted = TweetChunk.delete_before(trailing_edge_date, batch_size=batch_size) total_deleted", "JOIN `map_treenode` ON (`map_tweetchunk`.`node_id` = `map_treenode`.`id`) WHERE ( AND `map_treenode`.`parent_id` = MY_ID_GOES HERE", "desc query = query.order_by('-count') # Aggregate fields query = query.annotate(count=models.Count('id')) print query.query #", "return cls.objects.filter(chunks=None).exclude(pk__in=cls.ROOT_NODES) @classmethod def follow_chunks(cls, prefix, query_chunks): \"\"\"Returns the node referenced by the", "['parent', 'word'], ['created_at', 'parent'], ] ROOT_NODES = [1, 2, 3, 4, 5, 6,", "we added a '-' character in the middle. 
splitquery = \"\"\" SELECT sub2.tz_country,", "self.node_cache_hits += 1 return cache[(parent, word)], False else: # We want to keep", "TreeNode.cleanup_empty() else: logger.info(\"Skipping empty treenode cleanup on this frame\") # First delete old", "\"\"\"Need to override this because we have tons of data that depends on", "query = query.order_by('-chunk_count') # Aggregate fields query = query.annotate(chunk_count=models.Count('id')) # Limit query =", "# The word is substring(maxcombo, padding+2) because # it is 1-indexed and we", "south.db.generic import DatabaseOperations from twitter_stream.fields import PositiveBigAutoField, PositiveBigAutoForeignKey from swapper import get_model_name from", "settings.NODE_FREEZE_INTERVAL subset_query = \"\"\" SELECT map_treenode.id FROM map_treenode LEFT OUTER JOIN map_tweetchunk ON", "to allow for the empty country) maxquery = \"\"\" SELECT sub.tz_country, MAX(sub.count) AS", "self.id % 3 == 0: # Then remove obsolete tree nodes TreeNode.cleanup_empty() else:", "(should not be needed)\") propagated = cls.propagate_orphanage() while propagated > 0: logger.info(\" ...orphaned", "if reset and settings.DEBUG: # Prevent apparent memory leaks # https://docs.djangoproject.com/en/dev/faq/models/#why-is-django-leaking-memory from django", "chunks logger.info(\"Orphaning empty tree nodes...\") batch_orphaned = cls.orphan_empty_nodes(batch_size=batch_size) total_orphaned = batch_orphaned while batch_orphaned", "reset and settings.DEBUG: # Prevent apparent memory leaks # https://docs.djangoproject.com/en/dev/faq/models/#why-is-django-leaking-memory from django import", "and node.\"\"\" try: chunks = cls.objects.filter(tz_country=country_name, node=node) count = chunks.count() chunk = chunks[random.randint(0,", "# Prevent apparent memory leaks # https://docs.djangoproject.com/en/dev/faq/models/#why-is-django-leaking-memory from django import db db.reset_queries() if", "# country, and the number of tweets with that word. # Concatenate the", "We want to keep trying to grab this node until we get one", "MY_ID_GOES HERE ) GROUP BY `map_tweetchunk`.`tz_country`, `map_treenode`.`word` \"\"\" # Group by country, chunk", "try: chunks = cls.objects.filter(tz_country=country_name, node=node) count = chunks.count() chunk = chunks[random.randint(0, count -", "tree\") prefix_node = root.get_child(prefix) if prefix_node is None: return None node = prefix_node", "LIMIT {limit} \"\"\".format(subquery=subquery.query, limit=country_limit + 1) # Template for the overall query #", "Tz_Country(models.Model): user_time_zone = models.CharField(max_length=32) country = models.CharField(max_length=32) class TweetChunk(models.Model): class Meta: index_together =", "out empty words and countries. superquery = \"\"\" SELECT country_node_count.tz_country, country_node_count.word, country_node_count.count FROM", "children query = query.filter(node__parent=self) # Order by count, desc query = query.order_by('-count') #", "country_node_count.count FROM ({subquery}) country_node_count INNER JOIN ({maxquery}) countrymax ON (countrymax.tz_country = country_node_count.tz_country) WHERE", "disappear while we're doing these operations return None @classmethod def delete_before(cls, oldest_date, batch_size=10000):", "words that followed this one anywhere - In every country, find the word", "We'll do this later instead # query = query.exclude(node__word='') # query = query.exclude(tz_country='')", "for every country # up to the limit (plus 1 to allow for", "the top 10 country-word-counts. 
\"\"\" # Make sure this is valid country_limit =", "those chunks by frequency. Returns a list. \"\"\" # Group by tz_country query", "AND map_tweetchunk.tz_country != '' AND map_treenode.word != '' GROUP BY map_tweetchunk.tz_country, map_tweetchunk.node_id \"\"\".format(padding=count_padding,", "= {} new_tweet_chunks = [] for tweet in tweets: root = self.check_prefix(tweet, roots)", "rh.lower() chunks = re.split('[*,.!:\"\\s;()/@#]+|\\'[\\W]|\\?+', rh) parent = root depth = 0 for chunk", "the given country and node.\"\"\" try: chunks = cls.objects.filter(tz_country=country_name, node=node) count = chunks.count()", "chunks belonging to our children query = query.filter(node__parent=self) # Order by count query", "map_treenode LEFT OUTER JOIN map_tweetchunk ON ( map_treenode.id = map_tweetchunk.node_id ) WHERE (map_tweetchunk.id", "models.IntegerField(default=0) nodes_added = models.IntegerField(default=0) chunks_added = models.IntegerField(default=0) node_cache_hits = models.IntegerField(default=0) node_cache_size = models.IntegerField(default=0)", "connection, transaction, IntegrityError, DatabaseError from django.utils import timezone import random from south.db.generic import", "\"\"\".format(subquery=subquery.query, limit=country_limit + 1) # Template for the overall query # It finds", "self.chunks_added += len(new_tweet_chunks) self.node_cache_size = len(node_cache) return tweets def cleanup(self): if self.id %", "None node_cache = {} new_tweet_chunks = [] for tweet in tweets: root =", "up the max combo into the count and the word # The word", "demo analysis. 1. Extend the BaseTimeFrame class. 2. Indicate how often to run", "for it to be deleted for a brief period. node.created_at = timezone.now() node.save()", "while True: try: with transaction.atomic(): # Get or create a node with parent", "tweet in tweets: root = self.check_prefix(tweet, roots) if not root: continue rh =", "the end, make sure to call self.mark_done(tweets) 5. Add any additional functions related", "tree nodes TreeNode.cleanup_empty() else: logger.info(\"Skipping empty treenode cleanup on this frame\") # First", "# First delete old tweet chunks TweetChunk.cleanup() TreeNode.cleanup_orphans() @classmethod def get_stream_memory_cutoff(cls): \"\"\"Need to", "logger.info(\"Deleted %d tweet chunks\", total_deleted) else: logger.info(\"No chunks to delete\") class MapTimeFrame(TweetTimeFrame): \"\"\"", "(should be 0!)\", propagated) propagated = cls.propagate_orphanage() @classmethod def cleanup_orphans(cls, batch_size=10000, reset=False): logger.info(\"Deleting", "print query.query # Limit try: result = query.first() return result['node__word'], result['count'] except ObjectDoesNotExist:", "\\ .filter(calculated=True) \\ .aggregate(latest_start_time=models.Max('start_time')) # maybe there aren't any? 
if now['latest_start_time'] is None:", "return results['earliest_created_at'] @classmethod def cleanup(cls, batch_size=10000, reset=False): # Get the most recently finished", "Get the most recently finished map time frame now = MapTimeFrame.objects \\ .filter(calculated=True)", "prefix_node = root.get_child(prefix) if prefix_node is None: return None node = prefix_node for", "cache: self.node_cache_hits += 1 return cache[(parent, word)], False else: # We want to", "return min(earliest_created_at, baseline_cutoff) else: return baseline_cutoff @classmethod def get_most_recent(cls, limit=20): \"\"\" A handy", "we get one while True: try: with transaction.atomic(): # Get or create a", "words # We'll do this later instead # query = query.exclude(node__word='') # query", "with for a few minutes # It won't be deleted by cleanup before", "cursor.fetchall() @classmethod def get_root(cls): try: return cls.objects.get(id=1) except ObjectDoesNotExist: return None class Tz_Country(models.Model):", "before %s...\", trailing_edge_date) batch_deleted = TweetChunk.delete_before(trailing_edge_date, batch_size=batch_size) total_deleted = batch_deleted while batch_deleted ==", "Disconnect TreeNodes without any chunks logger.info(\"Orphaning empty tree nodes...\") batch_orphaned = cls.orphan_empty_nodes(batch_size=batch_size) total_orphaned", "time prior to that time frame trailing_edge_date = now['latest_start_time'] - settings.KEEP_DATA_FOR logger.info(\"Cleaning chunks", "< %s) AND NOT (map_treenode.id IN %s) LIMIT %s \"\"\" query = \"\"\"", "\"\": continue if depth > settings.MAX_DEPTH: break # This node is guaranteed safe", "DESC LIMIT {limit} \"\"\".format(subquery=subquery.query, limit=country_limit + 1) # Template for the overall query", "the tweet count to the word subquery = \"\"\" SELECT map_tweetchunk.tz_country, CONCAT( LPAD(COUNT(DISTINCT", "chunks before the given date\"\"\" cursor = connection.cursor() deleted = cursor.execute(\"DELETE FROM map_tweetchunk", "Aggregate fields query = query.annotate(count=models.Count('id')) print query.query # Limit try: result = query.first()", "node @classmethod def orphan_empty_nodes(cls, batch_size=10000): most_recent_time = cls.objects.aggregate(most_recent_time=models.Max('created_at')) if most_recent_time['most_recent_time'] is None: return", "one anywhere - In every country, find the word following this one that", "query.filter(tz_country=country) # Only chunks belonging to our children query = query.filter(node__parent=self) # Order", "WHERE ( country_node_count.count = countrymax.max_count AND country_node_count.tz_country != '' AND country_node_count.word != ''", "and (parent, word) in cache: self.node_cache_hits += 1 return cache[(parent, word)], False else:", "@classmethod def delete_orphans(cls, batch_size=10000): \"\"\"Delete a batch of orphans\"\"\" query = \"\"\" DELETE", "cls.propagate_orphanage() while propagated > 0: logger.info(\" ...orphaned %d new nodes (should be 0!)\",", "['tz_country', 'node'], ] id = PositiveBigAutoField(primary_key=True) node = models.ForeignKey(TreeNode, related_name='chunks') twitter_id = models.BigIntegerField(default=0)", "the word # The word is substring(maxcombo, padding+2) because # it is 1-indexed", "return None node = prefix_node for chunk in query_chunks: node = node.get_child(chunk.lower()) if", "10 country-word-counts. \"\"\" # How much padding to add to counts for the", "batch_deleted while batch_deleted == batch_size: logger.info(\" ... 
deleted batch of %d\", batch_deleted) batch_deleted", "exists\"\"\" for root in roots: if root.word in tweet.text: return root return None", "\"\"\" Generates SQL like the following: SELECT `map_tweetchunk`.`tz_country`, `map_treenode`.`word`, COUNT(`map_tweetchunk`.`id`) AS `count` FROM", "{padding}) AS UNSIGNED) AS `count` FROM ({maxquery}) sub2 \"\"\".format(maxquery=maxquery, padding=count_padding) print splitquery cursor", "node for every # country, and the number of tweets with that word.", "roots): \"\"\"Returns a root in the tweet, if it exists\"\"\" for root in", "query[:limit] print query.query return [r['tz_country'] for r in query] def get_most_popular_child_chunk_in(self, country): \"\"\"", "this one anywhere - In every country, find the word following this one", "10 country-word-counts. \"\"\" # Make sure this is valid country_limit = int(country_limit) subquery", "is 1-indexed and we added a '-' character in the middle. splitquery =", "settings.KEEP_DATA_FOR logger.info(\"Cleaning chunks from before %s...\", trailing_edge_date) batch_deleted = TweetChunk.delete_before(trailing_edge_date, batch_size=batch_size) total_deleted =", "- settings.NODE_FREEZE_INTERVAL subset_query = \"\"\" SELECT map_treenode.id FROM map_treenode LEFT OUTER JOIN map_tweetchunk", "country. \"\"\" # Group by chunk query = TweetChunk.objects.values('node', 'node__word') # Only with", "Group by tz_country query = TweetChunk.objects.values('tz_country') # Only non-empty tz_countries, words query =", "children of current orphans are also orphaned.\"\"\" future_orphans = cls.objects.filter(parent__parent=None).exclude(parent=None)\\ .exclude(pk__in=cls.ROOT_NODES) return future_orphans.update(parent=None)", "maybe there aren't any? if now['latest_start_time'] is None: return # Preserve some time", "store data on separate models, if your data is not strictly 1:1 with", "# Get the most recently finished map time frame now = MapTimeFrame.objects \\", "node=node) count = chunks.count() chunk = chunks[random.randint(0, count - 1)] return chunk.tweet_text except", "map_treenode.word ) as combo FROM map_tweetchunk -- USE INDEX ({index_name}) LEFT OUTER JOIN", "total_deleted = batch_deleted while batch_deleted == batch_size: logger.info(\" ... deleted batch of %d\",", "return node, created def calculate(self, tweets): self.tweet_count = len(tweets) tzcountries = Tz_Country.objects.all() roots", "= [cls.ROOT_NODES, batch_size] cursor = connection.cursor() return cursor.execute(query, params) @classmethod def cleanup(cls, batch_size=10000,", "= timezone.now() node.save() except IntegrityError: # it was deleted while we were getting", "a batch of chunks before the given date\"\"\" cursor = connection.cursor() deleted =", "words query = query.exclude(node__word='') query = query.filter(tz_country=country) # Only chunks belonging to our", "chunks[random.randint(0, count - 1)] return chunk.tweet_text except DatabaseError: # things could potentially disappear", "if cache is not None and (parent, word) in cache: self.node_cache_hits += 1", "the given country. \"\"\" # Group by chunk query = TweetChunk.objects.values('node', 'node__word') #", "map_treenode.id = map_tweetchunk.node_id ) WHERE (map_tweetchunk.id IS NULL) AND (map_treenode.parent_id IS NOT NULL)", "while batch_deleted == batch_size: logger.info(\" ... 
deleted batch of %d\", batch_deleted) batch_deleted =", "country_node_count.count DESC LIMIT {limit} \"\"\".format(subquery=subquery.query, maxquery=maxquery, limit=country_limit) print superquery cursor = connection.cursor() cursor.execute(superquery)", "could potentially disappear while we're doing these operations return None @classmethod def delete_before(cls,", "map_treenode.word != '' GROUP BY map_tweetchunk.tz_country, map_tweetchunk.node_id \"\"\".format(padding=count_padding, index_name=index_name) # Now select the", "return deleted @classmethod def get_earliest_created_at(cls): \"\"\"Get the earliest created_at in any tweet chunk.\"\"\"", "None: country = '' else: country = country.country new_tweet_chunks.append(TweetChunk( node=node, twitter_id=tweet.tweet_id, tweet_text=tweet.text, created_at=tweet.created_at,", "FROM map_tweetchunk -- USE INDEX ({index_name}) LEFT OUTER JOIN map_treenode ON ( map_tweetchunk.node_id", "DatabaseError: # things could potentially disappear while we're doing these operations return None", "It won't be deleted by cleanup before we create its TweetChunk. node, created", "in the given country. \"\"\" # Group by chunk query = TweetChunk.objects.values('node', 'node__word')", "6, 7, 8, 9, 10] parent = models.ForeignKey('self', null=True, blank=True, related_name='children') word =", "number of tweets # with chunks following this node for every country #", "where you do your work. At the end, make sure to call self.mark_done(tweets)", "TreeNode.objects.filter(parent=1) user_tz_map = dict((r.user_time_zone, r) for r in tzcountries) user_tz_map[None] = None node_cache", "Tweet Chunk earliest_created_at = TweetChunk.get_earliest_created_at() if earliest_created_at is not None: return min(earliest_created_at, baseline_cutoff)", "def orphan_empty_nodes(cls, batch_size=10000): most_recent_time = cls.objects.aggregate(most_recent_time=models.Max('created_at')) if most_recent_time['most_recent_time'] is None: return 0 most_recent_time", "`map_tweetchunk`.`tz_country`, `map_treenode`.`word` \"\"\" # Group by country, chunk query = TweetChunk.objects.values('tz_country', 'node__word') #", "SELECT sub.tz_country, MAX(sub.combo) as maxcombo FROM ({subquery}) sub GROUP BY sub.tz_country ORDER BY", "== \"\": continue if depth > settings.MAX_DEPTH: break # This node is guaranteed", "batch_deleted) batch_deleted = cls.delete_orphans(batch_size=batch_size) total_deleted += batch_deleted if reset and settings.DEBUG: # Prevent", "parent, word, cache=None): \"\"\" Returns a tree node for the parent and word,", "future_orphans.update(parent=None) @classmethod def delete_orphans(cls, batch_size=10000): \"\"\"Delete a batch of orphans\"\"\" query = \"\"\"", "tz_country=country)) parent = node depth += 1 TweetChunk.objects.bulk_create(new_tweet_chunks) self.chunks_added += len(new_tweet_chunks) self.node_cache_size =", "there aren't any? if now['latest_start_time'] is None: return # Preserve some time prior", "(countrymax.tz_country = country_node_count.tz_country) WHERE ( country_node_count.count = countrymax.max_count AND country_node_count.tz_country != '' AND", "cleanup # procedure will delete it while we are working with it. 
#", "and the word # The word is substring(maxcombo, padding+2) because # it is", "else: logger.info(\"Skipping empty treenode cleanup on this frame\") # First delete old tweet", "be 0!)\", propagated) propagated = cls.propagate_orphanage() @classmethod def cleanup_orphans(cls, batch_size=10000, reset=False): logger.info(\"Deleting orphans...\")", "of %d\", batch_orphaned) batch_orphaned = cls.orphan_empty_nodes(batch_size=batch_size) total_orphaned += batch_orphaned if total_orphaned > 0:", "frames. 4. Implement calculate(tweets). This is where you do your work. At the", "node = models.ForeignKey(TreeNode, related_name='chunks') twitter_id = models.BigIntegerField(default=0) tweet_text = models.CharField(max_length=250, default=None, null=True) created_at", "= cls.propagate_orphanage() @classmethod def cleanup_orphans(cls, batch_size=10000, reset=False): logger.info(\"Deleting orphans...\") batch_deleted = cls.delete_orphans(batch_size=batch_size) total_deleted", "the chunks that refer to them. Return the top countries for those chunks", "finds the maximum number of tweets # with chunks following this node for", "things could potentially disappear while we're doing these operations return None @classmethod def", "Look at the children of this node. Look at the chunks that refer", "None and (parent, word) in cache: self.node_cache_hits += 1 return cache[(parent, word)], False", "for a brief period. node.created_at = timezone.now() node.save() except IntegrityError: # it was", "country_limit]) return cursor.fetchall() def get_most_popular_child_chunk_by_country(self, country_limit=10): \"\"\" Get tuples of country, word, count", "`count` FROM ({maxquery}) sub2 \"\"\".format(maxquery=maxquery, padding=count_padding) print splitquery cursor = connection.cursor() cursor.execute(splitquery, [self.id,", "= query.annotate(count=models.Count('id')) print query.query # Limit try: result = query.first() return result['node__word'], result['count']", "return cursor.fetchall() @classmethod def get_root(cls): try: return cls.objects.get(id=1) except ObjectDoesNotExist: return None class", "we got one if cache is not None: cache[(parent, word)] = node if", "countries. superquery = \"\"\" SELECT country_node_count.tz_country, country_node_count.word, country_node_count.count FROM ({subquery}) country_node_count INNER JOIN", "get_stream_memory_cutoff(cls): \"\"\"Need to override this because we have tons of data that depends", "your work. At the end, make sure to call self.mark_done(tweets) 5. 
Add any", "[cls.ROOT_NODES, batch_size] cursor = connection.cursor() return cursor.execute(query, params) @classmethod def cleanup(cls, batch_size=10000, reset=False):", "# Preserve some time prior to that time frame trailing_edge_date = now['latest_start_time'] -", "= most_recent_time['most_recent_time'] time_cutoff = most_recent_time - settings.NODE_FREEZE_INTERVAL subset_query = \"\"\" SELECT map_treenode.id FROM", "UPDATE map_treenode JOIN ( {subset_query} ) subset ON map_treenode.id = subset.id SET map_treenode.parent_id", "count, desc query = query.order_by('-count') # Aggregate fields query = query.annotate(count=models.Count('id')) print query.query", "except ObjectDoesNotExist: return None class Tz_Country(models.Model): user_time_zone = models.CharField(max_length=32) country = models.CharField(max_length=32) class", "date\"\"\" cursor = connection.cursor() deleted = cursor.execute(\"DELETE FROM map_tweetchunk WHERE created_at <= %s", "the words following this node for every # country, and the number of", "combo field for each country # Since we've padded with 0s, alphabetic max", "models.BigIntegerField(default=0) tweet_text = models.CharField(max_length=250, default=None, null=True) created_at = models.DateTimeField(db_index=True) tz_country = models.CharField(max_length=32, blank=True)", "in the tweet, if it exists\"\"\" for root in roots: if root.word in", "and whether or not it is new. A dictionary can optionally be provided", "the tweet, if it exists\"\"\" for root in roots: if root.word in tweet.text:", "country-word-counts. \"\"\" # How much padding to add to counts for the concat/max/split", "django.db import models, connection, transaction, IntegrityError, DatabaseError from django.utils import timezone import random", "tweet tree\") prefix_node = root.get_child(prefix) if prefix_node is None: return None node =", "# with chunks following this node for every country # up to the", "word=chunk, cache=node_cache) country = user_tz_map.get(tweet.user_time_zone, None) if country is None: country = ''", "self.mark_done(tweets) 5. Add any additional functions related to your time frames that will", "get_top_chunk_countries_for_children(self, limit=10): \"\"\" Look at the children of this node. 
Look at the", "given country and node.\"\"\" try: chunks = cls.objects.filter(tz_country=country_name, node=node) count = chunks.count() chunk", "'node__word') # Only with the given country, non-empty words query = query.exclude(node__word='') query", "the max combo into the count and the word # The word is", "(plus 1 to allow for the empty country) maxquery = \"\"\" SELECT sub.tz_country,", "operations return None @classmethod def delete_before(cls, oldest_date, batch_size=10000): \"\"\"Delete a batch of chunks", "# Only with the given country, non-empty words # We'll do this later", "the empty country) maxquery = \"\"\" SELECT sub.tz_country, MAX(sub.count) AS max_count FROM ({subquery})", "orphans are also orphaned.\"\"\" future_orphans = cls.objects.filter(parent__parent=None).exclude(parent=None)\\ .exclude(pk__in=cls.ROOT_NODES) return future_orphans.update(parent=None) @classmethod def delete_orphans(cls,", "the following: SELECT `map_tweetchunk`.`tz_country`, `map_treenode`.`word`, COUNT(`map_tweetchunk`.`id`) AS `count` FROM `map_tweetchunk` INNER JOIN `map_treenode`", "return [r['tz_country'] for r in query] def get_most_popular_child_chunk_in(self, country): \"\"\" Get the chunk", "few minutes # It won't be deleted by cleanup before we create its", "# Aggregate fields query = query.annotate(count=models.Count('id')) print query.query # Limit try: result =", "cursor = connection.cursor() return cursor.execute(query, params) @classmethod def cleanup(cls, batch_size=10000, reset=False): cls.cleanup_empty(batch_size=batch_size, reset=reset)", "7, 8, 9, 10] parent = models.ForeignKey('self', null=True, blank=True, related_name='children') word = models.CharField(max_length=150)", "do this later instead # query = query.exclude(node__word='') # query = query.exclude(tz_country='') #", "(map_treenode.id IN %s) LIMIT %s \"\"\" query = \"\"\" UPDATE map_treenode JOIN (", "= TweetChunk.delete_before(trailing_edge_date, batch_size=batch_size) total_deleted = batch_deleted while batch_deleted == batch_size: logger.info(\" ... deleted", "def get_most_recent(cls, limit=20): \"\"\" A handy static method to get the <limit> most", "if not root: continue rh = tweet.text.split(root.word, 1)[1] rh = rh.lower() chunks =", "node: return None return node @classmethod def orphan_empty_nodes(cls, batch_size=10000): most_recent_time = cls.objects.aggregate(most_recent_time=models.Max('created_at')) if", "them. Return the top countries for those chunks by frequency. Returns a list.", "cleanup_orphans(cls, batch_size=10000, reset=False): logger.info(\"Deleting orphans...\") batch_deleted = cls.delete_orphans(batch_size=batch_size) total_deleted = batch_deleted while batch_deleted", "\"\"\" Look at the children of this node. Look at the chunks that", "Generates SQL like the following: SELECT `map_tweetchunk`.`tz_country`, `map_treenode`.`word`, COUNT(`map_tweetchunk`.`id`) AS `count` FROM `map_tweetchunk`", "risk that the cleanup # procedure will delete it while we are working", "\"\"\" A handy static method to get the <limit> most recent frames. \"\"\"", "reset=False): # Disconnect TreeNodes without any chunks logger.info(\"Orphaning empty tree nodes...\") batch_orphaned =", "chunk in query_chunks: node = node.get_child(chunk.lower()) if not node: return None return node", "with chunks following this node for every country # up to the limit", "None: cache[(parent, word)] = node if created: self.nodes_added += 1 return node, created", "the top result from each country, return the top 10 country-word-counts. 
\"\"\" #", "any chunks logger.info(\"Orphaning empty tree nodes...\") batch_orphaned = cls.orphan_empty_nodes(batch_size=batch_size) total_orphaned = batch_orphaned while", "top countries for those chunks by frequency. Returns a list. \"\"\" # Group", "connection.cursor() return cursor.execute(query, params) @classmethod def cleanup(cls, batch_size=10000, reset=False): cls.cleanup_empty(batch_size=batch_size, reset=reset) cls.cleanup_orphans(batch_size=batch_size, reset=reset)", "max maxquery = \"\"\" SELECT sub.tz_country, MAX(sub.combo) as maxcombo FROM ({subquery}) sub GROUP", "leaks # https://docs.djangoproject.com/en/dev/faq/models/#why-is-django-leaking-memory from django import db db.reset_queries() if total_deleted > 0: logger.info(\"Deleted", "word, CAST(SUBSTRING(sub2.maxcombo, 1, {padding}) AS UNSIGNED) AS `count` FROM ({maxquery}) sub2 \"\"\".format(maxquery=maxquery, padding=count_padding)", "NOT (id IN %s) ORDER BY id LIMIT %s \"\"\" params = [cls.ROOT_NODES,", "1)[1] rh = rh.lower() chunks = re.split('[*,.!:\"\\s;()/@#]+|\\'[\\W]|\\?+', rh) parent = root depth =", "\"\"\"Delete a batch of chunks before the given date\"\"\" cursor = connection.cursor() deleted", "> 0: logger.info(\"Deleted %d tweet chunks\", total_deleted) else: logger.info(\"No chunks to delete\") class", "combo into the count and the word # The word is substring(maxcombo, padding+2)", "country, find the word following this one that was most commonly used -", "country # Since we've padded with 0s, alphabetic max is the same as", "Limit try: result = query.first() return result['node__word'], result['count'] except ObjectDoesNotExist: return None def", "Analyze every 15 seconds DURATION = timedelta(seconds=60) # Simply store the total tweet", "def delete_orphans(cls, batch_size=10000): \"\"\"Delete a batch of orphans\"\"\" query = \"\"\" DELETE FROM", "Now split up the max combo into the count and the word #", "of data that depends on tweets.\"\"\" baseline_cutoff = super(TweetTimeFrame, cls).get_stream_memory_cutoff() # Get the", "tz_country query = TweetChunk.objects.values('tz_country') # Only non-empty tz_countries, words query = query.exclude(tz_country='') query", "%s \"\"\".format(subquery=subquery) # Now split up the max combo into the count and", "'0'), '-', map_treenode.word ) as combo FROM map_tweetchunk -- USE INDEX ({index_name}) LEFT", "because we have tons of data that depends on tweets.\"\"\" baseline_cutoff = super(TweetTimeFrame,", "the actual chunk for each country # that had the maximum count. #", "and settings.DEBUG: # Prevent apparent memory leaks # https://docs.djangoproject.com/en/dev/faq/models/#why-is-django-leaking-memory from django import db", "of country, word, count for the top 10 countries following this node. More", "of tweets # with chunks following this node for every country # up", "tweets.\"\"\" baseline_cutoff = super(TweetTimeFrame, cls).get_stream_memory_cutoff() # Get the earliest tweet referenced in any", "this later instead # query = query.exclude(node__word='') # query = query.exclude(tz_country='') # Only", "top 10 countries following this node. 
More specifically: - Look at all the", "models.CharField(max_length=32, blank=True) @classmethod def get_example_tweet(cls, country_name, node): \"\"\"Returns a Tweet for the given", "most_recent_time - settings.NODE_FREEZE_INTERVAL subset_query = \"\"\" SELECT map_treenode.id FROM map_treenode LEFT OUTER JOIN", "remove obsolete tree nodes TreeNode.cleanup_empty() else: logger.info(\"Skipping empty treenode cleanup on this frame\")", "word) in cache: self.node_cache_hits += 1 return cache[(parent, word)], False else: # We", "def get_empty_nodes(cls): \"\"\" Return a queryset for all the tree nodes that have", "db.reset_queries() if total_deleted > 0: logger.info(\"Deleted %d tweet chunks\", total_deleted) else: logger.info(\"No chunks", "models.ForeignKey('self', null=True, blank=True, related_name='children') word = models.CharField(max_length=150) created_at = models.DateTimeField(auto_now_add=True, default=None, null=True, blank=True)", "cleanup_empty(cls, batch_size=10000, reset=False): # Disconnect TreeNodes without any chunks logger.info(\"Orphaning empty tree nodes...\")", "%d tweet chunks\", total_deleted) else: logger.info(\"No chunks to delete\") class MapTimeFrame(TweetTimeFrame): \"\"\" A", "most_recent_time = most_recent_time['most_recent_time'] time_cutoff = most_recent_time - settings.NODE_FREEZE_INTERVAL subset_query = \"\"\" SELECT map_treenode.id", "SET map_treenode.parent_id = NULL \"\"\".format(subset_query=subset_query) params = [time_cutoff, cls.ROOT_NODES, batch_size] cursor = connection.cursor()", "countries for those chunks by frequency. Returns a list. \"\"\" # Group by", "any? if now['latest_start_time'] is None: return # Preserve some time prior to that", "frame for demo analysis. 1. Extend the BaseTimeFrame class. 2. Indicate how often", "= models.CharField(max_length=32, blank=True) @classmethod def get_example_tweet(cls, country_name, node): \"\"\"Returns a Tweet for the", "tweets): self.tweet_count = len(tweets) tzcountries = Tz_Country.objects.all() roots = TreeNode.objects.filter(parent=1) user_tz_map = dict((r.user_time_zone,", "result from each country, return the top 10 country-word-counts. 
\"\"\" # How much", "= timedelta(seconds=60) # Simply store the total tweet count in this time frame", "country = models.CharField(max_length=32) class TweetChunk(models.Model): class Meta: index_together = [ ['tz_country', 'node'], ]", "new_tweet_chunks = [] for tweet in tweets: root = self.check_prefix(tweet, roots) if not", "deleted batch of %d\", batch_deleted) batch_deleted = cls.delete_orphans(batch_size=batch_size) total_deleted += batch_deleted if reset", "parent = models.ForeignKey('self', null=True, blank=True, related_name='children') word = models.CharField(max_length=150) created_at = models.DateTimeField(auto_now_add=True, default=None,", "query_chunks: node = node.get_child(chunk.lower()) if not node: return None return node @classmethod def", "Prevent apparent memory leaks # https://docs.djangoproject.com/en/dev/faq/models/#why-is-django-leaking-memory from django import db db.reset_queries() if total_deleted", "cls.propagate_orphanage() @classmethod def cleanup_orphans(cls, batch_size=10000, reset=False): logger.info(\"Deleting orphans...\") batch_deleted = cls.delete_orphans(batch_size=batch_size) total_deleted =", "GROUP BY sub.tz_country ORDER BY maxcombo DESC LIMIT %s \"\"\".format(subquery=subquery) # Now split", "word, cache=None): \"\"\" Returns a tree node for the parent and word, and", "Get the earliest tweet referenced in any Tweet Chunk earliest_created_at = TweetChunk.get_earliest_created_at() if", "0 for chunk in chunks: if chunk == \"\": continue if depth >", "following this node with the most tweets in the given country. \"\"\" #", "orphan\") logger.info(\"Orphaning children of orphans... (should not be needed)\") propagated = cls.propagate_orphanage() while", "# it is 1-indexed and we added a '-' character in the middle.", "In every country, find the word following this one that was most commonly", "Extend the BaseTimeFrame class. 2. Indicate how often to run the analysis (same", "class MapTimeFrame(TweetTimeFrame): \"\"\" A basic time frame for demo analysis. 1. Extend the", "user_tz_map.get(tweet.user_time_zone, None) if country is None: country = '' else: country = country.country", "is None: country = '' else: country = country.country new_tweet_chunks.append(TweetChunk( node=node, twitter_id=tweet.tweet_id, tweet_text=tweet.text,", "country): \"\"\" Get the chunk following this node with the most tweets in", "sub.tz_country ORDER BY maxcombo DESC LIMIT %s \"\"\".format(subquery=subquery) # Now split up the", "total_deleted > 0: logger.info(\"Deleted %d orphans\", total_deleted) else: logger.info(\"No orphans to delete\") def", "delete it while we are working with it. 
# Setting created_at makes it", "in roots: if root.word in tweet.text: return root return None def get_tree_node(self, parent,", "HERE ) GROUP BY `map_tweetchunk`.`tz_country`, `map_treenode`.`word` \"\"\" # Group by country, chunk query", "self.tweet_count = len(tweets) tzcountries = Tz_Country.objects.all() roots = TreeNode.objects.filter(parent=1) user_tz_map = dict((r.user_time_zone, r)", "db.reset_queries() if total_deleted > 0: logger.info(\"Deleted %d orphans\", total_deleted) else: logger.info(\"No orphans to", "This query finds the maximum number of tweets # with chunks following this", "created_at in any tweet chunk.\"\"\" results = cls.objects.aggregate(earliest_created_at=models.Min('created_at')) return results['earliest_created_at'] @classmethod def cleanup(cls,", "return None return node @classmethod def orphan_empty_nodes(cls, batch_size=10000): most_recent_time = cls.objects.aggregate(most_recent_time=models.Max('created_at')) if most_recent_time['most_recent_time']", "filters out empty words and countries. superquery = \"\"\" SELECT country_node_count.tz_country, country_node_count.word, country_node_count.count", "for chunk in chunks: if chunk == \"\": continue if depth > settings.MAX_DEPTH:", "django import db db.reset_queries() if total_deleted > 0: logger.info(\"Deleted %d tweet chunks\", total_deleted)", "1:1 with time frames. 4. Implement calculate(tweets). This is where you do your", "FROM map_treenode WHERE (parent_id IS NULL) AND NOT (id IN %s) ORDER BY", "batch_size: logger.info(\" ... deleted batch of %d\", batch_deleted) batch_deleted = TweetChunk.delete_before(trailing_edge_date, batch_size=batch_size) total_deleted", "None def get_subquery(self): \"\"\" Generates SQL like the following: SELECT `map_tweetchunk`.`tz_country`, `map_treenode`.`word`, COUNT(`map_tweetchunk`.`id`)", "overall query # It finds the actual chunk for each country # that", "tons of data that depends on tweets.\"\"\" baseline_cutoff = super(TweetTimeFrame, cls).get_stream_memory_cutoff() # Get", "import TweetTimeFrame import logging import re logger = logging.getLogger('map') class TreeNode(models.Model): class Meta:", "class. 2. Indicate how often to run the analysis (same as the time", "root = self.check_prefix(tweet, roots) if not root: continue rh = tweet.text.split(root.word, 1)[1] rh", "from datetime import timedelta import settings from twitter_feels.libs.twitter_analysis import TweetTimeFrame import logging import", "country_node_count.word, country_node_count.count FROM ({subquery}) country_node_count INNER JOIN ({maxquery}) countrymax ON (countrymax.tz_country = country_node_count.tz_country)", "connection.cursor() return cursor.execute(query, params) @classmethod def propagate_orphanage(cls): \"\"\"Makes sure that all children of", "return None @classmethod def get_empty_nodes(cls): \"\"\" Return a queryset for all the tree", "A handy static method to get the <limit> most recent frames. \"\"\" query", "LIMIT %s \"\"\".format(subquery=subquery) # Now split up the max combo into the count", "3. Add any fields you need to calculate. 
You can also store data", "import settings from twitter_feels.libs.twitter_analysis import TweetTimeFrame import logging import re logger = logging.getLogger('map')", "reset=reset) cls.cleanup_orphans(batch_size=batch_size, reset=reset) @classmethod def cleanup_empty(cls, batch_size=10000, reset=False): # Disconnect TreeNodes without any", "non-empty words # We'll do this later instead # query = query.exclude(node__word='') #", "return chunk.tweet_text except DatabaseError: # things could potentially disappear while we're doing these", "node, created = TreeNode.objects.get_or_create(parent=parent, word=word) if not created: # If it is an", "country is None: country = '' else: country = country.country new_tweet_chunks.append(TweetChunk( node=node, twitter_id=tweet.tweet_id,", "each country # that had the maximum count. # Further, filters out empty", "the analysis (same as the time frame duration) 3. Add any fields you", "self.get_subquery() # This query finds the maximum number of tweets # with chunks", "batch_size]) return deleted @classmethod def get_earliest_created_at(cls): \"\"\"Get the earliest created_at in any tweet", "1 return cache[(parent, word)], False else: # We want to keep trying to", "%s) ORDER BY id LIMIT %s \"\"\" params = [cls.ROOT_NODES, batch_size] cursor =", "from each country, return the top 10 country-word-counts. \"\"\" # How much padding", "NULL \"\"\".format(subset_query=subset_query) params = [time_cutoff, cls.ROOT_NODES, batch_size] cursor = connection.cursor() return cursor.execute(query, params)", "given date\"\"\" cursor = connection.cursor() deleted = cursor.execute(\"DELETE FROM map_tweetchunk WHERE created_at <=", ") WHERE map_treenode.parent_id = %s AND map_tweetchunk.tz_country != '' AND map_treenode.word != ''", "batch_size=10000): \"\"\"Delete a batch of orphans\"\"\" query = \"\"\" DELETE FROM map_treenode WHERE", "( map_tweetchunk.node_id = map_treenode.id ) WHERE map_treenode.parent_id = %s AND map_tweetchunk.tz_country != ''", "0: logger.info(\" ...orphaned %d new nodes (should be 0!)\", propagated) propagated = cls.propagate_orphanage()", "until we get one while True: try: with transaction.atomic(): # Get or create", "[ ['parent', 'word'], ['created_at', 'parent'], ] ROOT_NODES = [1, 2, 3, 4, 5,", "= 0 for chunk in chunks: if chunk == \"\": continue if depth", "= models.ForeignKey('self', null=True, blank=True, related_name='children') word = models.CharField(max_length=150) created_at = models.DateTimeField(auto_now_add=True, default=None, null=True,", "top result from each country, return the top 10 country-word-counts. \"\"\" # How", "is valid country_limit = int(country_limit) subquery = self.get_subquery() # This query finds the", "the overall query # It finds the actual chunk for each country #", "query = query.annotate(count=models.Count('id')) return query def get_most_popular_child_chunk_by_country2(self, country_limit=10): \"\"\" Get tuples of country,", "raise Exception(\"No root node in tweet tree\") prefix_node = root.get_child(prefix) if prefix_node is", "do your work. At the end, make sure to call self.mark_done(tweets) 5. Add", "the word following this one that was most commonly used - For the", "how often to run the analysis (same as the time frame duration) 3.", "\"\"\"Delete a batch of orphans\"\"\" query = \"\"\" DELETE FROM map_treenode WHERE (parent_id", "... 
deleted batch of %d\", batch_deleted) batch_deleted = cls.delete_orphans(batch_size=batch_size) total_deleted += batch_deleted if", "4, 5, 6, 7, 8, 9, 10] parent = models.ForeignKey('self', null=True, blank=True, related_name='children')", "one if cache is not None: cache[(parent, word)] = node if created: self.nodes_added", "most tweets in the given country. \"\"\" # Group by chunk query =", "= int(country_limit) subquery = self.get_subquery() # This query finds the maximum number of", "class Meta: index_together = [ ['tz_country', 'node'], ] id = PositiveBigAutoField(primary_key=True) node =", "= TreeNode.objects.filter(parent=1) user_tz_map = dict((r.user_time_zone, r) for r in tzcountries) user_tz_map[None] = None", "= connection.cursor() return cursor.execute(query, params) @classmethod def propagate_orphanage(cls): \"\"\"Makes sure that all children", "def cleanup_empty(cls, batch_size=10000, reset=False): # Disconnect TreeNodes without any chunks logger.info(\"Orphaning empty tree", "add to counts for the concat/max/split trick count_padding = 10 # Get the", "# We want to keep trying to grab this node until we get", "= models.IntegerField(default=0) def check_prefix(self, tweet, roots): \"\"\"Returns a root in the tweet, if", "the <limit> most recent frames. \"\"\" query = cls.get_in_range(calculated=True) \\ .order_by('-start_time') return query[:limit]", "# It finds the actual chunk for each country # that had the", "Simply store the total tweet count in this time frame tweet_count = models.IntegerField(default=0)", "class TweetChunk(models.Model): class Meta: index_together = [ ['tz_country', 'node'], ] id = PositiveBigAutoField(primary_key=True)", "maxquery = \"\"\" SELECT sub.tz_country, MAX(sub.count) AS max_count FROM ({subquery}) sub GROUP BY", "\"\"\"Returns a Tweet for the given country and node.\"\"\" try: chunks = cls.objects.filter(tz_country=country_name,", "query = query.filter(node__parent=self) # Order by count query = query.order_by('-chunk_count') # Aggregate fields", "# query = query.exclude(node__word='') # query = query.exclude(tz_country='') # Only chunks belonging to", "maxquery = \"\"\" SELECT sub.tz_country, MAX(sub.combo) as maxcombo FROM ({subquery}) sub GROUP BY", "map_treenode ON ( map_tweetchunk.node_id = map_treenode.id ) WHERE map_treenode.parent_id = %s AND map_tweetchunk.tz_country", "batch_deleted = TweetChunk.delete_before(trailing_edge_date, batch_size=batch_size) total_deleted += batch_deleted if reset and settings.DEBUG: # Prevent", "across calls. \"\"\" if cache is not None and (parent, word) in cache:", "if now['latest_start_time'] is None: return # Preserve some time prior to that time", "tz_country ORDER BY max_count DESC LIMIT {limit} \"\"\".format(subquery=subquery.query, limit=country_limit + 1) # Template", "to work with for a few minutes # It won't be deleted by", "INNER JOIN `map_treenode` ON (`map_tweetchunk`.`node_id` = `map_treenode`.`id`) WHERE ( AND `map_treenode`.`parent_id` = MY_ID_GOES", "baseline_cutoff) else: return baseline_cutoff @classmethod def get_most_recent(cls, limit=20): \"\"\" A handy static method", "make them easier to work with. 
\"\"\" # Analyze every 15 seconds DURATION", "re logger = logging.getLogger('map') class TreeNode(models.Model): class Meta: index_together = [ ['parent', 'word'],", "'' AND country_node_count.word != '' ) ORDER BY country_node_count.count DESC LIMIT {limit} \"\"\".format(subquery=subquery.query,", "= models.CharField(max_length=32) country = models.CharField(max_length=32) class TweetChunk(models.Model): class Meta: index_together = [ ['tz_country',", "[ ['tz_country', 'node'], ] id = PositiveBigAutoField(primary_key=True) node = models.ForeignKey(TreeNode, related_name='chunks') twitter_id =", "chunk.\"\"\" results = cls.objects.aggregate(earliest_created_at=models.Min('created_at')) return results['earliest_created_at'] @classmethod def cleanup(cls, batch_size=10000, reset=False): # Get", "query.exclude(node__word='') query = query.filter(tz_country=country) # Only chunks belonging to our children query =", "node.created_at = timezone.now() node.save() except IntegrityError: # it was deleted while we were", "orphans... (should not be needed)\") propagated = cls.propagate_orphanage() while propagated > 0: logger.info(\"", "tweet_text = models.CharField(max_length=250, default=None, null=True) created_at = models.DateTimeField(db_index=True) tz_country = models.CharField(max_length=32, blank=True) @classmethod", "query.annotate(chunk_count=models.Count('id')) # Limit query = query[:limit] print query.query return [r['tz_country'] for r in", "created: # If it is an old node, there is a risk that", "and chunks.\"\"\" root = cls.get_root() if not root: raise Exception(\"No root node in", "import random from south.db.generic import DatabaseOperations from twitter_stream.fields import PositiveBigAutoField, PositiveBigAutoForeignKey from swapper", "MapTimeFrame(TweetTimeFrame): \"\"\" A basic time frame for demo analysis. 1. Extend the BaseTimeFrame", "a queryset for all the tree nodes that have no associated TweetChunks \"\"\"", "= MapTimeFrame.objects \\ .filter(calculated=True) \\ .aggregate(latest_start_time=models.Max('start_time')) # maybe there aren't any? 
if now['latest_start_time']", "one that was most commonly used - For the top result from each", "belonging to our children query = query.filter(node__parent=self) # Order by count, desc query", "query.first() return result['node__word'], result['count'] except ObjectDoesNotExist: return None def get_subquery(self): \"\"\" Generates SQL", "our children query = query.filter(node__parent=self) # Order by count query = query.order_by('-chunk_count') #", "child = list(self.children.filter(word=word)[:1]) if child: return child[0] return None @classmethod def get_empty_nodes(cls): \"\"\"", "this frame\") # First delete old tweet chunks TweetChunk.cleanup() TreeNode.cleanup_orphans() @classmethod def get_stream_memory_cutoff(cls):", "if not node: return None return node @classmethod def orphan_empty_nodes(cls, batch_size=10000): most_recent_time =", "batch_size=10000, reset=False): # Get the most recently finished map time frame now =", "map_treenode.parent_id = NULL \"\"\".format(subset_query=subset_query) params = [time_cutoff, cls.ROOT_NODES, batch_size] cursor = connection.cursor() return", "query = TweetChunk.objects.values('tz_country') # Only non-empty tz_countries, words query = query.exclude(tz_country='') query =", "batch of %d\", batch_deleted) batch_deleted = TweetChunk.delete_before(trailing_edge_date, batch_size=batch_size) total_deleted += batch_deleted if reset", "# Order by count query = query.order_by('-chunk_count') # Aggregate fields query = query.annotate(chunk_count=models.Count('id'))", "= root depth = 0 for chunk in chunks: if chunk == \"\":", "tz_countries, words query = query.exclude(tz_country='') query = query.exclude(node__word='') # Only chunks belonging to", "node): \"\"\"Returns a Tweet for the given country and node.\"\"\" try: chunks =", "= cls.objects.filter(parent__parent=None).exclude(parent=None)\\ .exclude(pk__in=cls.ROOT_NODES) return future_orphans.update(parent=None) @classmethod def delete_orphans(cls, batch_size=10000): \"\"\"Delete a batch of", "map_treenode.parent_id = %s AND map_tweetchunk.tz_country != '' AND map_treenode.word != '' GROUP BY", "# things could potentially disappear while we're doing these operations return None @classmethod", "by country, chunk query = TweetChunk.objects.values('tz_country', 'node__word') # Only with the given country,", "is None: return 0 most_recent_time = most_recent_time['most_recent_time'] time_cutoff = most_recent_time - settings.NODE_FREEZE_INTERVAL subset_query", "tweets: root = self.check_prefix(tweet, roots) if not root: continue rh = tweet.text.split(root.word, 1)[1]", "no associated TweetChunks \"\"\" return cls.objects.filter(chunks=None).exclude(pk__in=cls.ROOT_NODES) @classmethod def follow_chunks(cls, prefix, query_chunks): \"\"\"Returns the", "= root.get_child(prefix) if prefix_node is None: return None node = prefix_node for chunk", "query_chunks): \"\"\"Returns the node referenced by the given prefix and chunks.\"\"\" root =", "# Get or create a node with parent and word node, created =", "= query.exclude(tz_country='') query = query.exclude(node__word='') # Only chunks belonging to our children query", "alphabetic max is the same as numeric max maxquery = \"\"\" SELECT sub.tz_country,", "connection.cursor() deleted = cursor.execute(\"DELETE FROM map_tweetchunk WHERE created_at <= %s ORDER BY id", "root: continue rh = tweet.text.split(root.word, 1)[1] rh = rh.lower() chunks = re.split('[*,.!:\"\\s;()/@#]+|\\'[\\W]|\\?+', rh)", "= TweetChunk.get_earliest_created_at() if earliest_created_at is not 
None: return min(earliest_created_at, baseline_cutoff) else: return baseline_cutoff", "node = node.get_child(chunk.lower()) if not node: return None return node @classmethod def orphan_empty_nodes(cls,", "\"\"\".format(subquery=subquery) # Now split up the max combo into the count and the", "try: return cls.objects.get(id=1) except ObjectDoesNotExist: return None class Tz_Country(models.Model): user_time_zone = models.CharField(max_length=32) country", "+= len(new_tweet_chunks) self.node_cache_size = len(node_cache) return tweets def cleanup(self): if self.id % 3", "max is the same as numeric max maxquery = \"\"\" SELECT sub.tz_country, MAX(sub.combo)", "def get_most_popular_child_chunk_by_country2(self, country_limit=10): \"\"\" Get tuples of country, word, count for the top", "null=True) created_at = models.DateTimeField(db_index=True) tz_country = models.CharField(max_length=32, blank=True) @classmethod def get_example_tweet(cls, country_name, node):", "Group by chunk query = TweetChunk.objects.values('node', 'node__word') # Only with the given country,", "if earliest_created_at is not None: return min(earliest_created_at, baseline_cutoff) else: return baseline_cutoff @classmethod def", "import db db.reset_queries() if total_deleted > 0: logger.info(\"Deleted %d orphans\", total_deleted) else: logger.info(\"No", "1. Extend the BaseTimeFrame class. 2. Indicate how often to run the analysis", "your data is not strictly 1:1 with time frames. 4. Implement calculate(tweets). This", "country_node_count.tz_country != '' AND country_node_count.word != '' ) ORDER BY country_node_count.count DESC LIMIT", "def get_subquery(self): \"\"\" Generates SQL like the following: SELECT `map_tweetchunk`.`tz_country`, `map_treenode`.`word`, COUNT(`map_tweetchunk`.`id`) AS", "for caching values across calls. \"\"\" if cache is not None and (parent,", ".exclude(pk__in=cls.ROOT_NODES) return future_orphans.update(parent=None) @classmethod def delete_orphans(cls, batch_size=10000): \"\"\"Delete a batch of orphans\"\"\" query", "# Setting created_at makes it impossible for it to be deleted for a", "working with it. 
# Setting created_at makes it impossible for it to be", "node depth += 1 TweetChunk.objects.bulk_create(new_tweet_chunks) self.chunks_added += len(new_tweet_chunks) self.node_cache_size = len(node_cache) return tweets", "rh = tweet.text.split(root.word, 1)[1] rh = rh.lower() chunks = re.split('[*,.!:\"\\s;()/@#]+|\\'[\\W]|\\?+', rh) parent =", "return # Preserve some time prior to that time frame trailing_edge_date = now['latest_start_time']", "'word'], ['created_at', 'parent'], ] ROOT_NODES = [1, 2, 3, 4, 5, 6, 7,", "AND map_treenode.word != '' GROUP BY map_tweetchunk.tz_country, map_tweetchunk.node_id \"\"\".format(padding=count_padding, index_name=index_name) # Now select", "on this frame\") # First delete old tweet chunks TweetChunk.cleanup() TreeNode.cleanup_orphans() @classmethod def", "countrymax.max_count AND country_node_count.tz_country != '' AND country_node_count.word != '' ) ORDER BY country_node_count.count", "= self.get_subquery() # This query finds the maximum number of tweets # with", "the chunk following this node with the most tweets in the given country.", "WHERE (map_tweetchunk.id IS NULL) AND (map_treenode.parent_id IS NOT NULL) AND (map_treenode.created_at < %s)", "def get_most_popular_child_chunk_in(self, country): \"\"\" Get the chunk following this node with the most", "= super(TweetTimeFrame, cls).get_stream_memory_cutoff() # Get the earliest tweet referenced in any Tweet Chunk", "15 seconds DURATION = timedelta(seconds=60) # Simply store the total tweet count in", "tweet.text: return root return None def get_tree_node(self, parent, word, cache=None): \"\"\" Returns a", "None return node @classmethod def orphan_empty_nodes(cls, batch_size=10000): most_recent_time = cls.objects.aggregate(most_recent_time=models.Max('created_at')) if most_recent_time['most_recent_time'] is", "chunks belonging to our children query = query.filter(node__parent=self) # Order by count, desc", "parent and word node, created = TreeNode.objects.get_or_create(parent=parent, word=word) if not created: # If", "stupid index from South db = DatabaseOperations(None) index_name = db.create_index_name('map_tweetchunk', ['tz_country', 'node_id']) #", "10] parent = models.ForeignKey('self', null=True, blank=True, related_name='children') word = models.CharField(max_length=150) created_at = models.DateTimeField(auto_now_add=True,", "TreeNode.objects.get_or_create(parent=parent, word=word) if not created: # If it is an old node, there", "them easier to work with. \"\"\" # Analyze every 15 seconds DURATION =", "self.node_cache_size = len(node_cache) return tweets def cleanup(self): if self.id % 3 == 0:", "DESC LIMIT %s \"\"\".format(subquery=subquery) # Now split up the max combo into the", "country, and the number of tweets with that word. 
# Concatenate the tweet", "AND NOT (map_treenode.id IN %s) LIMIT %s \"\"\" query = \"\"\" UPDATE map_treenode", "country, chunk query = TweetChunk.objects.values('tz_country', 'node__word') # Only with the given country, non-empty", "def cleanup_orphans(cls, batch_size=10000, reset=False): logger.info(\"Deleting orphans...\") batch_deleted = cls.delete_orphans(batch_size=batch_size) total_deleted = batch_deleted while", "except DatabaseError: # things could potentially disappear while we're doing these operations return", "on tweets.\"\"\" baseline_cutoff = super(TweetTimeFrame, cls).get_stream_memory_cutoff() # Get the earliest tweet referenced in", "\"\"\" return cls.objects.filter(chunks=None).exclude(pk__in=cls.ROOT_NODES) @classmethod def follow_chunks(cls, prefix, query_chunks): \"\"\"Returns the node referenced by", "timedelta import settings from twitter_feels.libs.twitter_analysis import TweetTimeFrame import logging import re logger =", "FROM ({subquery}) sub GROUP BY tz_country ORDER BY max_count DESC LIMIT {limit} \"\"\".format(subquery=subquery.query,", "count for the top 10 countries following this node. More specifically: - Look", "ON (countrymax.tz_country = country_node_count.tz_country) WHERE ( country_node_count.count = countrymax.max_count AND country_node_count.tz_country != ''", "# up to the limit (plus 1 to allow for the empty country)", "or create a node with parent and word node, created = TreeNode.objects.get_or_create(parent=parent, word=word)", "logger.info(\"Orphaning empty tree nodes...\") batch_orphaned = cls.orphan_empty_nodes(batch_size=batch_size) total_orphaned = batch_orphaned while batch_orphaned ==", "procedure will delete it while we are working with it. # Setting created_at", "get_most_recent(cls, limit=20): \"\"\" A handy static method to get the <limit> most recent", "work. At the end, make sure to call self.mark_done(tweets) 5. Add any additional", "a batch of orphans\"\"\" query = \"\"\" DELETE FROM map_treenode WHERE (parent_id IS", "maxcombo DESC LIMIT %s \"\"\".format(subquery=subquery) # Now split up the max combo into", "call self.mark_done(tweets) 5. Add any additional functions related to your time frames that", "in any Tweet Chunk earliest_created_at = TweetChunk.get_earliest_created_at() if earliest_created_at is not None: return", "Only chunks belonging to our children query = query.filter(node__parent=self) # Order by count,", "SELECT sub.tz_country, MAX(sub.count) AS max_count FROM ({subquery}) sub GROUP BY tz_country ORDER BY", "country = '' else: country = country.country new_tweet_chunks.append(TweetChunk( node=node, twitter_id=tweet.tweet_id, tweet_text=tweet.text, created_at=tweet.created_at, tz_country=country))", "sub.tz_country, MAX(sub.combo) as maxcombo FROM ({subquery}) sub GROUP BY sub.tz_country ORDER BY maxcombo", "deleted batch of %d\", batch_deleted) batch_deleted = TweetChunk.delete_before(trailing_edge_date, batch_size=batch_size) total_deleted += batch_deleted if", "word, and whether or not it is new. 
A dictionary can optionally be", "datetime import timedelta import settings from twitter_feels.libs.twitter_analysis import TweetTimeFrame import logging import re", "tweet referenced in any Tweet Chunk earliest_created_at = TweetChunk.get_earliest_created_at() if earliest_created_at is not", "FROM `map_tweetchunk` INNER JOIN `map_treenode` ON (`map_tweetchunk`.`node_id` = `map_treenode`.`id`) WHERE ( AND `map_treenode`.`parent_id`", "cursor.execute(query, params) @classmethod def propagate_orphanage(cls): \"\"\"Makes sure that all children of current orphans", "country # up to the limit (plus 1 to allow for the empty", "total_deleted += batch_deleted if reset and settings.DEBUG: # Prevent apparent memory leaks #", "following this node. More specifically: - Look at all the words that followed", "a risk that the cleanup # procedure will delete it while we are", "with the given country, non-empty words # We'll do this later instead #", "the maximum count. # Further, filters out empty words and countries. superquery =", "def follow_chunks(cls, prefix, query_chunks): \"\"\"Returns the node referenced by the given prefix and", "def cleanup(self): if self.id % 3 == 0: # Then remove obsolete tree", "\"\"\" SELECT map_treenode.id FROM map_treenode LEFT OUTER JOIN map_tweetchunk ON ( map_treenode.id =", "return baseline_cutoff @classmethod def get_most_recent(cls, limit=20): \"\"\" A handy static method to get", ") GROUP BY `map_tweetchunk`.`tz_country`, `map_treenode`.`word` \"\"\" # Group by country, chunk query =", "# query = query.exclude(tz_country='') # Only chunks belonging to our children query =", "if most_recent_time['most_recent_time'] is None: return 0 most_recent_time = most_recent_time['most_recent_time'] time_cutoff = most_recent_time -", "index_name = db.create_index_name('map_tweetchunk', ['tz_country', 'node_id']) # Find the words following this node for", "continue rh = tweet.text.split(root.word, 1)[1] rh = rh.lower() chunks = re.split('[*,.!:\"\\s;()/@#]+|\\'[\\W]|\\?+', rh) parent", "= connection.cursor() deleted = cursor.execute(\"DELETE FROM map_tweetchunk WHERE created_at <= %s ORDER BY", "= query.annotate(count=models.Count('id')) return query def get_most_popular_child_chunk_by_country2(self, country_limit=10): \"\"\" Get tuples of country, word,", "to the word subquery = \"\"\" SELECT map_tweetchunk.tz_country, CONCAT( LPAD(COUNT(DISTINCT map_tweetchunk.id), {padding}, '0'),", "django.core.exceptions import ObjectDoesNotExist from django.db import models, connection, transaction, IntegrityError, DatabaseError from django.utils", "batch_deleted = cls.delete_orphans(batch_size=batch_size) total_deleted = batch_deleted while batch_deleted == batch_size: logger.info(\" ... 
deleted", "models.IntegerField(default=0) chunks_added = models.IntegerField(default=0) node_cache_hits = models.IntegerField(default=0) node_cache_size = models.IntegerField(default=0) def check_prefix(self, tweet,", "not created: # If it is an old node, there is a risk", "every 15 seconds DURATION = timedelta(seconds=60) # Simply store the total tweet count", "models.IntegerField(default=0) def check_prefix(self, tweet, roots): \"\"\"Returns a root in the tweet, if it", "baseline_cutoff = super(TweetTimeFrame, cls).get_stream_memory_cutoff() # Get the earliest tweet referenced in any Tweet", "delete_before(cls, oldest_date, batch_size=10000): \"\"\"Delete a batch of chunks before the given date\"\"\" cursor", "that was most commonly used - For the top result from each country,", "cursor.execute(superquery) return cursor.fetchall() @classmethod def get_root(cls): try: return cls.objects.get(id=1) except ObjectDoesNotExist: return None", "map_tweetchunk ON ( map_treenode.id = map_tweetchunk.node_id ) WHERE (map_tweetchunk.id IS NULL) AND (map_treenode.parent_id", "node_cache_size = models.IntegerField(default=0) def check_prefix(self, tweet, roots): \"\"\"Returns a root in the tweet,", "actual chunk for each country # that had the maximum count. # Further,", "propagate_orphanage(cls): \"\"\"Makes sure that all children of current orphans are also orphaned.\"\"\" future_orphans", "BY maxcombo DESC LIMIT %s \"\"\".format(subquery=subquery) # Now split up the max combo", "-- USE INDEX ({index_name}) LEFT OUTER JOIN map_treenode ON ( map_tweetchunk.node_id = map_treenode.id", "TweetChunk. node, created = self.get_tree_node(parent=parent, word=chunk, cache=node_cache) country = user_tz_map.get(tweet.user_time_zone, None) if country", "is substring(maxcombo, padding+2) because # it is 1-indexed and we added a '-'", "this because we have tons of data that depends on tweets.\"\"\" baseline_cutoff =", "get_most_popular_child_chunk_by_country2(self, country_limit=10): \"\"\" Get tuples of country, word, count for the top 10", "LIMIT %s \"\"\" query = \"\"\" UPDATE map_treenode JOIN ( {subset_query} ) subset", "of %d\", batch_deleted) batch_deleted = TweetChunk.delete_before(trailing_edge_date, batch_size=batch_size) total_deleted += batch_deleted if reset and", "want to keep trying to grab this node until we get one while", "if total_deleted > 0: logger.info(\"Deleted %d tweet chunks\", total_deleted) else: logger.info(\"No chunks to", "\"\"\".format(padding=count_padding, index_name=index_name) # Now select the max of the combo field for each", "# Further, filters out empty words and countries. superquery = \"\"\" SELECT country_node_count.tz_country,", "node, created = self.get_tree_node(parent=parent, word=chunk, cache=node_cache) country = user_tz_map.get(tweet.user_time_zone, None) if country is", "safe to work with for a few minutes # It won't be deleted", "SELECT sub2.tz_country, SUBSTRING(sub2.maxcombo, {padding} + 2) AS word, CAST(SUBSTRING(sub2.maxcombo, 1, {padding}) AS UNSIGNED)", "'node__word') # Only with the given country, non-empty words # We'll do this", "all children of current orphans are also orphaned.\"\"\" future_orphans = cls.objects.filter(parent__parent=None).exclude(parent=None)\\ .exclude(pk__in=cls.ROOT_NODES) return", "analysis (same as the time frame duration) 3. 
Add any fields you need", "tree nodes...\") batch_orphaned = cls.orphan_empty_nodes(batch_size=batch_size) total_orphaned = batch_orphaned while batch_orphaned == batch_size: logger.info(\"", "twitter_id = models.BigIntegerField(default=0) tweet_text = models.CharField(max_length=250, default=None, null=True) created_at = models.DateTimeField(db_index=True) tz_country =", "+= 1 return cache[(parent, word)], False else: # We want to keep trying", "empty country) maxquery = \"\"\" SELECT sub.tz_country, MAX(sub.count) AS max_count FROM ({subquery}) sub", "deleted while we were getting it continue # we got one if cache", "%d empty nodes\", total_orphaned) else: logger.info(\"No empty nodes to orphan\") logger.info(\"Orphaning children of", "`count` FROM `map_tweetchunk` INNER JOIN `map_treenode` ON (`map_tweetchunk`.`node_id` = `map_treenode`.`id`) WHERE ( AND", "@classmethod def follow_chunks(cls, prefix, query_chunks): \"\"\"Returns the node referenced by the given prefix", "def get_top_chunk_countries_for_children(self, limit=10): \"\"\" Look at the children of this node. Look at", "return None class Tz_Country(models.Model): user_time_zone = models.CharField(max_length=32) country = models.CharField(max_length=32) class TweetChunk(models.Model): class", "these operations return None @classmethod def delete_before(cls, oldest_date, batch_size=10000): \"\"\"Delete a batch of", "Get the chunk following this node with the most tweets in the given", "orphaned batch of %d\", batch_orphaned) batch_orphaned = cls.orphan_empty_nodes(batch_size=batch_size) total_orphaned += batch_orphaned if total_orphaned", "= \"\"\" DELETE FROM map_treenode WHERE (parent_id IS NULL) AND NOT (id IN", "a node with parent and word node, created = TreeNode.objects.get_or_create(parent=parent, word=word) if not", "None: return min(earliest_created_at, baseline_cutoff) else: return baseline_cutoff @classmethod def get_most_recent(cls, limit=20): \"\"\" A", "= len(tweets) tzcountries = Tz_Country.objects.all() roots = TreeNode.objects.filter(parent=1) user_tz_map = dict((r.user_time_zone, r) for", "time_cutoff = most_recent_time - settings.NODE_FREEZE_INTERVAL subset_query = \"\"\" SELECT map_treenode.id FROM map_treenode LEFT", "with transaction.atomic(): # Get or create a node with parent and word node,", "maximum number of tweets # with chunks following this node for every country", "that refer to them. Return the top countries for those chunks by frequency.", "substring(maxcombo, padding+2) because # it is 1-indexed and we added a '-' character", "potentially disappear while we're doing these operations return None @classmethod def delete_before(cls, oldest_date,", "chunk following this node with the most tweets in the given country. \"\"\"", "the given country, non-empty words query = query.exclude(node__word='') query = query.filter(tz_country=country) # Only", "[time_cutoff, cls.ROOT_NODES, batch_size] cursor = connection.cursor() return cursor.execute(query, params) @classmethod def propagate_orphanage(cls): \"\"\"Makes", "BaseTimeFrame class. 2. Indicate how often to run the analysis (same as the", "query def get_most_popular_child_chunk_by_country2(self, country_limit=10): \"\"\" Get tuples of country, word, count for the", "for all the tree nodes that have no associated TweetChunks \"\"\" return cls.objects.filter(chunks=None).exclude(pk__in=cls.ROOT_NODES)", "its TweetChunk. 
node, created = self.get_tree_node(parent=parent, word=chunk, cache=node_cache) country = user_tz_map.get(tweet.user_time_zone, None) if", "prefix_node for chunk in query_chunks: node = node.get_child(chunk.lower()) if not node: return None", "query.filter(node__parent=self) # Order by count, desc query = query.order_by('-count') # Aggregate fields query", "for demo analysis. 1. Extend the BaseTimeFrame class. 2. Indicate how often to", "to override this because we have tons of data that depends on tweets.\"\"\"", "fields query = query.annotate(chunk_count=models.Count('id')) # Limit query = query[:limit] print query.query return [r['tz_country']", "same as numeric max maxquery = \"\"\" SELECT sub.tz_country, MAX(sub.combo) as maxcombo FROM", "params) @classmethod def cleanup(cls, batch_size=10000, reset=False): cls.cleanup_empty(batch_size=batch_size, reset=reset) cls.cleanup_orphans(batch_size=batch_size, reset=reset) @classmethod def cleanup_empty(cls,", "\"\"\" Return a queryset for all the tree nodes that have no associated", "blank=True) def get_child(self, word): child = list(self.children.filter(word=word)[:1]) if child: return child[0] return None", "nodes to orphan\") logger.info(\"Orphaning children of orphans... (should not be needed)\") propagated =", "{padding} + 2) AS word, CAST(SUBSTRING(sub2.maxcombo, 1, {padding}) AS UNSIGNED) AS `count` FROM", "max combo into the count and the word # The word is substring(maxcombo,", "chunk in chunks: if chunk == \"\": continue if depth > settings.MAX_DEPTH: break", "as maxcombo FROM ({subquery}) sub GROUP BY sub.tz_country ORDER BY maxcombo DESC LIMIT", "connection.cursor() cursor.execute(splitquery, [self.id, country_limit]) return cursor.fetchall() def get_most_popular_child_chunk_by_country(self, country_limit=10): \"\"\" Get tuples of", "deleted for a brief period. node.created_at = timezone.now() node.save() except IntegrityError: # it", "maxcombo FROM ({subquery}) sub GROUP BY sub.tz_country ORDER BY maxcombo DESC LIMIT %s", "in the middle. splitquery = \"\"\" SELECT sub2.tz_country, SUBSTRING(sub2.maxcombo, {padding} + 2) AS", "Chunk earliest_created_at = TweetChunk.get_earliest_created_at() if earliest_created_at is not None: return min(earliest_created_at, baseline_cutoff) else:", "the name of the stupid index from South db = DatabaseOperations(None) index_name =", "before the given date\"\"\" cursor = connection.cursor() deleted = cursor.execute(\"DELETE FROM map_tweetchunk WHERE", "= models.CharField(max_length=32) class TweetChunk(models.Model): class Meta: index_together = [ ['tz_country', 'node'], ] id", "= `map_treenode`.`id`) WHERE ( AND `map_treenode`.`parent_id` = MY_ID_GOES HERE ) GROUP BY `map_tweetchunk`.`tz_country`,", "to call self.mark_done(tweets) 5. 
Add any additional functions related to your time frames", "def get_example_tweet(cls, country_name, node): \"\"\"Returns a Tweet for the given country and node.\"\"\"", "padding=count_padding) print splitquery cursor = connection.cursor() cursor.execute(splitquery, [self.id, country_limit]) return cursor.fetchall() def get_most_popular_child_chunk_by_country(self,", "propagated = cls.propagate_orphanage() @classmethod def cleanup_orphans(cls, batch_size=10000, reset=False): logger.info(\"Deleting orphans...\") batch_deleted = cls.delete_orphans(batch_size=batch_size)", "= models.CharField(max_length=250, default=None, null=True) created_at = models.DateTimeField(db_index=True) tz_country = models.CharField(max_length=32, blank=True) @classmethod def", "from South db = DatabaseOperations(None) index_name = db.create_index_name('map_tweetchunk', ['tz_country', 'node_id']) # Find the", "node if created: self.nodes_added += 1 return node, created def calculate(self, tweets): self.tweet_count", "root return None def get_tree_node(self, parent, word, cache=None): \"\"\" Returns a tree node", "query = TweetChunk.objects.values('node', 'node__word') # Only with the given country, non-empty words query", "= models.IntegerField(default=0) node_cache_hits = models.IntegerField(default=0) node_cache_size = models.IntegerField(default=0) def check_prefix(self, tweet, roots): \"\"\"Returns", "time frame for demo analysis. 1. Extend the BaseTimeFrame class. 2. Indicate how", "to add to counts for the concat/max/split trick count_padding = 10 # Get", "is a risk that the cleanup # procedure will delete it while we", "batch of %d\", batch_deleted) batch_deleted = cls.delete_orphans(batch_size=batch_size) total_deleted += batch_deleted if reset and", "\"\"\" Get tuples of country, word, count for the top 10 countries following", "BY tz_country ORDER BY max_count DESC LIMIT {limit} \"\"\".format(subquery=subquery.query, limit=country_limit + 1) #", "id = PositiveBigAutoField(primary_key=True) node = models.ForeignKey(TreeNode, related_name='chunks') twitter_id = models.BigIntegerField(default=0) tweet_text = models.CharField(max_length=250,", "batch_size=10000, reset=False): cls.cleanup_empty(batch_size=batch_size, reset=reset) cls.cleanup_orphans(batch_size=batch_size, reset=reset) @classmethod def cleanup_empty(cls, batch_size=10000, reset=False): # Disconnect", "for tweet in tweets: root = self.check_prefix(tweet, roots) if not root: continue rh", "be provided for caching values across calls. \"\"\" if cache is not None", "depends on tweets.\"\"\" baseline_cutoff = super(TweetTimeFrame, cls).get_stream_memory_cutoff() # Get the earliest tweet referenced", "OUTER JOIN map_treenode ON ( map_tweetchunk.node_id = map_treenode.id ) WHERE map_treenode.parent_id = %s", "get_root(cls): try: return cls.objects.get(id=1) except ObjectDoesNotExist: return None class Tz_Country(models.Model): user_time_zone = models.CharField(max_length=32)", "import timezone import random from south.db.generic import DatabaseOperations from twitter_stream.fields import PositiveBigAutoField, PositiveBigAutoForeignKey", "country) maxquery = \"\"\" SELECT sub.tz_country, MAX(sub.count) AS max_count FROM ({subquery}) sub GROUP", "every # country, and the number of tweets with that word. 
# Concatenate", "- Look at all the words that followed this one anywhere - In", "create a node with parent and word node, created = TreeNode.objects.get_or_create(parent=parent, word=word) if", "we've padded with 0s, alphabetic max is the same as numeric max maxquery", "'' GROUP BY map_tweetchunk.tz_country, map_tweetchunk.node_id \"\"\".format(padding=count_padding, index_name=index_name) # Now select the max of", "= \"\"\" UPDATE map_treenode JOIN ( {subset_query} ) subset ON map_treenode.id = subset.id", "1, {padding}) AS UNSIGNED) AS `count` FROM ({maxquery}) sub2 \"\"\".format(maxquery=maxquery, padding=count_padding) print splitquery", "models.ForeignKey(TreeNode, related_name='chunks') twitter_id = models.BigIntegerField(default=0) tweet_text = models.CharField(max_length=250, default=None, null=True) created_at = models.DateTimeField(db_index=True)", "parent = node depth += 1 TweetChunk.objects.bulk_create(new_tweet_chunks) self.chunks_added += len(new_tweet_chunks) self.node_cache_size = len(node_cache)", "allow for the empty country) maxquery = \"\"\" SELECT sub.tz_country, MAX(sub.count) AS max_count", "!= '' AND map_treenode.word != '' GROUP BY map_tweetchunk.tz_country, map_tweetchunk.node_id \"\"\".format(padding=count_padding, index_name=index_name) #", "orphan_empty_nodes(cls, batch_size=10000): most_recent_time = cls.objects.aggregate(most_recent_time=models.Max('created_at')) if most_recent_time['most_recent_time'] is None: return 0 most_recent_time =", "{limit} \"\"\".format(subquery=subquery.query, limit=country_limit + 1) # Template for the overall query # It", "@classmethod def cleanup(cls, batch_size=10000, reset=False): # Get the most recently finished map time", "Get tuples of country, word, count for the top 10 countries following this", "query.exclude(node__word='') # Only chunks belonging to our children query = query.filter(node__parent=self) # Order", "# This node is guaranteed safe to work with for a few minutes", "cls.delete_orphans(batch_size=batch_size) total_deleted += batch_deleted if reset and settings.DEBUG: # Prevent apparent memory leaks", "= Tz_Country.objects.all() roots = TreeNode.objects.filter(parent=1) user_tz_map = dict((r.user_time_zone, r) for r in tzcountries)", "work with for a few minutes # It won't be deleted by cleanup", "len(node_cache) return tweets def cleanup(self): if self.id % 3 == 0: # Then", "0!)\", propagated) propagated = cls.propagate_orphanage() @classmethod def cleanup_orphans(cls, batch_size=10000, reset=False): logger.info(\"Deleting orphans...\") batch_deleted", "padding+2) because # it is 1-indexed and we added a '-' character in", "superquery cursor = connection.cursor() cursor.execute(superquery) return cursor.fetchall() @classmethod def get_root(cls): try: return cls.objects.get(id=1)", "DELETE FROM map_treenode WHERE (parent_id IS NULL) AND NOT (id IN %s) ORDER", "Returns a list. 
\"\"\" # Group by tz_country query = TweetChunk.objects.values('tz_country') # Only", "trying to grab this node until we get one while True: try: with", "def get_earliest_created_at(cls): \"\"\"Get the earliest created_at in any tweet chunk.\"\"\" results = cls.objects.aggregate(earliest_created_at=models.Min('created_at'))", "from django import db db.reset_queries() if total_deleted > 0: logger.info(\"Deleted %d tweet chunks\",", "keep trying to grab this node until we get one while True: try:", "batch_size] cursor = connection.cursor() return cursor.execute(query, params) @classmethod def cleanup(cls, batch_size=10000, reset=False): cls.cleanup_empty(batch_size=batch_size,", "node with the most tweets in the given country. \"\"\" # Group by", "tweets def cleanup(self): if self.id % 3 == 0: # Then remove obsolete", "without any chunks logger.info(\"Orphaning empty tree nodes...\") batch_orphaned = cls.orphan_empty_nodes(batch_size=batch_size) total_orphaned = batch_orphaned", "ON (`map_tweetchunk`.`node_id` = `map_treenode`.`id`) WHERE ( AND `map_treenode`.`parent_id` = MY_ID_GOES HERE ) GROUP", "def propagate_orphanage(cls): \"\"\"Makes sure that all children of current orphans are also orphaned.\"\"\"", "BY id LIMIT %s \"\"\" params = [cls.ROOT_NODES, batch_size] cursor = connection.cursor() return", "'node'], ] id = PositiveBigAutoField(primary_key=True) node = models.ForeignKey(TreeNode, related_name='chunks') twitter_id = models.BigIntegerField(default=0) tweet_text", "query.exclude(tz_country='') query = query.exclude(node__word='') # Only chunks belonging to our children query =", "SELECT map_tweetchunk.tz_country, CONCAT( LPAD(COUNT(DISTINCT map_tweetchunk.id), {padding}, '0'), '-', map_treenode.word ) as combo FROM", "word following this one that was most commonly used - For the top", "@classmethod def delete_before(cls, oldest_date, batch_size=10000): \"\"\"Delete a batch of chunks before the given", "DatabaseOperations(None) index_name = db.create_index_name('map_tweetchunk', ['tz_country', 'node_id']) # Find the words following this node", "result from each country, return the top 10 country-word-counts. \"\"\" # Make sure", "map_tweetchunk.node_id ) WHERE (map_tweetchunk.id IS NULL) AND (map_treenode.parent_id IS NOT NULL) AND (map_treenode.created_at", "and countries. superquery = \"\"\" SELECT country_node_count.tz_country, country_node_count.word, country_node_count.count FROM ({subquery}) country_node_count INNER", "chunks: if chunk == \"\": continue if depth > settings.MAX_DEPTH: break # This", "ON ( map_treenode.id = map_tweetchunk.node_id ) WHERE (map_tweetchunk.id IS NULL) AND (map_treenode.parent_id IS", "get_most_popular_child_chunk_in(self, country): \"\"\" Get the chunk following this node with the most tweets", "following this node for every country # up to the limit (plus 1", "logger.info(\"Deleting orphans...\") batch_deleted = cls.delete_orphans(batch_size=batch_size) total_deleted = batch_deleted while batch_deleted == batch_size: logger.info(\"", "words query = query.exclude(tz_country='') query = query.exclude(node__word='') # Only chunks belonging to our", "roots: if root.word in tweet.text: return root return None def get_tree_node(self, parent, word,", "in tweet.text: return root return None def get_tree_node(self, parent, word, cache=None): \"\"\" Returns", "max of the combo field for each country # Since we've padded with", "time frame duration) 3. Add any fields you need to calculate. 
You can", "3, 4, 5, 6, 7, 8, 9, 10] parent = models.ForeignKey('self', null=True, blank=True,", "countries following this node. More specifically: - Look at all the words that", "had the maximum count. # Further, filters out empty words and countries. superquery", "Only chunks belonging to our children query = query.filter(node__parent=self) # Aggregate fields query", "map_tweetchunk.tz_country, map_tweetchunk.node_id \"\"\".format(padding=count_padding, index_name=index_name) # Now select the max of the combo field", "child[0] return None @classmethod def get_empty_nodes(cls): \"\"\" Return a queryset for all the", "chunk = chunks[random.randint(0, count - 1)] return chunk.tweet_text except DatabaseError: # things could", "map_tweetchunk -- USE INDEX ({index_name}) LEFT OUTER JOIN map_treenode ON ( map_tweetchunk.node_id =", "select the max of the combo field for each country # Since we've", "words following this node for every # country, and the number of tweets", "= models.CharField(max_length=150) created_at = models.DateTimeField(auto_now_add=True, default=None, null=True, blank=True) def get_child(self, word): child =", "Concatenate the tweet count to the word subquery = \"\"\" SELECT map_tweetchunk.tz_country, CONCAT(", "if it exists\"\"\" for root in roots: if root.word in tweet.text: return root", "country.country new_tweet_chunks.append(TweetChunk( node=node, twitter_id=tweet.tweet_id, tweet_text=tweet.text, created_at=tweet.created_at, tz_country=country)) parent = node depth += 1", "store the total tweet count in this time frame tweet_count = models.IntegerField(default=0) nodes_added", "= self.check_prefix(tweet, roots) if not root: continue rh = tweet.text.split(root.word, 1)[1] rh =", "= connection.cursor() cursor.execute(splitquery, [self.id, country_limit]) return cursor.fetchall() def get_most_popular_child_chunk_by_country(self, country_limit=10): \"\"\" Get tuples", "split up the max combo into the count and the word # The", "node, created def calculate(self, tweets): self.tweet_count = len(tweets) tzcountries = Tz_Country.objects.all() roots =", "to them. Return the top countries for those chunks by frequency. Returns a", "# We'll do this later instead # query = query.exclude(node__word='') # query =", "models.CharField(max_length=32) country = models.CharField(max_length=32) class TweetChunk(models.Model): class Meta: index_together = [ ['tz_country', 'node'],", "models.IntegerField(default=0) node_cache_size = models.IntegerField(default=0) def check_prefix(self, tweet, roots): \"\"\"Returns a root in the", "NULL) AND (map_treenode.created_at < %s) AND NOT (map_treenode.id IN %s) LIMIT %s \"\"\"", "node.get_child(chunk.lower()) if not node: return None return node @classmethod def orphan_empty_nodes(cls, batch_size=10000): most_recent_time", "this node until we get one while True: try: with transaction.atomic(): # Get", "given country, non-empty words # We'll do this later instead # query =", "None def get_tree_node(self, parent, word, cache=None): \"\"\" Returns a tree node for the", "we're doing these operations return None @classmethod def delete_before(cls, oldest_date, batch_size=10000): \"\"\"Delete a", "= connection.cursor() return cursor.execute(query, params) @classmethod def cleanup(cls, batch_size=10000, reset=False): cls.cleanup_empty(batch_size=batch_size, reset=reset) cls.cleanup_orphans(batch_size=batch_size,", "Returns a tree node for the parent and word, and whether or not", "list. 
\"\"\" # Group by tz_country query = TweetChunk.objects.values('tz_country') # Only non-empty tz_countries,", "def get_root(cls): try: return cls.objects.get(id=1) except ObjectDoesNotExist: return None class Tz_Country(models.Model): user_time_zone =", "that depends on tweets.\"\"\" baseline_cutoff = super(TweetTimeFrame, cls).get_stream_memory_cutoff() # Get the earliest tweet", "a '-' character in the middle. splitquery = \"\"\" SELECT sub2.tz_country, SUBSTRING(sub2.maxcombo, {padding}", "country_node_count.tz_country, country_node_count.word, country_node_count.count FROM ({subquery}) country_node_count INNER JOIN ({maxquery}) countrymax ON (countrymax.tz_country =", "there is a risk that the cleanup # procedure will delete it while", "'parent'], ] ROOT_NODES = [1, 2, 3, 4, 5, 6, 7, 8, 9,", "= '' else: country = country.country new_tweet_chunks.append(TweetChunk( node=node, twitter_id=tweet.tweet_id, tweet_text=tweet.text, created_at=tweet.created_at, tz_country=country)) parent", "a brief period. node.created_at = timezone.now() node.save() except IntegrityError: # it was deleted", "= [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] parent =", "batch_size=10000, reset=False): # Disconnect TreeNodes without any chunks logger.info(\"Orphaning empty tree nodes...\") batch_orphaned", "and the number of tweets with that word. # Concatenate the tweet count", "empty nodes to orphan\") logger.info(\"Orphaning children of orphans... (should not be needed)\") propagated", "apparent memory leaks # https://docs.djangoproject.com/en/dev/faq/models/#why-is-django-leaking-memory from django import db db.reset_queries() if total_deleted >", "( map_treenode.id = map_tweetchunk.node_id ) WHERE (map_tweetchunk.id IS NULL) AND (map_treenode.parent_id IS NOT", "return cursor.execute(query, params) @classmethod def cleanup(cls, batch_size=10000, reset=False): cls.cleanup_empty(batch_size=batch_size, reset=reset) cls.cleanup_orphans(batch_size=batch_size, reset=reset) @classmethod", "AND (map_treenode.parent_id IS NOT NULL) AND (map_treenode.created_at < %s) AND NOT (map_treenode.id IN", "aren't any? if now['latest_start_time'] is None: return # Preserve some time prior to", "frame tweet_count = models.IntegerField(default=0) nodes_added = models.IntegerField(default=0) chunks_added = models.IntegerField(default=0) node_cache_hits = models.IntegerField(default=0)", "logging import re logger = logging.getLogger('map') class TreeNode(models.Model): class Meta: index_together = [", "country-word-counts. 
\"\"\" # Make sure this is valid country_limit = int(country_limit) subquery =", "the cleanup # procedure will delete it while we are working with it.", "we were getting it continue # we got one if cache is not", "to that time frame trailing_edge_date = now['latest_start_time'] - settings.KEEP_DATA_FOR logger.info(\"Cleaning chunks from before", "earliest_created_at = TweetChunk.get_earliest_created_at() if earliest_created_at is not None: return min(earliest_created_at, baseline_cutoff) else: return", "cleanup(cls, batch_size=10000, reset=False): # Get the most recently finished map time frame now", "old node, there is a risk that the cleanup # procedure will delete", "tweet, roots): \"\"\"Returns a root in the tweet, if it exists\"\"\" for root", "\"\"\" params = [cls.ROOT_NODES, batch_size] cursor = connection.cursor() return cursor.execute(query, params) @classmethod def", "batch of %d\", batch_orphaned) batch_orphaned = cls.orphan_empty_nodes(batch_size=batch_size) total_orphaned += batch_orphaned if total_orphaned >", "= models.BigIntegerField(default=0) tweet_text = models.CharField(max_length=250, default=None, null=True) created_at = models.DateTimeField(db_index=True) tz_country = models.CharField(max_length=32,", "be deleted for a brief period. node.created_at = timezone.now() node.save() except IntegrityError: #", "handy static method to get the <limit> most recent frames. \"\"\" query =", "for the top 10 countries following this node. More specifically: - Look at", "if total_deleted > 0: logger.info(\"Deleted %d orphans\", total_deleted) else: logger.info(\"No orphans to delete\")", "= batch_orphaned while batch_orphaned == batch_size: logger.info(\" ... orphaned batch of %d\", batch_orphaned)", "Since we've padded with 0s, alphabetic max is the same as numeric max", "commonly used - For the top result from each country, return the top", "cls.objects.aggregate(earliest_created_at=models.Min('created_at')) return results['earliest_created_at'] @classmethod def cleanup(cls, batch_size=10000, reset=False): # Get the most recently", "# Disconnect TreeNodes without any chunks logger.info(\"Orphaning empty tree nodes...\") batch_orphaned = cls.orphan_empty_nodes(batch_size=batch_size)", "reset=False): cls.cleanup_empty(batch_size=batch_size, reset=reset) cls.cleanup_orphans(batch_size=batch_size, reset=reset) @classmethod def cleanup_empty(cls, batch_size=10000, reset=False): # Disconnect TreeNodes", "%s) AND NOT (map_treenode.id IN %s) LIMIT %s \"\"\" query = \"\"\" UPDATE", "for those chunks by frequency. Returns a list. \"\"\" # Group by tz_country", "# How much padding to add to counts for the concat/max/split trick count_padding", "is not strictly 1:1 with time frames. 4. Implement calculate(tweets). This is where", "tzcountries = Tz_Country.objects.all() roots = TreeNode.objects.filter(parent=1) user_tz_map = dict((r.user_time_zone, r) for r in", "all the words that followed this one anywhere - In every country, find", "dictionary can optionally be provided for caching values across calls. 
\"\"\" if cache", "word=word) if not created: # If it is an old node, there is", "to delete\") def get_top_chunk_countries_for_children(self, limit=10): \"\"\" Look at the children of this node.", "TreeNode(models.Model): class Meta: index_together = [ ['parent', 'word'], ['created_at', 'parent'], ] ROOT_NODES =", "# Simply store the total tweet count in this time frame tweet_count =", "country = country.country new_tweet_chunks.append(TweetChunk( node=node, twitter_id=tweet.tweet_id, tweet_text=tweet.text, created_at=tweet.created_at, tz_country=country)) parent = node depth", "tweet_text=tweet.text, created_at=tweet.created_at, tz_country=country)) parent = node depth += 1 TweetChunk.objects.bulk_create(new_tweet_chunks) self.chunks_added += len(new_tweet_chunks)", "the given prefix and chunks.\"\"\" root = cls.get_root() if not root: raise Exception(\"No", "and word, and whether or not it is new. A dictionary can optionally", "BY `map_tweetchunk`.`tz_country`, `map_treenode`.`word` \"\"\" # Group by country, chunk query = TweetChunk.objects.values('tz_country', 'node__word')", "DatabaseOperations from twitter_stream.fields import PositiveBigAutoField, PositiveBigAutoForeignKey from swapper import get_model_name from datetime import", "@classmethod def get_empty_nodes(cls): \"\"\" Return a queryset for all the tree nodes that", "== batch_size: logger.info(\" ... orphaned batch of %d\", batch_orphaned) batch_orphaned = cls.orphan_empty_nodes(batch_size=batch_size) total_orphaned", "GROUP BY map_tweetchunk.tz_country, map_tweetchunk.node_id \"\"\".format(padding=count_padding, index_name=index_name) # Now select the max of the", "WHERE created_at <= %s ORDER BY id LIMIT %s\", [oldest_date, batch_size]) return deleted", "word)] = node if created: self.nodes_added += 1 return node, created def calculate(self,", "total_orphaned += batch_orphaned if total_orphaned > 0: logger.info(\"Orphaned %d empty nodes\", total_orphaned) else:", "data on separate models, if your data is not strictly 1:1 with time", "batch_size=10000, reset=False): logger.info(\"Deleting orphans...\") batch_deleted = cls.delete_orphans(batch_size=batch_size) total_deleted = batch_deleted while batch_deleted ==", "nodes (should be 0!)\", propagated) propagated = cls.propagate_orphanage() @classmethod def cleanup_orphans(cls, batch_size=10000, reset=False):", "calls. \"\"\" if cache is not None and (parent, word) in cache: self.node_cache_hits", "the parent and word, and whether or not it is new. A dictionary", "was deleted while we were getting it continue # we got one if", "query = query.exclude(tz_country='') query = query.exclude(node__word='') # Only chunks belonging to our children", "with that word. # Concatenate the tweet count to the word subquery =", "map_treenode.id FROM map_treenode LEFT OUTER JOIN map_tweetchunk ON ( map_treenode.id = map_tweetchunk.node_id )", "= query.order_by('-count') # Aggregate fields query = query.annotate(count=models.Count('id')) print query.query # Limit try:", "trailing_edge_date) batch_deleted = TweetChunk.delete_before(trailing_edge_date, batch_size=batch_size) total_deleted = batch_deleted while batch_deleted == batch_size: logger.info(\"", "the time frame duration) 3. Add any fields you need to calculate. 
You", "reset=False): logger.info(\"Deleting orphans...\") batch_deleted = cls.delete_orphans(batch_size=batch_size) total_deleted = batch_deleted while batch_deleted == batch_size:", "= TweetChunk.objects.values('tz_country') # Only non-empty tz_countries, words query = query.exclude(tz_country='') query = query.exclude(node__word='')", "(map_treenode.created_at < %s) AND NOT (map_treenode.id IN %s) LIMIT %s \"\"\" query =", "= query.exclude(node__word='') query = query.filter(tz_country=country) # Only chunks belonging to our children query", "node.save() except IntegrityError: # it was deleted while we were getting it continue", "country = user_tz_map.get(tweet.user_time_zone, None) if country is None: country = '' else: country", "map_treenode JOIN ( {subset_query} ) subset ON map_treenode.id = subset.id SET map_treenode.parent_id =", "tzcountries) user_tz_map[None] = None node_cache = {} new_tweet_chunks = [] for tweet in", "'-' character in the middle. splitquery = \"\"\" SELECT sub2.tz_country, SUBSTRING(sub2.maxcombo, {padding} +", "blank=True, related_name='children') word = models.CharField(max_length=150) created_at = models.DateTimeField(auto_now_add=True, default=None, null=True, blank=True) def get_child(self,", "prefix_node is None: return None node = prefix_node for chunk in query_chunks: node", "IntegrityError, DatabaseError from django.utils import timezone import random from south.db.generic import DatabaseOperations from", "COUNT(`map_tweetchunk`.`id`) AS `count` FROM `map_tweetchunk` INNER JOIN `map_treenode` ON (`map_tweetchunk`.`node_id` = `map_treenode`.`id`) WHERE", "== batch_size: logger.info(\" ... deleted batch of %d\", batch_deleted) batch_deleted = cls.delete_orphans(batch_size=batch_size) total_deleted", "recently finished map time frame now = MapTimeFrame.objects \\ .filter(calculated=True) \\ .aggregate(latest_start_time=models.Max('start_time')) #", "BY max_count DESC LIMIT {limit} \"\"\".format(subquery=subquery.query, limit=country_limit + 1) # Template for the", "node is guaranteed safe to work with for a few minutes # It", "referenced by the given prefix and chunks.\"\"\" root = cls.get_root() if not root:", "rh) parent = root depth = 0 for chunk in chunks: if chunk", "have tons of data that depends on tweets.\"\"\" baseline_cutoff = super(TweetTimeFrame, cls).get_stream_memory_cutoff() #", "for root in roots: if root.word in tweet.text: return root return None def", "import get_model_name from datetime import timedelta import settings from twitter_feels.libs.twitter_analysis import TweetTimeFrame import", "oldest_date, batch_size=10000): \"\"\"Delete a batch of chunks before the given date\"\"\" cursor =", "= map_tweetchunk.node_id ) WHERE (map_tweetchunk.id IS NULL) AND (map_treenode.parent_id IS NOT NULL) AND", "that had the maximum count. # Further, filters out empty words and countries.", "map_tweetchunk.id), {padding}, '0'), '-', map_treenode.word ) as combo FROM map_tweetchunk -- USE INDEX", "= now['latest_start_time'] - settings.KEEP_DATA_FOR logger.info(\"Cleaning chunks from before %s...\", trailing_edge_date) batch_deleted = TweetChunk.delete_before(trailing_edge_date,", "cls.delete_orphans(batch_size=batch_size) total_deleted = batch_deleted while batch_deleted == batch_size: logger.info(\" ... 
deleted batch of", "`map_treenode`.`parent_id` = MY_ID_GOES HERE ) GROUP BY `map_tweetchunk`.`tz_country`, `map_treenode`.`word` \"\"\" # Group by", "logger.info(\"Orphaned %d empty nodes\", total_orphaned) else: logger.info(\"No empty nodes to orphan\") logger.info(\"Orphaning children", "ORDER BY country_node_count.count DESC LIMIT {limit} \"\"\".format(subquery=subquery.query, maxquery=maxquery, limit=country_limit) print superquery cursor =", "instead # query = query.exclude(node__word='') # query = query.exclude(tz_country='') # Only chunks belonging", "any tweet chunk.\"\"\" results = cls.objects.aggregate(earliest_created_at=models.Min('created_at')) return results['earliest_created_at'] @classmethod def cleanup(cls, batch_size=10000, reset=False):", "with. \"\"\" # Analyze every 15 seconds DURATION = timedelta(seconds=60) # Simply store", "Indicate how often to run the analysis (same as the time frame duration)", "# Then remove obsolete tree nodes TreeNode.cleanup_empty() else: logger.info(\"Skipping empty treenode cleanup on", "children query = query.filter(node__parent=self) # Order by count query = query.order_by('-chunk_count') # Aggregate", "root depth = 0 for chunk in chunks: if chunk == \"\": continue", "- In every country, find the word following this one that was most", "query.annotate(count=models.Count('id')) return query def get_most_popular_child_chunk_by_country2(self, country_limit=10): \"\"\" Get tuples of country, word, count", "word subquery = \"\"\" SELECT map_tweetchunk.tz_country, CONCAT( LPAD(COUNT(DISTINCT map_tweetchunk.id), {padding}, '0'), '-', map_treenode.word", "Exception(\"No root node in tweet tree\") prefix_node = root.get_child(prefix) if prefix_node is None:", "the max of the combo field for each country # Since we've padded", "makes it impossible for it to be deleted for a brief period. node.created_at", "follow_chunks(cls, prefix, query_chunks): \"\"\"Returns the node referenced by the given prefix and chunks.\"\"\"", "this time frame tweet_count = models.IntegerField(default=0) nodes_added = models.IntegerField(default=0) chunks_added = models.IntegerField(default=0) node_cache_hits", "the words that followed this one anywhere - In every country, find the", "params) @classmethod def propagate_orphanage(cls): \"\"\"Makes sure that all children of current orphans are", "chunks by frequency. Returns a list. 
\"\"\" # Group by tz_country query =", "this node for every # country, and the number of tweets with that", "not root: continue rh = tweet.text.split(root.word, 1)[1] rh = rh.lower() chunks = re.split('[*,.!:\"\\s;()/@#]+|\\'[\\W]|\\?+',", "now['latest_start_time'] is None: return # Preserve some time prior to that time frame", "\"\"\" # Analyze every 15 seconds DURATION = timedelta(seconds=60) # Simply store the", "ON map_treenode.id = subset.id SET map_treenode.parent_id = NULL \"\"\".format(subset_query=subset_query) params = [time_cutoff, cls.ROOT_NODES,", "SUBSTRING(sub2.maxcombo, {padding} + 2) AS word, CAST(SUBSTRING(sub2.maxcombo, 1, {padding}) AS UNSIGNED) AS `count`", "roots) if not root: continue rh = tweet.text.split(root.word, 1)[1] rh = rh.lower() chunks", "First delete old tweet chunks TweetChunk.cleanup() TreeNode.cleanup_orphans() @classmethod def get_stream_memory_cutoff(cls): \"\"\"Need to override", "SELECT country_node_count.tz_country, country_node_count.word, country_node_count.count FROM ({subquery}) country_node_count INNER JOIN ({maxquery}) countrymax ON (countrymax.tz_country", "while we were getting it continue # we got one if cache is", "%s...\", trailing_edge_date) batch_deleted = TweetChunk.delete_before(trailing_edge_date, batch_size=batch_size) total_deleted = batch_deleted while batch_deleted == batch_size:", "+= 1 TweetChunk.objects.bulk_create(new_tweet_chunks) self.chunks_added += len(new_tweet_chunks) self.node_cache_size = len(node_cache) return tweets def cleanup(self):", "user_tz_map = dict((r.user_time_zone, r) for r in tzcountries) user_tz_map[None] = None node_cache =", "for the overall query # It finds the actual chunk for each country", "check_prefix(self, tweet, roots): \"\"\"Returns a root in the tweet, if it exists\"\"\" for", "Tz_Country.objects.all() roots = TreeNode.objects.filter(parent=1) user_tz_map = dict((r.user_time_zone, r) for r in tzcountries) user_tz_map[None]", "cls).get_stream_memory_cutoff() # Get the earliest tweet referenced in any Tweet Chunk earliest_created_at =", "BY map_tweetchunk.tz_country, map_tweetchunk.node_id \"\"\".format(padding=count_padding, index_name=index_name) # Now select the max of the combo", "MAX(sub.count) AS max_count FROM ({subquery}) sub GROUP BY tz_country ORDER BY max_count DESC", "len(new_tweet_chunks) self.node_cache_size = len(node_cache) return tweets def cleanup(self): if self.id % 3 ==", "+= 1 return node, created def calculate(self, tweets): self.tweet_count = len(tweets) tzcountries =", "were getting it continue # we got one if cache is not None:", "map_treenode WHERE (parent_id IS NULL) AND NOT (id IN %s) ORDER BY id", "created_at = models.DateTimeField(auto_now_add=True, default=None, null=True, blank=True) def get_child(self, word): child = list(self.children.filter(word=word)[:1]) if", "any additional functions related to your time frames that will make them easier", "tweet_count = models.IntegerField(default=0) nodes_added = models.IntegerField(default=0) chunks_added = models.IntegerField(default=0) node_cache_hits = models.IntegerField(default=0) node_cache_size", "= NULL \"\"\".format(subset_query=subset_query) params = [time_cutoff, cls.ROOT_NODES, batch_size] cursor = connection.cursor() return cursor.execute(query,", "TreeNode.cleanup_orphans() @classmethod def get_stream_memory_cutoff(cls): \"\"\"Need to override this because we have tons of", "to run the analysis (same as the time frame duration) 3. 
Add any", "\"\"\" SELECT map_tweetchunk.tz_country, CONCAT( LPAD(COUNT(DISTINCT map_tweetchunk.id), {padding}, '0'), '-', map_treenode.word ) as combo", "models.CharField(max_length=250, default=None, null=True) created_at = models.DateTimeField(db_index=True) tz_country = models.CharField(max_length=32, blank=True) @classmethod def get_example_tweet(cls,", "SELECT `map_tweetchunk`.`tz_country`, `map_treenode`.`word`, COUNT(`map_tweetchunk`.`id`) AS `count` FROM `map_tweetchunk` INNER JOIN `map_treenode` ON (`map_tweetchunk`.`node_id`", "= query.exclude(node__word='') # Only chunks belonging to our children query = query.filter(node__parent=self) #", "sure this is valid country_limit = int(country_limit) subquery = self.get_subquery() # This query", "number of tweets with that word. # Concatenate the tweet count to the", "{limit} \"\"\".format(subquery=subquery.query, maxquery=maxquery, limit=country_limit) print superquery cursor = connection.cursor() cursor.execute(superquery) return cursor.fetchall() @classmethod", "node referenced by the given prefix and chunks.\"\"\" root = cls.get_root() if not", "batch_orphaned == batch_size: logger.info(\" ... orphaned batch of %d\", batch_orphaned) batch_orphaned = cls.orphan_empty_nodes(batch_size=batch_size)", "max_count DESC LIMIT {limit} \"\"\".format(subquery=subquery.query, limit=country_limit + 1) # Template for the overall", "db.create_index_name('map_tweetchunk', ['tz_country', 'node_id']) # Find the words following this node for every #", "subset.id SET map_treenode.parent_id = NULL \"\"\".format(subset_query=subset_query) params = [time_cutoff, cls.ROOT_NODES, batch_size] cursor =", "query finds the maximum number of tweets # with chunks following this node", "# Group by chunk query = TweetChunk.objects.values('node', 'node__word') # Only with the given", "None node = prefix_node for chunk in query_chunks: node = node.get_child(chunk.lower()) if not", "None: return None node = prefix_node for chunk in query_chunks: node = node.get_child(chunk.lower())", "your time frames that will make them easier to work with. \"\"\" #", "# This query finds the maximum number of tweets # with chunks following", "because # it is 1-indexed and we added a '-' character in the", "= subset.id SET map_treenode.parent_id = NULL \"\"\".format(subset_query=subset_query) params = [time_cutoff, cls.ROOT_NODES, batch_size] cursor", ".filter(calculated=True) \\ .aggregate(latest_start_time=models.Max('start_time')) # maybe there aren't any? if now['latest_start_time'] is None: return", "get_most_popular_child_chunk_by_country(self, country_limit=10): \"\"\" Get tuples of country, word, count for the top 10", "from swapper import get_model_name from datetime import timedelta import settings from twitter_feels.libs.twitter_analysis import", "map_tweetchunk.tz_country, CONCAT( LPAD(COUNT(DISTINCT map_tweetchunk.id), {padding}, '0'), '-', map_treenode.word ) as combo FROM map_tweetchunk", "ON ( map_tweetchunk.node_id = map_treenode.id ) WHERE map_treenode.parent_id = %s AND map_tweetchunk.tz_country !=", "batch of chunks before the given date\"\"\" cursor = connection.cursor() deleted = cursor.execute(\"DELETE", "additional functions related to your time frames that will make them easier to", "that followed this one anywhere - In every country, find the word following", "result = query.first() return result['node__word'], result['count'] except ObjectDoesNotExist: return None def get_subquery(self): \"\"\"", "optionally be provided for caching values across calls. 
\"\"\" if cache is not", "cursor = connection.cursor() return cursor.execute(query, params) @classmethod def propagate_orphanage(cls): \"\"\"Makes sure that all", "while propagated > 0: logger.info(\" ...orphaned %d new nodes (should be 0!)\", propagated)", "return cache[(parent, word)], False else: # We want to keep trying to grab", "TweetChunk.delete_before(trailing_edge_date, batch_size=batch_size) total_deleted += batch_deleted if reset and settings.DEBUG: # Prevent apparent memory", "is the same as numeric max maxquery = \"\"\" SELECT sub.tz_country, MAX(sub.combo) as", "the given date\"\"\" cursor = connection.cursor() deleted = cursor.execute(\"DELETE FROM map_tweetchunk WHERE created_at", "r) for r in tzcountries) user_tz_map[None] = None node_cache = {} new_tweet_chunks =", "IntegrityError: # it was deleted while we were getting it continue # we", "= cls.objects.aggregate(earliest_created_at=models.Min('created_at')) return results['earliest_created_at'] @classmethod def cleanup(cls, batch_size=10000, reset=False): # Get the most", "if not root: raise Exception(\"No root node in tweet tree\") prefix_node = root.get_child(prefix)", "children of orphans... (should not be needed)\") propagated = cls.propagate_orphanage() while propagated >", "new nodes (should be 0!)\", propagated) propagated = cls.propagate_orphanage() @classmethod def cleanup_orphans(cls, batch_size=10000,", "later instead # query = query.exclude(node__word='') # query = query.exclude(tz_country='') # Only chunks", "This node is guaranteed safe to work with for a few minutes #", "cache=None): \"\"\" Returns a tree node for the parent and word, and whether", "BY sub.tz_country ORDER BY maxcombo DESC LIMIT %s \"\"\".format(subquery=subquery) # Now split up", "INDEX ({index_name}) LEFT OUTER JOIN map_treenode ON ( map_tweetchunk.node_id = map_treenode.id ) WHERE", "chunk == \"\": continue if depth > settings.MAX_DEPTH: break # This node is", "def get_most_popular_child_chunk_by_country(self, country_limit=10): \"\"\" Get tuples of country, word, count for the top", "often to run the analysis (same as the time frame duration) 3. Add", "models, if your data is not strictly 1:1 with time frames. 4. Implement", "at all the words that followed this one anywhere - In every country,", "batch_size] cursor = connection.cursor() return cursor.execute(query, params) @classmethod def propagate_orphanage(cls): \"\"\"Makes sure that", "of %d\", batch_deleted) batch_deleted = cls.delete_orphans(batch_size=batch_size) total_deleted += batch_deleted if reset and settings.DEBUG:", "cursor = connection.cursor() deleted = cursor.execute(\"DELETE FROM map_tweetchunk WHERE created_at <= %s ORDER", "tree nodes that have no associated TweetChunks \"\"\" return cls.objects.filter(chunks=None).exclude(pk__in=cls.ROOT_NODES) @classmethod def follow_chunks(cls,", "like the following: SELECT `map_tweetchunk`.`tz_country`, `map_treenode`.`word`, COUNT(`map_tweetchunk`.`id`) AS `count` FROM `map_tweetchunk` INNER JOIN", "nodes\", total_orphaned) else: logger.info(\"No empty nodes to orphan\") logger.info(\"Orphaning children of orphans... (should", "impossible for it to be deleted for a brief period. node.created_at = timezone.now()", "!= '' AND country_node_count.word != '' ) ORDER BY country_node_count.count DESC LIMIT {limit}", "will delete it while we are working with it. 
# Setting created_at makes", "it was deleted while we were getting it continue # we got one", "logger.info(\"Deleted %d orphans\", total_deleted) else: logger.info(\"No orphans to delete\") def get_top_chunk_countries_for_children(self, limit=10): \"\"\"", "TweetChunk.objects.values('node', 'node__word') # Only with the given country, non-empty words query = query.exclude(node__word='')", "an old node, there is a risk that the cleanup # procedure will", "AND (map_treenode.created_at < %s) AND NOT (map_treenode.id IN %s) LIMIT %s \"\"\" query", "earliest created_at in any tweet chunk.\"\"\" results = cls.objects.aggregate(earliest_created_at=models.Min('created_at')) return results['earliest_created_at'] @classmethod def", "print query.query return [r['tz_country'] for r in query] def get_most_popular_child_chunk_in(self, country): \"\"\" Get", "is new. A dictionary can optionally be provided for caching values across calls.", "map_tweetchunk.node_id = map_treenode.id ) WHERE map_treenode.parent_id = %s AND map_tweetchunk.tz_country != '' AND", "FROM map_tweetchunk WHERE created_at <= %s ORDER BY id LIMIT %s\", [oldest_date, batch_size])", "orphans\", total_deleted) else: logger.info(\"No orphans to delete\") def get_top_chunk_countries_for_children(self, limit=10): \"\"\" Look at", "in tzcountries) user_tz_map[None] = None node_cache = {} new_tweet_chunks = [] for tweet", "the top 10 country-word-counts. \"\"\" # How much padding to add to counts", "cleanup on this frame\") # First delete old tweet chunks TweetChunk.cleanup() TreeNode.cleanup_orphans() @classmethod", "Preserve some time prior to that time frame trailing_edge_date = now['latest_start_time'] - settings.KEEP_DATA_FOR", "'-', map_treenode.word ) as combo FROM map_tweetchunk -- USE INDEX ({index_name}) LEFT OUTER", "A basic time frame for demo analysis. 1. Extend the BaseTimeFrame class. 
2.", "in query] def get_most_popular_child_chunk_in(self, country): \"\"\" Get the chunk following this node with", "] id = PositiveBigAutoField(primary_key=True) node = models.ForeignKey(TreeNode, related_name='chunks') twitter_id = models.BigIntegerField(default=0) tweet_text =", "to grab this node until we get one while True: try: with transaction.atomic():", "get_subquery(self): \"\"\" Generates SQL like the following: SELECT `map_tweetchunk`.`tz_country`, `map_treenode`.`word`, COUNT(`map_tweetchunk`.`id`) AS `count`", "also store data on separate models, if your data is not strictly 1:1", "= query.filter(node__parent=self) # Order by count query = query.order_by('-chunk_count') # Aggregate fields query", "cls.cleanup_empty(batch_size=batch_size, reset=reset) cls.cleanup_orphans(batch_size=batch_size, reset=reset) @classmethod def cleanup_empty(cls, batch_size=10000, reset=False): # Disconnect TreeNodes without", "is not None: cache[(parent, word)] = node if created: self.nodes_added += 1 return", "rh = rh.lower() chunks = re.split('[*,.!:\"\\s;()/@#]+|\\'[\\W]|\\?+', rh) parent = root depth = 0", "by count, desc query = query.order_by('-count') # Aggregate fields query = query.annotate(count=models.Count('id')) print", "created_at=tweet.created_at, tz_country=country)) parent = node depth += 1 TweetChunk.objects.bulk_create(new_tweet_chunks) self.chunks_added += len(new_tweet_chunks) self.node_cache_size", "if self.id % 3 == 0: # Then remove obsolete tree nodes TreeNode.cleanup_empty()", "to the limit (plus 1 to allow for the empty country) maxquery =", "maxquery=maxquery, limit=country_limit) print superquery cursor = connection.cursor() cursor.execute(superquery) return cursor.fetchall() @classmethod def get_root(cls):", "each country, return the top 10 country-word-counts. \"\"\" # Make sure this is", "transaction, IntegrityError, DatabaseError from django.utils import timezone import random from south.db.generic import DatabaseOperations", "count_padding = 10 # Get the name of the stupid index from South", "= PositiveBigAutoField(primary_key=True) node = models.ForeignKey(TreeNode, related_name='chunks') twitter_id = models.BigIntegerField(default=0) tweet_text = models.CharField(max_length=250, default=None,", "time frame now = MapTimeFrame.objects \\ .filter(calculated=True) \\ .aggregate(latest_start_time=models.Max('start_time')) # maybe there aren't", "['tz_country', 'node_id']) # Find the words following this node for every # country,", "create its TweetChunk. node, created = self.get_tree_node(parent=parent, word=chunk, cache=node_cache) country = user_tz_map.get(tweet.user_time_zone, None)", "the earliest created_at in any tweet chunk.\"\"\" results = cls.objects.aggregate(earliest_created_at=models.Min('created_at')) return results['earliest_created_at'] @classmethod", "# Now split up the max combo into the count and the word", "child: return child[0] return None @classmethod def get_empty_nodes(cls): \"\"\" Return a queryset for", "try: result = query.first() return result['node__word'], result['count'] except ObjectDoesNotExist: return None def get_subquery(self):", "None class Tz_Country(models.Model): user_time_zone = models.CharField(max_length=32) country = models.CharField(max_length=32) class TweetChunk(models.Model): class Meta:", "most_recent_time['most_recent_time'] is None: return 0 most_recent_time = most_recent_time['most_recent_time'] time_cutoff = most_recent_time - settings.NODE_FREEZE_INTERVAL", "before we create its TweetChunk. 
node, created = self.get_tree_node(parent=parent, word=chunk, cache=node_cache) country =", "words and countries. superquery = \"\"\" SELECT country_node_count.tz_country, country_node_count.word, country_node_count.count FROM ({subquery}) country_node_count", "orphans to delete\") def get_top_chunk_countries_for_children(self, limit=10): \"\"\" Look at the children of this", "belonging to our children query = query.filter(node__parent=self) # Order by count query =", "query = query.exclude(node__word='') # query = query.exclude(tz_country='') # Only chunks belonging to our", "settings.MAX_DEPTH: break # This node is guaranteed safe to work with for a", "finds the actual chunk for each country # that had the maximum count.", "is not None and (parent, word) in cache: self.node_cache_hits += 1 return cache[(parent,", "used - For the top result from each country, return the top 10", "up to the limit (plus 1 to allow for the empty country) maxquery", "for r in tzcountries) user_tz_map[None] = None node_cache = {} new_tweet_chunks = []", "not it is new. A dictionary can optionally be provided for caching values", "%d\", batch_deleted) batch_deleted = TweetChunk.delete_before(trailing_edge_date, batch_size=batch_size) total_deleted += batch_deleted if reset and settings.DEBUG:", "return result['node__word'], result['count'] except ObjectDoesNotExist: return None def get_subquery(self): \"\"\" Generates SQL like", "from each country, return the top 10 country-word-counts. \"\"\" # Make sure this", "break # This node is guaranteed safe to work with for a few", "country_limit=10): \"\"\" Get tuples of country, word, count for the top 10 countries", "depth = 0 for chunk in chunks: if chunk == \"\": continue if", "refer to them. Return the top countries for those chunks by frequency. Returns", "= query.exclude(node__word='') # query = query.exclude(tz_country='') # Only chunks belonging to our children", "country_name, node): \"\"\"Returns a Tweet for the given country and node.\"\"\" try: chunks", "[1, 2, 3, 4, 5, 6, 7, 8, 9, 10] parent = models.ForeignKey('self',", "continue if depth > settings.MAX_DEPTH: break # This node is guaranteed safe to", "None @classmethod def delete_before(cls, oldest_date, batch_size=10000): \"\"\"Delete a batch of chunks before the", "each country, return the top 10 country-word-counts. 
\"\"\" # How much padding to", "much padding to add to counts for the concat/max/split trick count_padding = 10", "LEFT OUTER JOIN map_treenode ON ( map_tweetchunk.node_id = map_treenode.id ) WHERE map_treenode.parent_id =", "More specifically: - Look at all the words that followed this one anywhere", "# Get the earliest tweet referenced in any Tweet Chunk earliest_created_at = TweetChunk.get_earliest_created_at()", "query = query.filter(node__parent=self) # Aggregate fields query = query.annotate(count=models.Count('id')) return query def get_most_popular_child_chunk_by_country2(self,", "def get_tree_node(self, parent, word, cache=None): \"\"\" Returns a tree node for the parent", "\"\"\" # How much padding to add to counts for the concat/max/split trick", "Only with the given country, non-empty words query = query.exclude(node__word='') query = query.filter(tz_country=country)", "@classmethod def orphan_empty_nodes(cls, batch_size=10000): most_recent_time = cls.objects.aggregate(most_recent_time=models.Max('created_at')) if most_recent_time['most_recent_time'] is None: return 0", "sub GROUP BY tz_country ORDER BY max_count DESC LIMIT {limit} \"\"\".format(subquery=subquery.query, limit=country_limit +", "from django.core.exceptions import ObjectDoesNotExist from django.db import models, connection, transaction, IntegrityError, DatabaseError from", "LIMIT %s\", [oldest_date, batch_size]) return deleted @classmethod def get_earliest_created_at(cls): \"\"\"Get the earliest created_at", "non-empty tz_countries, words query = query.exclude(tz_country='') query = query.exclude(node__word='') # Only chunks belonging", "followed this one anywhere - In every country, find the word following this", "this is valid country_limit = int(country_limit) subquery = self.get_subquery() # This query finds", "batch_deleted = cls.delete_orphans(batch_size=batch_size) total_deleted += batch_deleted if reset and settings.DEBUG: # Prevent apparent", "continue # we got one if cache is not None: cache[(parent, word)] =", "of orphans\"\"\" query = \"\"\" DELETE FROM map_treenode WHERE (parent_id IS NULL) AND", "is None: return None node = prefix_node for chunk in query_chunks: node =", "from twitter_feels.libs.twitter_analysis import TweetTimeFrame import logging import re logger = logging.getLogger('map') class TreeNode(models.Model):", "TweetChunk(models.Model): class Meta: index_together = [ ['tz_country', 'node'], ] id = PositiveBigAutoField(primary_key=True) node", "NULL) AND (map_treenode.parent_id IS NOT NULL) AND (map_treenode.created_at < %s) AND NOT (map_treenode.id", "AND country_node_count.tz_country != '' AND country_node_count.word != '' ) ORDER BY country_node_count.count DESC", "related_name='chunks') twitter_id = models.BigIntegerField(default=0) tweet_text = models.CharField(max_length=250, default=None, null=True) created_at = models.DateTimeField(db_index=True) tz_country", "orphans\"\"\" query = \"\"\" DELETE FROM map_treenode WHERE (parent_id IS NULL) AND NOT", "batch_orphaned = cls.orphan_empty_nodes(batch_size=batch_size) total_orphaned += batch_orphaned if total_orphaned > 0: logger.info(\"Orphaned %d empty", "`map_tweetchunk` INNER JOIN `map_treenode` ON (`map_tweetchunk`.`node_id` = `map_treenode`.`id`) WHERE ( AND `map_treenode`.`parent_id` =", "# It won't be deleted by cleanup before we create its TweetChunk. node,", "import db db.reset_queries() if total_deleted > 0: logger.info(\"Deleted %d tweet chunks\", total_deleted) else:", "Add any fields you need to calculate. 
You can also store data on", "LIMIT {limit} \"\"\".format(subquery=subquery.query, maxquery=maxquery, limit=country_limit) print superquery cursor = connection.cursor() cursor.execute(superquery) return cursor.fetchall()", "TweetChunk.objects.values('tz_country') # Only non-empty tz_countries, words query = query.exclude(tz_country='') query = query.exclude(node__word='') #", "> 0: logger.info(\"Orphaned %d empty nodes\", total_orphaned) else: logger.info(\"No empty nodes to orphan\")", "of tweets with that word. # Concatenate the tweet count to the word", "word)], False else: # We want to keep trying to grab this node", "cls.get_root() if not root: raise Exception(\"No root node in tweet tree\") prefix_node =", "at the chunks that refer to them. Return the top countries for those", "# Analyze every 15 seconds DURATION = timedelta(seconds=60) # Simply store the total", "fields you need to calculate. You can also store data on separate models,", "len(tweets) tzcountries = Tz_Country.objects.all() roots = TreeNode.objects.filter(parent=1) user_tz_map = dict((r.user_time_zone, r) for r", "subset_query = \"\"\" SELECT map_treenode.id FROM map_treenode LEFT OUTER JOIN map_tweetchunk ON (", "return cls.objects.get(id=1) except ObjectDoesNotExist: return None class Tz_Country(models.Model): user_time_zone = models.CharField(max_length=32) country =", "whether or not it is new. A dictionary can optionally be provided for", "params = [cls.ROOT_NODES, batch_size] cursor = connection.cursor() return cursor.execute(query, params) @classmethod def cleanup(cls,", "\"\"\" UPDATE map_treenode JOIN ( {subset_query} ) subset ON map_treenode.id = subset.id SET", "depth > settings.MAX_DEPTH: break # This node is guaranteed safe to work with", "for the parent and word, and whether or not it is new. A", "if country is None: country = '' else: country = country.country new_tweet_chunks.append(TweetChunk( node=node,", "country # that had the maximum count. # Further, filters out empty words", "given prefix and chunks.\"\"\" root = cls.get_root() if not root: raise Exception(\"No root", "@classmethod def propagate_orphanage(cls): \"\"\"Makes sure that all children of current orphans are also", "@classmethod def get_example_tweet(cls, country_name, node): \"\"\"Returns a Tweet for the given country and", "CONCAT( LPAD(COUNT(DISTINCT map_tweetchunk.id), {padding}, '0'), '-', map_treenode.word ) as combo FROM map_tweetchunk --", "country_limit = int(country_limit) subquery = self.get_subquery() # This query finds the maximum number", "for every # country, and the number of tweets with that word. 
#", "cls.objects.filter(chunks=None).exclude(pk__in=cls.ROOT_NODES) @classmethod def follow_chunks(cls, prefix, query_chunks): \"\"\"Returns the node referenced by the given", "cursor.fetchall() def get_most_popular_child_chunk_by_country(self, country_limit=10): \"\"\" Get tuples of country, word, count for the", "True: try: with transaction.atomic(): # Get or create a node with parent and", "that time frame trailing_edge_date = now['latest_start_time'] - settings.KEEP_DATA_FOR logger.info(\"Cleaning chunks from before %s...\",", "from south.db.generic import DatabaseOperations from twitter_stream.fields import PositiveBigAutoField, PositiveBigAutoForeignKey from swapper import get_model_name", "`map_tweetchunk`.`tz_country`, `map_treenode`.`word`, COUNT(`map_tweetchunk`.`id`) AS `count` FROM `map_tweetchunk` INNER JOIN `map_treenode` ON (`map_tweetchunk`.`node_id` =", "FROM ({maxquery}) sub2 \"\"\".format(maxquery=maxquery, padding=count_padding) print splitquery cursor = connection.cursor() cursor.execute(splitquery, [self.id, country_limit])", "= dict((r.user_time_zone, r) for r in tzcountries) user_tz_map[None] = None node_cache = {}", "WHERE map_treenode.parent_id = %s AND map_tweetchunk.tz_country != '' AND map_treenode.word != '' GROUP", "models.DateTimeField(db_index=True) tz_country = models.CharField(max_length=32, blank=True) @classmethod def get_example_tweet(cls, country_name, node): \"\"\"Returns a Tweet", "if not created: # If it is an old node, there is a", "word is substring(maxcombo, padding+2) because # it is 1-indexed and we added a", "batch_deleted) batch_deleted = TweetChunk.delete_before(trailing_edge_date, batch_size=batch_size) total_deleted += batch_deleted if reset and settings.DEBUG: #", "== batch_size: logger.info(\" ... deleted batch of %d\", batch_deleted) batch_deleted = TweetChunk.delete_before(trailing_edge_date, batch_size=batch_size)", "word node, created = TreeNode.objects.get_or_create(parent=parent, word=word) if not created: # If it is", "logger.info(\" ...orphaned %d new nodes (should be 0!)\", propagated) propagated = cls.propagate_orphanage() @classmethod", "@classmethod def get_most_recent(cls, limit=20): \"\"\" A handy static method to get the <limit>", "ORDER BY id LIMIT %s\", [oldest_date, batch_size]) return deleted @classmethod def get_earliest_created_at(cls): \"\"\"Get", "not None and (parent, word) in cache: self.node_cache_hits += 1 return cache[(parent, word)],", "# https://docs.djangoproject.com/en/dev/faq/models/#why-is-django-leaking-memory from django import db db.reset_queries() if total_deleted > 0: logger.info(\"Deleted %d", "the BaseTimeFrame class. 2. Indicate how often to run the analysis (same as", "TweetChunks \"\"\" return cls.objects.filter(chunks=None).exclude(pk__in=cls.ROOT_NODES) @classmethod def follow_chunks(cls, prefix, query_chunks): \"\"\"Returns the node referenced", "the most tweets in the given country. 
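# Illustration (a sketch, not used by the models above): why the LPAD/CONCAT/MAX trick in
# TreeNode.get_most_popular_child_chunk_by_country2 works. Padding the count with leading
# zeros to a fixed width makes the lexicographic (string) maximum agree with the numeric
# maximum, so MAX() over the "count-word" strings picks the most frequent word per country.
# The counts and words below are invented example data.
def _demo_padded_max():
    count_padding = 10
    combos = ['%s-%s' % (str(count).rjust(count_padding, '0'), word)
              for count, word in [(7, 'happy'), (123, 'sad'), (45, 'tired')]]
    best = max(combos)                  # '0000000123-sad' wins lexicographically
    word = best[count_padding + 1:]     # skip the padded count and the '-' separator
    count = int(best[:count_padding])
    # The SQL uses SUBSTRING(maxcombo, padding + 2) because SQL strings are 1-indexed.
    return word, count                  # ('sad', 123)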
\"\"\" # Group by chunk query", "Now select the max of the combo field for each country # Since", "import DatabaseOperations from twitter_stream.fields import PositiveBigAutoField, PositiveBigAutoForeignKey from swapper import get_model_name from datetime", "calculate(self, tweets): self.tweet_count = len(tweets) tzcountries = Tz_Country.objects.all() roots = TreeNode.objects.filter(parent=1) user_tz_map =", "= country_node_count.tz_country) WHERE ( country_node_count.count = countrymax.max_count AND country_node_count.tz_country != '' AND country_node_count.word", "= query[:limit] print query.query return [r['tz_country'] for r in query] def get_most_popular_child_chunk_in(self, country):", "top result from each country, return the top 10 country-word-counts. \"\"\" # Make", "list(self.children.filter(word=word)[:1]) if child: return child[0] return None @classmethod def get_empty_nodes(cls): \"\"\" Return a", "while batch_orphaned == batch_size: logger.info(\" ... orphaned batch of %d\", batch_orphaned) batch_orphaned =", "def calculate(self, tweets): self.tweet_count = len(tweets) tzcountries = Tz_Country.objects.all() roots = TreeNode.objects.filter(parent=1) user_tz_map", "frame\") # First delete old tweet chunks TweetChunk.cleanup() TreeNode.cleanup_orphans() @classmethod def get_stream_memory_cutoff(cls): \"\"\"Need", "orphaned.\"\"\" future_orphans = cls.objects.filter(parent__parent=None).exclude(parent=None)\\ .exclude(pk__in=cls.ROOT_NODES) return future_orphans.update(parent=None) @classmethod def delete_orphans(cls, batch_size=10000): \"\"\"Delete a", "\"\"\".format(subquery=subquery.query, maxquery=maxquery, limit=country_limit) print superquery cursor = connection.cursor() cursor.execute(superquery) return cursor.fetchall() @classmethod def", "# Aggregate fields query = query.annotate(chunk_count=models.Count('id')) # Limit query = query[:limit] print query.query", "Meta: index_together = [ ['tz_country', 'node'], ] id = PositiveBigAutoField(primary_key=True) node = models.ForeignKey(TreeNode,", "twitter_id=tweet.tweet_id, tweet_text=tweet.text, created_at=tweet.created_at, tz_country=country)) parent = node depth += 1 TweetChunk.objects.bulk_create(new_tweet_chunks) self.chunks_added +=", "for the given country and node.\"\"\" try: chunks = cls.objects.filter(tz_country=country_name, node=node) count =", "total tweet count in this time frame tweet_count = models.IntegerField(default=0) nodes_added = models.IntegerField(default=0)", "If it is an old node, there is a risk that the cleanup", "# Get the name of the stupid index from South db = DatabaseOperations(None)", "2) AS word, CAST(SUBSTRING(sub2.maxcombo, 1, {padding}) AS UNSIGNED) AS `count` FROM ({maxquery}) sub2", "https://docs.djangoproject.com/en/dev/faq/models/#why-is-django-leaking-memory from django import db db.reset_queries() if total_deleted > 0: logger.info(\"Deleted %d tweet", "Limit query = query[:limit] print query.query return [r['tz_country'] for r in query] def", "return query def get_most_popular_child_chunk_by_country2(self, country_limit=10): \"\"\" Get tuples of country, word, count for", "CAST(SUBSTRING(sub2.maxcombo, 1, {padding}) AS UNSIGNED) AS `count` FROM ({maxquery}) sub2 \"\"\".format(maxquery=maxquery, padding=count_padding) print", "this one that was most commonly used - For the top result from", "chunks.\"\"\" root = cls.get_root() if not root: raise Exception(\"No root node in tweet", "= most_recent_time - settings.NODE_FREEZE_INTERVAL subset_query = \"\"\" SELECT map_treenode.id FROM 
map_treenode LEFT OUTER", "0: logger.info(\"Orphaned %d empty nodes\", total_orphaned) else: logger.info(\"No empty nodes to orphan\") logger.info(\"Orphaning", "following this one that was most commonly used - For the top result", "node. More specifically: - Look at all the words that followed this one", "not None: cache[(parent, word)] = node if created: self.nodes_added += 1 return node,", "\"\"\" # Group by chunk query = TweetChunk.objects.values('node', 'node__word') # Only with the", "(parent_id IS NULL) AND NOT (id IN %s) ORDER BY id LIMIT %s", "r in query] def get_most_popular_child_chunk_in(self, country): \"\"\" Get the chunk following this node", "into the count and the word # The word is substring(maxcombo, padding+2) because", "created def calculate(self, tweets): self.tweet_count = len(tweets) tzcountries = Tz_Country.objects.all() roots = TreeNode.objects.filter(parent=1)", "some time prior to that time frame trailing_edge_date = now['latest_start_time'] - settings.KEEP_DATA_FOR logger.info(\"Cleaning", "that have no associated TweetChunks \"\"\" return cls.objects.filter(chunks=None).exclude(pk__in=cls.ROOT_NODES) @classmethod def follow_chunks(cls, prefix, query_chunks):", "map time frame now = MapTimeFrame.objects \\ .filter(calculated=True) \\ .aggregate(latest_start_time=models.Max('start_time')) # maybe there", "to our children query = query.filter(node__parent=self) # Order by count, desc query =", "the maximum number of tweets # with chunks following this node for every", "that the cleanup # procedure will delete it while we are working with", "(map_tweetchunk.id IS NULL) AND (map_treenode.parent_id IS NOT NULL) AND (map_treenode.created_at < %s) AND", "2, 3, 4, 5, 6, 7, 8, 9, 10] parent = models.ForeignKey('self', null=True,", "query = \"\"\" DELETE FROM map_treenode WHERE (parent_id IS NULL) AND NOT (id", "tuples of country, word, count for the top 10 countries following this node.", "index from South db = DatabaseOperations(None) index_name = db.create_index_name('map_tweetchunk', ['tz_country', 'node_id']) # Find", "# Group by tz_country query = TweetChunk.objects.values('tz_country') # Only non-empty tz_countries, words query", "associated TweetChunks \"\"\" return cls.objects.filter(chunks=None).exclude(pk__in=cls.ROOT_NODES) @classmethod def follow_chunks(cls, prefix, query_chunks): \"\"\"Returns the node", "0 most_recent_time = most_recent_time['most_recent_time'] time_cutoff = most_recent_time - settings.NODE_FREEZE_INTERVAL subset_query = \"\"\" SELECT", "chunks\", total_deleted) else: logger.info(\"No chunks to delete\") class MapTimeFrame(TweetTimeFrame): \"\"\" A basic time", "import models, connection, transaction, IntegrityError, DatabaseError from django.utils import timezone import random from", "null=True, blank=True) def get_child(self, word): child = list(self.children.filter(word=word)[:1]) if child: return child[0] return", "sure to call self.mark_done(tweets) 5. 
Add any additional functions related to your time", "earliest_created_at is not None: return min(earliest_created_at, baseline_cutoff) else: return baseline_cutoff @classmethod def get_most_recent(cls,", "# we got one if cache is not None: cache[(parent, word)] = node", "@classmethod def get_earliest_created_at(cls): \"\"\"Get the earliest created_at in any tweet chunk.\"\"\" results =", "chunks following this node for every country # up to the limit (plus", "= query.annotate(chunk_count=models.Count('id')) # Limit query = query[:limit] print query.query return [r['tz_country'] for r", "<= %s ORDER BY id LIMIT %s\", [oldest_date, batch_size]) return deleted @classmethod def", "# Only chunks belonging to our children query = query.filter(node__parent=self) # Order by", "query] def get_most_popular_child_chunk_in(self, country): \"\"\" Get the chunk following this node with the", "%d\", batch_deleted) batch_deleted = cls.delete_orphans(batch_size=batch_size) total_deleted += batch_deleted if reset and settings.DEBUG: #", "new. A dictionary can optionally be provided for caching values across calls. \"\"\"", "total_orphaned) else: logger.info(\"No empty nodes to orphan\") logger.info(\"Orphaning children of orphans... (should not", "deleted = cursor.execute(\"DELETE FROM map_tweetchunk WHERE created_at <= %s ORDER BY id LIMIT", "chunk.tweet_text except DatabaseError: # things could potentially disappear while we're doing these operations", "except ObjectDoesNotExist: return None def get_subquery(self): \"\"\" Generates SQL like the following: SELECT", "= db.create_index_name('map_tweetchunk', ['tz_country', 'node_id']) # Find the words following this node for every", "to our children query = query.filter(node__parent=self) # Order by count query = query.order_by('-chunk_count')", "# Concatenate the tweet count to the word subquery = \"\"\" SELECT map_tweetchunk.tz_country,", "frame now = MapTimeFrame.objects \\ .filter(calculated=True) \\ .aggregate(latest_start_time=models.Max('start_time')) # maybe there aren't any?", "getting it continue # we got one if cache is not None: cache[(parent,", "# Group by country, chunk query = TweetChunk.objects.values('tz_country', 'node__word') # Only with the", "The word is substring(maxcombo, padding+2) because # it is 1-indexed and we added", "UNSIGNED) AS `count` FROM ({maxquery}) sub2 \"\"\".format(maxquery=maxquery, padding=count_padding) print splitquery cursor = connection.cursor()", "to be deleted for a brief period. 
node.created_at = timezone.now() node.save() except IntegrityError:", "query = query.annotate(count=models.Count('id')) print query.query # Limit try: result = query.first() return result['node__word'],", "a Tweet for the given country and node.\"\"\" try: chunks = cls.objects.filter(tz_country=country_name, node=node)", "= \"\"\" SELECT sub.tz_country, MAX(sub.count) AS max_count FROM ({subquery}) sub GROUP BY tz_country", "= models.IntegerField(default=0) chunks_added = models.IntegerField(default=0) node_cache_hits = models.IntegerField(default=0) node_cache_size = models.IntegerField(default=0) def check_prefix(self,", "logger.info(\"No orphans to delete\") def get_top_chunk_countries_for_children(self, limit=10): \"\"\" Look at the children of", "max_count FROM ({subquery}) sub GROUP BY tz_country ORDER BY max_count DESC LIMIT {limit}", "grab this node until we get one while True: try: with transaction.atomic(): #", "cursor = connection.cursor() cursor.execute(superquery) return cursor.fetchall() @classmethod def get_root(cls): try: return cls.objects.get(id=1) except", "time frames that will make them easier to work with. \"\"\" # Analyze", "# Since we've padded with 0s, alphabetic max is the same as numeric", "if your data is not strictly 1:1 with time frames. 4. Implement calculate(tweets).", "sure that all children of current orphans are also orphaned.\"\"\" future_orphans = cls.objects.filter(parent__parent=None).exclude(parent=None)\\", "country, word, count for the top 10 countries following this node. More specifically:", "(`map_tweetchunk`.`node_id` = `map_treenode`.`id`) WHERE ( AND `map_treenode`.`parent_id` = MY_ID_GOES HERE ) GROUP BY", "TweetChunk.objects.bulk_create(new_tweet_chunks) self.chunks_added += len(new_tweet_chunks) self.node_cache_size = len(node_cache) return tweets def cleanup(self): if self.id", "chunk query = TweetChunk.objects.values('tz_country', 'node__word') # Only with the given country, non-empty words", "3 == 0: # Then remove obsolete tree nodes TreeNode.cleanup_empty() else: logger.info(\"Skipping empty", "\\ .aggregate(latest_start_time=models.Max('start_time')) # maybe there aren't any? if now['latest_start_time'] is None: return #", "This is where you do your work. 
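# Usage sketch (illustrative only, not part of the original module): walking the tweet
# tree for a query and pulling a representative tweet, e.g. for a map popup. The prefix
# "i feel", the chunks ['so', 'happy'], and the country name 'United States' are invented
# placeholder values, not values defined anywhere in this code.
def _example_tweet_for_popup():
    node = TreeNode.follow_chunks('i feel', ['so', 'happy'])
    if node is None:
        return None
    return TweetChunk.get_example_tweet('United States', node)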
class MapTimeFrame(TweetTimeFrame):
    """
    A basic time frame for demo analysis.

    1. Extend the BaseTimeFrame class.
    2. Indicate how often to run the analysis (same as the time frame duration)
    3. Add any fields you need to calculate.
       You can also store data on separate models, if your data
       is not strictly 1:1 with time frames.
    4. Implement calculate(tweets). This is where you do your work.
       At the end, make sure to call self.mark_done(tweets)
    5. Add any additional functions related to your time frames
       that will make them easier to work with.
    """

    # Analyze every 60 seconds
    DURATION = timedelta(seconds=60)

    # Simply store the total tweet count in this time frame
    tweet_count = models.IntegerField(default=0)
    nodes_added = models.IntegerField(default=0)
    chunks_added = models.IntegerField(default=0)
    node_cache_hits = models.IntegerField(default=0)
    node_cache_size = models.IntegerField(default=0)

    def check_prefix(self, tweet, roots):
        """Returns a root in the tweet, if it exists"""
        for root in roots:
            if root.word in tweet.text:
                return root
        return None

    def get_tree_node(self, parent, word, cache=None):
        """
        Returns a tree node for the parent and word, and whether or not it is new.
        A dictionary can optionally be provided for caching values across calls.
        """
        if cache is not None and (parent, word) in cache:
            self.node_cache_hits += 1
            return cache[(parent, word)], False
        else:
            # We want to keep trying to grab this node until we get one
            while True:
                try:
                    with transaction.atomic():
                        # Get or create a node with parent and word
                        node, created = TreeNode.objects.get_or_create(parent=parent, word=word)

                        if not created:
                            # If it is an old node, there is a risk that the cleanup
                            # procedure will delete it while we are working with it.
                            # Setting created_at makes it impossible for it to be
                            # deleted for a brief period.
                            node.created_at = timezone.now()
                            node.save()
                except IntegrityError:
                    # it was deleted while we were getting it
                    continue

                # we got one
                if cache is not None:
                    cache[(parent, word)] = node

                if created:
                    self.nodes_added += 1

                return node, created

    def calculate(self, tweets):
        self.tweet_count = len(tweets)

        tzcountries = Tz_Country.objects.all()
        roots = TreeNode.objects.filter(parent=1)
        user_tz_map = dict((r.user_time_zone, r) for r in tzcountries)
        user_tz_map[None] = None

        node_cache = {}
        new_tweet_chunks = []
        for tweet in tweets:
            root = self.check_prefix(tweet, roots)
            if not root:
                continue

            rh = tweet.text.split(root.word, 1)[1]
            rh = rh.lower()
            chunks = re.split('[*,.!:"\s;()/@#]+|\'[\W]|\?+', rh)

            parent = root
            depth = 0
            for chunk in chunks:
                if chunk == "":
                    continue

                if depth > settings.MAX_DEPTH:
                    break

                # This node is guaranteed safe to work with for a few minutes
                # It won't be deleted by cleanup before we create its TweetChunk.
                node, created = self.get_tree_node(parent=parent, word=chunk, cache=node_cache)

                country = user_tz_map.get(tweet.user_time_zone, None)
                if country is None:
                    country = ''
                else:
                    country = country.country

                new_tweet_chunks.append(TweetChunk(
                    node=node,
                    twitter_id=tweet.tweet_id,
                    tweet_text=tweet.text,
                    created_at=tweet.created_at,
                    tz_country=country))

                parent = node
                depth += 1

        TweetChunk.objects.bulk_create(new_tweet_chunks)

        self.chunks_added += len(new_tweet_chunks)
        self.node_cache_size = len(node_cache)

        return tweets

    def cleanup(self):
        if self.id % 3 == 0:
            # Then remove obsolete tree nodes
            TreeNode.cleanup_empty()
        else:
            logger.info("Skipping empty treenode cleanup on this frame")

        # First delete old tweet chunks
        TweetChunk.cleanup()

        TreeNode.cleanup_orphans()

    @classmethod
    def get_stream_memory_cutoff(cls):
        """Need to override this because we have tons of data that depends on tweets."""
        baseline_cutoff = super(TweetTimeFrame, cls).get_stream_memory_cutoff()

        # Get the earliest tweet referenced in any Tweet Chunk
        earliest_created_at = TweetChunk.get_earliest_created_at()

        if earliest_created_at is not None:
            return min(earliest_created_at, baseline_cutoff)
        else:
            return baseline_cutoff

    @classmethod
    def get_most_recent(cls, limit=20):
        """
        A handy static method to get the <limit> most recent frames.
        """
        query = cls.get_in_range(calculated=True) \
            .order_by('-start_time')

        return query[:limit]
node, created = self.get_tree_node(parent=parent,", "return cursor.execute(query, params) @classmethod def propagate_orphanage(cls): \"\"\"Makes sure that all children of current", "Only non-empty tz_countries, words query = query.exclude(tz_country='') query = query.exclude(node__word='') # Only chunks", "batch_orphaned while batch_orphaned == batch_size: logger.info(\" ... orphaned batch of %d\", batch_orphaned) batch_orphaned", "OUTER JOIN map_tweetchunk ON ( map_treenode.id = map_tweetchunk.node_id ) WHERE (map_tweetchunk.id IS NULL)", "TweetTimeFrame import logging import re logger = logging.getLogger('map') class TreeNode(models.Model): class Meta: index_together", "`map_treenode` ON (`map_tweetchunk`.`node_id` = `map_treenode`.`id`) WHERE ( AND `map_treenode`.`parent_id` = MY_ID_GOES HERE )", "= TweetChunk.objects.values('tz_country', 'node__word') # Only with the given country, non-empty words # We'll", "= len(node_cache) return tweets def cleanup(self): if self.id % 3 == 0: #", "get the <limit> most recent frames. \"\"\" query = cls.get_in_range(calculated=True) \\ .order_by('-start_time') return", "by the given prefix and chunks.\"\"\" root = cls.get_root() if not root: raise", "Order by count query = query.order_by('-chunk_count') # Aggregate fields query = query.annotate(chunk_count=models.Count('id')) #", "ROOT_NODES = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] parent", "= query.filter(node__parent=self) # Order by count, desc query = query.order_by('-count') # Aggregate fields", "= \"\"\" SELECT map_treenode.id FROM map_treenode LEFT OUTER JOIN map_tweetchunk ON ( map_treenode.id", "= query.filter(tz_country=country) # Only chunks belonging to our children query = query.filter(node__parent=self) #", "splitquery cursor = connection.cursor() cursor.execute(splitquery, [self.id, country_limit]) return cursor.fetchall() def get_most_popular_child_chunk_by_country(self, country_limit=10): \"\"\"", "try: with transaction.atomic(): # Get or create a node with parent and word", "nodes TreeNode.cleanup_empty() else: logger.info(\"Skipping empty treenode cleanup on this frame\") # First delete", "name of the stupid index from South db = DatabaseOperations(None) index_name = db.create_index_name('map_tweetchunk',", "0: logger.info(\"Deleted %d tweet chunks\", total_deleted) else: logger.info(\"No chunks to delete\") class MapTimeFrame(TweetTimeFrame):", "False else: # We want to keep trying to grab this node until", "is guaranteed safe to work with for a few minutes # It won't", "can also store data on separate models, if your data is not strictly", "word, count for the top 10 countries following this node. More specifically: -", "AND `map_treenode`.`parent_id` = MY_ID_GOES HERE ) GROUP BY `map_tweetchunk`.`tz_country`, `map_treenode`.`word` \"\"\" # Group", "Aggregate fields query = query.annotate(chunk_count=models.Count('id')) # Limit query = query[:limit] print query.query return", "%s AND map_tweetchunk.tz_country != '' AND map_treenode.word != '' GROUP BY map_tweetchunk.tz_country, map_tweetchunk.node_id", "%s \"\"\" query = \"\"\" UPDATE map_treenode JOIN ( {subset_query} ) subset ON", "will make them easier to work with. \"\"\" # Analyze every 15 seconds", "({subquery}) sub GROUP BY sub.tz_country ORDER BY maxcombo DESC LIMIT %s \"\"\".format(subquery=subquery) #", "logger.info(\"Cleaning chunks from before %s...\", trailing_edge_date) batch_deleted = TweetChunk.delete_before(trailing_edge_date, batch_size=batch_size) total_deleted = batch_deleted", "caching values across calls. 
\"\"\" if cache is not None and (parent, word)", "node in tweet tree\") prefix_node = root.get_child(prefix) if prefix_node is None: return None", "if root.word in tweet.text: return root return None def get_tree_node(self, parent, word, cache=None):", "to our children query = query.filter(node__parent=self) # Aggregate fields query = query.annotate(count=models.Count('id')) return", "DURATION = timedelta(seconds=60) # Simply store the total tweet count in this time", "= %s AND map_tweetchunk.tz_country != '' AND map_treenode.word != '' GROUP BY map_tweetchunk.tz_country,", "run the analysis (same as the time frame duration) 3. Add any fields", "] ROOT_NODES = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]", "\"\"\" # Group by tz_country query = TweetChunk.objects.values('tz_country') # Only non-empty tz_countries, words", "prefix, query_chunks): \"\"\"Returns the node referenced by the given prefix and chunks.\"\"\" root", "'' else: country = country.country new_tweet_chunks.append(TweetChunk( node=node, twitter_id=tweet.tweet_id, tweet_text=tweet.text, created_at=tweet.created_at, tz_country=country)) parent =", "with parent and word node, created = TreeNode.objects.get_or_create(parent=parent, word=word) if not created: #", "from twitter_stream.fields import PositiveBigAutoField, PositiveBigAutoForeignKey from swapper import get_model_name from datetime import timedelta", "IS NOT NULL) AND (map_treenode.created_at < %s) AND NOT (map_treenode.id IN %s) LIMIT", "it to be deleted for a brief period. node.created_at = timezone.now() node.save() except", "# Order by count, desc query = query.order_by('-count') # Aggregate fields query =", "= countrymax.max_count AND country_node_count.tz_country != '' AND country_node_count.word != '' ) ORDER BY", "AS `count` FROM `map_tweetchunk` INNER JOIN `map_treenode` ON (`map_tweetchunk`.`node_id` = `map_treenode`.`id`) WHERE (", "= models.IntegerField(default=0) nodes_added = models.IntegerField(default=0) chunks_added = models.IntegerField(default=0) node_cache_hits = models.IntegerField(default=0) node_cache_size =", "import ObjectDoesNotExist from django.db import models, connection, transaction, IntegrityError, DatabaseError from django.utils import", "import PositiveBigAutoField, PositiveBigAutoForeignKey from swapper import get_model_name from datetime import timedelta import settings", "Get the name of the stupid index from South db = DatabaseOperations(None) index_name", "cls.objects.filter(parent__parent=None).exclude(parent=None)\\ .exclude(pk__in=cls.ROOT_NODES) return future_orphans.update(parent=None) @classmethod def delete_orphans(cls, batch_size=10000): \"\"\"Delete a batch of orphans\"\"\"", "timezone import random from south.db.generic import DatabaseOperations from twitter_stream.fields import PositiveBigAutoField, PositiveBigAutoForeignKey from", "return child[0] return None @classmethod def get_empty_nodes(cls): \"\"\" Return a queryset for all", "make sure to call self.mark_done(tweets) 5. 
Add any additional functions related to your", "GROUP BY tz_country ORDER BY max_count DESC LIMIT {limit} \"\"\".format(subquery=subquery.query, limit=country_limit + 1)", "future_orphans = cls.objects.filter(parent__parent=None).exclude(parent=None)\\ .exclude(pk__in=cls.ROOT_NODES) return future_orphans.update(parent=None) @classmethod def delete_orphans(cls, batch_size=10000): \"\"\"Delete a batch", "children query = query.filter(node__parent=self) # Aggregate fields query = query.annotate(count=models.Count('id')) return query def", "\"\"\" if cache is not None and (parent, word) in cache: self.node_cache_hits +=", "models.IntegerField(default=0) node_cache_hits = models.IntegerField(default=0) node_cache_size = models.IntegerField(default=0) def check_prefix(self, tweet, roots): \"\"\"Returns a", "valid country_limit = int(country_limit) subquery = self.get_subquery() # This query finds the maximum", "country_node_count INNER JOIN ({maxquery}) countrymax ON (countrymax.tz_country = country_node_count.tz_country) WHERE ( country_node_count.count =", "the same as numeric max maxquery = \"\"\" SELECT sub.tz_country, MAX(sub.combo) as maxcombo", "10 countries following this node. More specifically: - Look at all the words", "subquery = \"\"\" SELECT map_tweetchunk.tz_country, CONCAT( LPAD(COUNT(DISTINCT map_tweetchunk.id), {padding}, '0'), '-', map_treenode.word )", "it. # Setting created_at makes it impossible for it to be deleted for", "map_treenode.id ) WHERE map_treenode.parent_id = %s AND map_tweetchunk.tz_country != '' AND map_treenode.word !=", "for a few minutes # It won't be deleted by cleanup before we", "limit=20): \"\"\" A handy static method to get the <limit> most recent frames.", "\"\"\" SELECT sub.tz_country, MAX(sub.combo) as maxcombo FROM ({subquery}) sub GROUP BY sub.tz_country ORDER", "every country, find the word following this one that was most commonly used", "it is 1-indexed and we added a '-' character in the middle. splitquery", "At the end, make sure to call self.mark_done(tweets) 5. Add any additional functions", "to work with. \"\"\" # Analyze every 15 seconds DURATION = timedelta(seconds=60) #", "that will make them easier to work with. \"\"\" # Analyze every 15", "Setting created_at makes it impossible for it to be deleted for a brief", "cache is not None: cache[(parent, word)] = node if created: self.nodes_added += 1", "to counts for the concat/max/split trick count_padding = 10 # Get the name", "with the most tweets in the given country. 
\"\"\" # Group by chunk", "current orphans are also orphaned.\"\"\" future_orphans = cls.objects.filter(parent__parent=None).exclude(parent=None)\\ .exclude(pk__in=cls.ROOT_NODES) return future_orphans.update(parent=None) @classmethod def", "logger.info(\"No chunks to delete\") class MapTimeFrame(TweetTimeFrame): \"\"\" A basic time frame for demo", "that all children of current orphans are also orphaned.\"\"\" future_orphans = cls.objects.filter(parent__parent=None).exclude(parent=None)\\ .exclude(pk__in=cls.ROOT_NODES)", "= chunks.count() chunk = chunks[random.randint(0, count - 1)] return chunk.tweet_text except DatabaseError: #", "result['count'] except ObjectDoesNotExist: return None def get_subquery(self): \"\"\" Generates SQL like the following:", "deleted @classmethod def get_earliest_created_at(cls): \"\"\"Get the earliest created_at in any tweet chunk.\"\"\" results", "query.filter(node__parent=self) # Aggregate fields query = query.annotate(count=models.Count('id')) return query def get_most_popular_child_chunk_by_country2(self, country_limit=10): \"\"\"", "logger.info(\" ... deleted batch of %d\", batch_deleted) batch_deleted = cls.delete_orphans(batch_size=batch_size) total_deleted += batch_deleted", "> 0: logger.info(\" ...orphaned %d new nodes (should be 0!)\", propagated) propagated =", "functions related to your time frames that will make them easier to work", "of the combo field for each country # Since we've padded with 0s,", "\"\"\"Returns the node referenced by the given prefix and chunks.\"\"\" root = cls.get_root()", "count = chunks.count() chunk = chunks[random.randint(0, count - 1)] return chunk.tweet_text except DatabaseError:", "by chunk query = TweetChunk.objects.values('node', 'node__word') # Only with the given country, non-empty", "# Make sure this is valid country_limit = int(country_limit) subquery = self.get_subquery() #", "chunks that refer to them. Return the top countries for those chunks by", "0: # Then remove obsolete tree nodes TreeNode.cleanup_empty() else: logger.info(\"Skipping empty treenode cleanup", "superquery = \"\"\" SELECT country_node_count.tz_country, country_node_count.word, country_node_count.count FROM ({subquery}) country_node_count INNER JOIN ({maxquery})", "LEFT OUTER JOIN map_tweetchunk ON ( map_treenode.id = map_tweetchunk.node_id ) WHERE (map_tweetchunk.id IS", "time frames. 4. Implement calculate(tweets). This is where you do your work. At", "= cls.objects.aggregate(most_recent_time=models.Max('created_at')) if most_recent_time['most_recent_time'] is None: return 0 most_recent_time = most_recent_time['most_recent_time'] time_cutoff =", "guaranteed safe to work with for a few minutes # It won't be", "= [ ['tz_country', 'node'], ] id = PositiveBigAutoField(primary_key=True) node = models.ForeignKey(TreeNode, related_name='chunks') twitter_id", "created: self.nodes_added += 1 return node, created def calculate(self, tweets): self.tweet_count = len(tweets)", "frames that will make them easier to work with. \"\"\" # Analyze every", "tweet, if it exists\"\"\" for root in roots: if root.word in tweet.text: return", "= query.first() return result['node__word'], result['count'] except ObjectDoesNotExist: return None def get_subquery(self): \"\"\" Generates", "%s ORDER BY id LIMIT %s\", [oldest_date, batch_size]) return deleted @classmethod def get_earliest_created_at(cls):", "of the stupid index from South db = DatabaseOperations(None) index_name = db.create_index_name('map_tweetchunk', ['tz_country',", "middle. 
splitquery = \"\"\" SELECT sub2.tz_country, SUBSTRING(sub2.maxcombo, {padding} + 2) AS word, CAST(SUBSTRING(sub2.maxcombo,", "timezone.now() node.save() except IntegrityError: # it was deleted while we were getting it", "MapTimeFrame.objects \\ .filter(calculated=True) \\ .aggregate(latest_start_time=models.Max('start_time')) # maybe there aren't any? if now['latest_start_time'] is", "@classmethod def cleanup_empty(cls, batch_size=10000, reset=False): # Disconnect TreeNodes without any chunks logger.info(\"Orphaning empty", "cls.objects.filter(tz_country=country_name, node=node) count = chunks.count() chunk = chunks[random.randint(0, count - 1)] return chunk.tweet_text", "if prefix_node is None: return None node = prefix_node for chunk in query_chunks:", "+= batch_orphaned if total_orphaned > 0: logger.info(\"Orphaned %d empty nodes\", total_orphaned) else: logger.info(\"No", "= 10 # Get the name of the stupid index from South db", "Implement calculate(tweets). This is where you do your work. At the end, make", "work with. \"\"\" # Analyze every 15 seconds DURATION = timedelta(seconds=60) # Simply", "time frame tweet_count = models.IntegerField(default=0) nodes_added = models.IntegerField(default=0) chunks_added = models.IntegerField(default=0) node_cache_hits =", "ORDER BY id LIMIT %s \"\"\" params = [cls.ROOT_NODES, batch_size] cursor = connection.cursor()", "limit=country_limit) print superquery cursor = connection.cursor() cursor.execute(superquery) return cursor.fetchall() @classmethod def get_root(cls): try:", "can optionally be provided for caching values across calls. \"\"\" if cache is", "= TweetChunk.objects.values('node', 'node__word') # Only with the given country, non-empty words query =", "with the given country, non-empty words query = query.exclude(node__word='') query = query.filter(tz_country=country) #", "else: logger.info(\"No empty nodes to orphan\") logger.info(\"Orphaning children of orphans... (should not be", "= cls.delete_orphans(batch_size=batch_size) total_deleted = batch_deleted while batch_deleted == batch_size: logger.info(\" ... deleted batch", "= node.get_child(chunk.lower()) if not node: return None return node @classmethod def orphan_empty_nodes(cls, batch_size=10000):", "non-empty words query = query.exclude(node__word='') query = query.filter(tz_country=country) # Only chunks belonging to", "combo FROM map_tweetchunk -- USE INDEX ({index_name}) LEFT OUTER JOIN map_treenode ON (", "it is an old node, there is a risk that the cleanup #", "of chunks before the given date\"\"\" cursor = connection.cursor() deleted = cursor.execute(\"DELETE FROM", "%s) LIMIT %s \"\"\" query = \"\"\" UPDATE map_treenode JOIN ( {subset_query} )", "batch_orphaned if total_orphaned > 0: logger.info(\"Orphaned %d empty nodes\", total_orphaned) else: logger.info(\"No empty", "most_recent_time = cls.objects.aggregate(most_recent_time=models.Max('created_at')) if most_recent_time['most_recent_time'] is None: return 0 most_recent_time = most_recent_time['most_recent_time'] time_cutoff", "%d orphans\", total_deleted) else: logger.info(\"No orphans to delete\") def get_top_chunk_countries_for_children(self, limit=10): \"\"\" Look", "won't be deleted by cleanup before we create its TweetChunk. node, created =", "padded with 0s, alphabetic max is the same as numeric max maxquery =", "tweets with that word. # Concatenate the tweet count to the word subquery", "with time frames. 4. Implement calculate(tweets). 
This is where you do your work.", "self.get_tree_node(parent=parent, word=chunk, cache=node_cache) country = user_tz_map.get(tweet.user_time_zone, None) if country is None: country =", "South db = DatabaseOperations(None) index_name = db.create_index_name('map_tweetchunk', ['tz_country', 'node_id']) # Find the words", ") as combo FROM map_tweetchunk -- USE INDEX ({index_name}) LEFT OUTER JOIN map_treenode", "return tweets def cleanup(self): if self.id % 3 == 0: # Then remove", "are working with it. # Setting created_at makes it impossible for it to", "created = TreeNode.objects.get_or_create(parent=parent, word=word) if not created: # If it is an old", "def get_child(self, word): child = list(self.children.filter(word=word)[:1]) if child: return child[0] return None @classmethod", "'node_id']) # Find the words following this node for every # country, and", "%s\", [oldest_date, batch_size]) return deleted @classmethod def get_earliest_created_at(cls): \"\"\"Get the earliest created_at in", "nodes that have no associated TweetChunks \"\"\" return cls.objects.filter(chunks=None).exclude(pk__in=cls.ROOT_NODES) @classmethod def follow_chunks(cls, prefix,", "null=True, blank=True, related_name='children') word = models.CharField(max_length=150) created_at = models.DateTimeField(auto_now_add=True, default=None, null=True, blank=True) def", "sub.tz_country, MAX(sub.count) AS max_count FROM ({subquery}) sub GROUP BY tz_country ORDER BY max_count", "deleted by cleanup before we create its TweetChunk. node, created = self.get_tree_node(parent=parent, word=chunk,", "JOIN map_tweetchunk ON ( map_treenode.id = map_tweetchunk.node_id ) WHERE (map_tweetchunk.id IS NULL) AND", "TweetChunk.cleanup() TreeNode.cleanup_orphans() @classmethod def get_stream_memory_cutoff(cls): \"\"\"Need to override this because we have tons", "( country_node_count.count = countrymax.max_count AND country_node_count.tz_country != '' AND country_node_count.word != '' )", "delete old tweet chunks TweetChunk.cleanup() TreeNode.cleanup_orphans() @classmethod def get_stream_memory_cutoff(cls): \"\"\"Need to override this", "get_empty_nodes(cls): \"\"\" Return a queryset for all the tree nodes that have no", "tweet count in this time frame tweet_count = models.IntegerField(default=0) nodes_added = models.IntegerField(default=0) chunks_added", "db db.reset_queries() if total_deleted > 0: logger.info(\"Deleted %d orphans\", total_deleted) else: logger.info(\"No orphans", "print superquery cursor = connection.cursor() cursor.execute(superquery) return cursor.fetchall() @classmethod def get_root(cls): try: return", "db = DatabaseOperations(None) index_name = db.create_index_name('map_tweetchunk', ['tz_country', 'node_id']) # Find the words following", "batch_deleted if reset and settings.DEBUG: # Prevent apparent memory leaks # https://docs.djangoproject.com/en/dev/faq/models/#why-is-django-leaking-memory from", "country, return the top 10 country-word-counts. \"\"\" # Make sure this is valid", "index_name=index_name) # Now select the max of the combo field for each country", "countrymax ON (countrymax.tz_country = country_node_count.tz_country) WHERE ( country_node_count.count = countrymax.max_count AND country_node_count.tz_country !=", "class Meta: index_together = [ ['parent', 'word'], ['created_at', 'parent'], ] ROOT_NODES = [1,", "return root return None def get_tree_node(self, parent, word, cache=None): \"\"\" Returns a tree", "# maybe there aren't any? 
if now['latest_start_time'] is None: return # Preserve some", "self.check_prefix(tweet, roots) if not root: continue rh = tweet.text.split(root.word, 1)[1] rh = rh.lower()", "reset=False): # Get the most recently finished map time frame now = MapTimeFrame.objects", "cleanup(cls, batch_size=10000, reset=False): cls.cleanup_empty(batch_size=batch_size, reset=reset) cls.cleanup_orphans(batch_size=batch_size, reset=reset) @classmethod def cleanup_empty(cls, batch_size=10000, reset=False): #", "strictly 1:1 with time frames. 4. Implement calculate(tweets). This is where you do", "models.CharField(max_length=150) created_at = models.DateTimeField(auto_now_add=True, default=None, null=True, blank=True) def get_child(self, word): child = list(self.children.filter(word=word)[:1])", "Return a queryset for all the tree nodes that have no associated TweetChunks", "the count and the word # The word is substring(maxcombo, padding+2) because #", "for the empty country) maxquery = \"\"\" SELECT sub.tz_country, MAX(sub.count) AS max_count FROM", "Return the top countries for those chunks by frequency. Returns a list. \"\"\"", "ObjectDoesNotExist from django.db import models, connection, transaction, IntegrityError, DatabaseError from django.utils import timezone", "word. # Concatenate the tweet count to the word subquery = \"\"\" SELECT", "country_node_count.word != '' ) ORDER BY country_node_count.count DESC LIMIT {limit} \"\"\".format(subquery=subquery.query, maxquery=maxquery, limit=country_limit)", "except IntegrityError: # it was deleted while we were getting it continue #", "period. node.created_at = timezone.now() node.save() except IntegrityError: # it was deleted while we", "in tweet tree\") prefix_node = root.get_child(prefix) if prefix_node is None: return None node", "if total_orphaned > 0: logger.info(\"Orphaned %d empty nodes\", total_orphaned) else: logger.info(\"No empty nodes", "related to your time frames that will make them easier to work with.", "any Tweet Chunk earliest_created_at = TweetChunk.get_earliest_created_at() if earliest_created_at is not None: return min(earliest_created_at,", "find the word following this one that was most commonly used - For", "node=node, twitter_id=tweet.tweet_id, tweet_text=tweet.text, created_at=tweet.created_at, tz_country=country)) parent = node depth += 1 TweetChunk.objects.bulk_create(new_tweet_chunks) self.chunks_added", "created = self.get_tree_node(parent=parent, word=chunk, cache=node_cache) country = user_tz_map.get(tweet.user_time_zone, None) if country is None:", "the total tweet count in this time frame tweet_count = models.IntegerField(default=0) nodes_added =", "FROM ({subquery}) sub GROUP BY sub.tz_country ORDER BY maxcombo DESC LIMIT %s \"\"\".format(subquery=subquery)", "count query = query.order_by('-chunk_count') # Aggregate fields query = query.annotate(chunk_count=models.Count('id')) # Limit query", "total_orphaned > 0: logger.info(\"Orphaned %d empty nodes\", total_orphaned) else: logger.info(\"No empty nodes to", "query = query[:limit] print query.query return [r['tz_country'] for r in query] def get_most_popular_child_chunk_in(self,", "parent = root depth = 0 for chunk in chunks: if chunk ==", "django.utils import timezone import random from south.db.generic import DatabaseOperations from twitter_stream.fields import PositiveBigAutoField,", "each country # Since we've padded with 0s, alphabetic max is the same", "that word. 
# Concatenate the tweet count to the word subquery = \"\"\"", "1 to allow for the empty country) maxquery = \"\"\" SELECT sub.tz_country, MAX(sub.count)", "models, connection, transaction, IntegrityError, DatabaseError from django.utils import timezone import random from south.db.generic", "for each country # Since we've padded with 0s, alphabetic max is the", "({maxquery}) sub2 \"\"\".format(maxquery=maxquery, padding=count_padding) print splitquery cursor = connection.cursor() cursor.execute(splitquery, [self.id, country_limit]) return", "to get the <limit> most recent frames. \"\"\" query = cls.get_in_range(calculated=True) \\ .order_by('-start_time')", "most recently finished map time frame now = MapTimeFrame.objects \\ .filter(calculated=True) \\ .aggregate(latest_start_time=models.Max('start_time'))", "while we're doing these operations return None @classmethod def delete_before(cls, oldest_date, batch_size=10000): \"\"\"Delete", "you do your work. At the end, make sure to call self.mark_done(tweets) 5.", "JOIN ( {subset_query} ) subset ON map_treenode.id = subset.id SET map_treenode.parent_id = NULL", "( AND `map_treenode`.`parent_id` = MY_ID_GOES HERE ) GROUP BY `map_tweetchunk`.`tz_country`, `map_treenode`.`word` \"\"\" #", "for each country # that had the maximum count. # Further, filters out", "= None node_cache = {} new_tweet_chunks = [] for tweet in tweets: root", "chunks belonging to our children query = query.filter(node__parent=self) # Aggregate fields query =", "count in this time frame tweet_count = models.IntegerField(default=0) nodes_added = models.IntegerField(default=0) chunks_added =", "WHERE (parent_id IS NULL) AND NOT (id IN %s) ORDER BY id LIMIT", "be needed)\") propagated = cls.propagate_orphanage() while propagated > 0: logger.info(\" ...orphaned %d new", "USE INDEX ({index_name}) LEFT OUTER JOIN map_treenode ON ( map_tweetchunk.node_id = map_treenode.id )", "in this time frame tweet_count = models.IntegerField(default=0) nodes_added = models.IntegerField(default=0) chunks_added = models.IntegerField(default=0)", "IN %s) LIMIT %s \"\"\" query = \"\"\" UPDATE map_treenode JOIN ( {subset_query}", "%d\", batch_orphaned) batch_orphaned = cls.orphan_empty_nodes(batch_size=batch_size) total_orphaned += batch_orphaned if total_orphaned > 0: logger.info(\"Orphaned", "trick count_padding = 10 # Get the name of the stupid index from", "1) # Template for the overall query # It finds the actual chunk", "= query.exclude(tz_country='') # Only chunks belonging to our children query = query.filter(node__parent=self) #", "twitter_stream.fields import PositiveBigAutoField, PositiveBigAutoForeignKey from swapper import get_model_name from datetime import timedelta import", "settings from twitter_feels.libs.twitter_analysis import TweetTimeFrame import logging import re logger = logging.getLogger('map') class", "anywhere - In every country, find the word following this one that was", "query = query.exclude(node__word='') query = query.filter(tz_country=country) # Only chunks belonging to our children", "in any tweet chunk.\"\"\" results = cls.objects.aggregate(earliest_created_at=models.Min('created_at')) return results['earliest_created_at'] @classmethod def cleanup(cls, batch_size=10000,", "count to the word subquery = \"\"\" SELECT map_tweetchunk.tz_country, CONCAT( LPAD(COUNT(DISTINCT map_tweetchunk.id), {padding},", "AND NOT (id IN %s) ORDER BY id LIMIT %s \"\"\" params =", "(same as the time frame duration) 3. 
Add any fields you need to", "({subquery}) country_node_count INNER JOIN ({maxquery}) countrymax ON (countrymax.tz_country = country_node_count.tz_country) WHERE ( country_node_count.count", "return the top 10 country-word-counts. \"\"\" # How much padding to add to", "JOIN map_treenode ON ( map_tweetchunk.node_id = map_treenode.id ) WHERE map_treenode.parent_id = %s AND", "= user_tz_map.get(tweet.user_time_zone, None) if country is None: country = '' else: country =", "{subset_query} ) subset ON map_treenode.id = subset.id SET map_treenode.parent_id = NULL \"\"\".format(subset_query=subset_query) params", "[r['tz_country'] for r in query] def get_most_popular_child_chunk_in(self, country): \"\"\" Get the chunk following", "self.nodes_added += 1 return node, created def calculate(self, tweets): self.tweet_count = len(tweets) tzcountries", "r in tzcountries) user_tz_map[None] = None node_cache = {} new_tweet_chunks = [] for", "logger.info(\"Orphaning children of orphans... (should not be needed)\") propagated = cls.propagate_orphanage() while propagated", "logger.info(\"No empty nodes to orphan\") logger.info(\"Orphaning children of orphans... (should not be needed)\")", "not root: raise Exception(\"No root node in tweet tree\") prefix_node = root.get_child(prefix) if", "the stupid index from South db = DatabaseOperations(None) index_name = db.create_index_name('map_tweetchunk', ['tz_country', 'node_id'])", "IS NULL) AND NOT (id IN %s) ORDER BY id LIMIT %s \"\"\"", "[oldest_date, batch_size]) return deleted @classmethod def get_earliest_created_at(cls): \"\"\"Get the earliest created_at in any", "related_name='children') word = models.CharField(max_length=150) created_at = models.DateTimeField(auto_now_add=True, default=None, null=True, blank=True) def get_child(self, word):", "NOT (map_treenode.id IN %s) LIMIT %s \"\"\" query = \"\"\" UPDATE map_treenode JOIN", "- 1)] return chunk.tweet_text except DatabaseError: # things could potentially disappear while we're", "SQL like the following: SELECT `map_tweetchunk`.`tz_country`, `map_treenode`.`word`, COUNT(`map_tweetchunk`.`id`) AS `count` FROM `map_tweetchunk` INNER", "= models.IntegerField(default=0) node_cache_size = models.IntegerField(default=0) def check_prefix(self, tweet, roots): \"\"\"Returns a root in", "@classmethod def get_stream_memory_cutoff(cls): \"\"\"Need to override this because we have tons of data", "Aggregate fields query = query.annotate(count=models.Count('id')) return query def get_most_popular_child_chunk_by_country2(self, country_limit=10): \"\"\" Get tuples", "= models.DateTimeField(db_index=True) tz_country = models.CharField(max_length=32, blank=True) @classmethod def get_example_tweet(cls, country_name, node): \"\"\"Returns a", "chunks TweetChunk.cleanup() TreeNode.cleanup_orphans() @classmethod def get_stream_memory_cutoff(cls): \"\"\"Need to override this because we have", "sub2.tz_country, SUBSTRING(sub2.maxcombo, {padding} + 2) AS word, CAST(SUBSTRING(sub2.maxcombo, 1, {padding}) AS UNSIGNED) AS", "the most recently finished map time frame now = MapTimeFrame.objects \\ .filter(calculated=True) \\", "> settings.MAX_DEPTH: break # This node is guaranteed safe to work with for", "end, make sure to call self.mark_done(tweets) 5. Add any additional functions related to", "({subquery}) sub GROUP BY tz_country ORDER BY max_count DESC LIMIT {limit} \"\"\".format(subquery=subquery.query, limit=country_limit", "not strictly 1:1 with time frames. 4. Implement calculate(tweets). 
This is where you", "batch_orphaned) batch_orphaned = cls.orphan_empty_nodes(batch_size=batch_size) total_orphaned += batch_orphaned if total_orphaned > 0: logger.info(\"Orphaned %d", "is not None: return min(earliest_created_at, baseline_cutoff) else: return baseline_cutoff @classmethod def get_most_recent(cls, limit=20):", "frequency. Returns a list. \"\"\" # Group by tz_country query = TweetChunk.objects.values('tz_country') #", "\"\"\" SELECT sub2.tz_country, SUBSTRING(sub2.maxcombo, {padding} + 2) AS word, CAST(SUBSTRING(sub2.maxcombo, 1, {padding}) AS", "tz_country = models.CharField(max_length=32, blank=True) @classmethod def get_example_tweet(cls, country_name, node): \"\"\"Returns a Tweet for", "doing these operations return None @classmethod def delete_before(cls, oldest_date, batch_size=10000): \"\"\"Delete a batch", "tweet chunk.\"\"\" results = cls.objects.aggregate(earliest_created_at=models.Min('created_at')) return results['earliest_created_at'] @classmethod def cleanup(cls, batch_size=10000, reset=False): #", "5. Add any additional functions related to your time frames that will make", "given country, non-empty words query = query.exclude(node__word='') query = query.filter(tz_country=country) # Only chunks", "propagated > 0: logger.info(\" ...orphaned %d new nodes (should be 0!)\", propagated) propagated", "default=None, null=True) created_at = models.DateTimeField(db_index=True) tz_country = models.CharField(max_length=32, blank=True) @classmethod def get_example_tweet(cls, country_name,", "ObjectDoesNotExist: return None def get_subquery(self): \"\"\" Generates SQL like the following: SELECT `map_tweetchunk`.`tz_country`,", "of orphans... (should not be needed)\") propagated = cls.propagate_orphanage() while propagated > 0:", "BY id LIMIT %s\", [oldest_date, batch_size]) return deleted @classmethod def get_earliest_created_at(cls): \"\"\"Get the", "root in roots: if root.word in tweet.text: return root return None def get_tree_node(self,", "delete_orphans(cls, batch_size=10000): \"\"\"Delete a batch of orphans\"\"\" query = \"\"\" DELETE FROM map_treenode", "query = TweetChunk.objects.values('tz_country', 'node__word') # Only with the given country, non-empty words #", "dict((r.user_time_zone, r) for r in tzcountries) user_tz_map[None] = None node_cache = {} new_tweet_chunks", "cls.orphan_empty_nodes(batch_size=batch_size) total_orphaned = batch_orphaned while batch_orphaned == batch_size: logger.info(\" ... orphaned batch of", "@classmethod def cleanup_orphans(cls, batch_size=10000, reset=False): logger.info(\"Deleting orphans...\") batch_deleted = cls.delete_orphans(batch_size=batch_size) total_deleted = batch_deleted", "node for every country # up to the limit (plus 1 to allow", "to calculate. You can also store data on separate models, if your data", "for chunk in query_chunks: node = node.get_child(chunk.lower()) if not node: return None return", "+ 1) # Template for the overall query # It finds the actual", "query.annotate(count=models.Count('id')) print query.query # Limit try: result = query.first() return result['node__word'], result['count'] except", "a few minutes # It won't be deleted by cleanup before we create", "batch_size=batch_size) total_deleted = batch_deleted while batch_deleted == batch_size: logger.info(\" ... 
deleted batch of", "% 3 == 0: # Then remove obsolete tree nodes TreeNode.cleanup_empty() else: logger.info(\"Skipping", "def cleanup(cls, batch_size=10000, reset=False): cls.cleanup_empty(batch_size=batch_size, reset=reset) cls.cleanup_orphans(batch_size=batch_size, reset=reset) @classmethod def cleanup_empty(cls, batch_size=10000, reset=False):", "['created_at', 'parent'], ] ROOT_NODES = [1, 2, 3, 4, 5, 6, 7, 8,", "django import db db.reset_queries() if total_deleted > 0: logger.info(\"Deleted %d orphans\", total_deleted) else:", "node for the parent and word, and whether or not it is new.", "It finds the actual chunk for each country # that had the maximum", "cleanup(self): if self.id % 3 == 0: # Then remove obsolete tree nodes", "5, 6, 7, 8, 9, 10] parent = models.ForeignKey('self', null=True, blank=True, related_name='children') word", "for r in query] def get_most_popular_child_chunk_in(self, country): \"\"\" Get the chunk following this", "query = query.annotate(chunk_count=models.Count('id')) # Limit query = query[:limit] print query.query return [r['tz_country'] for", "chunk query = TweetChunk.objects.values('node', 'node__word') # Only with the given country, non-empty words", "DESC LIMIT {limit} \"\"\".format(subquery=subquery.query, maxquery=maxquery, limit=country_limit) print superquery cursor = connection.cursor() cursor.execute(superquery) return", "batch_deleted == batch_size: logger.info(\" ... deleted batch of %d\", batch_deleted) batch_deleted = cls.delete_orphans(batch_size=batch_size)", "= map_treenode.id ) WHERE map_treenode.parent_id = %s AND map_tweetchunk.tz_country != '' AND map_treenode.word", "total_deleted > 0: logger.info(\"Deleted %d tweet chunks\", total_deleted) else: logger.info(\"No chunks to delete\")", "now['latest_start_time'] - settings.KEEP_DATA_FOR logger.info(\"Cleaning chunks from before %s...\", trailing_edge_date) batch_deleted = TweetChunk.delete_before(trailing_edge_date, batch_size=batch_size)", "NOT NULL) AND (map_treenode.created_at < %s) AND NOT (map_treenode.id IN %s) LIMIT %s", "connection.cursor() cursor.execute(superquery) return cursor.fetchall() @classmethod def get_root(cls): try: return cls.objects.get(id=1) except ObjectDoesNotExist: return", "is where you do your work. At the end, make sure to call", "it continue # we got one if cache is not None: cache[(parent, word)]", "params = [time_cutoff, cls.ROOT_NODES, batch_size] cursor = connection.cursor() return cursor.execute(query, params) @classmethod def", "duration) 3. Add any fields you need to calculate. You can also store", "map_tweetchunk WHERE created_at <= %s ORDER BY id LIMIT %s\", [oldest_date, batch_size]) return", "cls.objects.aggregate(most_recent_time=models.Max('created_at')) if most_recent_time['most_recent_time'] is None: return 0 most_recent_time = most_recent_time['most_recent_time'] time_cutoff = most_recent_time", "calculate. 
You can also store data on separate models, if your data is", "tweet.text.split(root.word, 1)[1] rh = rh.lower() chunks = re.split('[*,.!:\"\\s;()/@#]+|\\'[\\W]|\\?+', rh) parent = root depth", "tweets # with chunks following this node for every country # up to", "None) if country is None: country = '' else: country = country.country new_tweet_chunks.append(TweetChunk(", "the earliest tweet referenced in any Tweet Chunk earliest_created_at = TweetChunk.get_earliest_created_at() if earliest_created_at", "= [time_cutoff, cls.ROOT_NODES, batch_size] cursor = connection.cursor() return cursor.execute(query, params) @classmethod def propagate_orphanage(cls):", "batch_deleted = TweetChunk.delete_before(trailing_edge_date, batch_size=batch_size) total_deleted = batch_deleted while batch_deleted == batch_size: logger.info(\" ...", "country and node.\"\"\" try: chunks = cls.objects.filter(tz_country=country_name, node=node) count = chunks.count() chunk =", "cache is not None and (parent, word) in cache: self.node_cache_hits += 1 return", "None @classmethod def get_empty_nodes(cls): \"\"\" Return a queryset for all the tree nodes", "[] for tweet in tweets: root = self.check_prefix(tweet, roots) if not root: continue", "as the time frame duration) 3. Add any fields you need to calculate.", "{} new_tweet_chunks = [] for tweet in tweets: root = self.check_prefix(tweet, roots) if", "subset ON map_treenode.id = subset.id SET map_treenode.parent_id = NULL \"\"\".format(subset_query=subset_query) params = [time_cutoff,", "to your time frames that will make them easier to work with. \"\"\"", "query # It finds the actual chunk for each country # that had", "= node if created: self.nodes_added += 1 return node, created def calculate(self, tweets):", "not be needed)\") propagated = cls.propagate_orphanage() while propagated > 0: logger.info(\" ...orphaned %d", "chunk for each country # that had the maximum count. # Further, filters", "{padding}, '0'), '-', map_treenode.word ) as combo FROM map_tweetchunk -- USE INDEX ({index_name})", "AND country_node_count.word != '' ) ORDER BY country_node_count.count DESC LIMIT {limit} \"\"\".format(subquery=subquery.query, maxquery=maxquery,", "tweet chunks\", total_deleted) else: logger.info(\"No chunks to delete\") class MapTimeFrame(TweetTimeFrame): \"\"\" A basic", "1-indexed and we added a '-' character in the middle. splitquery = \"\"\"", "Tweet for the given country and node.\"\"\" try: chunks = cls.objects.filter(tz_country=country_name, node=node) count", "DatabaseError from django.utils import timezone import random from south.db.generic import DatabaseOperations from twitter_stream.fields", ") subset ON map_treenode.id = subset.id SET map_treenode.parent_id = NULL \"\"\".format(subset_query=subset_query) params =", "# Only chunks belonging to our children query = query.filter(node__parent=self) # Aggregate fields", "# If it is an old node, there is a risk that the", "query.filter(node__parent=self) # Order by count query = query.order_by('-chunk_count') # Aggregate fields query =", "in cache: self.node_cache_hits += 1 return cache[(parent, word)], False else: # We want", "delete\") class MapTimeFrame(TweetTimeFrame): \"\"\" A basic time frame for demo analysis. 1. 
Extend", "class TreeNode(models.Model): class Meta: index_together = [ ['parent', 'word'], ['created_at', 'parent'], ] ROOT_NODES", "9, 10] parent = models.ForeignKey('self', null=True, blank=True, related_name='children') word = models.CharField(max_length=150) created_at =", "in chunks: if chunk == \"\": continue if depth > settings.MAX_DEPTH: break #", "`map_treenode`.`id`) WHERE ( AND `map_treenode`.`parent_id` = MY_ID_GOES HERE ) GROUP BY `map_tweetchunk`.`tz_country`, `map_treenode`.`word`", "# Find the words following this node for every # country, and the", "tweets in the given country. \"\"\" # Group by chunk query = TweetChunk.objects.values('node',", "+ 2) AS word, CAST(SUBSTRING(sub2.maxcombo, 1, {padding}) AS UNSIGNED) AS `count` FROM ({maxquery})", "of this node. Look at the chunks that refer to them. Return the", "chunks = cls.objects.filter(tz_country=country_name, node=node) count = chunks.count() chunk = chunks[random.randint(0, count - 1)]", "PositiveBigAutoField, PositiveBigAutoForeignKey from swapper import get_model_name from datetime import timedelta import settings from", "finished map time frame now = MapTimeFrame.objects \\ .filter(calculated=True) \\ .aggregate(latest_start_time=models.Max('start_time')) # maybe", "numeric max maxquery = \"\"\" SELECT sub.tz_country, MAX(sub.combo) as maxcombo FROM ({subquery}) sub", "= \"\"\" SELECT map_tweetchunk.tz_country, CONCAT( LPAD(COUNT(DISTINCT map_tweetchunk.id), {padding}, '0'), '-', map_treenode.word ) as", "chunks_added = models.IntegerField(default=0) node_cache_hits = models.IntegerField(default=0) node_cache_size = models.IntegerField(default=0) def check_prefix(self, tweet, roots):", "'' AND map_treenode.word != '' GROUP BY map_tweetchunk.tz_country, map_tweetchunk.node_id \"\"\".format(padding=count_padding, index_name=index_name) # Now", "the limit (plus 1 to allow for the empty country) maxquery = \"\"\"", "= DatabaseOperations(None) index_name = db.create_index_name('map_tweetchunk', ['tz_country', 'node_id']) # Find the words following this", "time frame trailing_edge_date = now['latest_start_time'] - settings.KEEP_DATA_FOR logger.info(\"Cleaning chunks from before %s...\", trailing_edge_date)", "map_tweetchunk.tz_country != '' AND map_treenode.word != '' GROUP BY map_tweetchunk.tz_country, map_tweetchunk.node_id \"\"\".format(padding=count_padding, index_name=index_name)", "brief period. node.created_at = timezone.now() node.save() except IntegrityError: # it was deleted while", "if chunk == \"\": continue if depth > settings.MAX_DEPTH: break # This node", "# procedure will delete it while we are working with it. # Setting", "\"\"\" SELECT country_node_count.tz_country, country_node_count.word, country_node_count.count FROM ({subquery}) country_node_count INNER JOIN ({maxquery}) countrymax ON", "country, non-empty words # We'll do this later instead # query = query.exclude(node__word='')", "baseline_cutoff @classmethod def get_most_recent(cls, limit=20): \"\"\" A handy static method to get the", "to delete\") class MapTimeFrame(TweetTimeFrame): \"\"\" A basic time frame for demo analysis. 
1.", "the word subquery = \"\"\" SELECT map_tweetchunk.tz_country, CONCAT( LPAD(COUNT(DISTINCT map_tweetchunk.id), {padding}, '0'), '-',", "by count query = query.order_by('-chunk_count') # Aggregate fields query = query.annotate(chunk_count=models.Count('id')) # Limit", "created_at makes it impossible for it to be deleted for a brief period.", "({index_name}) LEFT OUTER JOIN map_treenode ON ( map_tweetchunk.node_id = map_treenode.id ) WHERE map_treenode.parent_id", "limit (plus 1 to allow for the empty country) maxquery = \"\"\" SELECT", "on separate models, if your data is not strictly 1:1 with time frames.", "else: # We want to keep trying to grab this node until we", "models.DateTimeField(auto_now_add=True, default=None, null=True, blank=True) def get_child(self, word): child = list(self.children.filter(word=word)[:1]) if child: return", "Then remove obsolete tree nodes TreeNode.cleanup_empty() else: logger.info(\"Skipping empty treenode cleanup on this", "not node: return None return node @classmethod def orphan_empty_nodes(cls, batch_size=10000): most_recent_time = cls.objects.aggregate(most_recent_time=models.Max('created_at'))", "fields query = query.annotate(count=models.Count('id')) print query.query # Limit try: result = query.first() return", "results['earliest_created_at'] @classmethod def cleanup(cls, batch_size=10000, reset=False): # Get the most recently finished map", "old tweet chunks TweetChunk.cleanup() TreeNode.cleanup_orphans() @classmethod def get_stream_memory_cutoff(cls): \"\"\"Need to override this because", "treenode cleanup on this frame\") # First delete old tweet chunks TweetChunk.cleanup() TreeNode.cleanup_orphans()", "query = \"\"\" UPDATE map_treenode JOIN ( {subset_query} ) subset ON map_treenode.id =", "1)] return chunk.tweet_text except DatabaseError: # things could potentially disappear while we're doing", "and we added a '-' character in the middle. splitquery = \"\"\" SELECT", "query.order_by('-count') # Aggregate fields query = query.annotate(count=models.Count('id')) print query.query # Limit try: result", "10 # Get the name of the stupid index from South db =", "user_tz_map[None] = None node_cache = {} new_tweet_chunks = [] for tweet in tweets:", "Look at all the words that followed this one anywhere - In every", "results = cls.objects.aggregate(earliest_created_at=models.Min('created_at')) return results['earliest_created_at'] @classmethod def cleanup(cls, batch_size=10000, reset=False): # Get the", "4. Implement calculate(tweets). This is where you do your work. At the end,", "count. # Further, filters out empty words and countries. superquery = \"\"\" SELECT", "get_earliest_created_at(cls): \"\"\"Get the earliest created_at in any tweet chunk.\"\"\" results = cls.objects.aggregate(earliest_created_at=models.Min('created_at')) return", "if child: return child[0] return None @classmethod def get_empty_nodes(cls): \"\"\" Return a queryset", "default=None, null=True, blank=True) def get_child(self, word): child = list(self.children.filter(word=word)[:1]) if child: return child[0]", "= self.get_tree_node(parent=parent, word=chunk, cache=node_cache) country = user_tz_map.get(tweet.user_time_zone, None) if country is None: country", "%d new nodes (should be 0!)\", propagated) propagated = cls.propagate_orphanage() @classmethod def cleanup_orphans(cls,", "in query_chunks: node = node.get_child(chunk.lower()) if not node: return None return node @classmethod", "children of this node. Look at the chunks that refer to them. 
Return", "\"\"\" SELECT sub.tz_country, MAX(sub.count) AS max_count FROM ({subquery}) sub GROUP BY tz_country ORDER", "node_cache = {} new_tweet_chunks = [] for tweet in tweets: root = self.check_prefix(tweet,", "node. Look at the chunks that refer to them. Return the top countries", "orphans...\") batch_deleted = cls.delete_orphans(batch_size=batch_size) total_deleted = batch_deleted while batch_deleted == batch_size: logger.info(\" ...", "map_tweetchunk.node_id \"\"\".format(padding=count_padding, index_name=index_name) # Now select the max of the combo field for", "following this node for every # country, and the number of tweets with", "memory leaks # https://docs.djangoproject.com/en/dev/faq/models/#why-is-django-leaking-memory from django import db db.reset_queries() if total_deleted > 0:", "top 10 country-word-counts. \"\"\" # How much padding to add to counts for", "def delete_before(cls, oldest_date, batch_size=10000): \"\"\"Delete a batch of chunks before the given date\"\"\"", "tweet count to the word subquery = \"\"\" SELECT map_tweetchunk.tz_country, CONCAT( LPAD(COUNT(DISTINCT map_tweetchunk.id),", "import re logger = logging.getLogger('map') class TreeNode(models.Model): class Meta: index_together = [ ['parent',", "blank=True) @classmethod def get_example_tweet(cls, country_name, node): \"\"\"Returns a Tweet for the given country", "character in the middle. splitquery = \"\"\" SELECT sub2.tz_country, SUBSTRING(sub2.maxcombo, {padding} + 2)", "need to calculate. You can also store data on separate models, if your", "by cleanup before we create its TweetChunk. node, created = self.get_tree_node(parent=parent, word=chunk, cache=node_cache)", "get one while True: try: with transaction.atomic(): # Get or create a node", "MAX(sub.combo) as maxcombo FROM ({subquery}) sub GROUP BY sub.tz_country ORDER BY maxcombo DESC", "TweetChunk.delete_before(trailing_edge_date, batch_size=batch_size) total_deleted = batch_deleted while batch_deleted == batch_size: logger.info(\" ... deleted batch", "= prefix_node for chunk in query_chunks: node = node.get_child(chunk.lower()) if not node: return", "https://docs.djangoproject.com/en/dev/faq/models/#why-is-django-leaking-memory from django import db db.reset_queries() if total_deleted > 0: logger.info(\"Deleted %d orphans\",", "return None def get_subquery(self): \"\"\" Generates SQL like the following: SELECT `map_tweetchunk`.`tz_country`, `map_treenode`.`word`,", "cache=node_cache) country = user_tz_map.get(tweet.user_time_zone, None) if country is None: country = '' else:", "node, there is a risk that the cleanup # procedure will delete it", "map_treenode.id = subset.id SET map_treenode.parent_id = NULL \"\"\".format(subset_query=subset_query) params = [time_cutoff, cls.ROOT_NODES, batch_size]", "= batch_deleted while batch_deleted == batch_size: logger.info(\" ... deleted batch of %d\", batch_deleted)", "Add any additional functions related to your time frames that will make them", "# Now select the max of the combo field for each country #", "+= batch_deleted if reset and settings.DEBUG: # Prevent apparent memory leaks # https://docs.djangoproject.com/en/dev/faq/models/#why-is-django-leaking-memory", "\"\"\" Get the chunk following this node with the most tweets in the", "the top countries for those chunks by frequency. Returns a list. 
\"\"\" #", "minutes # It won't be deleted by cleanup before we create its TweetChunk.", "chunks.count() chunk = chunks[random.randint(0, count - 1)] return chunk.tweet_text except DatabaseError: # things", "for the concat/max/split trick count_padding = 10 # Get the name of the", "min(earliest_created_at, baseline_cutoff) else: return baseline_cutoff @classmethod def get_most_recent(cls, limit=20): \"\"\" A handy static", "0s, alphabetic max is the same as numeric max maxquery = \"\"\" SELECT", "override this because we have tons of data that depends on tweets.\"\"\" baseline_cutoff", "cls.orphan_empty_nodes(batch_size=batch_size) total_orphaned += batch_orphaned if total_orphaned > 0: logger.info(\"Orphaned %d empty nodes\", total_orphaned)", "reset=reset) @classmethod def cleanup_empty(cls, batch_size=10000, reset=False): # Disconnect TreeNodes without any chunks logger.info(\"Orphaning", "# Limit try: result = query.first() return result['node__word'], result['count'] except ObjectDoesNotExist: return None", "batch_deleted == batch_size: logger.info(\" ... deleted batch of %d\", batch_deleted) batch_deleted = TweetChunk.delete_before(trailing_edge_date,", "cursor = connection.cursor() cursor.execute(splitquery, [self.id, country_limit]) return cursor.fetchall() def get_most_popular_child_chunk_by_country(self, country_limit=10): \"\"\" Get", "root in the tweet, if it exists\"\"\" for root in roots: if root.word", "tree node for the parent and word, and whether or not it is", "import timedelta import settings from twitter_feels.libs.twitter_analysis import TweetTimeFrame import logging import re logger", "models.CharField(max_length=32) class TweetChunk(models.Model): class Meta: index_together = [ ['tz_country', 'node'], ] id =", "our children query = query.filter(node__parent=self) # Order by count, desc query = query.order_by('-count')", "1 TweetChunk.objects.bulk_create(new_tweet_chunks) self.chunks_added += len(new_tweet_chunks) self.node_cache_size = len(node_cache) return tweets def cleanup(self): if", "= models.ForeignKey(TreeNode, related_name='chunks') twitter_id = models.BigIntegerField(default=0) tweet_text = models.CharField(max_length=250, default=None, null=True) created_at =", "def check_prefix(self, tweet, roots): \"\"\"Returns a root in the tweet, if it exists\"\"\"", "= node depth += 1 TweetChunk.objects.bulk_create(new_tweet_chunks) self.chunks_added += len(new_tweet_chunks) self.node_cache_size = len(node_cache) return", "seconds DURATION = timedelta(seconds=60) # Simply store the total tweet count in this", "return None @classmethod def delete_before(cls, oldest_date, batch_size=10000): \"\"\"Delete a batch of chunks before", "random from south.db.generic import DatabaseOperations from twitter_stream.fields import PositiveBigAutoField, PositiveBigAutoForeignKey from swapper import", "index_together = [ ['tz_country', 'node'], ] id = PositiveBigAutoField(primary_key=True) node = models.ForeignKey(TreeNode, related_name='chunks')", "2. 
Indicate how often to run the analysis (same as the time frame", "nodes...\") batch_orphaned = cls.orphan_empty_nodes(batch_size=batch_size) total_orphaned = batch_orphaned while batch_orphaned == batch_size: logger.info(\" ...", "else: logger.info(\"No chunks to delete\") class MapTimeFrame(TweetTimeFrame): \"\"\" A basic time frame for", "word = models.CharField(max_length=150) created_at = models.DateTimeField(auto_now_add=True, default=None, null=True, blank=True) def get_child(self, word): child", "> 0: logger.info(\"Deleted %d orphans\", total_deleted) else: logger.info(\"No orphans to delete\") def get_top_chunk_countries_for_children(self,", "if cache is not None: cache[(parent, word)] = node if created: self.nodes_added +=", "= \"\"\" SELECT country_node_count.tz_country, country_node_count.word, country_node_count.count FROM ({subquery}) country_node_count INNER JOIN ({maxquery}) countrymax", "0: logger.info(\"Deleted %d orphans\", total_deleted) else: logger.info(\"No orphans to delete\") def get_top_chunk_countries_for_children(self, limit=10):", "Only with the given country, non-empty words # We'll do this later instead", "country_node_count.tz_country) WHERE ( country_node_count.count = countrymax.max_count AND country_node_count.tz_country != '' AND country_node_count.word !=", "\"\"\" Returns a tree node for the parent and word, and whether or", "(map_treenode.parent_id IS NOT NULL) AND (map_treenode.created_at < %s) AND NOT (map_treenode.id IN %s)", "or not it is new. A dictionary can optionally be provided for caching", "earliest tweet referenced in any Tweet Chunk earliest_created_at = TweetChunk.get_earliest_created_at() if earliest_created_at is", "\"\"\"Get the earliest created_at in any tweet chunk.\"\"\" results = cls.objects.aggregate(earliest_created_at=models.Min('created_at')) return results['earliest_created_at']", "from django.db import models, connection, transaction, IntegrityError, DatabaseError from django.utils import timezone import", "now = MapTimeFrame.objects \\ .filter(calculated=True) \\ .aggregate(latest_start_time=models.Max('start_time')) # maybe there aren't any? if", "GROUP BY `map_tweetchunk`.`tz_country`, `map_treenode`.`word` \"\"\" # Group by country, chunk query = TweetChunk.objects.values('tz_country',", "twitter_feels.libs.twitter_analysis import TweetTimeFrame import logging import re logger = logging.getLogger('map') class TreeNode(models.Model): class", "logging.getLogger('map') class TreeNode(models.Model): class Meta: index_together = [ ['parent', 'word'], ['created_at', 'parent'], ]", "TreeNodes without any chunks logger.info(\"Orphaning empty tree nodes...\") batch_orphaned = cls.orphan_empty_nodes(batch_size=batch_size) total_orphaned =", "count and the word # The word is substring(maxcombo, padding+2) because # it", "query.order_by('-chunk_count') # Aggregate fields query = query.annotate(chunk_count=models.Count('id')) # Limit query = query[:limit] print", "prefix and chunks.\"\"\" root = cls.get_root() if not root: raise Exception(\"No root node", "node = prefix_node for chunk in query_chunks: node = node.get_child(chunk.lower()) if not node:", "static method to get the <limit> most recent frames. 
\"\"\" query = cls.get_in_range(calculated=True)", "limit=country_limit + 1) # Template for the overall query # It finds the", "specifically: - Look at all the words that followed this one anywhere -", "return 0 most_recent_time = most_recent_time['most_recent_time'] time_cutoff = most_recent_time - settings.NODE_FREEZE_INTERVAL subset_query = \"\"\"", "given country. \"\"\" # Group by chunk query = TweetChunk.objects.values('node', 'node__word') # Only", ".aggregate(latest_start_time=models.Max('start_time')) # maybe there aren't any? if now['latest_start_time'] is None: return # Preserve", "= logging.getLogger('map') class TreeNode(models.Model): class Meta: index_together = [ ['parent', 'word'], ['created_at', 'parent'],", "JOIN ({maxquery}) countrymax ON (countrymax.tz_country = country_node_count.tz_country) WHERE ( country_node_count.count = countrymax.max_count AND", "For the top result from each country, return the top 10 country-word-counts. \"\"\"", "you need to calculate. You can also store data on separate models, if", "`map_treenode`.`word`, COUNT(`map_tweetchunk`.`id`) AS `count` FROM `map_tweetchunk` INNER JOIN `map_treenode` ON (`map_tweetchunk`.`node_id` = `map_treenode`.`id`)", "WHERE ( AND `map_treenode`.`parent_id` = MY_ID_GOES HERE ) GROUP BY `map_tweetchunk`.`tz_country`, `map_treenode`.`word` \"\"\"", "Find the words following this node for every # country, and the number", "(parent, word) in cache: self.node_cache_hits += 1 return cache[(parent, word)], False else: #", "logger.info(\" ... orphaned batch of %d\", batch_orphaned) batch_orphaned = cls.orphan_empty_nodes(batch_size=batch_size) total_orphaned += batch_orphaned", "= country.country new_tweet_chunks.append(TweetChunk( node=node, twitter_id=tweet.tweet_id, tweet_text=tweet.text, created_at=tweet.created_at, tz_country=country)) parent = node depth +=", "int(country_limit) subquery = self.get_subquery() # This query finds the maximum number of tweets", "this node. More specifically: - Look at all the words that followed this", "all the tree nodes that have no associated TweetChunks \"\"\" return cls.objects.filter(chunks=None).exclude(pk__in=cls.ROOT_NODES) @classmethod", "trailing_edge_date = now['latest_start_time'] - settings.KEEP_DATA_FOR logger.info(\"Cleaning chunks from before %s...\", trailing_edge_date) batch_deleted =", "\"\"\"Returns a root in the tweet, if it exists\"\"\" for root in roots:", "@classmethod def get_root(cls): try: return cls.objects.get(id=1) except ObjectDoesNotExist: return None class Tz_Country(models.Model): user_time_zone", "cache[(parent, word)] = node if created: self.nodes_added += 1 return node, created def", "batch_orphaned = cls.orphan_empty_nodes(batch_size=batch_size) total_orphaned = batch_orphaned while batch_orphaned == batch_size: logger.info(\" ... orphaned", "AS max_count FROM ({subquery}) sub GROUP BY tz_country ORDER BY max_count DESC LIMIT", "= MY_ID_GOES HERE ) GROUP BY `map_tweetchunk`.`tz_country`, `map_treenode`.`word` \"\"\" # Group by country,", "TweetChunk.get_earliest_created_at() if earliest_created_at is not None: return min(earliest_created_at, baseline_cutoff) else: return baseline_cutoff @classmethod", "user_time_zone = models.CharField(max_length=32) country = models.CharField(max_length=32) class TweetChunk(models.Model): class Meta: index_together = [", "return the top 10 country-word-counts. \"\"\" # Make sure this is valid country_limit", "country, return the top 10 country-word-counts. 
\"\"\" # How much padding to add", "easier to work with. \"\"\" # Analyze every 15 seconds DURATION = timedelta(seconds=60)", "settings.DEBUG: # Prevent apparent memory leaks # https://docs.djangoproject.com/en/dev/faq/models/#why-is-django-leaking-memory from django import db db.reset_queries()", "maximum count. # Further, filters out empty words and countries. superquery = \"\"\"", "AS word, CAST(SUBSTRING(sub2.maxcombo, 1, {padding}) AS UNSIGNED) AS `count` FROM ({maxquery}) sub2 \"\"\".format(maxquery=maxquery,", "nodes_added = models.IntegerField(default=0) chunks_added = models.IntegerField(default=0) node_cache_hits = models.IntegerField(default=0) node_cache_size = models.IntegerField(default=0) def", "= TweetChunk.delete_before(trailing_edge_date, batch_size=batch_size) total_deleted += batch_deleted if reset and settings.DEBUG: # Prevent apparent", "is None: return # Preserve some time prior to that time frame trailing_edge_date", "query.exclude(node__word='') # query = query.exclude(tz_country='') # Only chunks belonging to our children query", "calculate(tweets). This is where you do your work. At the end, make sure", "return cursor.fetchall() def get_most_popular_child_chunk_by_country(self, country_limit=10): \"\"\" Get tuples of country, word, count for", "return None def get_tree_node(self, parent, word, cache=None): \"\"\" Returns a tree node for", "cleanup before we create its TweetChunk. node, created = self.get_tree_node(parent=parent, word=chunk, cache=node_cache) country", "sub2 \"\"\".format(maxquery=maxquery, padding=count_padding) print splitquery cursor = connection.cursor() cursor.execute(splitquery, [self.id, country_limit]) return cursor.fetchall()", "re.split('[*,.!:\"\\s;()/@#]+|\\'[\\W]|\\?+', rh) parent = root depth = 0 for chunk in chunks: if", "8, 9, 10] parent = models.ForeignKey('self', null=True, blank=True, related_name='children') word = models.CharField(max_length=150) created_at", "splitquery = \"\"\" SELECT sub2.tz_country, SUBSTRING(sub2.maxcombo, {padding} + 2) AS word, CAST(SUBSTRING(sub2.maxcombo, 1,", "= \"\"\" SELECT sub.tz_country, MAX(sub.combo) as maxcombo FROM ({subquery}) sub GROUP BY sub.tz_country", "\"\"\"Makes sure that all children of current orphans are also orphaned.\"\"\" future_orphans =", "Further, filters out empty words and countries. superquery = \"\"\" SELECT country_node_count.tz_country, country_node_count.word,", "parent and word, and whether or not it is new. A dictionary can", "as numeric max maxquery = \"\"\" SELECT sub.tz_country, MAX(sub.combo) as maxcombo FROM ({subquery})", "= cls.orphan_empty_nodes(batch_size=batch_size) total_orphaned += batch_orphaned if total_orphaned > 0: logger.info(\"Orphaned %d empty nodes\",", "the middle. splitquery = \"\"\" SELECT sub2.tz_country, SUBSTRING(sub2.maxcombo, {padding} + 2) AS word,", "= connection.cursor() cursor.execute(superquery) return cursor.fetchall() @classmethod def get_root(cls): try: return cls.objects.get(id=1) except ObjectDoesNotExist:", ") WHERE (map_tweetchunk.id IS NULL) AND (map_treenode.parent_id IS NOT NULL) AND (map_treenode.created_at <", "`map_treenode`.`word` \"\"\" # Group by country, chunk query = TweetChunk.objects.values('tz_country', 'node__word') # Only", "A dictionary can optionally be provided for caching values across calls. 
\"\"\" if", "word): child = list(self.children.filter(word=word)[:1]) if child: return child[0] return None @classmethod def get_empty_nodes(cls):", "ObjectDoesNotExist: return None class Tz_Country(models.Model): user_time_zone = models.CharField(max_length=32) country = models.CharField(max_length=32) class TweetChunk(models.Model):", "class Tz_Country(models.Model): user_time_zone = models.CharField(max_length=32) country = models.CharField(max_length=32) class TweetChunk(models.Model): class Meta: index_together", "Group by country, chunk query = TweetChunk.objects.values('tz_country', 'node__word') # Only with the given", "Only chunks belonging to our children query = query.filter(node__parent=self) # Order by count", "if depth > settings.MAX_DEPTH: break # This node is guaranteed safe to work", "this node for every country # up to the limit (plus 1 to", "it while we are working with it. # Setting created_at makes it impossible", "Make sure this is valid country_limit = int(country_limit) subquery = self.get_subquery() # This", "tweet chunks TweetChunk.cleanup() TreeNode.cleanup_orphans() @classmethod def get_stream_memory_cutoff(cls): \"\"\"Need to override this because we", "not None: return min(earliest_created_at, baseline_cutoff) else: return baseline_cutoff @classmethod def get_most_recent(cls, limit=20): \"\"\"", "= TreeNode.objects.get_or_create(parent=parent, word=word) if not created: # If it is an old node,", "LIMIT %s \"\"\" params = [cls.ROOT_NODES, batch_size] cursor = connection.cursor() return cursor.execute(query, params)", "to keep trying to grab this node until we get one while True:", "with it. # Setting created_at makes it impossible for it to be deleted", "empty nodes\", total_orphaned) else: logger.info(\"No empty nodes to orphan\") logger.info(\"Orphaning children of orphans...", "query = query.exclude(tz_country='') # Only chunks belonging to our children query = query.filter(node__parent=self)", "!= '' GROUP BY map_tweetchunk.tz_country, map_tweetchunk.node_id \"\"\".format(padding=count_padding, index_name=index_name) # Now select the max", "node_cache_hits = models.IntegerField(default=0) node_cache_size = models.IntegerField(default=0) def check_prefix(self, tweet, roots): \"\"\"Returns a root", "else: return baseline_cutoff @classmethod def get_most_recent(cls, limit=20): \"\"\" A handy static method to", "None: return 0 most_recent_time = most_recent_time['most_recent_time'] time_cutoff = most_recent_time - settings.NODE_FREEZE_INTERVAL subset_query =", "= [] for tweet in tweets: root = self.check_prefix(tweet, roots) if not root:", "this node. Look at the chunks that refer to them. Return the top", "batch of orphans\"\"\" query = \"\"\" DELETE FROM map_treenode WHERE (parent_id IS NULL)", "top 10 country-word-counts. 
\"\"\" # Make sure this is valid country_limit = int(country_limit)", "\"\"\".format(maxquery=maxquery, padding=count_padding) print splitquery cursor = connection.cursor() cursor.execute(splitquery, [self.id, country_limit]) return cursor.fetchall() def", "query = query.order_by('-count') # Aggregate fields query = query.annotate(count=models.Count('id')) print query.query # Limit", "one while True: try: with transaction.atomic(): # Get or create a node with", "= query.filter(node__parent=self) # Aggregate fields query = query.annotate(count=models.Count('id')) return query def get_most_popular_child_chunk_by_country2(self, country_limit=10):", "node.\"\"\" try: chunks = cls.objects.filter(tz_country=country_name, node=node) count = chunks.count() chunk = chunks[random.randint(0, count", "a list. \"\"\" # Group by tz_country query = TweetChunk.objects.values('tz_country') # Only non-empty", "with 0s, alphabetic max is the same as numeric max maxquery = \"\"\"", "... deleted batch of %d\", batch_deleted) batch_deleted = TweetChunk.delete_before(trailing_edge_date, batch_size=batch_size) total_deleted += batch_deleted", "[self.id, country_limit]) return cursor.fetchall() def get_most_popular_child_chunk_by_country(self, country_limit=10): \"\"\" Get tuples of country, word,", "total_orphaned = batch_orphaned while batch_orphaned == batch_size: logger.info(\" ... orphaned batch of %d\",", "batch_size=batch_size) total_deleted += batch_deleted if reset and settings.DEBUG: # Prevent apparent memory leaks", "and word node, created = TreeNode.objects.get_or_create(parent=parent, word=word) if not created: # If it", "result['node__word'], result['count'] except ObjectDoesNotExist: return None def get_subquery(self): \"\"\" Generates SQL like the", "the concat/max/split trick count_padding = 10 # Get the name of the stupid", "the top 10 countries following this node. More specifically: - Look at all", "root: raise Exception(\"No root node in tweet tree\") prefix_node = root.get_child(prefix) if prefix_node" ]
[ "0: #MC-AWGN model gauss = np.zeros((w, h, c)) for chn in range(c): gauss[:,:,chn]", "0: # original out = out elif mode == 1: # flip up", "n in range(lm_numpy.shape[0]): for c in range(chn): selected_lm = np.reshape(lm_numpy[n,:,:,c], (lm_numpy.shape[1]*lm_numpy.shape[2], 1)) selected_lm", "c in range(lm_numpy.shape[2]): if np.isin(c,keep)==0: ref_lm_numpy[:, :, c] = 0. print(ref_lm_numpy) RF_tensor =", "# mpl.use('Agg') # import matplotlib.pyplot as plt def weights_init_kaiming(m): classname = m.__class__.__name__ if", "print(ref_lm_numpy) RF_tensor = np2ts(ref_lm_numpy) RF_tensor = Variable(RF_tensor.cuda(),volatile=True) return RF_tensor def level_refine(NM_tensor, ref_mode, chn=3,cFlag=False):", "#RF_tensor = NM_tensor.clone() #get a clone version of NM tensor without changing the", "!= 0: score = get_pdf_matching_score(EPDF, Pre_PDF) #TODO: How to match these two print(score)", "(2,0,1)) def visual_va2np(Out, mode=1, ps=0, pss=1, scal=1, rescale=0, w=10, h=10, c=3, refill=0, refill_img=0,", "in range(c): gauss[:,:,chn] = np.random.normal(0, noise_level_list[chn], (w, h)) noisy = image + gauss", "(lm_numpy.shape[1]*lm_numpy.shape[2], 1)) H, x = np.histogram(selected_lm, normed=True) dx = x[1]-x[0] F = np.cumsum(H)*dx", "= 35 noisy_img = generate_noisy(current_image, 0, noise_level_list /255.) return (noisy_img, noise_level_list) def generate_ground_truth_noise_map(noise_map,", "noisy = image.copy() if noise_type == 0: #MC-AWGN model gauss = np.zeros((w, h,", "= NM_tensor.clone() #get a clone version of NM tensor without changing the original", "1))) pdf_list = np.zeros((lm_numpy.shape[0], chn, 10)) for n in range(lm_numpy.shape[0]): for c in", "s_or_m == 0: #single noise type if val == 0: for chn in", "# def batch_PSNR(img, imclean, data_range): # Img = img.data.cpu().numpy().astype(np.float32) # Iclean = imclean.data.cpu().numpy().astype(np.float32)", "len(np.unique(image)) #vals = 2 ** np.ceil(np.log2(vals)) #noisy = np.random.poisson(image * vals) / float(vals)", "noisy = noisy + noise_c return noisy #generate AWGN-RVIN noise together def generate_comp_noisy(image,", ":, :, :] = np.reshape(np.tile(noise_level_list, image.shape[0] * image.shape[1]), (6, image.shape[0], image.shape[1])) NM_tensor =", "= cv2.dilate(nmap, kernel, iterations=1) ref_lm_numpy[:, :, c] = nmap_dilation #ref_lm_numpy[:, :, c] =", "w, h, c= x.shape x_ts = x.transpose(2, 0, 1) x_ts = torch.from_numpy(x_ts).type(torch.FloatTensor) if", "gau_std, (w, h)) noisy_chn[prob_map >= mix_thre ] = noisy_chn[prob_map >= mix_thre ] +", "and gaussian blur [Input] a multi-channel tensor of noise map [Output] a multi-channel", "estimated noise level) [1] Get the maximum value of noise level [2] Gaussian", "Given a mosaic image of subsampling, recombine it to a full image [Input]:", "normalized noise level value to a map [Input]: level: a scaler noise level(0-1),", "#real image wf = 0 hf = 0 for ws in range(scale): hf", "(0, 2, 3, 1))) pdf_list = np.zeros((lm_numpy.shape[0], chn, 10)) for n in range(lm_numpy.shape[0]):", "wc return real def scal2map(level, h, w, min_v=0., max_v=255.): ''' Change a single", "lm_numpy[:, :, c] nmap_dilation = cv2.dilate(nmap, kernel, iterations=1) ref_lm_numpy[:, :, c] = nmap_dilation", "------------ [Input] noisy_image, estimation_model, plot_flag, stopping [Output] plot the middle vector score_seq: the", "= np.rot90(out, k=2) out = np.flipud(out) elif mode == 6: # rotate 270", "map for c in range(lm_numpy.shape[2]): if np.isin(c,keep)==0: ref_lm_numpy[:, :, c] = 0. 
print(ref_lm_numpy)", "= np.random.uniform(0.0, 1.0, (w, h)) noise_map = np.random.uniform(0.0, 1.0, (w, h)) noisy_chn =", "noise_map[0, :, :, :] = np.reshape(np.tile(nl_list, NM_tensor.size()[2] * NM_tensor.size()[3]), (chn, NM_tensor.size()[2], NM_tensor.size()[3])) RF_tensor", "mode == 1 or mode==3: out_numpy = Out.data.squeeze(0).cpu().numpy() elif mode == 2: out_numpy", "def get_pdf_in_maps(lm, mark, chn=1): ''' Description: get the noise estimation cdf of each", "''' w, h, c = image.shape noisy = image.copy() for chn in range(c):", "#sigma_s = np.random.uniform(0.0, 0.16, (3,)) #sigma_c = np.random.uniform(0.0, 0.06, (3,)) sigma_c = [sigma_c]*3", "if band.size else temp mosaic = np.concatenate((mosaic, band), axis = 0) if mosaic.size", "salient (the most frequent estimated noise level) [1] Get the maximum value of", "torch.from_numpy(noise_map).type(torch.FloatTensor) RF_tensor = Variable(RF_tensor.cuda(),volatile=True) return (RF_tensor, nl_list) def normalize(a, len_v, min_v, max_v): '''", "np.zeros((w, h, c)) for chn in range(3): noise_c [:, :, chn] = np.random.normal(0,", "1, 1)) if mode == 0 or mode == 1: out_numpy = (np.transpose(out_numpy,", "range(scale): temp = image[ws::scale, hs::scale, :] #get the sub-sampled image band = np.concatenate((band,", "limit_set[0][0], limit_set[0][1]) #normalize the level value noise_map[n, :, :, :] = np.reshape(np.tile(noise_level_list, pw", "---------- [Input] image : ndarray of float type: [0,1] just one image, current", "#MC-AWGN model gauss = np.zeros((w, h, c)) for chn in range(c): gauss[:,:,chn] =", "to compute the variance noise_s = np.random.randn(w, h, c) * noise_s_map #use the", "we use the original clean image x to compute the variance noise_s =", "x = np.histogram(selected_lm, range=(0.,1.), bins=10, normed=True) dx = x[1]-x[0] F = H *", "zeroing out items ''' lm_numpy = lm.data.squeeze(0).cpu().numpy() lm_numpy = (np.transpose(lm_numpy, (1, 2, 0)))", "print(nl_list) elif ref_mode == 1: nl_list = get_max_noise_in_maps(NM_tensor, chn) elif ref_mode == 5:", "and cdf of AWGN channel Compare the changes of the density function and", "scaling factor ------------ [Input] noisy_image, estimation_model, plot_flag, stopping [Output] plot the middle vector", "NM_tensor.size()[2], NM_tensor.size()[3])) RF_tensor = torch.from_numpy(noise_map).type(torch.FloatTensor) if torch.cuda.is_available() and not cFlag: RF_tensor = Variable(RF_tensor.cuda(),volatile=True)", "estimation cdf of each channel ---------- [Input] a multi-channel tensor of noise map", "ref_mode == 0 or ref_mode == 4: nl_list = get_salient_noise_in_maps(NM_tensor, 0., chn) if", "noise map before concatenating noise_map[0, :, :, :] = np.reshape(np.tile(nl_list, NM_tensor.size()[2] * NM_tensor.size()[3]),", "+ noise_s #add signal_independent noise to L noise_c = np.zeros((w, h, c)) for", "np.random.normal(0, gau_std, (w, h)) noisy_chn[prob_map >= mix_thre ] = noisy_chn[prob_map >= mix_thre ]", "0))) * 255.0 * scal else: out_numpy = (np.transpose(out_numpy, (1, 2, 0))) if", "a noisy image and the noise estimation model, keep multiscaling the image\\\\ using", "average maximum value of the noise level [5] Get the CDF thresholded value", "= 1 score_seq = [] Pre_CDF = None flag = 0 for pss", "matplotlib.pyplot as plt def weights_init_kaiming(m): classname = m.__class__.__name__ if classname.find('Conv') != -1: nn.init.kaiming_normal(m.weight.data,", "def pixelshuffle(image, scale): ''' Discription: Given an image, return a reversible sub-sampling [Input]:", "def 
generate_ground_truth_noise_map(noise_map, n, noise_level_list, limit_set, c, pn, pw, ph): for chn in range(c):", "== 1: nl_list = get_max_noise_in_maps(NM_tensor, chn) elif ref_mode == 5: nl_list = get_cdf_noise_in_maps(NM_tensor,", "image + gauss elif noise_type == 1: #MC-RVIN model for chn in range(c):", "= np.reshape(lm_numpy[n,:,:,c], (lm_numpy.shape[1]*lm_numpy.shape[2], 1)) H, x = np.histogram(selected_lm, range=(0.,1.), bins=10, normed=True) dx =", "generate_training_noisy_image(current_image, s_or_m, limit_set, c, val=0): noise_level_list = np.zeros((c, 1)) if s_or_m == 0:", "from the input level level_tensor = torch.from_numpy(np.reshape(level, (1,1))).type(torch.FloatTensor) #make the noise level to", "torch.autograd import Variable import cv2 import scipy.ndimage import scipy.io as sio # import", "opt_scale: the optimal scaling factor ''' if color == 1: c = 3", "1: for chn in range(c): noise_level_list[chn] = 35 noisy_img = generate_noisy(current_image, 0, noise_level_list", "value for the map if ref_mode == 0 or ref_mode == 4: nl_list", "- nl_list print(nl_list) elif ref_mode == 1: nl_list = get_max_noise_in_maps(NM_tensor, chn) elif ref_mode", "band return mosaic def reverse_pixelshuffle(image, scale, fill=0, fill_image=0, ind=[0,0]): ''' Discription: Given a", "h, c= x.shape x_ts = x.transpose(2, 0, 1) x_ts = torch.from_numpy(x_ts).type(torch.FloatTensor) if mode", "== 4 or ref_mode==5: #if we use a single value for the map", "In): pass def np2ts(x, mode=0): #now assume the input only has one channel", "elif ref_mode == 5: nl_list = get_cdf_noise_in_maps(NM_tensor, 0.999, chn) noise_map = np.zeros((NM_tensor.shape[0], chn,", "= ( lb + up ) * 0.5 noise_map = np.zeros((1, chn, NM_tensor.size()[2],", "1)) if s_or_m == 0: #single noise type if val == 0: for", "original clean image x to compute the variance noise_s = np.random.randn(w, h, c)", "prob_map = np.random.uniform( 0, 1, (w, h) ) #the prob map noise_map =", "3, 1))) pdf_list = np.zeros((lm_numpy.shape[0], chn, 10)) for n in range(lm_numpy.shape[0]): for c", "c] = nl return nl_list def get_smooth_maps(lm, dilk = 50, gsd = 10):", "np.zeros((c, 1)) if s_or_m == 0: #single noise type if val == 0:", "%d:' % (pss-1)) return (pss-1, score_seq) Pre_PDF = EPDF flag = 1 return", "(3,)) sigma_c = [sigma_c]*3 sigma_s = [sigma_s]*3 sigma_s = np.reshape(sigma_s, (1, 1, c))", "rotate 270 degree out = np.rot90(out, k=3) elif mode == 7: # rotate", "a refined map tensor with four channels ''' #RF_tensor = NM_tensor.clone() #get a", "np.argmax(hist[0]) #print(nl_ind) #print(hist[0]) #print(hist[1]) nl = ( hist[1][nl_ind] + hist[1][nl_ind+1] ) / 2.", "ISource = np2ts(image) ISource = torch.clamp(ISource, 0., 1.) 
ISource = Variable(ISource.cuda(),volatile=True) #input denoise", "level: a scaler noise level(0-1), h, w [Return]: a pytorch tensor of the", "[2]'Gaussian-Poisson' GP noise approximator, the combinatin of signal-dependent and signal independent noise [Output]", "= m.__class__.__name__ if classname.find('Conv') != -1: nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in') elif classname.find('Linear') != -1:", ":, :] = np.reshape(np.tile(nl_list, NM_tensor.size()[2] * NM_tensor.size()[3]), (chn, NM_tensor.size()[2], NM_tensor.size()[3])) RF_tensor = torch.from_numpy(noise_map).type(torch.FloatTensor)", "the limit noisy_image = pixelshuffle(noisy_image, pss) INoisy = np2ts(noisy_image, color) INoisy = Variable(INoisy.cuda(),", "range(chn): nl = np.amax(lm_numpy[n, :, :, c]) nl_list[n, c] = nl return nl_list", "map if ref_mode == 0 or ref_mode == 4: nl_list = get_salient_noise_in_maps(NM_tensor, 0.,", "image[ws::scale, hs::scale, :] #get the sub-sampled image band = np.concatenate((band, temp), axis =", "image x to compute the variance noise_s = np.random.randn(w, h, c) * noise_s_map", "image : ndarray of float type: [0,1] just one image, current support gray", "w, h, c = image.shape real = np.zeros((w, h, c)) #real image wf", "def get_pdf_matching_score(F1, F2): ''' Description: Given two sets of CDF, get the overall", ") * 0.5 noise_map = np.zeros((1, chn, NM_tensor.size()[2], NM_tensor.size()[3])) #initialize the noise map", "c] = nl return nl_list def get_cdf_noise_in_maps(lm, thre=0.8, chn=3): ''' Description: To find", "Variable(ISource.cuda(),volatile=True) #input denoise conditions noise_map = np.zeros((1, 6, image.shape[0], image.shape[1])) #initialize the noise", "the changed pixels [2]'Gaussian-Poisson' GP noise approximator, the combinatin of signal-dependent and signal", "0)) return out_numpy #TODO: two pixel shuffle functions to process the images def", "visual_va2np(Out, mode=1, ps=0, pss=1, scal=1, rescale=0, w=10, h=10, c=3, refill=0, refill_img=0, refill_ind=[0, 0]):", "''' lm_numpy = lm.data.squeeze(0).cpu().numpy() lm_numpy = (np.transpose(lm_numpy, (1, 2, 0))) ref_lm_numpy = lm_numpy.copy()", "# rotate 180 degree and flip out = np.rot90(out, k=2) out = np.flipud(out)", "* NM_tensor.size()[3]), (chn, NM_tensor.size()[2], NM_tensor.size()[3])) RF_tensor = torch.from_numpy(noise_map).type(torch.FloatTensor) if torch.cuda.is_available() and not cFlag:", "(chn, NM_tensor.size()[2], NM_tensor.size()[3])) RF_tensor = torch.from_numpy(noise_map).type(torch.FloatTensor) if torch.cuda.is_available() and not cFlag: RF_tensor =", "] elif noise_type == 2: #sigma_s = np.random.uniform(0.0, 0.16, (3,)) #sigma_c = np.random.uniform(0.0,", "matching score for each channel ----------- [Input] F1, F2 [Output] score for each", "the optimal scaling factor ''' if color == 1: c = 3 elif", "multi-channel tensor of refined noise map ''' kernel = np.ones((dilk, dilk)) lm_numpy =", "if s_or_m == 0: #single noise type if val == 0: for chn", "cFlag: RF_tensor = Variable(RF_tensor.cuda(),volatile=True) else: RF_tensor = Variable(RF_tensor,volatile=True) elif ref_mode == 2: RF_tensor", "sequence of factors ''' norm_a = np.reshape(a, (len_v,1)) norm_a = (norm_a - float(min_v))", "x_ts = x_ts.unsqueeze(0) elif mode == 2: x_ts = x_ts.unsqueeze(1) return x_ts def", "np.reshape(lm_numpy[n,:,:,c], (lm_numpy.shape[1]*lm_numpy.shape[2], 1)) H, x = np.histogram(selected_lm, normed=True) dx = x[1]-x[0] F =", "flag = 0 for pss in range(1, stopping+1): #scaling factor from 1 to", "image.shape noisy = 
image.copy() for chn in range(c): mix_thre = noise_level_list[c+chn] #get the", "density function and decide the optimal scaling factor ------------ [Input] noisy_image, estimation_model, plot_flag,", "mosaic image of subsampling, recombine it to a full image [Input]: Image [Return]:", "RF_tensor = np2ts(ref_lm_numpy) RF_tensor = Variable(RF_tensor.cuda(),volatile=True) return RF_tensor def level_refine(NM_tensor, ref_mode, chn=3,cFlag=False): '''", "2, 0))) if ps == 1: out_numpy = reverse_pixelshuffle(out_numpy, pss, refill, refill_img, refill_ind)", "[Output] score for each channel ''' return np.mean((F1-F2)**2) def decide_scale_factor(noisy_image, estimation_model, color=1, thre", "= np.rot90(out, k=2) elif mode == 5: # rotate 180 degree and flip", "Only Keep one channel and zero out other channels [Input] a multi-channel tensor", "prob map noise_map = np.random.uniform( 0, 1, (w, h) ) #the noisy map", "nl return nl_list def get_cdf_noise_in_maps(lm, thre=0.8, chn=3): ''' Description: To find out the", "of pixels ''' w, h, c = image.shape real = np.zeros((w, h, c))", "= Variable(INoisy.cuda(), volatile=True) EMap = torch.clamp(estimation_model(INoisy), 0., 1.) EPDF = get_pdf_in_maps(EMap, mark +", "!= -1: # nn.init.uniform(m.weight.data, 1.0, 0.02) m.weight.data.normal_(mean=0, std=math.sqrt(2./9./64.)).clamp_(-0.025,0.025) nn.init.constant(m.bias.data, 0.0) # def batch_PSNR(img,", "nl_list[n, c] = nl return nl_list def get_cdf_noise_in_maps(lm, thre=0.8, chn=3): ''' Description: To", "mix ratio of AWGN and RVIN gau_std = noise_level_list[chn] #get the gaussian std", "#use the new variance to shift the normal distribution noisy = image +", "gauss[:,:,chn] = np.random.normal(0, noise_level_list[chn], (w, h)) noisy = image + gauss elif noise_type", "multi-channel tensor of noise map [Output] A list of noise level value '''", "image [Input]: Image [Return]: Recombine it using different portions of pixels ''' w,", "plot_flag = 1, stopping = 4, mark=''): ''' Description: Given a noisy image", "3 elif color == 0: c = 1 score_seq = [] Pre_CDF =", "after dilation and gaussian blur [Input] a multi-channel tensor of noise map [Output]", "noise level map to make the regional estimation more smooth [3] Get the", "noisy image ''' w, h, c = image.shape #Some unused noise type: Poisson", "np.random.uniform(-factor,factor,(w, h, c)) #uni = uni.reshape(w, h, c) #noisy = image + uni", "for c in range(chn): nl = np.amax(lm_numpy[n, :, :, c]) nl_list[n, c] =", "channel separately prob_map = np.random.uniform(0.0, 1.0, (w, h)) noise_map = np.random.uniform(0.0, 1.0, (w,", "1: c = 3 elif color == 0: c = 1 score_seq =", "2, 0))) ref_lm_numpy = lm_numpy.copy() #a refined map for c in range(lm_numpy.shape[2]): if", "for chn in range(c): gauss[:,:,chn] = np.random.normal(0, noise_level_list[chn], (w, h)) noisy = image", "= np.random.normal(0, gau_std, (w, h)) noisy_chn[prob_map >= mix_thre ] = noisy_chn[prob_map >= mix_thre", "channel ''' lm_numpy = lm.data.cpu().numpy() lm_numpy = (np.transpose(lm_numpy, (0, 2, 3, 1))) pdf_list", "Description: To generate mixed AWGN and RVIN noise together ---------- [Input] image: a", "(1, 2, 0))) * 255.0 * scal else: out_numpy = (np.transpose(out_numpy, (1, 2,", "= np.transpose(out_numpy, (1, 2, 0)) return out_numpy #TODO: two pixel shuffle functions to", "0, 1) x_ts = torch.from_numpy(x_ts).type(torch.FloatTensor) if mode == 0 or mode == 1:", "color) INoisy = Variable(INoisy.cuda(), volatile=True) EMap = torch.clamp(estimation_model(INoisy), 0., 1.) 
EPDF = get_pdf_in_maps(EMap,", "range(lm_numpy.shape[0]): for c in range(chn): selected_lm = np.reshape(lm_numpy[n,:,:,c], (lm_numpy.shape[1]*lm_numpy.shape[2], 1)) selected_lm = selected_lm[selected_lm>thre]", "level [2] Gaussian smooth the noise level map to make the regional estimation", "= (norm_a - float(min_v)) / float(max_v - min_v) return norm_a def generate_training_noisy_image(current_image, s_or_m,", "h, c = image.shape real = np.zeros((w, h, c)) #real image wf =", "#Some unused noise type: Poisson and Uniform #if noise_type == *: #vals =", "Description: get the noise estimation cdf of each channel ---------- [Input] a multi-channel", "* image.shape[1]), (6, image.shape[0], image.shape[1])) NM_tensor = torch.from_numpy(noise_map).type(torch.FloatTensor) NM_tensor = Variable(NM_tensor.cuda(),volatile=True) #generate blur", "image wf = 0 hf = 0 for ws in range(scale): hf =", "= lm.data.cpu().numpy() lm_numpy = (np.transpose(lm_numpy, (0, 2, 3, 1))) pdf_list = np.zeros((lm_numpy.shape[0], chn,", "c, pn, pw, ph): for chn in range(c): noise_level_list[chn] = normalize(noise_level_list[chn], 1, limit_set[0][0],", "normalize the sequence of factors ''' norm_a = np.reshape(a, (len_v,1)) norm_a = (norm_a", "tensor of noise map [Output] a multi-channel tensor of noise map after zeroing", "nl_list def get_smooth_maps(lm, dilk = 50, gsd = 10): ''' Description: To return", "uni.reshape(w, h, c) #noisy = image + uni noisy = image.copy() if noise_type", "level [5] Get the CDF thresholded value [Output] a refined map tensor with", "RVIN noise together ---------- [Input] image: a float image between [0,1] noise_level_list: AWGN", "ref_mode == 1 or ref_mode == 4 or ref_mode==5: #if we use a", "mix_thre ] = noise_map[prob_map < mix_thre ] gauss = np.random.normal(0, gau_std, (w, h))", "and each channel ''' lm_numpy = lm.data.cpu().numpy() lm_numpy = (np.transpose(lm_numpy, (0, 2, 3,", "level of noise level in the images ---------- [Input] a multi-channel tensor of", "range(c): gauss[:,:,chn] = np.random.normal(0, noise_level_list[chn], (w, h)) noisy = image + gauss elif", "an image, return a reversible sub-sampling [Input]: Image ndarray float [Return]: A mosic", "lm.data.cpu().numpy() lm_numpy = (np.transpose(lm_numpy, (0, 2, 3, 1))) pdf_list = np.zeros((lm_numpy.shape[0], chn, 10))", "gray or color image input (w,h,c) noise_type: 0,1,2,3 noise_level_list: pre-defined noise level for", "h, c)) #real image wf = 0 hf = 0 for ws in", "np.reshape(sigma_s, (1, 1, c)) #reshape the sigma factor to [1,1,c] to multiply with", "compare_psnr(Iclean[i,:,:,:], Img[i,:,:,:], data_range=data_range) # return (PSNR/Img.shape[0]) def data_augmentation(image, mode): out = np.transpose(image, (1,2,0))", "elif classname.find('BatchNorm') != -1: # nn.init.uniform(m.weight.data, 1.0, 0.02) m.weight.data.normal_(mean=0, std=math.sqrt(2./9./64.)).clamp_(-0.025,0.025) nn.init.constant(m.bias.data, 0.0) #", "the channel number for gaussian [Output] CDF function of each sample and each", "None flag = 0 for pss in range(1, stopping+1): #scaling factor from 1", "ref_lm_numpy = lm_numpy.copy() #a refined map for c in range(lm_numpy.shape[2]): if np.isin(c,keep)==0: ref_lm_numpy[:,", "noise_level_list[c+chn] #get the mix ratio of AWGN and RVIN gau_std = noise_level_list[chn] #get", "== 0: for chn in range(c): noise_level_list[chn] = np.random.uniform(limit_set[0][0], limit_set[0][1]) elif val ==", "or ref_mode == 4: nl_list = get_salient_noise_in_maps(NM_tensor, 0., chn) if ref_mode == 4:", "nl_list) def normalize(a, len_v, 
min_v, max_v): ''' normalize the sequence of factors '''", "h)) noise_map = np.random.uniform(0.0, 1.0, (w, h)) noisy_chn = noisy[: , :, chn]", "= image[wf:wf+wc, hf:hf+hc, :] hf = hf + hc wf = wf +", "NM_tensor.size()[2], NM_tensor.size()[3])) #initialize the noise map before concatenating for n in range(NM_tensor.shape[0]): noise_map[n,:,:,:]", "color=1, thre = 0, plot_flag = 1, stopping = 4, mark=''): ''' Description:", "for c in range(lm_numpy.shape[2]): if np.isin(c,keep)==0: ref_lm_numpy[:, :, c] = 0. print(ref_lm_numpy) RF_tensor", "str(c) + '.mat',{'F':F}) # plt.bar(range(10), F) #plt.savefig(mark + str(c) + '.png') # plt.close()", "score = get_pdf_matching_score(EPDF, Pre_PDF) #TODO: How to match these two print(score) score_seq.append(score) if", "dilation and gaussian blur [Input] a multi-channel tensor of noise map [Output] a", ":] = np.reshape(np.tile(nl_list, NM_tensor.size()[2] * NM_tensor.size()[3]), (chn, NM_tensor.size()[2], NM_tensor.size()[3])) RF_tensor = torch.from_numpy(noise_map).type(torch.FloatTensor) RF_tensor", "in range(1, stopping+1): #scaling factor from 1 to the limit noisy_image = pixelshuffle(noisy_image,", "mode == 6: # rotate 270 degree out = np.rot90(out, k=3) elif mode", "(0, 2, 3, 1))) nl_list = np.zeros((lm_numpy.shape[0], chn, 1)) for n in range(lm_numpy.shape[0]):", "maximum value of the noise level [5] Get the CDF thresholded value [Output]", "= hf + hc wf = wf + wc return real def scal2map(level,", "out_numpy = Out.data.squeeze(0).cpu().numpy() out_numpy = np.transpose(out_numpy, (1, 2, 0)) return out_numpy #TODO: two", "with 0 or 1. noise_level: ratio of the occupation of the changed pixels", "== 2: #sigma_s = np.random.uniform(0.0, 0.16, (3,)) #sigma_c = np.random.uniform(0.0, 0.06, (3,)) sigma_c", "1 score_seq = [] Pre_CDF = None flag = 0 for pss in", "nn.init.uniform(m.weight.data, 1.0, 0.02) m.weight.data.normal_(mean=0, std=math.sqrt(2./9./64.)).clamp_(-0.025,0.025) nn.init.constant(m.bias.data, 0.0) # def batch_PSNR(img, imclean, data_range): #", "elif mode == 6: # rotate 270 degree out = np.rot90(out, k=3) elif", "chn in range(c): #process each channel separately prob_map = np.random.uniform(0.0, 1.0, (w, h))", "ws in range(scale): band = np.array([]) for hs in range(scale): temp = image[ws::scale,", "ref_mode == 4: nl_list = get_salient_noise_in_maps(NM_tensor, 0., chn) if ref_mode == 4: #half", "tensor of the cacatenated noise level map ''' #get a tensor from the", "= Out.data.squeeze(1).cpu().numpy() if out_numpy.shape[0] == 1: out_numpy = np.tile(out_numpy, (3, 1, 1)) if", "NM_tensor.size()[2] * NM_tensor.size()[3]), (chn, NM_tensor.size()[2], NM_tensor.size()[3])) RF_tensor = torch.from_numpy(noise_map).type(torch.FloatTensor) if torch.cuda.is_available() and not", "pixels ''' w, h, c = image.shape real = np.zeros((w, h, c)) #real", "return nl_list def get_pdf_in_maps(lm, mark, chn=1): ''' Description: get the noise estimation cdf", "if score <= thre: print('optimal scale is %d:' % (pss-1)) return (pss-1, score_seq)", "noise_level_list = np.zeros((c, 1)) if s_or_m == 0: #single noise type if val", "NM_tensor.size()[3])) #initialize the noise map before concatenating for n in range(NM_tensor.shape[0]): noise_map[n,:,:,:] =", "the estimation nl_list = nl_list - nl_list print(nl_list) elif ref_mode == 1: nl_list", "c] nmap_dilation = cv2.dilate(nmap, kernel, iterations=1) ref_lm_numpy[:, :, c] = nmap_dilation #ref_lm_numpy[:, :,", "= noise_map[prob_map < mix_thre ] gauss = np.random.normal(0, gau_std, (w, h)) noisy_chn[prob_map >=", "w) 
return level_tensor def scal2map_spatial(level1, level2, h, w): stdN_t1 = scal2map(level1, int(h/2), w)", "separately prob_map = np.random.uniform(0.0, 1.0, (w, h)) noise_map = np.random.uniform(0.0, 1.0, (w, h))", "noise [Output] A noisy image ''' w, h, c = image.shape #Some unused", "images if fill==1 and ws==ind[0] and hs==ind[1]: real[ws::scale, hs::scale, :] = fill_image[wf:wf+wc, hf:hf+hc,", "down out = np.rot90(out) out = np.flipud(out) elif mode == 4: # rotate", "Img = img.data.cpu().numpy().astype(np.float32) # Iclean = imclean.data.cpu().numpy().astype(np.float32) # PSNR = 0 # for", "the new variance to shift the normal distribution noisy = image + noise_s", "classname.find('BatchNorm') != -1: # nn.init.uniform(m.weight.data, 1.0, 0.02) m.weight.data.normal_(mean=0, std=math.sqrt(2./9./64.)).clamp_(-0.025,0.025) nn.init.constant(m.bias.data, 0.0) # def", "noise_level_list): ''' Description: To generate mixed AWGN and RVIN noise together ---------- [Input]", "maps [Input] the noise map tensor, and a refinement mode Mode: [0] Get", "we use a single value for the map if ref_mode == 0 or", "use the original clean image x to compute the variance noise_s = np.random.randn(w,", ":] = fill_image[wf:wf+wc, hf:hf+hc, :] else: real[ws::scale, hs::scale, :] = image[wf:wf+wc, hf:hf+hc, :]", "normed=True) dx = x[1]-x[0] F = H * dx pdf_list[n, c, :] =", "noise_map[0, :, :, :] = np.reshape(np.tile(noise_level_list, image.shape[0] * image.shape[1]), (6, image.shape[0], image.shape[1])) NM_tensor", "if np.isin(c,keep)==0: ref_lm_numpy[:, :, c] = 0. print(ref_lm_numpy) RF_tensor = np2ts(ref_lm_numpy) RF_tensor =", "x.transpose(2, 0, 1) x_ts = torch.from_numpy(x_ts).type(torch.FloatTensor) if mode == 0 or mode ==", "imclean, data_range): # Img = img.data.cpu().numpy().astype(np.float32) # Iclean = imclean.data.cpu().numpy().astype(np.float32) # PSNR =", "the noise estimation model, keep multiscaling the image\\\\ using pixel-shuffle methods, and estimate", "hist[1][nl_ind+1] ) / 2. 
nl_list[n, c] = nl return nl_list def get_cdf_noise_in_maps(lm, thre=0.8,", "= temp.shape #get the shpae of the current images if fill==1 and ws==ind[0]", "#now assume the input only has one channel which is ignored w, h,", "with four channels ''' #RF_tensor = NM_tensor.clone() #get a clone version of NM", "c) * noise_s_map #use the new variance to shift the normal distribution noisy", "* NM_tensor.size()[3]), (chn, NM_tensor.size()[2], NM_tensor.size()[3])) RF_tensor = torch.from_numpy(noise_map).type(torch.FloatTensor) RF_tensor = Variable(RF_tensor.cuda(),volatile=True) return (RF_tensor,", "and not cFlag: RF_tensor = Variable(RF_tensor.cuda(),volatile=True) else: RF_tensor = Variable(RF_tensor,volatile=True) elif ref_mode ==", "hf = 0 for hs in range(scale): temp = real[ws::scale, hs::scale, :] wc,", "= noisy[: , :, chn] noisy_chn[ prob_map < noise_level_list[chn] ] = noise_map[ prob_map", "Iclean = imclean.data.cpu().numpy().astype(np.float32) # PSNR = 0 # for i in range(Img.shape[0]): #", "out = np.rot90(out) out = np.flipud(out) elif mode == 4: # rotate 180", "chn in range(c): noise_level_list[chn] = normalize(noise_level_list[chn], 1, limit_set[0][0], limit_set[0][1]) #normalize the level value", "map before concatenating noise_map[0, :, :, :] = np.reshape(np.tile(noise_level_list, image.shape[0] * image.shape[1]), (6,", "irradience) #print(noise_s_map) # different from the official code, here we use the original", "= lm_numpy.copy() #a refined map for c in range(lm_numpy.shape[2]): nmap = lm_numpy[:, :,", "= np.reshape(np.tile(nl_list, NM_tensor.size()[2] * NM_tensor.size()[3]), (chn, NM_tensor.size()[2], NM_tensor.size()[3])) RF_tensor = torch.from_numpy(noise_map).type(torch.FloatTensor) RF_tensor =", "scal2map(level, h, w, min_v=0., max_v=255.): ''' Change a single normalized noise level value", "gaussian [Output] CDF function of each sample and each channel ''' lm_numpy =", "1 or mode==3: out_numpy = Out.data.squeeze(0).cpu().numpy() elif mode == 2: out_numpy = Out.data.squeeze(1).cpu().numpy()", "x_ts def get_salient_noise_in_maps(lm, thre = 0., chn=3): ''' Description: To find out the", "''' Description: To find out the maximum level of noise level in the", "fill_image[wf:wf+wc, hf:hf+hc, :] else: real[ws::scale, hs::scale, :] = image[wf:wf+wc, hf:hf+hc, :] hf =", "the overall matching score for each channel ----------- [Input] F1, F2 [Output] score", "+ str(c) + '.png') # plt.close() return pdf_list def get_pdf_matching_score(F1, F2): ''' Description:", "gsd = 10): ''' Description: To return the refined maps after dilation and", "np2ts(image) ISource = torch.clamp(ISource, 0., 1.) 
ISource = Variable(ISource.cuda(),volatile=True) #input denoise conditions noise_map", "only has one channel which is ignored w, h, c= x.shape x_ts =", "EPDF flag = 1 return (stopping, score_seq) def get_max_noise_in_maps(lm, chn=3): ''' Description: To", "print(nl_list[n,c]) return nl_list def get_pdf_in_maps(lm, mark, chn=1): ''' Description: get the noise estimation", "in range(chn): selected_lm = np.reshape(lm_numpy[n,:,:,c], (lm_numpy.shape[1]*lm_numpy.shape[2], 1)) H, x = np.histogram(selected_lm, normed=True) dx", "of noise map [Output] A list of noise level value ''' lm_numpy =", "level map to make the regional estimation more smooth [3] Get the average", "noise_s_map #use the new variance to shift the normal distribution noisy = image", "def get_max_noise_in_maps(lm, chn=3): ''' Description: To find out the maximum level of noise", "channel and zero out other channels [Input] a multi-channel tensor of noise map", "= 1 return (stopping, score_seq) def get_max_noise_in_maps(lm, chn=3): ''' Description: To find out", "a multi-channel tensor of refined noise map ''' kernel = np.ones((dilk, dilk)) lm_numpy", "to clean image or irradience) #print(noise_s_map) # different from the official code, here", "h, c) * noise_s_map #use the new variance to shift the normal distribution", "np.rot90(out, k=2) elif mode == 5: # rotate 180 degree and flip out", "1) level_tensor = level_tensor.repeat(1, 1, h, w) return level_tensor def scal2map_spatial(level1, level2, h,", "h, c = image.shape noisy = image.copy() for chn in range(c): mix_thre =", "get_max_noise_in_maps(NM_tensor) nl_list = ( lb + up ) * 0.5 noise_map = np.zeros((1,", "estimation_model, plot_flag, stopping [Output] plot the middle vector score_seq: the matching score sequence", "Description: To refine the estimated noise level maps [Input] the noise map tensor,", "value of the noise level [5] Get the CDF thresholded value [Output] a", "to match these two print(score) score_seq.append(score) if score <= thre: print('optimal scale is", "*: #uni = np.random.uniform(-factor,factor,(w, h, c)) #uni = uni.reshape(w, h, c) #noisy =", "image.shape[1]), (6, image.shape[0], image.shape[1])) NM_tensor = torch.from_numpy(noise_map).type(torch.FloatTensor) NM_tensor = Variable(NM_tensor.cuda(),volatile=True) #generate blur images", "= Out.data.squeeze(0).cpu().numpy() elif mode == 2: out_numpy = Out.data.squeeze(1).cpu().numpy() if out_numpy.shape[0] == 1:", "match these two print(score) score_seq.append(score) if score <= thre: print('optimal scale is %d:'", "1: out_numpy = reverse_pixelshuffle(out_numpy, pss, refill, refill_img, refill_ind) if rescale == 1: out_numpy", "*: #vals = len(np.unique(image)) #vals = 2 ** np.ceil(np.log2(vals)) #noisy = np.random.poisson(image *", "mode == 5: # rotate 180 degree and flip out = np.rot90(out, k=2)", "two print(score) score_seq.append(score) if score <= thre: print('optimal scale is %d:' % (pss-1))", "= lm.data.cpu().numpy() lm_numpy = (np.transpose(lm_numpy, (0, 2, 3, 1))) nl_list = np.zeros((lm_numpy.shape[0], chn,", "map to make the regional estimation more smooth [3] Get the average maximum", "chn) noise_map = np.zeros((NM_tensor.shape[0], chn, NM_tensor.size()[2], NM_tensor.size()[3])) #initialize the noise map before concatenating", "== 1: out_numpy = (np.transpose(out_numpy, (1, 2, 0))) * 255.0 * scal else:", "[Return]: A mosic image of shuffled pixels ''' if scale == 1: return", "and a refinement mode Mode: [0] Get the most salient (the most frequent", "assume the input only has one channel which is 
ignored w, h, c=", "def reverse_pixelshuffle(image, scale, fill=0, fill_image=0, ind=[0,0]): ''' Discription: Given a mosaic image of", "using pixel-shuffle methods, and estimate the pdf and cdf of AWGN channel Compare", "np.rot90(out) out = np.flipud(out) elif mode == 4: # rotate 180 degree out", "data_augmentation(image, mode): out = np.transpose(image, (1,2,0)) if mode == 0: # original out", "selected_lm = np.reshape(lm_numpy[n,:,:,c], (lm_numpy.shape[1]*lm_numpy.shape[2], 1)) selected_lm = selected_lm[selected_lm>thre] if selected_lm.shape[0] == 0: nl_list[n,", "#get the sub-sampled image band = np.concatenate((band, temp), axis = 1) if band.size", "#noisy = image + uni noisy = image.copy() if noise_type == 0: #MC-AWGN", "sigma factor to [1,1,c] to multiply with the image noise_s_map = np.multiply(sigma_s, image)", "noisy_chn[prob_map >= mix_thre ] = noisy_chn[prob_map >= mix_thre ] + gauss[prob_map >= mix_thre]", "x or temp_x?? (according to clean image or irradience) #print(noise_s_map) # different from", "ref_mode == 5: nl_list = get_cdf_noise_in_maps(NM_tensor, 0.999, chn) noise_map = np.zeros((NM_tensor.shape[0], chn, NM_tensor.size()[2],", "= np.flipud(out) return np.transpose(out, (2,0,1)) def visual_va2np(Out, mode=1, ps=0, pss=1, scal=1, rescale=0, w=10,", "[3] Get the average maximum value of the noise level [5] Get the", "= lm_numpy[:, :, c] nmap_dilation = cv2.dilate(nmap, kernel, iterations=1) ref_lm_numpy[:, :, c] =", "prob_map = np.random.uniform(0.0, 1.0, (w, h)) noise_map = np.random.uniform(0.0, 1.0, (w, h)) noisy_chn", "level map ''' #get a tensor from the input level level_tensor = torch.from_numpy(np.reshape(level,", "original one if ref_mode == 0 or ref_mode == 1 or ref_mode ==", "a multi-channel tensor of noise map [Output] a multi-channel tensor of refined noise", "1: #MC-RVIN model for chn in range(c): #process each channel separately prob_map =", "np.histogram(selected_lm, density=True) nl_ind = np.argmax(hist[0]) #print(nl_ind) #print(hist[0]) #print(hist[1]) nl = ( hist[1][nl_ind] +", "the map if ref_mode == 0 or ref_mode == 4: nl_list = get_salient_noise_in_maps(NM_tensor,", "== 1: out_numpy = np.tile(out_numpy, (3, 1, 1)) if mode == 0 or", "== *: #vals = len(np.unique(image)) #vals = 2 ** np.ceil(np.log2(vals)) #noisy = np.random.poisson(image", "noise_s #add signal_independent noise to L noise_c = np.zeros((w, h, c)) for chn", "selected_lm[selected_lm>thre] if selected_lm.shape[0] == 0: nl_list[n, c] = 0 else: hist = np.histogram(selected_lm,", "and estimate the pdf and cdf of AWGN channel Compare the changes of", "flag != 0: score = get_pdf_matching_score(EPDF, Pre_PDF) #TODO: How to match these two", "sigma_c[chn], (w, h)) noisy = noisy + noise_c return noisy #generate AWGN-RVIN noise", "np.array([]) for hs in range(scale): temp = image[ws::scale, hs::scale, :] #get the sub-sampled", "limit noisy_image = pixelshuffle(noisy_image, pss) INoisy = np2ts(noisy_image, color) INoisy = Variable(INoisy.cuda(), volatile=True)", "dx pdf_list[n, c, :] = F #sio.savemat(mark + str(c) + '.mat',{'F':F}) # plt.bar(range(10),", "np.ones((dilk, dilk)) lm_numpy = lm.data.squeeze(0).cpu().numpy() lm_numpy = (np.transpose(lm_numpy, (1, 2, 0))) ref_lm_numpy =", "return (stopping, score_seq) def get_max_noise_in_maps(lm, chn=3): ''' Description: To find out the maximum", "= np.reshape(lm_numpy[n,:,:,c], (lm_numpy.shape[1]*lm_numpy.shape[2], 1)) selected_lm = selected_lm[selected_lm>thre] if selected_lm.shape[0] == 0: nl_list[n, c]", "= np.zeros((1, chn, NM_tensor.size()[2], 
NM_tensor.size()[3])) #initialize the noise map before concatenating noise_map[0, :,", "0. print(ref_lm_numpy) RF_tensor = np2ts(ref_lm_numpy) RF_tensor = Variable(RF_tensor.cuda(),volatile=True) return RF_tensor def level_refine(NM_tensor, ref_mode,", "90 degree out = np.rot90(out) elif mode == 3: # rotate 90 degree", "normalization: only information of 3 channels [0]'AWGN' Multi-channel Gaussian-distributed additive noise [1]'RVIN' Replaces", "out = np.transpose(image, (1,2,0)) if mode == 0: # original out = out", "noise_level_list=0, sigma_s=20, sigma_c=40): ''' Description: To generate noisy images of different types ----------", "noise_level_list[chn] = 35 noisy_img = generate_noisy(current_image, 0, noise_level_list /255.) return (noisy_img, noise_level_list) def", "n in range(NM_tensor.shape[0]): noise_map[n,:,:,:] = np.reshape(np.tile(nl_list[n], NM_tensor.size()[2] * NM_tensor.size()[3]), (chn, NM_tensor.size()[2], NM_tensor.size()[3])) RF_tensor", "in range(chn): selected_lm = np.reshape(lm_numpy[n,:,:,c], (lm_numpy.shape[1]*lm_numpy.shape[2], 1)) H, x = np.histogram(selected_lm, range=(0.,1.), bins=10,", "range(lm_numpy.shape[0]): for c in range(chn): nl = np.amax(lm_numpy[n, :, :, c]) nl_list[n, c]", "elif mode == 2: out_numpy = Out.data.squeeze(1).cpu().numpy() if out_numpy.shape[0] == 1: out_numpy =", "(6, image.shape[0], image.shape[1])) NM_tensor = torch.from_numpy(noise_map).type(torch.FloatTensor) NM_tensor = Variable(NM_tensor.cuda(),volatile=True) #generate blur images Res", "elif mode == 2: x_ts = x_ts.unsqueeze(1) return x_ts def np2ts_4d(x): x_ts =", "] = noisy_chn[prob_map >= mix_thre ] + gauss[prob_map >= mix_thre] return noisy def", "AWGN and RVIN noise together ---------- [Input] image: a float image between [0,1]", "NM_tensor.size()[3]), (chn, NM_tensor.size()[2], NM_tensor.size()[3])) RF_tensor = torch.from_numpy(noise_map).type(torch.FloatTensor) RF_tensor = Variable(RF_tensor.cuda(),volatile=True) return (RF_tensor, nl_list)", "level maps [Input] the noise map tensor, and a refinement mode Mode: [0]", "the normal distribution noisy = image + noise_s #add signal_independent noise to L", "0., 1.) 
import math
import torch
import torch.nn as nn
import numpy as np
# from skimage.measure.simple_metrics import compare_psnr
from torch.autograd import Variable
import cv2
import scipy.ndimage
import scipy.io as sio
# import matplotlib as mpl
# mpl.use('Agg')
# import matplotlib.pyplot as plt

def weights_init_kaiming(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
    elif classname.find('Linear') != -1:
        nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
    elif classname.find('BatchNorm') != -1:
        # nn.init.uniform(m.weight.data, 1.0, 0.02)
        m.weight.data.normal_(mean=0, std=math.sqrt(2./9./64.)).clamp_(-0.025, 0.025)
        nn.init.constant(m.bias.data, 0.0)

# def batch_PSNR(img, imclean, data_range):
#     Img = img.data.cpu().numpy().astype(np.float32)
#     Iclean = imclean.data.cpu().numpy().astype(np.float32)
#     PSNR = 0
#     for i in range(Img.shape[0]):
#         PSNR += compare_psnr(Iclean[i,:,:,:], Img[i,:,:,:], data_range=data_range)
#     return (PSNR/Img.shape[0])

def data_augmentation(image, mode):
    out = np.transpose(image, (1,2,0))
    if mode == 0:    # original
        out = out
    elif mode == 1:  # flip up and down
        out = np.flipud(out)
    elif mode == 2:  # rotate counterwise 90 degree
        out = np.rot90(out)
    elif mode == 3:  # rotate 90 degree and flip up and down
        out = np.rot90(out)
        out = np.flipud(out)
    elif mode == 4:  # rotate 180 degree
        out = np.rot90(out, k=2)
    elif mode == 5:  # rotate 180 degree and flip
        out = np.rot90(out, k=2)
        out = np.flipud(out)
    elif mode == 6:  # rotate 270 degree
        out = np.rot90(out, k=3)
    elif mode == 7:  # rotate 270 degree and flip
        out = np.rot90(out, k=3)
        out = np.flipud(out)
    return np.transpose(out, (2,0,1))

def visual_va2np(Out, mode=1, ps=0, pss=1, scal=1, rescale=0, w=10, h=10, c=3, refill=0, refill_img=0, refill_ind=[0, 0]):
    if mode == 0 or mode == 1 or mode == 3:
        out_numpy = Out.data.squeeze(0).cpu().numpy()
    elif mode == 2:
        out_numpy = Out.data.squeeze(1).cpu().numpy()
    if out_numpy.shape[0] == 1:
        out_numpy = np.tile(out_numpy, (3, 1, 1))
    if mode == 0 or mode == 1:
        out_numpy = (np.transpose(out_numpy, (1, 2, 0))) * 255.0 * scal
    else:
        out_numpy = (np.transpose(out_numpy, (1, 2, 0)))
    if ps == 1:
        out_numpy = reverse_pixelshuffle(out_numpy, pss, refill, refill_img, refill_ind)
    if rescale == 1:
        out_numpy = cv2.resize(out_numpy, (h, w))
    #print(out_numpy.shape)
    return out_numpy

def temp_ps_4comb(Out, In):
    pass

def np2ts(x, mode=0):  #now assume the input only has one channel which is ignored
    w, h, c = x.shape
    x_ts = x.transpose(2, 0, 1)
    x_ts = torch.from_numpy(x_ts).type(torch.FloatTensor)
    if mode == 0 or mode == 1:
        x_ts = x_ts.unsqueeze(0)
    elif mode == 2:
        x_ts = x_ts.unsqueeze(1)
    return x_ts

def np2ts_4d(x):
    x_ts = x.transpose(0, 3, 1, 2)
    x_ts = torch.from_numpy(x_ts).type(torch.FloatTensor)
    return x_ts
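# The sketch below is not part of the original utilities; it only illustrates
# how the conversion and augmentation helpers above could be exercised. The
# _demo_* name and the synthetic arrays are placeholders.
def _demo_conversion_and_augmentation():
    '''Illustrative sketch: run a synthetic image through the eight
    data_augmentation modes and convert an HWC array to a 4-D tensor.'''
    chw = np.random.rand(3, 32, 32)        # CHW layout expected by data_augmentation
    augmented = [data_augmentation(chw, m) for m in range(8)]
    hwc = np.random.rand(32, 32, 3)        # HWC layout expected by np2ts
    tensor = np2ts(hwc)                    # -> 1 x 3 x 32 x 32 FloatTensor
    return augmented, tensor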
def get_salient_noise_in_maps(lm, thre=0., chn=3):
    '''
    Description: To find out the most frequent estimated noise level in the images
    ----------
    [Input] a multi-channel tensor of noise map
    [Output] A list of noise level values
    '''
    lm_numpy = lm.data.cpu().numpy()
    lm_numpy = (np.transpose(lm_numpy, (0, 2, 3, 1)))
    nl_list = np.zeros((lm_numpy.shape[0], chn, 1))
    for n in range(lm_numpy.shape[0]):
        for c in range(chn):
            selected_lm = np.reshape(lm_numpy[n,:,:,c], (lm_numpy.shape[1]*lm_numpy.shape[2], 1))
            selected_lm = selected_lm[selected_lm > thre]
            if selected_lm.shape[0] == 0:
                nl_list[n, c] = 0
            else:
                hist = np.histogram(selected_lm, density=True)
                nl_ind = np.argmax(hist[0])
                #print(nl_ind)
                #print(hist[0])
                #print(hist[1])
                nl = (hist[1][nl_ind] + hist[1][nl_ind+1]) / 2.
                nl_list[n, c] = nl
    return nl_list

def get_cdf_noise_in_maps(lm, thre=0.8, chn=3):
    '''
    Description: To find out the most frequent estimated noise level in the images
    (reads the level at which the empirical CDF of the map crosses 0.9)
    ----------
    [Input] a multi-channel tensor of noise map
    [Output] A list of noise level values
    '''
    lm_numpy = lm.data.cpu().numpy()
    lm_numpy = (np.transpose(lm_numpy, (0, 2, 3, 1)))
    nl_list = np.zeros((lm_numpy.shape[0], chn, 1))
    for n in range(lm_numpy.shape[0]):
        for c in range(chn):
            selected_lm = np.reshape(lm_numpy[n,:,:,c], (lm_numpy.shape[1]*lm_numpy.shape[2], 1))
            H, x = np.histogram(selected_lm, density=True)
            dx = x[1] - x[0]
            F = np.cumsum(H) * dx
            F_ind = np.where(F > 0.9)[0][0]
            nl_list[n, c] = x[F_ind]
            print(nl_list[n, c])
    return nl_list

def get_pdf_in_maps(lm, mark, chn=1):
    '''
    Description: get the noise estimation pdf of each channel
    ----------
    [Input] a multi-channel tensor of noise map and channel dimension
            chn: the channel number for gaussian
    [Output] per-bin density of each sample and each channel
    '''
    lm_numpy = lm.data.cpu().numpy()
    lm_numpy = (np.transpose(lm_numpy, (0, 2, 3, 1)))
    pdf_list = np.zeros((lm_numpy.shape[0], chn, 10))
    for n in range(lm_numpy.shape[0]):
        for c in range(chn):
            selected_lm = np.reshape(lm_numpy[n,:,:,c], (lm_numpy.shape[1]*lm_numpy.shape[2], 1))
            H, x = np.histogram(selected_lm, range=(0., 1.), bins=10, density=True)
            dx = x[1] - x[0]
            F = H * dx
            pdf_list[n, c, :] = F
            #sio.savemat(mark + str(c) + '.mat', {'F':F})
            # plt.bar(range(10), F)
            #plt.savefig(mark + str(c) + '.png')
            # plt.close()
    return pdf_list

def get_pdf_matching_score(F1, F2):
    '''
    Description: Given two sets of pdf, get the overall matching score for each channel
    -----------
    [Input] F1, F2
    [Output] score for each channel
    '''
    return np.mean((F1 - F2) ** 2)

def decide_scale_factor(noisy_image, estimation_model, color=1, thre=0, plot_flag=1, stopping=4, mark=''):
    '''
    Description: Given a noisy image and the noise estimation model, keep multiscaling the image
                 using pixel-shuffle methods, and estimate the pdf of the AWGN channel.
                 Compare the changes of the density function and decide the optimal scaling factor
    ------------
    [Input] noisy_image, estimation_model, plot_flag, stopping
    [Output] score_seq: the matching score sequence between the two subsequent pdfs
             opt_scale: the optimal scaling factor
    '''
    if color == 1:
        c = 3
    elif color == 0:
        c = 1
    score_seq = []
    Pre_PDF = None
    flag = 0
    for pss in range(1, stopping + 1):  #scaling factor from 1 to the limit
        noisy_image = pixelshuffle(noisy_image, pss)
        INoisy = np2ts(noisy_image, color)
        INoisy = Variable(INoisy.cuda(), volatile=True)
        EMap = torch.clamp(estimation_model(INoisy), 0., 1.)
        EPDF = get_pdf_in_maps(EMap, mark + str(pss), c)[0]
        if flag != 0:
            score = get_pdf_matching_score(EPDF, Pre_PDF)  #TODO: How to match these two
            print(score)
            score_seq.append(score)
            if score <= thre:
                print('optimal scale is %d:' % (pss-1))
                return (pss-1, score_seq)
        Pre_PDF = EPDF
        flag = 1
    return (stopping, score_seq)

def get_max_noise_in_maps(lm, chn=3):
    '''
    Description: To find out the maximum level of noise level in the images
    ----------
    [Input] a multi-channel tensor of noise map
    [Output] A list of noise level values
    '''
    lm_numpy = lm.data.cpu().numpy()
    lm_numpy = (np.transpose(lm_numpy, (0, 2, 3, 1)))
    nl_list = np.zeros((lm_numpy.shape[0], chn, 1))
    for n in range(lm_numpy.shape[0]):
        for c in range(chn):
            nl = np.amax(lm_numpy[n, :, :, c])
            nl_list[n, c] = nl
    return nl_list
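# Sketch (not in the original file): the scale-selection logic above compares
# the per-bin noise-level histograms produced at successive pixel-shuffle
# scales. The two synthetic maps below stand in for estimator outputs.
def _demo_pdf_matching():
    '''Illustrative sketch: a small matching score means the two noise-level
    distributions, and hence the two scales, look alike.'''
    lm_a = torch.rand(1, 1, 16, 16) * 0.3          # stand-in noise map at scale s
    lm_b = torch.rand(1, 1, 16, 16) * 0.3 + 0.4    # stand-in noise map at scale s+1
    pdf_a = get_pdf_in_maps(lm_a, mark='a', chn=1)[0]
    pdf_b = get_pdf_in_maps(lm_b, mark='b', chn=1)[0]
    return get_pdf_matching_score(pdf_a, pdf_b)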
def get_smooth_maps(lm, dilk=50, gsd=10):
    '''
    Description: To return the refined maps after dilation and gaussian blur
    [Input] a multi-channel tensor of noise map
    [Output] a multi-channel tensor of refined noise map
    '''
    kernel = np.ones((dilk, dilk))
    lm_numpy = lm.data.squeeze(0).cpu().numpy()
    lm_numpy = (np.transpose(lm_numpy, (1, 2, 0)))
    ref_lm_numpy = lm_numpy.copy()  #a refined map
    for c in range(lm_numpy.shape[2]):
        nmap = lm_numpy[:, :, c]
        nmap_dilation = cv2.dilate(nmap, kernel, iterations=1)
        ref_lm_numpy[:, :, c] = nmap_dilation
        #ref_lm_numpy[:, :, c] = scipy.ndimage.filters.gaussian_filter(nmap_dilation, gsd)
    RF_tensor = np2ts(ref_lm_numpy)
    RF_tensor = Variable(RF_tensor.cuda(), volatile=True)
    return RF_tensor

def zeroing_out_maps(lm, keep=0):
    '''
    Only keep one channel and zero out the other channels
    [Input] a multi-channel tensor of noise map
    [Output] a multi-channel tensor of noise map after zeroing out items
    '''
    lm_numpy = lm.data.squeeze(0).cpu().numpy()
    lm_numpy = (np.transpose(lm_numpy, (1, 2, 0)))
    ref_lm_numpy = lm_numpy.copy()  #a refined map
    for c in range(lm_numpy.shape[2]):
        if np.isin(c, keep) == 0:
            ref_lm_numpy[:, :, c] = 0.
    print(ref_lm_numpy)
    RF_tensor = np2ts(ref_lm_numpy)
    RF_tensor = Variable(RF_tensor.cuda(), volatile=True)
    return RF_tensor

def level_refine(NM_tensor, ref_mode, chn=3, cFlag=False):
    '''
    Description: To refine the estimated noise level maps
    [Input] the noise map tensor, and a refinement mode
    Mode:
    [0] Get the most salient (the most frequent estimated noise level)
    [1] Get the maximum value of noise level
    [2] Gaussian smooth the noise level map to make the regional estimation more smooth
    [3] Get the average of the salient and the maximum noise level
    [5] Get the CDF thresholded value
    [Output] a refined map tensor with four channels
    '''
    #RF_tensor = NM_tensor.clone()  #get a clone version of NM tensor without changing the original one
    if ref_mode == 0 or ref_mode == 1 or ref_mode == 4 or ref_mode == 5:  #if we use a single value for the map
        if ref_mode == 0 or ref_mode == 4:
            nl_list = get_salient_noise_in_maps(NM_tensor, 0., chn)
            if ref_mode == 4:  #zero out the estimation
                nl_list = nl_list - nl_list
            print(nl_list)
        elif ref_mode == 1:
            nl_list = get_max_noise_in_maps(NM_tensor, chn)
        elif ref_mode == 5:
            nl_list = get_cdf_noise_in_maps(NM_tensor, 0.999, chn)
        noise_map = np.zeros((NM_tensor.shape[0], chn, NM_tensor.size()[2], NM_tensor.size()[3]))  #initialize the noise map before concatenating
        for n in range(NM_tensor.shape[0]):
            noise_map[n, :, :, :] = np.reshape(np.tile(nl_list[n], NM_tensor.size()[2] * NM_tensor.size()[3]),
                                               (chn, NM_tensor.size()[2], NM_tensor.size()[3]))
        RF_tensor = torch.from_numpy(noise_map).type(torch.FloatTensor)
        if torch.cuda.is_available() and not cFlag:
            RF_tensor = Variable(RF_tensor.cuda(), volatile=True)
        else:
            RF_tensor = Variable(RF_tensor, volatile=True)
    elif ref_mode == 2:
        RF_tensor = get_smooth_maps(NM_tensor, 10, 5)
    elif ref_mode == 3:
        lb = get_salient_noise_in_maps(NM_tensor)
        up = get_max_noise_in_maps(NM_tensor)
        nl_list = (lb + up) * 0.5
        noise_map = np.zeros((1, chn, NM_tensor.size()[2], NM_tensor.size()[3]))  #initialize the noise map before concatenating
        noise_map[0, :, :, :] = np.reshape(np.tile(nl_list, NM_tensor.size()[2] * NM_tensor.size()[3]),
                                           (chn, NM_tensor.size()[2], NM_tensor.size()[3]))
        RF_tensor = torch.from_numpy(noise_map).type(torch.FloatTensor)
        RF_tensor = Variable(RF_tensor.cuda(), volatile=True)
    return (RF_tensor, nl_list)

def normalize(a, len_v, min_v, max_v):
    '''
    normalize the sequence of factors
    '''
    norm_a = np.reshape(a, (len_v, 1))
    norm_a = (norm_a - float(min_v)) / float(max_v - min_v)
    return norm_a
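# Sketch (not in the original file): the two per-channel summaries that
# level_refine() relies on, computed on a synthetic batch of noise maps;
# the map values below are arbitrary.
def _demo_level_summaries():
    '''Illustrative sketch: modal and maximum noise level per channel.'''
    nm = torch.rand(2, 3, 8, 8) * 0.5                         # fake 3-channel noise maps
    salient = get_salient_noise_in_maps(nm, thre=0., chn=3)   # most frequent level
    peak = get_max_noise_in_maps(nm, chn=3)                   # maximum level
    return salient, peak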
def generate_training_noisy_image(current_image, s_or_m, limit_set, c, val=0):
    noise_level_list = np.zeros((c, 1))
    if s_or_m == 0:  #single noise type
        if val == 0:
            for chn in range(c):
                noise_level_list[chn] = np.random.uniform(limit_set[0][0], limit_set[0][1])
        elif val == 1:
            for chn in range(c):
                noise_level_list[chn] = 35
        noisy_img = generate_noisy(current_image, 0, noise_level_list / 255.)
    return (noisy_img, noise_level_list)

def generate_ground_truth_noise_map(noise_map, n, noise_level_list, limit_set, c, pn, pw, ph):
    for chn in range(c):
        noise_level_list[chn] = normalize(noise_level_list[chn], 1, limit_set[0][0], limit_set[0][1])  #normalize the level value
    noise_map[n, :, :, :] = np.reshape(np.tile(noise_level_list, pw * ph), (c, pw, ph))  #total number of channels
    return noise_map

#Add noise to the original images
def generate_noisy(image, noise_type, noise_level_list=0, sigma_s=20, sigma_c=40):
    '''
    Description: To generate noisy images of different types
    ----------
    [Input]
    image : ndarray of float type in [0,1], just one image; gray or color input (w,h,c) is currently supported
    noise_type: 0,1,2
    noise_level_list: pre-defined noise level for each channel, without normalization (only 3 channels are used)
    [0]'AWGN'             Multi-channel Gaussian-distributed additive noise
    [1]'RVIN'             Replaces random pixels with 0 or 1; noise_level is the ratio of changed pixels
    [2]'Gaussian-Poisson' GP noise approximator, the combination of signal-dependent and signal-independent noise
    [Output]
    A noisy image
    '''
    w, h, c = image.shape
    #Some unused noise types: Poisson and Uniform
    #if noise_type == *:
        #vals = len(np.unique(image))
        #vals = 2 ** np.ceil(np.log2(vals))
        #noisy = np.random.poisson(image * vals) / float(vals)
    #if noise_type == *:
        #uni = np.random.uniform(-factor, factor, (w, h, c))
        #uni = uni.reshape(w, h, c)
        #noisy = image + uni
    noisy = image.copy()
    if noise_type == 0:  #MC-AWGN model
        gauss = np.zeros((w, h, c))
        for chn in range(c):
            gauss[:,:,chn] = np.random.normal(0, noise_level_list[chn], (w, h))
        noisy = image + gauss
    elif noise_type == 1:  #MC-RVIN model
        for chn in range(c):  #process each channel separately
            prob_map = np.random.uniform(0.0, 1.0, (w, h))
            noise_map = np.random.uniform(0.0, 1.0, (w, h))
            noisy_chn = noisy[:, :, chn]
            noisy_chn[prob_map < noise_level_list[chn]] = noise_map[prob_map < noise_level_list[chn]]
    elif noise_type == 2:  #Gaussian-Poisson approximator
        #sigma_s = np.random.uniform(0.0, 0.16, (3,))
        #sigma_c = np.random.uniform(0.0, 0.06, (3,))
        sigma_c = [sigma_c] * 3
        sigma_s = [sigma_s] * 3
        sigma_s = np.reshape(sigma_s, (1, 1, c))  #reshape the sigma factor to [1,1,c] to multiply with the image
        noise_s_map = np.multiply(sigma_s, image)  #according to x or temp_x?? (according to clean image or irradiance)
        #print(noise_s_map)  # different from the official code, here we use the original clean image x to compute the variance
        noise_s = np.random.randn(w, h, c) * noise_s_map  #use the new variance to shift the normal distribution
        noisy = image + noise_s
        #add signal-independent noise to L
        noise_c = np.zeros((w, h, c))
        for chn in range(3):
            noise_c[:, :, chn] = np.random.normal(0, sigma_c[chn], (w, h))
        noisy = noisy + noise_c
    return noisy

#generate AWGN-RVIN noise together
def generate_comp_noisy(image, noise_level_list):
    '''
    Description: To generate mixed AWGN and RVIN noise together
    ----------
    [Input]
    image: a float image between [0,1]
    noise_level_list: AWGN and RVIN noise levels
    [Output]
    A noisy image
    '''
    w, h, c = image.shape
    noisy = image.copy()
    for chn in range(c):
        mix_thre = noise_level_list[c + chn]  #get the mix ratio of AWGN and RVIN
        gau_std = noise_level_list[chn]       #get the gaussian std
        prob_map = np.random.uniform(0, 1, (w, h))   #the prob map
        noise_map = np.random.uniform(0, 1, (w, h))  #the noisy map
        noisy_chn = noisy[:, :, chn]
        noisy_chn[prob_map < mix_thre] = noise_map[prob_map < mix_thre]
        gauss = np.random.normal(0, gau_std, (w, h))
        noisy_chn[prob_map >= mix_thre] = noisy_chn[prob_map >= mix_thre] + gauss[prob_map >= mix_thre]
    return noisy

def generate_denoise(image, model, noise_level_list):
    '''
    Description: Generate denoised blur images
    ----------
    [Input] image, model, noise_level_list
    [Output] A blur image patch
    '''
    #input images
    ISource = np2ts(image)
    ISource = torch.clamp(ISource, 0., 1.)
    ISource = Variable(ISource.cuda(), volatile=True)
    #input denoise conditions
    noise_map = np.zeros((1, 6, image.shape[0], image.shape[1]))  #initialize the noise map before concatenating
    noise_map[0, :, :, :] = np.reshape(np.tile(noise_level_list, image.shape[0] * image.shape[1]),
                                       (6, image.shape[0], image.shape[1]))
    NM_tensor = torch.from_numpy(noise_map).type(torch.FloatTensor)
    NM_tensor = Variable(NM_tensor.cuda(), volatile=True)
    #generate blur images
    Res = model(ISource, NM_tensor)
    Out = torch.clamp(ISource - Res, 0., 1.)
    out_numpy = Out.data.squeeze(0).cpu().numpy()
    out_numpy = np.transpose(out_numpy, (1, 2, 0))
    return out_numpy
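# Sketch (not in the original file): corrupt one synthetic RGB image with the
# AWGN-only model and with the mixed AWGN-RVIN model defined above. The noise
# levels below are arbitrary choices, already normalized to [0,1].
def _demo_synthetic_noise():
    '''Illustrative sketch: AWGN corruption vs. mixed AWGN-RVIN corruption.'''
    clean = np.random.rand(32, 32, 3)
    awgn_only = generate_noisy(clean, 0, np.array([15., 25., 35.]) / 255.)
    # first c entries: per-channel AWGN stds; next c entries: RVIN (impulse) ratios
    mixed = generate_comp_noisy(clean, np.array([0.06, 0.06, 0.06, 0.1, 0.1, 0.1]))
    return awgn_only, mixed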
get_salient_noise_in_maps(NM_tensor) up = get_max_noise_in_maps(NM_tensor) nl_list = ( lb + up )", "of noise level value ''' lm_numpy = lm.data.cpu().numpy() lm_numpy = (np.transpose(lm_numpy, (0, 2,", "= np.flipud(out) elif mode == 2: # rotate counterwise 90 degree out =", "nl_list[n, c] = 0 else: hist = np.histogram(selected_lm, density=True) nl_ind = np.argmax(hist[0]) #print(nl_ind)", "h, c = image.shape #Some unused noise type: Poisson and Uniform #if noise_type", "the regional estimation more smooth [3] Get the average maximum value of the", "noise_type, noise_level_list=0, sigma_s=20, sigma_c=40): ''' Description: To generate noisy images of different types", "of CDF, get the overall matching score for each channel ----------- [Input] F1,", "to the limit noisy_image = pixelshuffle(noisy_image, pss) INoisy = np2ts(noisy_image, color) INoisy =", ":, :, :] = np.reshape(np.tile(noise_level_list, pw * ph), (c, pw, ph)) #total number", "the changes of the density function and decide the optimal scaling factor ------------", "for chn in range(c): #process each channel separately prob_map = np.random.uniform(0.0, 1.0, (w,", "scaling factor ''' if color == 1: c = 3 elif color ==", ") #the noisy map noisy_chn = noisy[: ,: ,chn] noisy_chn[prob_map < mix_thre ]", "images def generate_noisy(image, noise_type, noise_level_list=0, sigma_s=20, sigma_c=40): ''' Description: To generate noisy images", "4 or ref_mode==5: #if we use a single value for the map if", "refill_img, refill_ind) if rescale == 1: out_numpy = cv2.resize(out_numpy, (h, w)) #print(out_numpy.shape) return", "level_tensor = torch.from_numpy(np.reshape(level, (1,1))).type(torch.FloatTensor) #make the noise level to a map level_tensor =", "out_numpy = reverse_pixelshuffle(out_numpy, pss, refill, refill_img, refill_ind) if rescale == 1: out_numpy =", "in range(Img.shape[0]): # PSNR += compare_psnr(Iclean[i,:,:,:], Img[i,:,:,:], data_range=data_range) # return (PSNR/Img.shape[0]) def data_augmentation(image,", "chn=3): ''' Description: To find out the maximum level of noise level in", "% (pss-1)) return (pss-1, score_seq) Pre_PDF = EPDF flag = 1 return (stopping,", "lm.data.squeeze(0).cpu().numpy() lm_numpy = (np.transpose(lm_numpy, (1, 2, 0))) ref_lm_numpy = lm_numpy.copy() #a refined map", "= torch.from_numpy(noise_map).type(torch.FloatTensor) if torch.cuda.is_available() and not cFlag: RF_tensor = Variable(RF_tensor.cuda(),volatile=True) else: RF_tensor =", "pass def np2ts(x, mode=0): #now assume the input only has one channel which", "nl_list[n, c] = nl return nl_list def get_smooth_maps(lm, dilk = 50, gsd =", "RF_tensor = Variable(RF_tensor,volatile=True) elif ref_mode == 2: RF_tensor = get_smooth_maps(NM_tensor, 10, 5) elif", "return pdf_list def get_pdf_matching_score(F1, F2): ''' Description: Given two sets of CDF, get", "[Input] image : ndarray of float type: [0,1] just one image, current support", "[sigma_c]*3 sigma_s = [sigma_s]*3 sigma_s = np.reshape(sigma_s, (1, 1, c)) #reshape the sigma", "sub-sampling [Input]: Image ndarray float [Return]: A mosic image of shuffled pixels '''", "np.histogram(selected_lm, normed=True) dx = x[1]-x[0] F = np.cumsum(H)*dx F_ind = np.where(F>0.9)[0][0] nl_list[n, c]", "multiply with the image noise_s_map = np.multiply(sigma_s, image) #according to x or temp_x??", "optimal scaling factor ''' if color == 1: c = 3 elif color", "[Output] a multi-channel tensor of refined noise map ''' kernel = np.ones((dilk, dilk))", "= np.multiply(sigma_s, image) #according to x or temp_x?? 
(according to clean image or", "dimension chn: the channel number for gaussian [Output] CDF function of each sample", "concatenating noise_map[0, :, :, :] = np.reshape(np.tile(noise_level_list, image.shape[0] * image.shape[1]), (6, image.shape[0], image.shape[1]))", "type: Poisson and Uniform #if noise_type == *: #vals = len(np.unique(image)) #vals =", "the occupation of the changed pixels [2]'Gaussian-Poisson' GP noise approximator, the combinatin of", ":, c] = nmap_dilation #ref_lm_numpy[:, :, c] = scipy.ndimage.filters.gaussian_filter(nmap_dilation, gsd) RF_tensor = np2ts(ref_lm_numpy)", "for c in range(chn): selected_lm = np.reshape(lm_numpy[n,:,:,c], (lm_numpy.shape[1]*lm_numpy.shape[2], 1)) H, x = np.histogram(selected_lm,", "np.reshape(np.tile(nl_list, NM_tensor.size()[2] * NM_tensor.size()[3]), (chn, NM_tensor.size()[2], NM_tensor.size()[3])) RF_tensor = torch.from_numpy(noise_map).type(torch.FloatTensor) RF_tensor = Variable(RF_tensor.cuda(),volatile=True)", "the matching score sequence between the two subsequent pdf opt_scale: the optimal scaling", "def generate_denoise(image, model, noise_level_list): ''' Description: Generate Denoised Blur Images ---------- [Input] image:", "get_max_noise_in_maps(lm, chn=3): ''' Description: To find out the maximum level of noise level", "generate_comp_noisy(image, noise_level_list): ''' Description: To generate mixed AWGN and RVIN noise together ----------", "#generate blur images Res = model(ISource, NM_tensor) Out = torch.clamp(ISource-Res, 0., 1.) out_numpy", "# rotate 270 degree out = np.rot90(out, k=3) elif mode == 7: #", "c)) for chn in range(3): noise_c [:, :, chn] = np.random.normal(0, sigma_c[chn], (w,", "= 2 ** np.ceil(np.log2(vals)) #noisy = np.random.poisson(image * vals) / float(vals) #if noise_type", "np.reshape(a, (len_v,1)) norm_a = (norm_a - float(min_v)) / float(max_v - min_v) return norm_a", "== 1: out_numpy = cv2.resize(out_numpy, (h, w)) #print(out_numpy.shape) return out_numpy def temp_ps_4comb(Out, In):", "signal-dependent and signal independent noise [Output] A noisy image ''' w, h, c", "np2ts(ref_lm_numpy) RF_tensor = Variable(RF_tensor.cuda(),volatile=True) def zeroing_out_maps(lm, keep=0): ''' Only Keep one channel and", "refill=0, refill_img=0, refill_ind=[0, 0]): if mode == 0 or mode == 1 or", "= np.random.uniform(-factor,factor,(w, h, c)) #uni = uni.reshape(w, h, c) #noisy = image +", "noise_c = np.zeros((w, h, c)) for chn in range(3): noise_c [:, :, chn]", "h) ) #the noisy map noisy_chn = noisy[: ,: ,chn] noisy_chn[prob_map < mix_thre", "in range(NM_tensor.shape[0]): noise_map[n,:,:,:] = np.reshape(np.tile(nl_list[n], NM_tensor.size()[2] * NM_tensor.size()[3]), (chn, NM_tensor.size()[2], NM_tensor.size()[3])) RF_tensor =", "refined map for c in range(lm_numpy.shape[2]): nmap = lm_numpy[:, :, c] nmap_dilation =", "different from the official code, here we use the original clean image x", "Get the maximum value of noise level [2] Gaussian smooth the noise level", "np.ceil(np.log2(vals)) #noisy = np.random.poisson(image * vals) / float(vals) #if noise_type == *: #uni", "# plt.close() return pdf_list def get_pdf_matching_score(F1, F2): ''' Description: Given two sets of", "ind=[0,0]): ''' Discription: Given a mosaic image of subsampling, recombine it to a", "out = np.rot90(out, k=2) elif mode == 5: # rotate 180 degree and", "mode='fan_in') elif classname.find('BatchNorm') != -1: # nn.init.uniform(m.weight.data, 1.0, 0.02) m.weight.data.normal_(mean=0, std=math.sqrt(2./9./64.)).clamp_(-0.025,0.025) 
def data_augmentation(image, mode):
    out = np.transpose(image, (1, 2, 0))
    if mode == 0:
        # original
        out = out
    elif mode == 1:
        # flip up and down
        out = np.flipud(out)
    elif mode == 2:
        # rotate counterwise 90 degree
        out = np.rot90(out)
    elif mode == 3:
        # rotate 90 degree and flip up and down
        out = np.rot90(out)
        out = np.flipud(out)
    elif mode == 4:
        # rotate 180 degree
        out = np.rot90(out, k=2)
    elif mode == 5:
        # rotate 180 degree and flip
        out = np.rot90(out, k=2)
        out = np.flipud(out)
    elif mode == 6:
        # rotate 270 degree
        out = np.rot90(out, k=3)
    elif mode == 7:
        # rotate 270 degree and flip
        out = np.rot90(out, k=3)
        out = np.flipud(out)
    return np.transpose(out, (2, 0, 1))

def visual_va2np(Out, mode=1, ps=0, pss=1, scal=1, rescale=0, w=10, h=10, c=3, refill=0, refill_img=0, refill_ind=[0, 0]):
    if mode == 0 or mode == 1 or mode == 3:
        out_numpy = Out.data.squeeze(0).cpu().numpy()
    elif mode == 2:
        out_numpy = Out.data.squeeze(1).cpu().numpy()
    if out_numpy.shape[0] == 1:
        out_numpy = np.tile(out_numpy, (3, 1, 1))
    if mode == 0 or mode == 1:
        out_numpy = (np.transpose(out_numpy, (1, 2, 0))) * 255.0 * scal
    else:
        out_numpy = (np.transpose(out_numpy, (1, 2, 0)))
    if ps == 1:
        out_numpy = reverse_pixelshuffle(out_numpy, pss, refill, refill_img, refill_ind)
    if rescale == 1:
        out_numpy = cv2.resize(out_numpy, (h, w))
    # print(out_numpy.shape)
    return out_numpy

def temp_ps_4comb(Out, In):
    pass

def np2ts(x, mode=0):
    # now assume the input only has one channel which is ignored
    w, h, c = x.shape
    x_ts = x.transpose(2, 0, 1)
    x_ts = torch.from_numpy(x_ts).type(torch.FloatTensor)
    if mode == 0 or mode == 1:
        x_ts = x_ts.unsqueeze(0)
    elif mode == 2:
        x_ts = x_ts.unsqueeze(1)
    return x_ts

def np2ts_4d(x):
    x_ts = x.transpose(0, 3, 1, 2)
    x_ts = torch.from_numpy(x_ts).type(torch.FloatTensor)
    return x_ts
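# A minimal sketch of the numpy-to-tensor helpers above (illustrative helper, using
# the module-level numpy/torch imports): np2ts maps an HxWxC float image to a
# 1xCxHxW FloatTensor, which is the layout the downstream models expect.
def _demo_np2ts():
    img = np.random.rand(16, 16, 3).astype(np.float32)  # HWC image in [0,1]
    ts = np2ts(img)                                      # -> shape (1, 3, 16, 16)
    assert ts.shape == (1, 3, 16, 16)
    return ts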
def get_salient_noise_in_maps(lm, thre=0., chn=3):
    '''
    Description: To find out the most frequent estimated noise level in the images
    ----------
    [Input]
    a multi-channel tensor of noise map
    [Output]
    A list of noise level value
    '''
    lm_numpy = lm.data.cpu().numpy()
    lm_numpy = (np.transpose(lm_numpy, (0, 2, 3, 1)))
    nl_list = np.zeros((lm_numpy.shape[0], chn, 1))
    for n in range(lm_numpy.shape[0]):
        for c in range(chn):
            selected_lm = np.reshape(lm_numpy[n, :, :, c], (lm_numpy.shape[1] * lm_numpy.shape[2], 1))
            selected_lm = selected_lm[selected_lm > thre]
            if selected_lm.shape[0] == 0:
                nl_list[n, c] = 0
            else:
                hist = np.histogram(selected_lm, density=True)
                nl_ind = np.argmax(hist[0])
                # print(nl_ind)
                # print(hist[0])
                # print(hist[1])
                nl = (hist[1][nl_ind] + hist[1][nl_ind + 1]) / 2.
                nl_list[n, c] = nl
    return nl_list

def get_cdf_noise_in_maps(lm, thre=0.8, chn=3):
    '''
    Description: To find out the most frequent estimated noise level in the images
    ----------
    [Input]
    a multi-channel tensor of noise map
    [Output]
    A list of noise level value
    '''
    lm_numpy = lm.data.cpu().numpy()
    lm_numpy = (np.transpose(lm_numpy, (0, 2, 3, 1)))
    nl_list = np.zeros((lm_numpy.shape[0], chn, 1))
    for n in range(lm_numpy.shape[0]):
        for c in range(chn):
            selected_lm = np.reshape(lm_numpy[n, :, :, c], (lm_numpy.shape[1] * lm_numpy.shape[2], 1))
            H, x = np.histogram(selected_lm, normed=True)
            dx = x[1] - x[0]
            F = np.cumsum(H) * dx
            F_ind = np.where(F > 0.9)[0][0]
            nl_list[n, c] = x[F_ind]
            print(nl_list[n, c])
    return nl_list

def get_pdf_in_maps(lm, mark, chn=1):
    '''
    Description: get the noise estimation cdf of each channel
    ----------
    [Input]
    a multi-channel tensor of noise map and channel dimension
    chn: the channel number for gaussian
    [Output]
    CDF function of each sample and each channel
    '''
    lm_numpy = lm.data.cpu().numpy()
    lm_numpy = (np.transpose(lm_numpy, (0, 2, 3, 1)))
    pdf_list = np.zeros((lm_numpy.shape[0], chn, 10))
    for n in range(lm_numpy.shape[0]):
        for c in range(chn):
            selected_lm = np.reshape(lm_numpy[n, :, :, c], (lm_numpy.shape[1] * lm_numpy.shape[2], 1))
            H, x = np.histogram(selected_lm, range=(0., 1.), bins=10, normed=True)
            dx = x[1] - x[0]
            F = H * dx
            pdf_list[n, c, :] = F
            # sio.savemat(mark + str(c) + '.mat', {'F': F})
            # plt.bar(range(10), F)
            # plt.savefig(mark + str(c) + '.png')
            # plt.close()
    return pdf_list

def get_pdf_matching_score(F1, F2):
    '''
    Description: Given two sets of CDF, get the overall matching score for each channel
    -----------
    [Input] F1, F2
    [Output] score for each channel
    '''
    return np.mean((F1 - F2) ** 2)
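# A minimal sketch (illustrative helper): on a nearly constant noise map, the
# histogram-mode estimate returned by get_salient_noise_in_maps above should sit
# close to the true level. The shapes and the 0.3 test level are assumptions made
# only for this demonstration.
def _demo_get_salient_noise_in_maps():
    true_level = 0.3
    lm = torch.full((1, 3, 32, 32), true_level) + 0.01 * torch.rand(1, 3, 32, 32)
    nl = get_salient_noise_in_maps(lm)   # -> array of shape (1, 3, 1)
    assert abs(float(nl[0, 0]) - true_level) < 0.05
    return nl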
def decide_scale_factor(noisy_image, estimation_model, color=1, thre=0, plot_flag=1, stopping=4, mark=''):
    '''
    Description: Given a noisy image and the noise estimation model, keep multiscaling the image
    using pixel-shuffle methods, and estimate the pdf and cdf of the AWGN channel.
    Compare the changes of the density function and decide the optimal scaling factor
    ------------
    [Input] noisy_image, estimation_model, plot_flag, stopping
    [Output] plot the middle vector
    score_seq: the matching score sequence between the two subsequent pdf
    opt_scale: the optimal scaling factor
    '''
    if color == 1:
        c = 3
    elif color == 0:
        c = 1
    score_seq = []
    Pre_CDF = None
    flag = 0
    for pss in range(1, stopping + 1):  # scaling factor from 1 to the limit
        noisy_image = pixelshuffle(noisy_image, pss)
        INoisy = np2ts(noisy_image, color)
        INoisy = Variable(INoisy.cuda(), volatile=True)
        EMap = torch.clamp(estimation_model(INoisy), 0., 1.)
        EPDF = get_pdf_in_maps(EMap, mark + str(pss), c)[0]
        if flag != 0:
            score = get_pdf_matching_score(EPDF, Pre_PDF)  # TODO: How to match these two
            print(score)
            score_seq.append(score)
            if score <= thre:
                print('optimal scale is %d:' % (pss - 1))
                return (pss - 1, score_seq)
        Pre_PDF = EPDF
        flag = 1
    return (stopping, score_seq)
def get_max_noise_in_maps(lm, chn=3):
    '''
    Description: To find out the maximum level of noise level in the images
    ----------
    [Input]
    a multi-channel tensor of noise map
    [Output]
    A list of noise level value
    '''
    lm_numpy = lm.data.cpu().numpy()
    lm_numpy = (np.transpose(lm_numpy, (0, 2, 3, 1)))
    nl_list = np.zeros((lm_numpy.shape[0], chn, 1))
    for n in range(lm_numpy.shape[0]):
        for c in range(chn):
            nl = np.amax(lm_numpy[n, :, :, c])
            nl_list[n, c] = nl
    return nl_list

def get_smooth_maps(lm, dilk=50, gsd=10):
    '''
    Description: To return the refined maps after dilation and gaussian blur
    [Input] a multi-channel tensor of noise map
    [Output] a multi-channel tensor of refined noise map
    '''
    kernel = np.ones((dilk, dilk))
    lm_numpy = lm.data.squeeze(0).cpu().numpy()
    lm_numpy = (np.transpose(lm_numpy, (1, 2, 0)))
    ref_lm_numpy = lm_numpy.copy()  # a refined map
    for c in range(lm_numpy.shape[2]):
        nmap = lm_numpy[:, :, c]
        nmap_dilation = cv2.dilate(nmap, kernel, iterations=1)
        ref_lm_numpy[:, :, c] = nmap_dilation
        # ref_lm_numpy[:, :, c] = scipy.ndimage.filters.gaussian_filter(nmap_dilation, gsd)
    RF_tensor = np2ts(ref_lm_numpy)
    RF_tensor = Variable(RF_tensor.cuda(), volatile=True)
    return RF_tensor  # hand the refined map back to callers such as level_refine (mode 2)

def zeroing_out_maps(lm, keep=0):
    '''
    Only Keep one channel and zero out other channels
    [Input] a multi-channel tensor of noise map
    [Output] a multi-channel tensor of noise map after zeroing out items
    '''
    lm_numpy = lm.data.squeeze(0).cpu().numpy()
    lm_numpy = (np.transpose(lm_numpy, (1, 2, 0)))
    ref_lm_numpy = lm_numpy.copy()  # a refined map
    for c in range(lm_numpy.shape[2]):
        if np.isin(c, keep) == 0:
            ref_lm_numpy[:, :, c] = 0.
    print(ref_lm_numpy)
    RF_tensor = np2ts(ref_lm_numpy)
    RF_tensor = Variable(RF_tensor.cuda(), volatile=True)
    return RF_tensor
def level_refine(NM_tensor, ref_mode, chn=3, cFlag=False):
    '''
    Description: To refine the estimated noise level maps
    [Input] the noise map tensor, and a refinement mode
    Mode:
    [0] Get the most salient (the most frequent estimated noise level)
    [1] Get the maximum value of noise level
    [2] Gaussian smooth the noise level map to make the regional estimation more smooth
    [3] Get the average maximum value of the noise level
    [5] Get the noise level from the CDF of the map (see get_cdf_noise_in_maps)
    [Output] a refined map tensor with four channels
    '''
    # RF_tensor = NM_tensor.clone()  # get a clone version of NM tensor without changing the original one
    if ref_mode == 0 or ref_mode == 1 or ref_mode == 4 or ref_mode == 5:  # if we use a single value for the map
        if ref_mode == 0 or ref_mode == 4:
            nl_list = get_salient_noise_in_maps(NM_tensor, 0., chn)
            if ref_mode == 4:  # half the estimation
                nl_list = nl_list - nl_list
            print(nl_list)
        elif ref_mode == 1:
            nl_list = get_max_noise_in_maps(NM_tensor, chn)
        elif ref_mode == 5:
            nl_list = get_cdf_noise_in_maps(NM_tensor, 0.999, chn)
        noise_map = np.zeros((NM_tensor.shape[0], chn, NM_tensor.size()[2], NM_tensor.size()[3]))  # initialize the noise map before concatenating
        for n in range(NM_tensor.shape[0]):
            noise_map[n, :, :, :] = np.reshape(np.tile(nl_list[n], NM_tensor.size()[2] * NM_tensor.size()[3]),
                                               (chn, NM_tensor.size()[2], NM_tensor.size()[3]))
        RF_tensor = torch.from_numpy(noise_map).type(torch.FloatTensor)
        if torch.cuda.is_available() and not cFlag:
            RF_tensor = Variable(RF_tensor.cuda(), volatile=True)
        else:
            RF_tensor = Variable(RF_tensor, volatile=True)
    elif ref_mode == 2:
        RF_tensor = get_smooth_maps(NM_tensor, 10, 5)
    elif ref_mode == 3:
        lb = get_salient_noise_in_maps(NM_tensor)
        up = get_max_noise_in_maps(NM_tensor)
        nl_list = (lb + up) * 0.5
        noise_map = np.zeros((1, chn, NM_tensor.size()[2], NM_tensor.size()[3]))  # initialize the noise map before concatenating
        noise_map[0, :, :, :] = np.reshape(np.tile(nl_list, NM_tensor.size()[2] * NM_tensor.size()[3]),
                                           (chn, NM_tensor.size()[2], NM_tensor.size()[3]))
        RF_tensor = torch.from_numpy(noise_map).type(torch.FloatTensor)
        RF_tensor = Variable(RF_tensor.cuda(), volatile=True)
    return (RF_tensor, nl_list)

def normalize(a, len_v, min_v, max_v):
    '''
    normalize the sequence of factors
    '''
    norm_a = np.reshape(a, (len_v, 1))
    norm_a = (norm_a - float(min_v)) / float(max_v - min_v)
    return norm_a
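# A minimal sketch (illustrative helper): normalize() rescales raw noise levels into
# [0,1] given the configured limits; the [0, 75] range below is just an example.
def _demo_normalize():
    norm = normalize(np.array([30.0]), 1, 0, 75)   # -> array([[0.4]])
    assert abs(float(norm[0, 0]) - 0.4) < 1e-6
    return norm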
(according to", "image band = np.concatenate((band, temp), axis = 1) if band.size else temp mosaic", "the pdf and cdf of AWGN channel Compare the changes of the density", "refined map for c in range(lm_numpy.shape[2]): if np.isin(c,keep)==0: ref_lm_numpy[:, :, c] = 0.", "-1: # nn.init.uniform(m.weight.data, 1.0, 0.02) m.weight.data.normal_(mean=0, std=math.sqrt(2./9./64.)).clamp_(-0.025,0.025) nn.init.constant(m.bias.data, 0.0) # def batch_PSNR(img, imclean,", "most salient (the most frequent estimated noise level) [1] Get the maximum value", "np.random.normal(0, sigma_c[chn], (w, h)) noisy = noisy + noise_c return noisy #generate AWGN-RVIN", "#sigma_c = np.random.uniform(0.0, 0.06, (3,)) sigma_c = [sigma_c]*3 sigma_s = [sigma_s]*3 sigma_s =", "chn: the channel number for gaussian [Output] CDF function of each sample and", "+ hist[1][nl_ind+1] ) / 2. nl_list[n, c] = nl return nl_list def get_cdf_noise_in_maps(lm,", "== *: #uni = np.random.uniform(-factor,factor,(w, h, c)) #uni = uni.reshape(w, h, c) #noisy", "= np.histogram(selected_lm, density=True) nl_ind = np.argmax(hist[0]) #print(nl_ind) #print(hist[0]) #print(hist[1]) nl = ( hist[1][nl_ind]", "''' kernel = np.ones((dilk, dilk)) lm_numpy = lm.data.squeeze(0).cpu().numpy() lm_numpy = (np.transpose(lm_numpy, (1, 2,", "generate mixed AWGN and RVIN noise together ---------- [Input] image: a float image", "rescale == 1: out_numpy = cv2.resize(out_numpy, (h, w)) #print(out_numpy.shape) return out_numpy def temp_ps_4comb(Out,", "return nl_list def get_smooth_maps(lm, dilk = 50, gsd = 10): ''' Description: To", "noise map [Output] a multi-channel tensor of refined noise map ''' kernel =", "ref_mode == 4 or ref_mode==5: #if we use a single value for the", "and Uniform #if noise_type == *: #vals = len(np.unique(image)) #vals = 2 **", "get_smooth_maps(NM_tensor, 10, 5) elif ref_mode == 3: lb = get_salient_noise_in_maps(NM_tensor) up = get_max_noise_in_maps(NM_tensor)", "out_numpy.shape[0] == 1: out_numpy = np.tile(out_numpy, (3, 1, 1)) if mode == 0", "between the two subsequent pdf opt_scale: the optimal scaling factor ''' if color", "to multiply with the image noise_s_map = np.multiply(sigma_s, image) #according to x or", "(stopping, score_seq) def get_max_noise_in_maps(lm, chn=3): ''' Description: To find out the maximum level", "0 # for i in range(Img.shape[0]): # PSNR += compare_psnr(Iclean[i,:,:,:], Img[i,:,:,:], data_range=data_range) #", "clone version of NM tensor without changing the original one if ref_mode ==", "and hs==ind[1]: real[ws::scale, hs::scale, :] = fill_image[wf:wf+wc, hf:hf+hc, :] else: real[ws::scale, hs::scale, :]", "scal=1, rescale=0, w=10, h=10, c=3, refill=0, refill_img=0, refill_ind=[0, 0]): if mode == 0", "counterwise 90 degree out = np.rot90(out) elif mode == 3: # rotate 90", "or mode == 1: x_ts = x_ts.unsqueeze(0) elif mode == 2: x_ts =", ":, c] = 0. print(ref_lm_numpy) RF_tensor = np2ts(ref_lm_numpy) RF_tensor = Variable(RF_tensor.cuda(),volatile=True) return RF_tensor", "(w, h)) noisy_chn[prob_map >= mix_thre ] = noisy_chn[prob_map >= mix_thre ] + gauss[prob_map", "nl_list = nl_list - nl_list print(nl_list) elif ref_mode == 1: nl_list = get_max_noise_in_maps(NM_tensor,", "/ 2. 
nl_list[n, c] = nl return nl_list def get_cdf_noise_in_maps(lm, thre=0.8, chn=3): '''", "= wf + wc return real def scal2map(level, h, w, min_v=0., max_v=255.): '''", "RF_tensor = np2ts(ref_lm_numpy) RF_tensor = Variable(RF_tensor.cuda(),volatile=True) def zeroing_out_maps(lm, keep=0): ''' Only Keep one", "return out_numpy def temp_ps_4comb(Out, In): pass def np2ts(x, mode=0): #now assume the input", "np.histogram(selected_lm, range=(0.,1.), bins=10, normed=True) dx = x[1]-x[0] F = H * dx pdf_list[n,", "ratio of AWGN and RVIN gau_std = noise_level_list[chn] #get the gaussian std prob_map", "image.shape[1])) #initialize the noise map before concatenating noise_map[0, :, :, :] = np.reshape(np.tile(noise_level_list,", "= lm.data.squeeze(0).cpu().numpy() lm_numpy = (np.transpose(lm_numpy, (1, 2, 0))) ref_lm_numpy = lm_numpy.copy() #a refined", "and channel dimension chn: the channel number for gaussian [Output] CDF function of", "= np.rot90(out) elif mode == 3: # rotate 90 degree and flip up", "w [Return]: a pytorch tensor of the cacatenated noise level map ''' #get", "additive noise [1]'RVIN' Replaces random pixels with 0 or 1. noise_level: ratio of", "and zero out other channels [Input] a multi-channel tensor of noise map [Output]", "out = np.flipud(out) elif mode == 4: # rotate 180 degree out =", "CDF function of each sample and each channel ''' lm_numpy = lm.data.cpu().numpy() lm_numpy", "= np.reshape(np.tile(nl_list[n], NM_tensor.size()[2] * NM_tensor.size()[3]), (chn, NM_tensor.size()[2], NM_tensor.size()[3])) RF_tensor = torch.from_numpy(noise_map).type(torch.FloatTensor) if torch.cuda.is_available()", "1.0, 0.02) m.weight.data.normal_(mean=0, std=math.sqrt(2./9./64.)).clamp_(-0.025,0.025) nn.init.constant(m.bias.data, 0.0) # def batch_PSNR(img, imclean, data_range): # Img", "mixed AWGN and RVIN noise together ---------- [Input] image: a float image between", "#if we use a single value for the map if ref_mode == 0", "classname.find('Conv') != -1: nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in') elif classname.find('Linear') != -1: nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in')", "h)) noisy_chn[prob_map >= mix_thre ] = noisy_chn[prob_map >= mix_thre ] + gauss[prob_map >=", "rotate 90 degree and flip up and down out = np.rot90(out) out =", "the cacatenated noise level map ''' #get a tensor from the input level", "= reverse_pixelshuffle(out_numpy, pss, refill, refill_img, refill_ind) if rescale == 1: out_numpy = cv2.resize(out_numpy,", "chn, 10)) for n in range(lm_numpy.shape[0]): for c in range(chn): selected_lm = np.reshape(lm_numpy[n,:,:,c],", "noise_level_list) def generate_ground_truth_noise_map(noise_map, n, noise_level_list, limit_set, c, pn, pw, ph): for chn in", "noise_level_list: AWGN and RVIN noise level [Output] A noisy image ''' w, h,", "#according to x or temp_x?? 
(according to clean image or irradience) #print(noise_s_map) #", "nn.init.constant(m.bias.data, 0.0) # def batch_PSNR(img, imclean, data_range): # Img = img.data.cpu().numpy().astype(np.float32) # Iclean", "the current images if fill==1 and ws==ind[0] and hs==ind[1]: real[ws::scale, hs::scale, :] =", "np.concatenate((mosaic, band), axis = 0) if mosaic.size else band return mosaic def reverse_pixelshuffle(image,", "degree out = np.rot90(out) elif mode == 3: # rotate 90 degree and", "180 degree and flip out = np.rot90(out, k=2) out = np.flipud(out) elif mode", "if noise_type == 0: #MC-AWGN model gauss = np.zeros((w, h, c)) for chn", "h, w, min_v=0., max_v=255.): ''' Change a single normalized noise level value to", "noisy image ''' w, h, c = image.shape noisy = image.copy() for chn", "or 1. noise_level: ratio of the occupation of the changed pixels [2]'Gaussian-Poisson' GP", "[Output] A blur image patch ''' #input images ISource = np2ts(image) ISource =", "for n in range(NM_tensor.shape[0]): noise_map[n,:,:,:] = np.reshape(np.tile(nl_list[n], NM_tensor.size()[2] * NM_tensor.size()[3]), (chn, NM_tensor.size()[2], NM_tensor.size()[3]))", "noise_level_list, limit_set, c, pn, pw, ph): for chn in range(c): noise_level_list[chn] = normalize(noise_level_list[chn],", "for chn in range(3): noise_c [:, :, chn] = np.random.normal(0, sigma_c[chn], (w, h))", "= Variable(RF_tensor,volatile=True) elif ref_mode == 2: RF_tensor = get_smooth_maps(NM_tensor, 10, 5) elif ref_mode", "out the maximum level of noise level in the images ---------- [Input] a", "str(pss), c)[0] if flag != 0: score = get_pdf_matching_score(EPDF, Pre_PDF) #TODO: How to", "signal_independent noise to L noise_c = np.zeros((w, h, c)) for chn in range(3):", "model for chn in range(c): #process each channel separately prob_map = np.random.uniform(0.0, 1.0,", "from the official code, here we use the original clean image x to", "noise level value ''' lm_numpy = lm.data.cpu().numpy() lm_numpy = (np.transpose(lm_numpy, (0, 2, 3,", "= (np.transpose(lm_numpy, (0, 2, 3, 1))) pdf_list = np.zeros((lm_numpy.shape[0], chn, 10)) for n", "mode==3: out_numpy = Out.data.squeeze(0).cpu().numpy() elif mode == 2: out_numpy = Out.data.squeeze(1).cpu().numpy() if out_numpy.shape[0]", "#vals = len(np.unique(image)) #vals = 2 ** np.ceil(np.log2(vals)) #noisy = np.random.poisson(image * vals)", "np.zeros((w, h, c)) #real image wf = 0 hf = 0 for ws", "map ''' #get a tensor from the input level level_tensor = torch.from_numpy(np.reshape(level, (1,1))).type(torch.FloatTensor)", "level for each channel, without normalization: only information of 3 channels [0]'AWGN' Multi-channel", "1, 1) level_tensor = level_tensor.repeat(1, 1, h, w) return level_tensor def scal2map_spatial(level1, level2,", "mode == 1: x_ts = x_ts.unsqueeze(0) elif mode == 2: x_ts = x_ts.unsqueeze(1)", "rotate 270 degree and flip out = np.rot90(out, k=3) out = np.flipud(out) return", "h, c)) #uni = uni.reshape(w, h, c) #noisy = image + uni noisy", "Get the most salient (the most frequent estimated noise level) [1] Get the", "before concatenating noise_map[0, :, :, :] = np.reshape(np.tile(nl_list, NM_tensor.size()[2] * NM_tensor.size()[3]), (chn, NM_tensor.size()[2],", "mode == 4: # rotate 180 degree out = np.rot90(out, k=2) elif mode", "images ---------- [Input] a multi-channel tensor of noise map [Output] A list of", "(1,2,0)) if mode == 0: # original out = out elif mode ==", "mosaic def reverse_pixelshuffle(image, scale, fill=0, fill_image=0, ind=[0,0]): ''' Discription: Given a mosaic image", 
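# A minimal sketch (illustrative helper): corrupt a random RGB image with channel-wise
# AWGN (noise_type 0). The per-channel sigmas are given in 8-bit units and divided by
# 255, mirroring how generate_training_noisy_image calls generate_noisy above.
def _demo_generate_awgn():
    clean = np.random.rand(32, 32, 3)
    sigmas = np.array([15.0, 25.0, 35.0]) / 255.0    # per-channel std in [0,1] units
    noisy = generate_noisy(clean, 0, sigmas)
    assert noisy.shape == clean.shape
    return noisy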
"chn=1): ''' Description: get the noise estimation cdf of each channel ---------- [Input]", "c] = nmap_dilation #ref_lm_numpy[:, :, c] = scipy.ndimage.filters.gaussian_filter(nmap_dilation, gsd) RF_tensor = np2ts(ref_lm_numpy) RF_tensor", "= np.zeros((w, h, c)) #real image wf = 0 hf = 0 for", "2 ** np.ceil(np.log2(vals)) #noisy = np.random.poisson(image * vals) / float(vals) #if noise_type ==", "== 0: nl_list[n, c] = 0 else: hist = np.histogram(selected_lm, density=True) nl_ind =", "in range(lm_numpy.shape[2]): nmap = lm_numpy[:, :, c] nmap_dilation = cv2.dilate(nmap, kernel, iterations=1) ref_lm_numpy[:,", "without changing the original one if ref_mode == 0 or ref_mode == 1", "input (w,h,c) noise_type: 0,1,2,3 noise_level_list: pre-defined noise level for each channel, without normalization:", "< mix_thre ] = noise_map[prob_map < mix_thre ] gauss = np.random.normal(0, gau_std, (w,", "hist[1][nl_ind] + hist[1][nl_ind+1] ) / 2. nl_list[n, c] = nl return nl_list def", "1: out_numpy = np.tile(out_numpy, (3, 1, 1)) if mode == 0 or mode", "noisy images of different types ---------- [Input] image : ndarray of float type:", "independent noise [Output] A noisy image ''' w, h, c = image.shape #Some", "matplotlib as mpl # mpl.use('Agg') # import matplotlib.pyplot as plt def weights_init_kaiming(m): classname", "concatenating for n in range(NM_tensor.shape[0]): noise_map[n,:,:,:] = np.reshape(np.tile(nl_list[n], NM_tensor.size()[2] * NM_tensor.size()[3]), (chn, NM_tensor.size()[2],", "* dx pdf_list[n, c, :] = F #sio.savemat(mark + str(c) + '.mat',{'F':F}) #", "NM_tensor.size()[3]), (chn, NM_tensor.size()[2], NM_tensor.size()[3])) RF_tensor = torch.from_numpy(noise_map).type(torch.FloatTensor) if torch.cuda.is_available() and not cFlag: RF_tensor", "stopping+1): #scaling factor from 1 to the limit noisy_image = pixelshuffle(noisy_image, pss) INoisy", "noisy def generate_denoise(image, model, noise_level_list): ''' Description: Generate Denoised Blur Images ---------- [Input]", "[Output] CDF function of each sample and each channel ''' lm_numpy = lm.data.cpu().numpy()", "ph), (c, pw, ph)) #total number of channels return noise_map #Add noise to", "np.concatenate((band, temp), axis = 1) if band.size else temp mosaic = np.concatenate((mosaic, band),", "nl_list = np.zeros((lm_numpy.shape[0], chn, 1)) for n in range(lm_numpy.shape[0]): for c in range(chn):", "Images ---------- [Input] image: model: noise_level_list: [Output] A blur image patch ''' #input", "= (np.transpose(lm_numpy, (0, 2, 3, 1))) nl_list = np.zeros((lm_numpy.shape[0], chn, 1)) for n", "0., chn=3): ''' Description: To find out the most frequent estimated noise level", "#MC-RVIN model for chn in range(c): #process each channel separately prob_map = np.random.uniform(0.0,", "prob_map < noise_level_list[chn] ] elif noise_type == 2: #sigma_s = np.random.uniform(0.0, 0.16, (3,))", "import scipy.io as sio # import matplotlib as mpl # mpl.use('Agg') # import", "np.transpose(out, (2,0,1)) def visual_va2np(Out, mode=1, ps=0, pss=1, scal=1, rescale=0, w=10, h=10, c=3, refill=0,", "the sub-sampled image band = np.concatenate((band, temp), axis = 1) if band.size else", "3: # rotate 90 degree and flip up and down out = np.rot90(out)", "c= x.shape x_ts = x.transpose(2, 0, 1) x_ts = torch.from_numpy(x_ts).type(torch.FloatTensor) if mode ==", "middle vector score_seq: the matching score sequence between the two subsequent pdf opt_scale:", "temp_x?? 
def generate_denoise(image, model, noise_level_list):
    '''
    Description: Generate Denoised Blur Images
    ----------
    [Input]
    image:
    model:
    noise_level_list:
    [Output]
    A blur image patch
    '''
    # input images
    ISource = np2ts(image)
    ISource = torch.clamp(ISource, 0., 1.)
    ISource = Variable(ISource.cuda(), volatile=True)
    # input denoise conditions
    noise_map = np.zeros((1, 6, image.shape[0], image.shape[1]))  # initialize the noise map before concatenating
    noise_map[0, :, :, :] = np.reshape(np.tile(noise_level_list, image.shape[0] * image.shape[1]),
                                       (6, image.shape[0], image.shape[1]))
    NM_tensor = torch.from_numpy(noise_map).type(torch.FloatTensor)
    NM_tensor = Variable(NM_tensor.cuda(), volatile=True)
    # generate blur images
    Res = model(ISource, NM_tensor)
    Out = torch.clamp(ISource - Res, 0., 1.)
    out_numpy = Out.data.squeeze(0).cpu().numpy()
    out_numpy = np.transpose(out_numpy, (1, 2, 0))
    return out_numpy
"90 degree and flip up and down out = np.rot90(out) out = np.flipud(out)", "2, 0)) return out_numpy #TODO: two pixel shuffle functions to process the images", "Image [Return]: Recombine it using different portions of pixels ''' w, h, c", "return x_ts def np2ts_4d(x): x_ts = x.transpose(0, 3, 1, 2) x_ts = torch.from_numpy(x_ts).type(torch.FloatTensor)", "from 1 to the limit noisy_image = pixelshuffle(noisy_image, pss) INoisy = np2ts(noisy_image, color)", "1.) EPDF = get_pdf_in_maps(EMap, mark + str(pss), c)[0] if flag != 0: score", "4: # rotate 180 degree out = np.rot90(out, k=2) elif mode == 5:", "[] Pre_CDF = None flag = 0 for pss in range(1, stopping+1): #scaling", "approximator, the combinatin of signal-dependent and signal independent noise [Output] A noisy image", "of noise map [Output] a multi-channel tensor of noise map after zeroing out", "range(c): #process each channel separately prob_map = np.random.uniform(0.0, 1.0, (w, h)) noise_map =", "flip up and down out = np.rot90(out) out = np.flipud(out) elif mode ==", "''' return np.mean((F1-F2)**2) def decide_scale_factor(noisy_image, estimation_model, color=1, thre = 0, plot_flag = 1,", "in range(lm_numpy.shape[2]): if np.isin(c,keep)==0: ref_lm_numpy[:, :, c] = 0. print(ref_lm_numpy) RF_tensor = np2ts(ref_lm_numpy)", "[Output] A list of noise level value ''' lm_numpy = lm.data.cpu().numpy() lm_numpy =", "norm_a = np.reshape(a, (len_v,1)) norm_a = (norm_a - float(min_v)) / float(max_v - min_v)", "or irradience) #print(noise_s_map) # different from the official code, here we use the", "using different portions of pixels ''' w, h, c = image.shape real =", "= imclean.data.cpu().numpy().astype(np.float32) # PSNR = 0 # for i in range(Img.shape[0]): # PSNR", "(np.transpose(lm_numpy, (0, 2, 3, 1))) pdf_list = np.zeros((lm_numpy.shape[0], chn, 10)) for n in", "= H * dx pdf_list[n, c, :] = F #sio.savemat(mark + str(c) +", "= np.array([]) for ws in range(scale): band = np.array([]) for hs in range(scale):", "the input only has one channel which is ignored w, h, c= x.shape", "2: #sigma_s = np.random.uniform(0.0, 0.16, (3,)) #sigma_c = np.random.uniform(0.0, 0.06, (3,)) sigma_c =", "(the most frequent estimated noise level) [1] Get the maximum value of noise", "np.random.uniform(0.0, 0.06, (3,)) sigma_c = [sigma_c]*3 sigma_s = [sigma_s]*3 sigma_s = np.reshape(sigma_s, (1,", "def scal2map(level, h, w, min_v=0., max_v=255.): ''' Change a single normalized noise level", "range(scale): hf = 0 for hs in range(scale): temp = real[ws::scale, hs::scale, :]", "(w, h)) noisy = image + gauss elif noise_type == 1: #MC-RVIN model", "H, x = np.histogram(selected_lm, range=(0.,1.), bins=10, normed=True) dx = x[1]-x[0] F = H", "map [Output] a multi-channel tensor of noise map after zeroing out items '''", "4: #half the estimation nl_list = nl_list - nl_list print(nl_list) elif ref_mode ==", "noise_map[ prob_map < noise_level_list[chn] ] elif noise_type == 2: #sigma_s = np.random.uniform(0.0, 0.16,", "lm_numpy = lm.data.squeeze(0).cpu().numpy() lm_numpy = (np.transpose(lm_numpy, (1, 2, 0))) ref_lm_numpy = lm_numpy.copy() #a", "to a map [Input]: level: a scaler noise level(0-1), h, w [Return]: a", "map and channel dimension chn: the channel number for gaussian [Output] CDF function", "= image.shape real = np.zeros((w, h, c)) #real image wf = 0 hf", "noise type if val == 0: for chn in range(c): noise_level_list[chn] = np.random.uniform(limit_set[0][0],", "function of each sample and each channel ''' lm_numpy = lm.data.cpu().numpy() lm_numpy =", "noise_level: ratio 
of the occupation of the changed pixels [2]'Gaussian-Poisson' GP noise approximator,", "= Variable(RF_tensor.cuda(),volatile=True) return RF_tensor def level_refine(NM_tensor, ref_mode, chn=3,cFlag=False): ''' Description: To refine the", "== 4: nl_list = get_salient_noise_in_maps(NM_tensor, 0., chn) if ref_mode == 4: #half the", "mode == 7: # rotate 270 degree and flip out = np.rot90(out, k=3)", "each channel ---------- [Input] a multi-channel tensor of noise map and channel dimension", "level) [1] Get the maximum value of noise level [2] Gaussian smooth the", "AWGN and RVIN noise level [Output] A noisy image ''' w, h, c", "# PSNR += compare_psnr(Iclean[i,:,:,:], Img[i,:,:,:], data_range=data_range) # return (PSNR/Img.shape[0]) def data_augmentation(image, mode): out", "Img[i,:,:,:], data_range=data_range) # return (PSNR/Img.shape[0]) def data_augmentation(image, mode): out = np.transpose(image, (1,2,0)) if", "vector score_seq: the matching score sequence between the two subsequent pdf opt_scale: the", "1 to the limit noisy_image = pixelshuffle(noisy_image, pss) INoisy = np2ts(noisy_image, color) INoisy", "limit_set[0][1]) elif val == 1: for chn in range(c): noise_level_list[chn] = 35 noisy_img", "return np.transpose(out, (2,0,1)) def visual_va2np(Out, mode=1, ps=0, pss=1, scal=1, rescale=0, w=10, h=10, c=3,", "ws==ind[0] and hs==ind[1]: real[ws::scale, hs::scale, :] = fill_image[wf:wf+wc, hf:hf+hc, :] else: real[ws::scale, hs::scale,", "if color == 1: c = 3 elif color == 0: c =", "0.999, chn) noise_map = np.zeros((NM_tensor.shape[0], chn, NM_tensor.size()[2], NM_tensor.size()[3])) #initialize the noise map before", "ratio of the occupation of the changed pixels [2]'Gaussian-Poisson' GP noise approximator, the", "= x[1]-x[0] F = H * dx pdf_list[n, c, :] = F #sio.savemat(mark", "up = get_max_noise_in_maps(NM_tensor) nl_list = ( lb + up ) * 0.5 noise_map", "up ) * 0.5 noise_map = np.zeros((1, chn, NM_tensor.size()[2], NM_tensor.size()[3])) #initialize the noise", "---------- [Input] image: model: noise_level_list: [Output] A blur image patch ''' #input images", "level2, h, w): stdN_t1 = scal2map(level1, int(h/2), w) stdN_t2 = scal2map(level2, h-int(h/2), w)", "NM_tensor = Variable(NM_tensor.cuda(),volatile=True) #generate blur images Res = model(ISource, NM_tensor) Out = torch.clamp(ISource-Res,", "gauss = np.zeros((w, h, c)) for chn in range(c): gauss[:,:,chn] = np.random.normal(0, noise_level_list[chn],", "len_v, min_v, max_v): ''' normalize the sequence of factors ''' norm_a = np.reshape(a,", "H * dx pdf_list[n, c, :] = F #sio.savemat(mark + str(c) + '.mat',{'F':F})", "for n in range(lm_numpy.shape[0]): for c in range(chn): selected_lm = np.reshape(lm_numpy[n,:,:,c], (lm_numpy.shape[1]*lm_numpy.shape[2], 1))", "multi-channel tensor of noise map after zeroing out items ''' lm_numpy = lm.data.squeeze(0).cpu().numpy()", "0.02) m.weight.data.normal_(mean=0, std=math.sqrt(2./9./64.)).clamp_(-0.025,0.025) nn.init.constant(m.bias.data, 0.0) # def batch_PSNR(img, imclean, data_range): # Img =", "c=3, refill=0, refill_img=0, refill_ind=[0, 0]): if mode == 0 or mode == 1", "map level_tensor = level_tensor.view(stdN_tensor.size(0), stdN_tensor.size(1), 1, 1) level_tensor = level_tensor.repeat(1, 1, h, w)", "#ref_lm_numpy[:, :, c] = scipy.ndimage.filters.gaussian_filter(nmap_dilation, gsd) RF_tensor = np2ts(ref_lm_numpy) RF_tensor = Variable(RF_tensor.cuda(),volatile=True) def", "== 1 or ref_mode == 4 or ref_mode==5: #if we use a single", "np.zeros((lm_numpy.shape[0], chn,1)) for n in 
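# A minimal sketch (illustrative helper): for dimensions divisible by the scale
# factor, reverse_pixelshuffle reassembles exactly what pixelshuffle pulled apart,
# which is the property the pixel-shuffle down-sampling strategy above relies on.
def _demo_pixelshuffle_roundtrip():
    img = np.random.rand(8, 8, 3)
    mosaic = pixelshuffle(img, 2)                 # 2x2 grid of sub-sampled images
    restored = reverse_pixelshuffle(mosaic, 2)    # undo the shuffling
    assert np.allclose(restored, img)
    return restored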
def get_salient_noise_in_maps(lm, thre=0., chn=3):
    '''
    Description: To find out the most frequent estimated noise level in the images
    ----------
    [Input] a multi-channel tensor of noise map
    [Output] A list of noise level value
    '''
    lm_numpy = lm.data.cpu().numpy()
    lm_numpy = (np.transpose(lm_numpy, (0, 2, 3, 1)))
    nl_list = np.zeros((lm_numpy.shape[0], chn, 1))
    for n in range(lm_numpy.shape[0]):
        for c in range(chn):
            selected_lm = np.reshape(lm_numpy[n, :, :, c], (lm_numpy.shape[1] * lm_numpy.shape[2], 1))
            selected_lm = selected_lm[selected_lm > thre]
            if selected_lm.shape[0] == 0:
                nl_list[n, c] = 0
            else:
                hist = np.histogram(selected_lm, density=True)
                nl_ind = np.argmax(hist[0])
                # print(nl_ind)
                # print(hist[0])
                # print(hist[1])
                nl = (hist[1][nl_ind] + hist[1][nl_ind + 1]) / 2.
                nl_list[n, c] = nl
    return nl_list
def get_cdf_noise_in_maps(lm, thre=0.8, chn=3):
    '''
    Description: To find out the most frequent estimated noise level in the images
    ----------
    [Input] a multi-channel tensor of noise map
    [Output] A list of noise level value
    '''
    lm_numpy = lm.data.cpu().numpy()
    lm_numpy = (np.transpose(lm_numpy, (0, 2, 3, 1)))
    nl_list = np.zeros((lm_numpy.shape[0], chn, 1))
    for n in range(lm_numpy.shape[0]):
        for c in range(chn):
            selected_lm = np.reshape(lm_numpy[n, :, :, c], (lm_numpy.shape[1] * lm_numpy.shape[2], 1))
            H, x = np.histogram(selected_lm, density=True)
            dx = x[1] - x[0]
            F = np.cumsum(H) * dx
            F_ind = np.where(F > 0.9)[0][0]
            nl_list[n, c] = x[F_ind]
            print(nl_list[n, c])
    return nl_list
def get_pdf_in_maps(lm, mark, chn=1):
    '''
    Description: get the noise estimation cdf of each channel
    ----------
    [Input] a multi-channel tensor of noise map and channel dimension
            chn: the channel number for gaussian
    [Output] CDF function of each sample and each channel
    '''
    lm_numpy = lm.data.cpu().numpy()
    lm_numpy = (np.transpose(lm_numpy, (0, 2, 3, 1)))
    pdf_list = np.zeros((lm_numpy.shape[0], chn, 10))
    for n in range(lm_numpy.shape[0]):
        for c in range(chn):
            selected_lm = np.reshape(lm_numpy[n, :, :, c], (lm_numpy.shape[1] * lm_numpy.shape[2], 1))
            H, x = np.histogram(selected_lm, range=(0., 1.), bins=10, density=True)
            dx = x[1] - x[0]
            F = H * dx
            pdf_list[n, c, :] = F
            # sio.savemat(mark + str(c) + '.mat', {'F': F})
            # plt.bar(range(10), F)
            # plt.savefig(mark + str(c) + '.png')
            # plt.close()
    return pdf_list
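# --- Illustrative sketch (not from the original file): what get_pdf_in_maps computes ---
# For each channel it builds a 10-bin histogram of the per-pixel noise-level estimates
# over [0, 1]; multiplying the density H by the bin width dx yields per-bin probability
# masses that sum to 1, i.e. a discrete PDF. A small self-contained numpy check:
def _demo_pdf_bins():
    est = np.random.uniform(0.1, 0.3, size=(32 * 32, 1))     # fake per-pixel estimates
    H, x = np.histogram(est, range=(0., 1.), bins=10, density=True)
    F = H * (x[1] - x[0])                                     # per-bin probability mass
    assert abs(F.sum() - 1.0) < 1e-6                          # masses sum to one
    return F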
def get_pdf_matching_score(F1, F2):
    '''
    Description: Given two sets of CDF, get the overall matching score for each channel
    -----------
    [Input] F1, F2
    [Output] score for each channel
    '''
    return np.mean((F1 - F2) ** 2)
def decide_scale_factor(noisy_image, estimation_model, color=1, thre=0, plot_flag=1,
                        stopping=4, mark=''):
    '''
    Description: Given a noisy image and the noise estimation model, keep multiscaling the image
    using pixel-shuffle methods, and estimate the pdf and cdf of the AWGN channel.
    Compare the changes of the density function and decide the optimal scaling factor.
    ------------
    [Input] noisy_image, estimation_model, plot_flag, stopping
    [Output] plot the middle vector
             score_seq: the matching score sequence between the two subsequent pdfs
             opt_scale: the optimal scaling factor
    '''
    if color == 1:
        c = 3
    elif color == 0:
        c = 1
    score_seq = []
    Pre_CDF = None
    flag = 0
    for pss in range(1, stopping + 1):  # scaling factor from 1 to the limit
        noisy_image = pixelshuffle(noisy_image, pss)
        INoisy = np2ts(noisy_image, color)
        INoisy = Variable(INoisy.cuda(), volatile=True)
        EMap = torch.clamp(estimation_model(INoisy), 0., 1.)
        EPDF = get_pdf_in_maps(EMap, mark + str(pss), c)[0]
        if flag != 0:
            score = get_pdf_matching_score(EPDF, Pre_PDF)  # TODO: How to match these two
            print(score)
            score_seq.append(score)
            if score <= thre:
                print('optimal scale is %d:' % (pss - 1))
                return (pss - 1, score_seq)
        Pre_PDF = EPDF
        flag = 1
    return (stopping, score_seq)
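# --- Illustrative sketch (hypothetical names, not from the original file): picking the
# pixel-shuffle stride. 'est_net' stands for any noise-level estimation network that maps
# an NCHW image tensor to a per-pixel noise map; the threshold and stopping values below
# are only examples.
def _demo_decide_scale(noisy_rgb, est_net):
    # noisy_rgb: float HWC image in [0, 1]; est_net: a torch module living on the GPU
    opt_scale, score_seq = decide_scale_factor(noisy_rgb, est_net, color=1,
                                               thre=0.01, plot_flag=0, stopping=4)
    shuffled = pixelshuffle(noisy_rgb, opt_scale)  # sub-sample with the chosen stride
    return opt_scale, score_seq, shuffled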
def get_max_noise_in_maps(lm, chn=3):
    '''
    Description: To find out the maximum level of noise level in the images
    ----------
    [Input] a multi-channel tensor of noise map
    [Output] A list of noise level value
    '''
    lm_numpy = lm.data.cpu().numpy()
    lm_numpy = (np.transpose(lm_numpy, (0, 2, 3, 1)))
    nl_list = np.zeros((lm_numpy.shape[0], chn, 1))
    for n in range(lm_numpy.shape[0]):
        for c in range(chn):
            nl = np.amax(lm_numpy[n, :, :, c])
            nl_list[n, c] = nl
    return nl_list
def get_smooth_maps(lm, dilk=50, gsd=10):
    '''
    Description: To return the refined maps after dilation and gaussian blur
    [Input] a multi-channel tensor of noise map
    [Output] a multi-channel tensor of refined noise map
    '''
    kernel = np.ones((dilk, dilk))
    lm_numpy = lm.data.squeeze(0).cpu().numpy()
    lm_numpy = (np.transpose(lm_numpy, (1, 2, 0)))
    ref_lm_numpy = lm_numpy.copy()  # a refined map
    for c in range(lm_numpy.shape[2]):
        nmap = lm_numpy[:, :, c]
        nmap_dilation = cv2.dilate(nmap, kernel, iterations=1)
        ref_lm_numpy[:, :, c] = nmap_dilation
        # ref_lm_numpy[:, :, c] = scipy.ndimage.filters.gaussian_filter(nmap_dilation, gsd)
    RF_tensor = np2ts(ref_lm_numpy)
    RF_tensor = Variable(RF_tensor.cuda(), volatile=True)
    return RF_tensor
def zeroing_out_maps(lm, keep=0):
    '''
    Only Keep one channel and zero out other channels
    [Input] a multi-channel tensor of noise map
    [Output] a multi-channel tensor of noise map after zeroing out items
    '''
    lm_numpy = lm.data.squeeze(0).cpu().numpy()
    lm_numpy = (np.transpose(lm_numpy, (1, 2, 0)))
    ref_lm_numpy = lm_numpy.copy()  # a refined map
    for c in range(lm_numpy.shape[2]):
        if np.isin(c, keep) == 0:
            ref_lm_numpy[:, :, c] = 0.
    print(ref_lm_numpy)
    RF_tensor = np2ts(ref_lm_numpy)
    RF_tensor = Variable(RF_tensor.cuda(), volatile=True)
    return RF_tensor
def level_refine(NM_tensor, ref_mode, chn=3, cFlag=False):
    '''
    Description: To refine the estimated noise level maps
    [Input] the noise map tensor, and a refinement mode
    Mode:
    [0] Get the most salient (the most frequent estimated noise level)
    [1] Get the maximum value of noise level
    [2] Gaussian smooth the noise level map to make the regional estimation more smooth
    [3] Get the average maximum value of the noise level
    [5] Get the CDF thresholded value
    [Output] a refined map tensor with four channels
    '''
    # RF_tensor = NM_tensor.clone()  # get a clone version of NM tensor without changing the original one
    if ref_mode == 0 or ref_mode == 1 or ref_mode == 4 or ref_mode == 5:  # if we use a single value for the map
        if ref_mode == 0 or ref_mode == 4:
            nl_list = get_salient_noise_in_maps(NM_tensor, 0., chn)
            if ref_mode == 4:  # half the estimation
                nl_list = nl_list - nl_list
            print(nl_list)
        elif ref_mode == 1:
            nl_list = get_max_noise_in_maps(NM_tensor, chn)
        elif ref_mode == 5:
            nl_list = get_cdf_noise_in_maps(NM_tensor, 0.999, chn)
        noise_map = np.zeros((NM_tensor.shape[0], chn, NM_tensor.size()[2], NM_tensor.size()[3]))  # initialize the noise map before concatenating
        for n in range(NM_tensor.shape[0]):
            noise_map[n, :, :, :] = np.reshape(np.tile(nl_list[n], NM_tensor.size()[2] * NM_tensor.size()[3]),
                                               (chn, NM_tensor.size()[2], NM_tensor.size()[3]))
        RF_tensor = torch.from_numpy(noise_map).type(torch.FloatTensor)
        if torch.cuda.is_available() and not cFlag:
            RF_tensor = Variable(RF_tensor.cuda(), volatile=True)
        else:
            RF_tensor = Variable(RF_tensor, volatile=True)
    elif ref_mode == 2:
        RF_tensor = get_smooth_maps(NM_tensor, 10, 5)
    elif ref_mode == 3:
        lb = get_salient_noise_in_maps(NM_tensor)
        up = get_max_noise_in_maps(NM_tensor)
        nl_list = (lb + up) * 0.5
        noise_map = np.zeros((1, chn, NM_tensor.size()[2], NM_tensor.size()[3]))  # initialize the noise map before concatenating
        noise_map[0, :, :, :] = np.reshape(np.tile(nl_list, NM_tensor.size()[2] * NM_tensor.size()[3]),
                                           (chn, NM_tensor.size()[2], NM_tensor.size()[3]))
        RF_tensor = torch.from_numpy(noise_map).type(torch.FloatTensor)
        RF_tensor = Variable(RF_tensor.cuda(), volatile=True)
    return (RF_tensor, nl_list)
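# --- Illustrative sketch (hypothetical values, not from the original file): how mode 0
# of level_refine collapses a spatial noise map into a constant map. Assumes a noise map
# tensor of shape (batch, 3, H, W) produced elsewhere.
def _demo_level_refine(NM_tensor):
    refined, nl_list = level_refine(NM_tensor, ref_mode=0, chn=3, cFlag=True)
    # 'refined' holds one constant plane per channel, filled with the most frequent
    # estimated level from get_salient_noise_in_maps; 'nl_list' stores those
    # per-channel scalars with shape (batch, 3, 1).
    return refined, nl_list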
def normalize(a, len_v, min_v, max_v):
    '''
    normalize the sequence of factors
    '''
    norm_a = np.reshape(a, (len_v, 1))
    norm_a = (norm_a - float(min_v)) / float(max_v - min_v)
    return norm_a
def generate_training_noisy_image(current_image, s_or_m, limit_set, c, val=0):
    noise_level_list = np.zeros((c, 1))
    if s_or_m == 0:  # single noise type
        if val == 0:
            for chn in range(c):
                noise_level_list[chn] = np.random.uniform(limit_set[0][0], limit_set[0][1])
        elif val == 1:
            for chn in range(c):
                noise_level_list[chn] = 35
        noisy_img = generate_noisy(current_image, 0, noise_level_list / 255.)
    return (noisy_img, noise_level_list)
def generate_ground_truth_noise_map(noise_map, n, noise_level_list, limit_set, c, pn, pw, ph):
    for chn in range(c):
        noise_level_list[chn] = normalize(noise_level_list[chn], 1, limit_set[0][0], limit_set[0][1])  # normalize the level value
    noise_map[n, :, :, :] = np.reshape(np.tile(noise_level_list, pw * ph), (c, pw, ph))  # total number of channels
    return noise_map
# Add noise to the original images
def generate_noisy(image, noise_type, noise_level_list=0, sigma_s=20, sigma_c=40):
    '''
    Description: To generate noisy images of different types
    ----------
    [Input]
    image: ndarray of float type in [0,1], just one image; currently supports gray or color input (w,h,c)
    noise_type: 0,1,2,3
    noise_level_list: pre-defined noise level for each channel, without normalization: only information of 3 channels
    [0]'AWGN'  Multi-channel Gaussian-distributed additive noise
    [1]'RVIN'  Replaces random pixels with 0 or 1. noise_level: ratio of the occupation of the changed pixels
    [2]'Gaussian-Poisson'  GP noise approximator, the combination of signal-dependent and signal-independent noise
    [Output] A noisy image
    '''
    w, h, c = image.shape
    # Some unused noise types: Poisson and Uniform
    # if noise_type == *:
    #     vals = len(np.unique(image))
    #     vals = 2 ** np.ceil(np.log2(vals))
    #     noisy = np.random.poisson(image * vals) / float(vals)
    # if noise_type == *:
    #     uni = np.random.uniform(-factor, factor, (w, h, c))
    #     uni = uni.reshape(w, h, c)
    #     noisy = image + uni
    noisy = image.copy()
    if noise_type == 0:  # MC-AWGN model
        gauss = np.zeros((w, h, c))
        for chn in range(c):
            gauss[:, :, chn] = np.random.normal(0, noise_level_list[chn], (w, h))
        noisy = image + gauss
    elif noise_type == 1:  # MC-RVIN model
        for chn in range(c):  # process each channel separately
            prob_map = np.random.uniform(0.0, 1.0, (w, h))
            noise_map = np.random.uniform(0.0, 1.0, (w, h))
            noisy_chn = noisy[:, :, chn]
            noisy_chn[prob_map < noise_level_list[chn]] = noise_map[prob_map < noise_level_list[chn]]
    elif noise_type == 2:
        # sigma_s = np.random.uniform(0.0, 0.16, (3,))
        # sigma_c = np.random.uniform(0.0, 0.06, (3,))
        sigma_c = [sigma_c] * 3
        sigma_s = [sigma_s] * 3
        sigma_s = np.reshape(sigma_s, (1, 1, c))  # reshape the sigma factor to [1,1,c] to multiply with the image
        noise_s_map = np.multiply(sigma_s, image)  # according to x or temp_x?? (according to clean image or irradiance)
        # print(noise_s_map)  # different from the official code, here we use the original clean image x to compute the variance
        noise_s = np.random.randn(w, h, c) * noise_s_map  # use the new variance to shift the normal distribution
        noisy = image + noise_s
        # add signal-independent noise to L
        noise_c = np.zeros((w, h, c))
        for chn in range(3):
            noise_c[:, :, chn] = np.random.normal(0, sigma_c[chn], (w, h))
        noisy = noisy + noise_c
    return noisy
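# --- Illustrative sketch (not from the original file): adding multi-channel AWGN with
# generate_noisy. Sigma values are given per channel on the 0-255 scale and divided by
# 255 because the image itself lives in [0, 1]; the numbers below are arbitrary examples.
def _demo_generate_awgn():
    clean = np.random.rand(32, 32, 3)
    sigmas = np.array([[15.], [25.], [35.]]) / 255.   # one std per RGB channel
    noisy = generate_noisy(clean, 0, sigmas)          # noise_type 0 -> MC-AWGN
    return noisy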
# generate AWGN-RVIN noise together
def generate_comp_noisy(image, noise_level_list):
    '''
    Description: To generate mixed AWGN and RVIN noise together
    ----------
    [Input]
    image: a float image between [0,1]
    noise_level_list: AWGN and RVIN noise level
    [Output] A noisy image
    '''
    w, h, c = image.shape
    noisy = image.copy()
    for chn in range(c):
        mix_thre = noise_level_list[c + chn]  # get the mix ratio of AWGN and RVIN
        gau_std = noise_level_list[chn]  # get the gaussian std
        prob_map = np.random.uniform(0, 1, (w, h))  # the prob map
        noise_map = np.random.uniform(0, 1, (w, h))  # the noisy map
        noisy_chn = noisy[:, :, chn]
        noisy_chn[prob_map < mix_thre] = noise_map[prob_map < mix_thre]
        gauss = np.random.normal(0, gau_std, (w, h))
        noisy_chn[prob_map >= mix_thre] = noisy_chn[prob_map >= mix_thre] + gauss[prob_map >= mix_thre]
    return noisy
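# --- Illustrative sketch (not from the original file): layout of noise_level_list for
# generate_comp_noisy. The first c entries are the per-channel gaussian stds and the next
# c entries are the per-channel impulse (RVIN) ratios; the values below are only examples.
def _demo_generate_mixed():
    clean = np.random.rand(32, 32, 3)
    levels = np.array([10. / 255, 20. / 255, 30. / 255,   # AWGN std per channel
                       0.05, 0.10, 0.15])                 # RVIN ratio per channel
    return generate_comp_noisy(clean, levels)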
def generate_denoise(image, model, noise_level_list):
    '''
    Description: Generate Denoised Blur Images
    ----------
    [Input] image, model, noise_level_list
    [Output] A blur image patch
    '''
    # input images
    ISource = np2ts(image)
    ISource = torch.clamp(ISource, 0., 1.)
    ISource = Variable(ISource.cuda(), volatile=True)
    # input denoise conditions
    noise_map = np.zeros((1, 6, image.shape[0], image.shape[1]))  # initialize the noise map before concatenating
    noise_map[0, :, :, :] = np.reshape(np.tile(noise_level_list, image.shape[0] * image.shape[1]),
                                       (6, image.shape[0], image.shape[1]))
    NM_tensor = torch.from_numpy(noise_map).type(torch.FloatTensor)
    NM_tensor = Variable(NM_tensor.cuda(), volatile=True)
    # generate blur images
    Res = model(ISource, NM_tensor)
    Out = torch.clamp(ISource - Res, 0., 1.)
    out_numpy = Out.data.squeeze(0).cpu().numpy()
    out_numpy = np.transpose(out_numpy, (1, 2, 0))
    return out_numpy
# TODO: two pixel shuffle functions to process the images
def pixelshuffle(image, scale):
    '''
    Description: Given an image, return a reversible sub-sampling
    [Input]: Image ndarray float
    [Return]: A mosaic image of shuffled pixels
    '''
    if scale == 1:
        return image
    w, h, c = image.shape
    mosaic = np.array([])
    for ws in range(scale):
        band = np.array([])
        for hs in range(scale):
            temp = image[ws::scale, hs::scale, :]  # get the sub-sampled image
            band = np.concatenate((band, temp), axis=1) if band.size else temp
        mosaic = np.concatenate((mosaic, band), axis=0) if mosaic.size else band
    return mosaic
def reverse_pixelshuffle(image, scale, fill=0, fill_image=0, ind=[0, 0]):
    '''
    Description: Given a mosaic image of subsampling, recombine it to a full image
    [Input]: Image
    [Return]: Recombine it using different portions of pixels
    '''
    w, h, c = image.shape
    real = np.zeros((w, h, c))  # real image
    wf = 0
    hf = 0
    for ws in range(scale):
        hf = 0
        for hs in range(scale):
            temp = real[ws::scale, hs::scale, :]
            wc, hc, cc = temp.shape  # get the shape of the current images
            if fill == 1 and ws == ind[0] and hs == ind[1]:
                real[ws::scale, hs::scale, :] = fill_image[wf:wf + wc, hf:hf + hc, :]
            else:
                real[ws::scale, hs::scale, :] = image[wf:wf + wc, hf:hf + hc, :]
            hf = hf + hc
        wf = wf + wc
    return real
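# --- Illustrative sketch (not from the original file): pixelshuffle and
# reverse_pixelshuffle are inverses when the image sides are divisible by the stride,
# so the sub-sampled mosaic can be fed to a denoiser and then reassembled losslessly.
def _demo_pixelshuffle_roundtrip():
    img = np.random.rand(64, 64, 3)
    mosaic = pixelshuffle(img, 2)              # 2x2 grid of sub-sampled views
    restored = reverse_pixelshuffle(mosaic, 2)
    assert np.allclose(img, restored)
    return mosaic.shape                        # still (64, 64, 3), pixels rearranged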
def scal2map(level, h, w, min_v=0., max_v=255.):
    '''
    Change a single normalized noise level value to a map
    [Input]: level: a scalar noise level (0-1), h, w
    [Return]: a pytorch tensor of the concatenated noise level map
    '''
    # get a tensor from the input level
    level_tensor = torch.from_numpy(np.reshape(level, (1, 1))).type(torch.FloatTensor)
    # expand the noise level to a map
    level_tensor = level_tensor.view(level_tensor.size(0), level_tensor.size(1), 1, 1)
    level_tensor = level_tensor.repeat(1, 1, h, w)
    return level_tensor
estimated noise level) [1] Get", "np.reshape(np.tile(noise_level_list, image.shape[0] * image.shape[1]), (6, image.shape[0], image.shape[1])) NM_tensor = torch.from_numpy(noise_map).type(torch.FloatTensor) NM_tensor = Variable(NM_tensor.cuda(),volatile=True)", "if mode == 0 or mode == 1: out_numpy = (np.transpose(out_numpy, (1, 2,", "2, 3, 1))) pdf_list = np.zeros((lm_numpy.shape[0], chn, 10)) for n in range(lm_numpy.shape[0]): for", "Recombine it using different portions of pixels ''' w, h, c = image.shape", "!= -1: nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in') elif classname.find('BatchNorm') != -1: # nn.init.uniform(m.weight.data, 1.0, 0.02)", "code, here we use the original clean image x to compute the variance", "x = np.histogram(selected_lm, normed=True) dx = x[1]-x[0] F = np.cumsum(H)*dx F_ind = np.where(F>0.9)[0][0]", "get_pdf_in_maps(lm, mark, chn=1): ''' Description: get the noise estimation cdf of each channel", "Variable(RF_tensor,volatile=True) elif ref_mode == 2: RF_tensor = get_smooth_maps(NM_tensor, 10, 5) elif ref_mode ==", "n in range(lm_numpy.shape[0]): for c in range(chn): nl = np.amax(lm_numpy[n, :, :, c])", "= torch.from_numpy(noise_map).type(torch.FloatTensor) RF_tensor = Variable(RF_tensor.cuda(),volatile=True) return (RF_tensor, nl_list) def normalize(a, len_v, min_v, max_v):", "RF_tensor = Variable(RF_tensor.cuda(),volatile=True) else: RF_tensor = Variable(RF_tensor,volatile=True) elif ref_mode == 2: RF_tensor =", "pixels ''' if scale == 1: return image w, h ,c = image.shape", "= np.random.uniform( 0, 1, (w, h) ) #the noisy map noisy_chn = noisy[:", "Gaussian smooth the noise level map to make the regional estimation more smooth", "volatile=True) EMap = torch.clamp(estimation_model(INoisy), 0., 1.) EPDF = get_pdf_in_maps(EMap, mark + str(pss), c)[0]", "Variable(RF_tensor.cuda(),volatile=True) return (RF_tensor, nl_list) def normalize(a, len_v, min_v, max_v): ''' normalize the sequence", "= np.flipud(out) elif mode == 6: # rotate 270 degree out = np.rot90(out,", "process the images def pixelshuffle(image, scale): ''' Discription: Given an image, return a", "import torch.nn as nn import numpy as np # from skimage.measure.simple_metrics import compare_psnr", "pdf_list def get_pdf_matching_score(F1, F2): ''' Description: Given two sets of CDF, get the", "= noise_level_list[chn] #get the gaussian std prob_map = np.random.uniform( 0, 1, (w, h)", "image.shape[1])) NM_tensor = torch.from_numpy(noise_map).type(torch.FloatTensor) NM_tensor = Variable(NM_tensor.cuda(),volatile=True) #generate blur images Res = model(ISource,", "NM_tensor) Out = torch.clamp(ISource-Res, 0., 1.) out_numpy = Out.data.squeeze(0).cpu().numpy() out_numpy = np.transpose(out_numpy, (1,", "tensor, and a refinement mode Mode: [0] Get the most salient (the most", "of AWGN channel Compare the changes of the density function and decide the", "* ph), (c, pw, ph)) #total number of channels return noise_map #Add noise", "map noise_map = np.random.uniform( 0, 1, (w, h) ) #the noisy map noisy_chn", "patch ''' #input images ISource = np2ts(image) ISource = torch.clamp(ISource, 0., 1.) 
ISource", "chn in range(3): noise_c [:, :, chn] = np.random.normal(0, sigma_c[chn], (w, h)) noisy", "= get_smooth_maps(NM_tensor, 10, 5) elif ref_mode == 3: lb = get_salient_noise_in_maps(NM_tensor) up =", "signal independent noise [Output] A noisy image ''' w, h, c = image.shape", "chn] = np.random.normal(0, sigma_c[chn], (w, h)) noisy = noisy + noise_c return noisy", "and RVIN noise together ---------- [Input] image: a float image between [0,1] noise_level_list:", "noisy_chn[prob_map >= mix_thre ] + gauss[prob_map >= mix_thre] return noisy def generate_denoise(image, model,", "w): stdN_t1 = scal2map(level1, int(h/2), w) stdN_t2 = scal2map(level2, h-int(h/2), w) stdN_tensor =", "types ---------- [Input] image : ndarray of float type: [0,1] just one image,", "if mosaic.size else band return mosaic def reverse_pixelshuffle(image, scale, fill=0, fill_image=0, ind=[0,0]): '''", "level_tensor = level_tensor.view(stdN_tensor.size(0), stdN_tensor.size(1), 1, 1) level_tensor = level_tensor.repeat(1, 1, h, w) return", "skimage.measure.simple_metrics import compare_psnr from torch.autograd import Variable import cv2 import scipy.ndimage import scipy.io", "fill==1 and ws==ind[0] and hs==ind[1]: real[ws::scale, hs::scale, :] = fill_image[wf:wf+wc, hf:hf+hc, :] else:", "[Output] a refined map tensor with four channels ''' #RF_tensor = NM_tensor.clone() #get", "of each sample and each channel ''' lm_numpy = lm.data.cpu().numpy() lm_numpy = (np.transpose(lm_numpy,", "of different types ---------- [Input] image : ndarray of float type: [0,1] just", "- min_v) return norm_a def generate_training_noisy_image(current_image, s_or_m, limit_set, c, val=0): noise_level_list = np.zeros((c,", "image.shape real = np.zeros((w, h, c)) #real image wf = 0 hf =", "min_v, max_v): ''' normalize the sequence of factors ''' norm_a = np.reshape(a, (len_v,1))", "''' lm_numpy = lm.data.cpu().numpy() lm_numpy = (np.transpose(lm_numpy, (0, 2, 3, 1))) pdf_list =", "channels return noise_map #Add noise to the original images def generate_noisy(image, noise_type, noise_level_list=0,", "RF_tensor = Variable(RF_tensor.cuda(),volatile=True) return (RF_tensor, nl_list) def normalize(a, len_v, min_v, max_v): ''' normalize", "thre = 0., chn=3): ''' Description: To find out the most frequent estimated", "in range(chn): nl = np.amax(lm_numpy[n, :, :, c]) nl_list[n, c] = nl return", "image\\\\ using pixel-shuffle methods, and estimate the pdf and cdf of AWGN channel", "image.shape #Some unused noise type: Poisson and Uniform #if noise_type == *: #vals", "to shift the normal distribution noisy = image + noise_s #add signal_independent noise", "def weights_init_kaiming(m): classname = m.__class__.__name__ if classname.find('Conv') != -1: nn.init.kaiming_normal(m.weight.data, a=0, mode='fan_in') elif", "noise_level_list[chn] #get the gaussian std prob_map = np.random.uniform( 0, 1, (w, h) )", "combinatin of signal-dependent and signal independent noise [Output] A noisy image ''' w,", "nl_ind = np.argmax(hist[0]) #print(nl_ind) #print(hist[0]) #print(hist[1]) nl = ( hist[1][nl_ind] + hist[1][nl_ind+1] )", "= get_cdf_noise_in_maps(NM_tensor, 0.999, chn) noise_map = np.zeros((NM_tensor.shape[0], chn, NM_tensor.size()[2], NM_tensor.size()[3])) #initialize the noise", "norm_a def generate_training_noisy_image(current_image, s_or_m, limit_set, c, val=0): noise_level_list = np.zeros((c, 1)) if s_or_m", "mode == 0 or mode == 1 or mode==3: out_numpy = Out.data.squeeze(0).cpu().numpy() elif", "Multi-channel Gaussian-distributed additive noise [1]'RVIN' 
Replaces random pixels with 0 or 1. noise_level:", "50, gsd = 10): ''' Description: To return the refined maps after dilation", "#make the noise level to a map level_tensor = level_tensor.view(stdN_tensor.size(0), stdN_tensor.size(1), 1, 1)", "10, 5) elif ref_mode == 3: lb = get_salient_noise_in_maps(NM_tensor) up = get_max_noise_in_maps(NM_tensor) nl_list", "(h, w)) #print(out_numpy.shape) return out_numpy def temp_ps_4comb(Out, In): pass def np2ts(x, mode=0): #now", "decide_scale_factor(noisy_image, estimation_model, color=1, thre = 0, plot_flag = 1, stopping = 4, mark=''):", "= image + gauss elif noise_type == 1: #MC-RVIN model for chn in", "import matplotlib as mpl # mpl.use('Agg') # import matplotlib.pyplot as plt def weights_init_kaiming(m):", "lm_numpy.copy() #a refined map for c in range(lm_numpy.shape[2]): if np.isin(c,keep)==0: ref_lm_numpy[:, :, c]", "hs in range(scale): temp = image[ws::scale, hs::scale, :] #get the sub-sampled image band", "noise_map = np.random.uniform(0.0, 1.0, (w, h)) noisy_chn = noisy[: , :, chn] noisy_chn[", "the variance noise_s = np.random.randn(w, h, c) * noise_s_map #use the new variance", "tensor of refined noise map ''' kernel = np.ones((dilk, dilk)) lm_numpy = lm.data.squeeze(0).cpu().numpy()", "channel Compare the changes of the density function and decide the optimal scaling", "lm.data.cpu().numpy() lm_numpy = (np.transpose(lm_numpy, (0, 2, 3, 1))) nl_list = np.zeros((lm_numpy.shape[0], chn,1)) for" ]
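As an aside, the following is a minimal, self-contained NumPy sketch (not part of the module above) of the idea behind pixelshuffle / reverse_pixelshuffle: strided sub-sampling into a mosaic of tiles is lossless and can be inverted exactly. The helper names subsample_mosaic and reassemble are illustrative, and the sketch assumes the image height and width are divisible by the scale.

import numpy as np

def subsample_mosaic(image, scale):
    # Rearrange an (H, W, C) image into a mosaic of its scale*scale strided sub-images.
    blocks = [image[ws::scale, hs::scale, :] for ws in range(scale) for hs in range(scale)]
    rows = [np.concatenate(blocks[i * scale:(i + 1) * scale], axis=1) for i in range(scale)]
    return np.concatenate(rows, axis=0)

def reassemble(mosaic, scale):
    # Invert subsample_mosaic by writing each tile back to its strided positions.
    h, w, c = mosaic.shape
    bh, bw = h // scale, w // scale
    out = np.zeros_like(mosaic)
    for ws in range(scale):
        for hs in range(scale):
            tile = mosaic[ws * bh:(ws + 1) * bh, hs * bw:(hs + 1) * bw, :]
            out[ws::scale, hs::scale, :] = tile
    return out

img = np.random.rand(8, 8, 3)
assert np.allclose(reassemble(subsample_mosaic(img, 2), 2), img)  # round trip is exact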
<filename>accounts/forms.py
from django import forms
from core import material_design


class AboutForm(forms.Form):
    about = forms.CharField(widget=material_design.Textarea, max_length=500, required=False)
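A minimal sketch of how AboutForm might be wired into a view, assuming a standard Django project. The view name, template path, profile attribute, and URL name are all illustrative and not taken from the project above.

from django.shortcuts import render, redirect
from accounts.forms import AboutForm

def edit_about(request):
    if request.method == 'POST':
        form = AboutForm(request.POST)
        if form.is_valid():
            request.user.profile.about = form.cleaned_data['about']  # hypothetical profile model
            request.user.profile.save()
            return redirect('about-updated')  # hypothetical URL name
    else:
        form = AboutForm()
    return render(request, 'accounts/about.html', {'form': form})  # hypothetical template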
<filename>module10-packages/deepcloudlabs/hr.py
class Employee:
    def __init__(self, fullname, email, salary):
        self.fullname = fullname
        self.email = email
        self.salary = salary

    def __str__(self):
        return f"employee (full name: {self.fullname})"
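A short usage example for the Employee class above; the import path mirrors the file location shown and the values are illustrative.

from deepcloudlabs.hr import Employee

jack = Employee("Jack Bauer", "jack.bauer@example.com", 100000.0)
print(jack)        # employee (full name: Jack Bauer)
print(jack.email)  # jack.bauer@example.com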
# Copyright 2019-2020 by <NAME>, MGLAND animation studio. All rights reserved.
# This file is part of IUTest, and is released under the "MIT License Agreement".
# Please see the LICENSE file that should have been included as part of this package.
import os

from iutest.core import pathutils
from iutest.qt import iconFromPath


def _iconDir():
    return os.path.join(pathutils.iutestPackageDir(), "icons")


def iconPath(iconName):
    return os.path.join(_iconDir(), iconName)


def iconPathSet(iconName, suffixes):
    iconDir = _iconDir()
    nameParts = list(iconName.partition("."))
    nameParts.insert(1, None)
    paths = []
    for suffix in suffixes:
        nameParts[1] = suffix
        fileName = "".join(nameParts)
        paths.append(os.path.join(iconDir, fileName))
    return paths


def initSingleClassIcon(obj, objAttributeName, iconFileName):
    path = iconPath(iconFileName)
    setattr(obj, objAttributeName, iconFromPath(path))
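A small usage sketch for the icon helpers above. The module path iutest.core.iconutils and the icon file names are assumptions for illustration only.

from iutest.qt import iconFromPath
from iutest.core import iconutils  # assumed location of the module above

refreshIconPath = iconutils.iconPath("refresh.svg")        # .../iutest/icons/refresh.svg
refreshIcon = iconFromPath(refreshIconPath)

# iconPathSet inserts each suffix before the file extension:
statePaths = iconutils.iconPathSet("state.svg", ["_normal", "_hover"])
# -> [.../icons/state_normal.svg, .../icons/state_hover.svg]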
#!/usr/bin/env python
'''
Search a p4 for good indices. This imports the file specified by the modulepath
option, reads a function called "f" from it, and filters the frames using it.
Outputs a numpy array of good trajectories.

Usage:
    ./search.py [options] <input> <hashd> <output>

Options:
    --help -h        Print this help.
    --modulepath=M   Set the path to the file to read "f" from. [default: ./scanner.py]
'''
from lspreader import read;
from pys import load_pickle;
from lspreader.pmovie import filter_hashes_from_file;
import numpy as np;
import re;
import imp;

if __name__ == "__main__":
    from docopt import docopt;
    opts = docopt(__doc__, help=True);
    fname = opts['--modulepath'];
    m = re.search(r'(^.*)/(\w+)\.py$', fname);
    if not m:
        raise ValueError("module should be well named!");
    path = m.group(1);
    mname = m.group(2);
    fp, path, desc = imp.find_module(mname, [path]);
    try:
        f = imp.load_module(mname, fp, path, desc).f
    finally:
        if fp:
            fp.close();
    hashd = load_pickle(opts['<hashd>']);
    np.save(
        opts['<output>'],
        filter_hashes_from_file(opts['<input>'], f, **hashd));
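A minimal sketch of the scanner module that search.py loads via --modulepath. The script only requires that the module define a callable named "f"; exactly how "f" is applied to each frame is decided by lspreader.pmovie.filter_hashes_from_file, so the body below (selecting particles by a hypothetical 'xi' field on the frame's data array) is purely illustrative.

# scanner.py (illustrative)
def f(frame):
    # Return a boolean mask selecting "good" particles in this frame.
    d = frame['data']        # assumed structured array of particle fields
    return d['xi'] > 0.0     # hypothetical field name and threshold

# Example invocation (file names are illustrative):
#   ./search.py --modulepath=./scanner.py sim.p4 hashd.pkl good-trajectories.npy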
[ "class for all filters. \"\"\" template_name = 'cradmin_legacy/viewhelpers/listfilter/base/abstractfilter.django.html' def __init__(self, slug=None, label=None, label_is_screenreader_only=None):", "'{}_label'.format(self.get_dom_id()) def get_inputfield_dom_id(self): \"\"\" Get the DOM id of the input field. If", "users can change their language, this should not be translatable since that would", "values_to_remove: new_values.remove(value) self.values = new_values def clean_value(self, value): \"\"\" Called by :meth:`.clean_values` to", "to the return value of the ``get_label_is_screenreader_only_by_default()`` method of :class:`cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlist.AbstractFilterList`. \"\"\" if self.label_is_screenreader_only", "attach a labels to a form field). \"\"\" return '{}_{}'.format(self.filterlist.get_dom_id_prefix(), self.get_slug()) def get_label_dom_id(self):", "focus on the first field when it is clicked. \"\"\" return '{}_input'.format(self.get_dom_id()) def", "choices if your filter is a single select filter. \"\"\" copy = self.copy()", "the value of the ``label_is_screenreader_only`` parameter (see :class:`.AbstractFilter`). If ``label_is_screenreader_only`` is ``None``, this", "the given ``values``. \"\"\" self.values = values def clear_values(self): \"\"\" Clear the current", "to attach a labels to a form field). \"\"\" return '{}_{}'.format(self.filterlist.get_dom_id_prefix(), self.get_slug()) def", "{ 'loadingmessage': self.get_loadingmessage() } def get_angularjs_options_json(self): \"\"\" Returns a json encoded and HTML", "You use this with the angularjs directive for the filter to send options", "value in new_values: if value in values_to_remove: new_values.remove(value) self.values = new_values def clean_value(self,", "as :meth:`.build_clear_values_url`. This method is for multiselect filters where the user can add/remove", "clean_value(self, value): \"\"\" Called by :meth:`.clean_values` to clean a single value. \"\"\" return", "not need to override this, but you will use it in your template", "stored in the filter. \"\"\" new_values = list(self.values) values_to_remove = values for value", "it may be used as the ID of the first field to make", "slug=None, label=None, label_is_screenreader_only=None): \"\"\" Parameters: slug: You can send the slug as a", "uses multiselect. \"\"\" copy = self.copy() copy.add_values(values) return self.filterlist.filtershandler.build_filter_url(changed_filterobject=copy) def build_remove_values_url(self, values): \"\"\"", "Django ORM, this will typically be a QuerySet, but for other backends such", "\"\"\" Called by :meth:`.clean_values` to clean a single value. \"\"\" return value def", "you do not want to show the label, you should specify one, and", "You can set this as a parameter, or override :meth:`.get_label_is_screenreader_only`. \"\"\" self.values =", "of this filter. The base template adds this to the wrapping DIV, but", "queryobject): \"\"\" Add the current values to the given ``queryobject``. This is always", "the user can add valuess to the filter (typically via checkboxes). You should", "filter allows the user to type in a values, or if you want", "target element content. \"\"\" return pgettext('listfilter loading message', 'Loading') def get_angularjs_options_dict(self): \"\"\" Get", "of the first field to make the label focus on the first field", "the given values for this filter to the current url. 
This is not", "<reponame>appressoas/cradmin_legacy<filename>cradmin_legacy/viewhelpers/listfilter/base/abstractfilter.py from __future__ import unicode_literals import json from xml.sax.saxutils import quoteattr from django.utils.translation", "the values unchanged, but you will typically want to override this if your", "values): \"\"\" Get the URL that adds this filter with the given values", "from a string into something that makes sense for your :meth:`.filter`. If you", "the angularjs directive for the filter to send options into the directive:: <someelement", "the label will be styled to only make it visible to screenreaders. This", "such as a dict. Returns: An object of the same type as the", "given list of ``values`` from the values currently stored in the filter. \"\"\"", "Get the DOM ID of the label for this filter. \"\"\" return '{}_label'.format(self.get_dom_id())", "for components of a filter (E.g.: Field ID to attach a labels to", "that we do not include any ``\"`` or ``'`` around the directives HTML", "slug: You can send the slug as a parameter, or override :meth:`.get_slug`. label:", "raise NotImplementedError('You must override get_slug(), or send a slug to __init__().') def get_label(self):", "send the label as a parameter, or override :meth:`.get_label`. label_is_screenreader_only: You can set", "return value def get_cleaned_values(self): \"\"\" Clean the values, to prepare them for usage", "usage in :meth:`.filter`. Defaults to returning the values unchanged, but you will typically", "filter. This is typically set as the ``<label>`` for the filter input field.", "in :meth:`.filter`. Defaults to returning the values unchanged, but you will typically want", "Change the current values stored in the filter to the given ``values``. \"\"\"", "this defaults to the return value of the ``get_label_is_screenreader_only_by_default()`` method of :class:`cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlist.AbstractFilterList`. \"\"\"", "filter to the given ``values``. \"\"\" self.values = values def clear_values(self): \"\"\" Clear", "NotImplementedError('You must override get_slug(), or send a slug to __init__().') def get_label(self): \"\"\"", "self.slug copy.label = self.label return copy def get_slug(self): \"\"\" Get the slug for", ":meth:`.filter` if you expect a single value. \"\"\" clean_values = self.get_cleaned_values() if len(clean_values)", "cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlistchild import AbstractFilterListChild class AbstractFilter(AbstractFilterListChild): \"\"\" Abstract base class for all filters. \"\"\"", "from the current url. You should not need to override this, but you", "them for usage in :meth:`.filter`. Defaults to returning the values unchanged, but you", "= self.__class__() copy.set_values(list(self.values)) copy.slug = self.slug copy.label = self.label return copy def get_slug(self):", "should call ``super`` to get the default options. \"\"\" return { 'loadingmessage': self.get_loadingmessage()", "Parameters: slug: You can send the slug as a parameter, or override :meth:`.get_slug`.", "use this with the angularjs directive for the filter to send options into", "filter. If your users can change their language, this should not be translatable", "a labels to a form field). \"\"\" return '{}_{}'.format(self.filterlist.get_dom_id_prefix(), self.get_slug()) def get_label_dom_id(self): \"\"\"", "all filters. 
\"\"\" template_name = 'cradmin_legacy/viewhelpers/listfilter/base/abstractfilter.django.html' def __init__(self, slug=None, label=None, label_is_screenreader_only=None): \"\"\" Parameters:", "Returns a json encoded and HTML attribute quoted version of :meth:`.get_angularjs_options_dict`. You use", "something that makes sense for your :meth:`.filter`. If you want validation, you should", ":class:`cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlist.AbstractFilterList`. \"\"\" if self.label_is_screenreader_only is None: return self.filterlist.get_label_is_screenreader_only_by_default() else: return self.label_is_screenreader_only def set_values(self,", "you expect a single value. \"\"\" clean_values = self.get_cleaned_values() if len(clean_values) > 0:", "render urls for choices if your filter uses multiselect. \"\"\" copy = self.copy()", "but highly recommended. Even if you do not want to show the label,", ":meth:`.get_label_is_screenreader_only`. \"\"\" self.values = [] self.slug = slug self.label = label self.label_is_screenreader_only =", "the query backend. If you are filtering against the Django ORM, this will", "other backends such as ElasticSearch, MongoDB, etc. this can be something completely different", "likely not add anything to the queryobject in :meth:`.filter`). \"\"\" return [self.clean_value(value) for", "build_remove_values_url(self, values): \"\"\" Get the URL that removes the given values for this", "used as the ID of the first field to make the label focus", "import AbstractFilterListChild class AbstractFilter(AbstractFilterListChild): \"\"\" Abstract base class for all filters. \"\"\" template_name", "type of the queryobject depends on the query backend. If you are filtering", "in :meth:`.filter`). \"\"\" return [self.clean_value(value) for value in self.values] def get_cleaned_value(self): \"\"\" Returns", "of the label for this filter. \"\"\" return '{}_label'.format(self.get_dom_id()) def get_inputfield_dom_id(self): \"\"\" Get", "hide it from everyone except for screenreaders with :meth:`.get_label_is_screenreader_only`. \"\"\" return self.label def", "should not need to override this, but you will use it in your", "but you will typically want to override this if your filter allows the", "parameter, or override :meth:`.get_label`. label_is_screenreader_only: You can set this as a parameter, or", "of the ``get_label_is_screenreader_only_by_default()`` method of :class:`cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlist.AbstractFilterList`. \"\"\" if self.label_is_screenreader_only is None: return self.filterlist.get_label_is_screenreader_only_by_default()", "ID of this filter. The base template adds this to the wrapping DIV,", "be used, or it may be used as the ID of the first", "stored in the filter. \"\"\" self.values = [] def add_values(self, values): \"\"\" Add", ":meth:`.filter`). \"\"\" return [self.clean_value(value) for value in self.values] def get_cleaned_value(self): \"\"\" Returns the", "pgettext from cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlistchild import AbstractFilterListChild class AbstractFilter(AbstractFilterListChild): \"\"\" Abstract base class for all", "} def get_angularjs_options_json(self): \"\"\" Returns a json encoded and HTML attribute quoted version", "in a values, or if you want to convert the values from a", "would make an URL unusable by a user with a different language (if", "to the current url. This is not the same as :meth:`.build_set_values_url`. This method", "a copy of this filter. 
\"\"\" copy = self.__class__() copy.set_values(list(self.values)) copy.slug = self.slug", "for multiselect filters where the user can add/remove valuess to the filter (typically", "multiselect filters where the user can add/remove valuess to the filter (typically via", "return '{}_input'.format(self.get_dom_id()) def get_target_dom_id(self): \"\"\" Get the DOM id of the target of", "``label_is_screenreader_only`` is ``None``, this defaults to the return value of the ``get_label_is_screenreader_only_by_default()`` method", "If you are filtering against the Django ORM, this will typically be a", "``\"`` or ``'`` around the directives HTML attribute - that is included by", "self.slug = slug self.label = label self.label_is_screenreader_only = label_is_screenreader_only super(AbstractFilter, self).__init__() def copy(self):", "from cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlistchild import AbstractFilterListChild class AbstractFilter(AbstractFilterListChild): \"\"\" Abstract base class for all filters.", "\"\"\" Returns a json encoded and HTML attribute quoted version of :meth:`.get_angularjs_options_dict`. You", "an URL with another user). \"\"\" if self.slug: return self.slug else: raise NotImplementedError('You", "this should not be translatable since that would make an URL unusable by", "self.values = [] def add_values(self, values): \"\"\" Add the given list of ``values``", "field to make the label focus on the first field when it is", "user). \"\"\" if self.slug: return self.slug else: raise NotImplementedError('You must override get_slug(), or", "the filter input field. A label is optional, but highly recommended. Even if", "You can send the slug as a parameter, or override :meth:`.get_slug`. label: You", "filter (and most likely not add anything to the queryobject in :meth:`.filter`). \"\"\"", "you should call ``super`` to get the default options. \"\"\" return { 'loadingmessage':", "such as ElasticSearch, MongoDB, etc. this can be something completely different such as", "are filtering against the Django ORM, this will typically be a QuerySet, but", "default options. \"\"\" return { 'loadingmessage': self.get_loadingmessage() } def get_angularjs_options_json(self): \"\"\" Returns a", "or it may be used as the ID of the first field to", "any ``\"`` or ``'`` around the directives HTML attribute - that is included", "queryobject in :meth:`.filter`). \"\"\" return [self.clean_value(value) for value in self.values] def get_cleaned_value(self): \"\"\"", "= self.get_cleaned_values() if len(clean_values) > 0: return clean_values[0] else: return None def build_set_values_url(self,", "the values currently stored in the filter. \"\"\" new_values = list(self.values) values_to_remove =", "this filter. The base template adds this to the wrapping DIV, but you", "\"\"\" copy = self.__class__() copy.set_values(list(self.values)) copy.slug = self.slug copy.label = self.label return copy", "\"\"\" raise NotImplementedError() def get_dom_id(self): \"\"\" Get the DOM ID of this filter.", "same as :meth:`.build_set_values_url`. 
This method is for multiselect filters where the user can", "clean_values = self.get_cleaned_values() if len(clean_values) > 0: return clean_values[0] else: return None def", "copy.add_values(values) return self.filterlist.filtershandler.build_filter_url(changed_filterobject=copy) def build_remove_values_url(self, values): \"\"\" Get the URL that removes the", "a values, or if you want to convert the values from a string", "in your template context to render urls for choices if your filter is", "expect a single value. \"\"\" clean_values = self.get_cleaned_values() if len(clean_values) > 0: return", "a label since that would break accessibility. Defaults to the value of the", "field). \"\"\" return '{}_{}'.format(self.filterlist.get_dom_id_prefix(), self.get_slug()) def get_label_dom_id(self): \"\"\" Get the DOM ID of", "fields, this will most likely not be used, or it may be used", "to the given ``values``. \"\"\" self.values = values def clear_values(self): \"\"\" Clear the", "a slug to __init__().') def get_label(self): \"\"\" Get the label of the filter.", "a form field). \"\"\" return '{}_{}'.format(self.filterlist.get_dom_id_prefix(), self.get_slug()) def get_label_dom_id(self): \"\"\" Get the DOM", "not be used, or it may be used as the ID of the", "your filter uses multiselect. \"\"\" copy = self.copy() copy.remove_values(values) return self.filterlist.filtershandler.build_filter_url(changed_filterobject=copy) def get_base_css_classes_list(self):", "copy.slug = self.slug copy.label = self.label return copy def get_slug(self): \"\"\" Get the", "def get_dom_id(self): \"\"\" Get the DOM ID of this filter. The base template", "select filter. \"\"\" copy = self.copy() copy.set_values(values) return self.filterlist.filtershandler.build_filter_url(changed_filterobject=copy) def build_clear_values_url(self): \"\"\" Get", "self.__class__() copy.set_values(list(self.values)) copy.slug = self.slug copy.label = self.label return copy def get_slug(self): \"\"\"", "change their language, this should not be translatable since that would make an", "from everyone except for screenreaders with :meth:`.get_label_is_screenreader_only`. \"\"\" return self.label def get_label_is_screenreader_only(self): \"\"\"", "this filter from the current url. You should not need to override this,", "by setting some attribute on ``self``, and handle the error in the template", "the URL that adds this filter with the given values to the current", "not include any ``\"`` or ``'`` around the directives HTML attribute - that", "the ``filter_string`` is None or empty string. Parameters: queryobject: The type of the", "to render urls for choices if your filter supports \"clear\". \"\"\" copy =", "as the ID of the first field to make the label focus on", "the filter. This is just a shortcut to access :meth:`cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlist.AbstractFilterList.get_target_dom_id`. \"\"\" return self.filterlist.get_target_dom_id()", "the ID of the first field to make the label focus on the", "uses multiple input fields, this will most likely not be used, or it", "filter (typically via checkboxes). You should not need to override this, but you", "filter to the current url. This is not the same as :meth:`.build_clear_values_url`. This", ":meth:`.filter`. 
If you want validation, you should handle that by setting some attribute", "= values def clear_values(self): \"\"\" Clear the current values stored in the filter.", "self.copy() copy.set_values(values) return self.filterlist.filtershandler.build_filter_url(changed_filterobject=copy) def build_clear_values_url(self): \"\"\" Get the URL that clears this", "URL that adds this filter with the given values to the current url.", "in the filter to the given ``values``. \"\"\" self.values = values def clear_values(self):", "the input field. If the filter uses multiple input fields, this will most", "build_set_values_url(self, values): \"\"\" Get the URL that adds this filter with the given", "0: return clean_values[0] else: return None def build_set_values_url(self, values): \"\"\" Get the URL", "of :class:`cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlist.AbstractFilterList`. \"\"\" if self.label_is_screenreader_only is None: return self.filterlist.get_label_is_screenreader_only_by_default() else: return self.label_is_screenreader_only def", "etc. this can be something completely different such as a dict. Returns: An", "typically want to override this if your filter allows the user to type", "\"\"\" return [self.clean_value(value) for value in self.values] def get_cleaned_value(self): \"\"\" Returns the first", ":meth:`.get_cleaned_values`, or ``None`` if there is no values. Use this in :meth:`.filter` if", "self.filterlist.filtershandler.build_filter_url(changed_filterobject=copy) def get_base_css_classes_list(self): return ['cradmin-legacy-listfilter-filter'] def filter(self, queryobject): \"\"\" Add the current values", "on the query backend. If you are filtering against the Django ORM, this", "values_to_remove = values for value in new_values: if value in values_to_remove: new_values.remove(value) self.values", "self.label_is_screenreader_only = label_is_screenreader_only super(AbstractFilter, self).__init__() def copy(self): \"\"\" Returns a copy of this", "import json from xml.sax.saxutils import quoteattr from django.utils.translation import pgettext from cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlistchild import", "values): \"\"\" Change the current values stored in the filter to the given", "filter. \"\"\" return '{}_label'.format(self.get_dom_id()) def get_inputfield_dom_id(self): \"\"\" Get the DOM id of the", "If ``label_is_screenreader_only`` is ``None``, this defaults to the return value of the ``get_label_is_screenreader_only_by_default()``", "return self.slug else: raise NotImplementedError('You must override get_slug(), or send a slug to", "multiselect filters where the user can add valuess to the filter (typically via", "template_name = 'cradmin_legacy/viewhelpers/listfilter/base/abstractfilter.django.html' def __init__(self, slug=None, label=None, label_is_screenreader_only=None): \"\"\" Parameters: slug: You can", "this filter. The slug is used in the URL to identify the filter.", "options dict. You can override this in your filters, but you should call", "send options into the directive:: <someelement my-filter-directive={{ me.get_angularjs_options_json|safe }}> Notice that we do", "to clean a single value. \"\"\" return value def get_cleaned_values(self): \"\"\" Clean the", "include any ``\"`` or ``'`` around the directives HTML attribute - that is", "directive:: <someelement my-filter-directive={{ me.get_angularjs_options_json|safe }}> Notice that we do not include any ``\"``", "given ``queryobject``. 
from __future__ import unicode_literals
import json
from xml.sax.saxutils import quoteattr

from django.utils.translation import pgettext

from cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlistchild import AbstractFilterListChild


class AbstractFilter(AbstractFilterListChild):
    """
    Abstract base class for all filters.
    """
    template_name = 'cradmin_legacy/viewhelpers/listfilter/base/abstractfilter.django.html'

    def __init__(self, slug=None, label=None, label_is_screenreader_only=None):
        """
        Parameters:
            slug: You can send the slug as a parameter, or override :meth:`.get_slug`.
            label: You can send the label as a parameter, or override :meth:`.get_label`.
            label_is_screenreader_only: You can set this as a parameter,
                or override :meth:`.get_label_is_screenreader_only`.
        """
        self.values = []
        self.slug = slug
        self.label = label
        self.label_is_screenreader_only = label_is_screenreader_only
        super(AbstractFilter, self).__init__()

    def copy(self):
        """
        Returns a copy of this filter.
        """
        copy = self.__class__()
        copy.set_values(list(self.values))
        copy.slug = self.slug
        copy.label = self.label
        return copy
\"\"\" return '{}_input'.format(self.get_dom_id()) def get_target_dom_id(self): \"\"\" Get the DOM id", "need to override this, but you will use it in your template context", "return self.label def get_label_is_screenreader_only(self): \"\"\" If this returns ``True``, the label will be", "template context to render urls for choices if your filter supports \"clear\". \"\"\"", "the current url. This is not the same as :meth:`.build_clear_values_url`. This method is", "can add valuess to the filter (typically via checkboxes). You should not need", "this can be something completely different such as a dict. Returns: An object", "your filter supports \"clear\". \"\"\" copy = self.copy() copy.clear_values() return self.filterlist.filtershandler.build_filter_url(changed_filterobject=copy) def build_add_values_url(self,", "label focus on the first field when it is clicked. \"\"\" return '{}_input'.format(self.get_dom_id())", "``values`` from the values currently stored in the filter. \"\"\" new_values = list(self.values)", "call ``super`` to get the default options. \"\"\" return { 'loadingmessage': self.get_loadingmessage() }", "as a parameter, or override :meth:`.get_label`. label_is_screenreader_only: You can set this as a", "to screenreaders. This is recommended over simply not setting a label since that", "your template context to render urls for choices if your filter is a", "your users can change their language, this should not be translatable since that", "[] def add_values(self, values): \"\"\" Add the given list of ``values`` to the", "if your filter uses multiselect. \"\"\" copy = self.copy() copy.remove_values(values) return self.filterlist.filtershandler.build_filter_url(changed_filterobject=copy) def", "not setting a label since that would break accessibility. Defaults to the value", "\"\"\" self.values = [] def add_values(self, values): \"\"\" Add the given list of", "something completely different such as a dict. Returns: An object of the same", "the DOM id of the target of the filter. This is just a", "\"\"\" return { 'loadingmessage': self.get_loadingmessage() } def get_angularjs_options_json(self): \"\"\" Returns a json encoded", "some attribute on ``self``, and handle the error in the template rendering the", "\"\"\" self.values += values def remove_values(self, values): \"\"\" Remove the given list of", "of ``values`` to the values currently stored in the filter. \"\"\" self.values +=", "is for multiselect filters where the user can add/remove valuess to the filter", "dict. Returns: An object of the same type as the given ``queryobject``. \"\"\"", "the target of the filter. This is just a shortcut to access :meth:`cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlist.AbstractFilterList.get_target_dom_id`.", "DOM IDs for components of a filter (E.g.: Field ID to attach a", "}}> Notice that we do not include any ``\"`` or ``'`` around the", "DIV, but you can also use this if you need DOM IDs for", "the values from a string into something that makes sense for your :meth:`.filter`.", "of the filter. This is typically set as the ``<label>`` for the filter", "values, to prepare them for usage in :meth:`.filter`. Defaults to returning the values", "value. \"\"\" clean_values = self.get_cleaned_values() if len(clean_values) > 0: return clean_values[0] else: return", "from __future__ import unicode_literals import json from xml.sax.saxutils import quoteattr from django.utils.translation import", "your :meth:`.filter`. 
If you want validation, you should handle that by setting some", "to the queryobject in :meth:`.filter`). \"\"\" return [self.clean_value(value) for value in self.values] def", "show the label, you should specify one, and hide it from everyone except", "filter. \"\"\" new_values = list(self.values) values_to_remove = values for value in new_values: if", "len(clean_values) > 0: return clean_values[0] else: return None def build_set_values_url(self, values): \"\"\" Get", "this, but you will use it in your template context to render urls", "values): \"\"\" Get the URL that removes the given values for this filter", "you are filtering against the Django ORM, this will typically be a QuerySet,", "object of the same type as the given ``queryobject``. \"\"\" raise NotImplementedError() def", "just a shortcut to access :meth:`cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlist.AbstractFilterList.get_target_dom_id`. \"\"\" return self.filterlist.get_target_dom_id() def get_loadingmessage(self): \"\"\" Get", "styled to only make it visible to screenreaders. This is recommended over simply", "user with a different language (if a user shares an URL with another", "you want validation, you should handle that by setting some attribute on ``self``,", "on the first field when it is clicked. \"\"\" return '{}_input'.format(self.get_dom_id()) def get_target_dom_id(self):", "\"\"\" Get the URL that adds this filter with the given values to", "field when it is clicked. \"\"\" return '{}_input'.format(self.get_dom_id()) def get_target_dom_id(self): \"\"\" Get the", "current values stored in the filter. \"\"\" self.values = [] def add_values(self, values):", "or empty string. Parameters: queryobject: The type of the queryobject depends on the", "target of the filter. This is just a shortcut to access :meth:`cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlist.AbstractFilterList.get_target_dom_id`. \"\"\"", "filter input field. A label is optional, but highly recommended. Even if you", "label as a parameter, or override :meth:`.get_label`. label_is_screenreader_only: You can set this as", "self.values] def get_cleaned_value(self): \"\"\" Returns the first value returned by :meth:`.get_cleaned_values`, or ``None``", "but you can also use this if you need DOM IDs for components", "if you want to convert the values from a string into something that", "method of :class:`cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlist.AbstractFilterList`. \"\"\" if self.label_is_screenreader_only is None: return self.filterlist.get_label_is_screenreader_only_by_default() else: return self.label_is_screenreader_only", "urls for choices if your filter uses multiselect. \"\"\" copy = self.copy() copy.remove_values(values)", "This is not the same as :meth:`.build_clear_values_url`. This method is for multiselect filters", "def build_remove_values_url(self, values): \"\"\" Get the URL that removes the given values for", "value in values_to_remove: new_values.remove(value) self.values = new_values def clean_value(self, value): \"\"\" Called by", "optional, but highly recommended. Even if you do not want to show the", "this filter with the given values to the current url. You should not", "'Loading') def get_angularjs_options_dict(self): \"\"\" Get angularjs directive options dict. 
You can override this", "self.get_loadingmessage() } def get_angularjs_options_json(self): \"\"\" Returns a json encoded and HTML attribute quoted", "copy.remove_values(values) return self.filterlist.filtershandler.build_filter_url(changed_filterobject=copy) def get_base_css_classes_list(self): return ['cradmin-legacy-listfilter-filter'] def filter(self, queryobject): \"\"\" Add the", "can change their language, this should not be translatable since that would make", "of a filter (E.g.: Field ID to attach a labels to a form", "a different language (if a user shares an URL with another user). \"\"\"", "base class for all filters. \"\"\" template_name = 'cradmin_legacy/viewhelpers/listfilter/base/abstractfilter.django.html' def __init__(self, slug=None, label=None,", "ID to attach a labels to a form field). \"\"\" return '{}_{}'.format(self.filterlist.get_dom_id_prefix(), self.get_slug())", "id of the input field. If the filter uses multiple input fields, this", "\"\"\" Add the given list of ``values`` to the values currently stored in", "version of :meth:`.get_angularjs_options_dict`. You use this with the angularjs directive for the filter", "options into the directive:: <someelement my-filter-directive={{ me.get_angularjs_options_json|safe }}> Notice that we do not", "self.get_cleaned_values() if len(clean_values) > 0: return clean_values[0] else: return None def build_set_values_url(self, values):", "override get_slug(), or send a slug to __init__().') def get_label(self): \"\"\" Get the", "slug as a parameter, or override :meth:`.get_slug`. label: You can send the label", "adds this to the wrapping DIV, but you can also use this if", "the filter to the given ``values``. \"\"\" self.values = values def clear_values(self): \"\"\"", "the URL that removes the given values for this filter to the current", "except for screenreaders with :meth:`.get_label_is_screenreader_only`. \"\"\" return self.label def get_label_is_screenreader_only(self): \"\"\" If this", "\"\"\" clean_values = self.get_cleaned_values() if len(clean_values) > 0: return clean_values[0] else: return None", "the first field when it is clicked. \"\"\" return '{}_input'.format(self.get_dom_id()) def get_target_dom_id(self): \"\"\"", "user to type in a values, or if you want to convert the", "by a user with a different language (if a user shares an URL", "\"\"\" Get the URL that clears this filter from the current url. You", "get_cleaned_value(self): \"\"\" Returns the first value returned by :meth:`.get_cleaned_values`, or ``None`` if there", "def filter(self, queryobject): \"\"\" Add the current values to the given ``queryobject``. This", "removes the given values for this filter to the current url. This is", "identify the filter. If your users can change their language, this should not", "self.slug else: raise NotImplementedError('You must override get_slug(), or send a slug to __init__().')", "this as a parameter, or override :meth:`.get_label_is_screenreader_only`. \"\"\" self.values = [] self.slug =", "around the directives HTML attribute - that is included by this method. \"\"\"", "self.copy() copy.add_values(values) return self.filterlist.filtershandler.build_filter_url(changed_filterobject=copy) def build_remove_values_url(self, values): \"\"\" Get the URL that removes", "for choices if your filter uses multiselect. \"\"\" copy = self.copy() copy.remove_values(values) return", "is clicked. 
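    # Illustrative note (not from the original source): for simple cases the
    # slug and label are normally passed to the constructor rather than
    # overridden. The class name and argument values below are hypothetical::
    #
    #     myfilter = MyStatusFilter(slug='status', label='Status',
    #                               label_is_screenreader_only=True)
    #
    # If neither a slug parameter nor a get_slug() override is provided,
    # get_slug() raises NotImplementedError.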
\"\"\" return '{}_input'.format(self.get_dom_id()) def get_target_dom_id(self): \"\"\" Get the DOM id of", "allows the user to type in a values, or if you want to", "loads the updated target element content. \"\"\" return pgettext('listfilter loading message', 'Loading') def", "\"\"\" Add the current values to the given ``queryobject``. This is always called", "value returned by :meth:`.get_cleaned_values`, or ``None`` if there is no values. Use this", "['cradmin-legacy-listfilter-filter'] def filter(self, queryobject): \"\"\" Add the current values to the given ``queryobject``.", "the label focus on the first field when it is clicked. \"\"\" return", "the filter loads the updated target element content. \"\"\" return pgettext('listfilter loading message',", "render urls for choices if your filter is a single select filter. \"\"\"", "this to the wrapping DIV, but you can also use this if you", "not be translatable since that would make an URL unusable by a user", "your template context to render urls for choices if your filter uses multiselect.", "to prepare them for usage in :meth:`.filter`. Defaults to returning the values unchanged,", "json encoded and HTML attribute quoted version of :meth:`.get_angularjs_options_dict`. You use this with", "clear_values(self): \"\"\" Clear the current values stored in the filter. \"\"\" self.values =", "self.copy() copy.clear_values() return self.filterlist.filtershandler.build_filter_url(changed_filterobject=copy) def build_add_values_url(self, values): \"\"\" Get the URL that adds", "This is not the same as :meth:`.build_set_values_url`. This method is for multiselect filters", "clicked. \"\"\" return '{}_input'.format(self.get_dom_id()) def get_target_dom_id(self): \"\"\" Get the DOM id of the", "you want to convert the values from a string into something that makes", "value in self.values] def get_cleaned_value(self): \"\"\" Returns the first value returned by :meth:`.get_cleaned_values`,", "label_is_screenreader_only: You can set this as a parameter, or override :meth:`.get_label_is_screenreader_only`. \"\"\" self.values", "Get the slug for this filter. The slug is used in the URL", "recommended over simply not setting a label since that would break accessibility. Defaults", "handle the error in the template rendering the filter (and most likely not", "filters where the user can add valuess to the filter (typically via checkboxes).", "Get the DOM ID of this filter. The base template adds this to", "you need DOM IDs for components of a filter (E.g.: Field ID to", "for choices if your filter supports \"clear\". \"\"\" copy = self.copy() copy.clear_values() return", "def get_label(self): \"\"\" Get the label of the filter. This is typically set", "to render urls for choices if your filter is a single select filter.", "from xml.sax.saxutils import quoteattr from django.utils.translation import pgettext from cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlistchild import AbstractFilterListChild class", "current values stored in the filter to the given ``values``. \"\"\" self.values =", "is just a shortcut to access :meth:`cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlist.AbstractFilterList.get_target_dom_id`. 
\"\"\" return self.filterlist.get_target_dom_id() def get_loadingmessage(self): \"\"\"", "\"\"\" template_name = 'cradmin_legacy/viewhelpers/listfilter/base/abstractfilter.django.html' def __init__(self, slug=None, label=None, label_is_screenreader_only=None): \"\"\" Parameters: slug: You", "self.filterlist.filtershandler.build_filter_url(changed_filterobject=copy) def build_remove_values_url(self, values): \"\"\" Get the URL that removes the given values", "in the URL to identify the filter. If your users can change their", "error in the template rendering the filter (and most likely not add anything", "urls for choices if your filter supports \"clear\". \"\"\" copy = self.copy() copy.clear_values()", "will be styled to only make it visible to screenreaders. This is recommended", "filter(self, queryobject): \"\"\" Add the current values to the given ``queryobject``. This is", "get_angularjs_options_dict(self): \"\"\" Get angularjs directive options dict. You can override this in your", "(if a user shares an URL with another user). \"\"\" if self.slug: return", "\"\"\" return self.label def get_label_is_screenreader_only(self): \"\"\" If this returns ``True``, the label will", "queryobject: The type of the queryobject depends on the query backend. If you", "of the input field. If the filter uses multiple input fields, this will", "else: return None def build_set_values_url(self, values): \"\"\" Get the URL that adds this", "loading message', 'Loading') def get_angularjs_options_dict(self): \"\"\" Get angularjs directive options dict. You can", "pgettext('listfilter loading message', 'Loading') def get_angularjs_options_dict(self): \"\"\" Get angularjs directive options dict. You", "likely not be used, or it may be used as the ID of", "\"\"\" return pgettext('listfilter loading message', 'Loading') def get_angularjs_options_dict(self): \"\"\" Get angularjs directive options", "as a dict. Returns: An object of the same type as the given", "filter (E.g.: Field ID to attach a labels to a form field). \"\"\"", "\"\"\" Get the label of the filter. This is typically set as the", "angularjs directive for the filter to send options into the directive:: <someelement my-filter-directive={{", "\"\"\" self.values = values def clear_values(self): \"\"\" Clear the current values stored in", "is optional, but highly recommended. Even if you do not want to show", "current url. You should not need to override this, but you will use", "copy = self.copy() copy.set_values(values) return self.filterlist.filtershandler.build_filter_url(changed_filterobject=copy) def build_clear_values_url(self): \"\"\" Get the URL that", ":meth:`.clean_values` to clean a single value. \"\"\" return value def get_cleaned_values(self): \"\"\" Clean", "prepare them for usage in :meth:`.filter`. Defaults to returning the values unchanged, but", "__init__().') def get_label(self): \"\"\" Get the label of the filter. This is typically", "MongoDB, etc. this can be something completely different such as a dict. Returns:", "a single value. \"\"\" clean_values = self.get_cleaned_values() if len(clean_values) > 0: return clean_values[0]", "be something completely different such as a dict. Returns: An object of the", "values): \"\"\" Get the URL that adds the given values for this filter", "an URL unusable by a user with a different language (if a user", "filter from the current url. You should not need to override this, but", "override :meth:`.get_slug`. 
label: You can send the label as a parameter, or override", "def copy(self): \"\"\" Returns a copy of this filter. \"\"\" copy = self.__class__()", "URL to identify the filter. If your users can change their language, this", "``None``, this defaults to the return value of the ``get_label_is_screenreader_only_by_default()`` method of :class:`cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlist.AbstractFilterList`.", "override this if your filter allows the user to type in a values,", "\"\"\" self.values = [] self.slug = slug self.label = label self.label_is_screenreader_only = label_is_screenreader_only", "(see :class:`.AbstractFilter`). If ``label_is_screenreader_only`` is ``None``, this defaults to the return value of", "valuess to the filter (typically via checkboxes). You should not need to override", "the values currently stored in the filter. \"\"\" self.values += values def remove_values(self,", "from the values currently stored in the filter. \"\"\" new_values = list(self.values) values_to_remove", "always called unless the ``filter_string`` is None or empty string. Parameters: queryobject: The", "Get the URL that removes the given values for this filter to the", "set as the ``<label>`` for the filter input field. A label is optional,", "'{}_input'.format(self.get_dom_id()) def get_target_dom_id(self): \"\"\" Get the DOM id of the target of the", "defaults to the return value of the ``get_label_is_screenreader_only_by_default()`` method of :class:`cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlist.AbstractFilterList`. \"\"\" if", "it visible to screenreaders. This is recommended over simply not setting a label", "the filter uses multiple input fields, this will most likely not be used,", "when the filter loads the updated target element content. \"\"\" return pgettext('listfilter loading", "user shares an URL with another user). \"\"\" if self.slug: return self.slug else:", "you should specify one, and hide it from everyone except for screenreaders with", "the given values to the current url. You should not need to override", "by :meth:`.clean_values` to clean a single value. \"\"\" return value def get_cleaned_values(self): \"\"\"", "to returning the values unchanged, but you will typically want to override this", "given list of ``values`` to the values currently stored in the filter. \"\"\"", "to make the label focus on the first field when it is clicked.", "a parameter, or override :meth:`.get_label_is_screenreader_only`. \"\"\" self.values = [] self.slug = slug self.label", "stored in the filter. \"\"\" self.values += values def remove_values(self, values): \"\"\" Remove", "first value returned by :meth:`.get_cleaned_values`, or ``None`` if there is no values. Use", "for this filter. \"\"\" return '{}_label'.format(self.get_dom_id()) def get_inputfield_dom_id(self): \"\"\" Get the DOM id", "the template rendering the filter (and most likely not add anything to the", "it in your template context to render urls for choices if your filter", "of the ``label_is_screenreader_only`` parameter (see :class:`.AbstractFilter`). If ``label_is_screenreader_only`` is ``None``, this defaults to", "for choices if your filter is a single select filter. \"\"\" copy =", "the DOM ID of this filter. The base template adds this to the", "HTML attribute quoted version of :meth:`.get_angularjs_options_dict`. You use this with the angularjs directive", "but you should call ``super`` to get the default options. \"\"\" return {", "to the filter (typically via checkboxes). 
    def get_cleaned_values(self):
        """
        Clean the values, to prepare them for usage in :meth:`.filter`.

        Defaults to returning the values unchanged, but you will typically
        want to override this if your filter allows the user to type in
        values, or if you want to convert the values from a string into
        something that makes sense for your :meth:`.filter`.

        If you want validation, you should handle that by setting some
        attribute on ``self``, and handle the error in the template rendering
        the filter (and most likely not add anything to the queryobject in
        :meth:`.filter`).
        """
        return [self.clean_value(value) for value in self.values]

    def get_cleaned_value(self):
        """
        Returns the first value returned by :meth:`.get_cleaned_values`, or
        ``None`` if there are no values.

        Use this in :meth:`.filter` if you expect a single value.
        """
        clean_values = self.get_cleaned_values()
        if len(clean_values) > 0:
            return clean_values[0]
        else:
            return None
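    # Illustrative sketch (not from the original source): a subclass that
    # expects integer values from the querystring could override
    # clean_value() roughly like this. The ``invalid_value`` attribute is a
    # hypothetical name for the validation pattern described above::
    #
    #     def clean_value(self, value):
    #         try:
    #             return int(value)
    #         except (TypeError, ValueError):
    #             self.invalid_value = value  # rendered by the template
    #             return None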
\"\"\" copy = self.copy() copy.add_values(values) return", "values): \"\"\" Add the given list of ``values`` to the values currently stored", "label: You can send the label as a parameter, or override :meth:`.get_label`. label_is_screenreader_only:", "to the wrapping DIV, but you can also use this if you need", "convert the values from a string into something that makes sense for your", "is recommended over simply not setting a label since that would break accessibility.", "if you need DOM IDs for components of a filter (E.g.: Field ID", "you should handle that by setting some attribute on ``self``, and handle the", "\"\"\" Remove the given list of ``values`` from the values currently stored in", "not add anything to the queryobject in :meth:`.filter`). \"\"\" return [self.clean_value(value) for value", "template context to render urls for choices if your filter uses multiselect. \"\"\"", "the slug as a parameter, or override :meth:`.get_slug`. label: You can send the", "\"\"\" return '{}_label'.format(self.get_dom_id()) def get_inputfield_dom_id(self): \"\"\" Get the DOM id of the input", "to the values currently stored in the filter. \"\"\" self.values += values def", "self.label = label self.label_is_screenreader_only = label_is_screenreader_only super(AbstractFilter, self).__init__() def copy(self): \"\"\" Returns a", "shares an URL with another user). \"\"\" if self.slug: return self.slug else: raise", "return [self.clean_value(value) for value in self.values] def get_cleaned_value(self): \"\"\" Returns the first value", "Parameters: queryobject: The type of the queryobject depends on the query backend. If", "get_label_dom_id(self): \"\"\" Get the DOM ID of the label for this filter. \"\"\"", "the values, to prepare them for usage in :meth:`.filter`. Defaults to returning the", "the DOM ID of the label for this filter. \"\"\" return '{}_label'.format(self.get_dom_id()) def", "def get_angularjs_options_dict(self): \"\"\" Get angularjs directive options dict. You can override this in", "urls for choices if your filter uses multiselect. \"\"\" copy = self.copy() copy.add_values(values)", "since that would break accessibility. Defaults to the value of the ``label_is_screenreader_only`` parameter", "return self.filterlist.filtershandler.build_filter_url(changed_filterobject=copy) def build_remove_values_url(self, values): \"\"\" Get the URL that removes the given", "to the given ``queryobject``. This is always called unless the ``filter_string`` is None", "'{}_{}'.format(self.filterlist.get_dom_id_prefix(), self.get_slug()) def get_label_dom_id(self): \"\"\" Get the DOM ID of the label for", "in the template rendering the filter (and most likely not add anything to", "add valuess to the filter (typically via checkboxes). You should not need to", "``super`` to get the default options. \"\"\" return { 'loadingmessage': self.get_loadingmessage() } def", "most likely not be used, or it may be used as the ID", "for usage in :meth:`.filter`. Defaults to returning the values unchanged, but you will", "but for other backends such as ElasticSearch, MongoDB, etc. this can be something", "is ``None``, this defaults to the return value of the ``get_label_is_screenreader_only_by_default()`` method of", "this filter. \"\"\" copy = self.__class__() copy.set_values(list(self.values)) copy.slug = self.slug copy.label = self.label", "another user). 
\"\"\" if self.slug: return self.slug else: raise NotImplementedError('You must override get_slug(),", "= new_values def clean_value(self, value): \"\"\" Called by :meth:`.clean_values` to clean a single", "self.label return copy def get_slug(self): \"\"\" Get the slug for this filter. The", "def get_label_is_screenreader_only(self): \"\"\" If this returns ``True``, the label will be styled to", "get_slug(self): \"\"\" Get the slug for this filter. The slug is used in", "\"\"\" Clear the current values stored in the filter. \"\"\" self.values = []", "values stored in the filter. \"\"\" self.values = [] def add_values(self, values): \"\"\"", "slug self.label = label self.label_is_screenreader_only = label_is_screenreader_only super(AbstractFilter, self).__init__() def copy(self): \"\"\" Returns", "slug is used in the URL to identify the filter. If your users", "new_values.remove(value) self.values = new_values def clean_value(self, value): \"\"\" Called by :meth:`.clean_values` to clean", "the DOM id of the input field. If the filter uses multiple input", "message', 'Loading') def get_angularjs_options_dict(self): \"\"\" Get angularjs directive options dict. You can override", "json from xml.sax.saxutils import quoteattr from django.utils.translation import pgettext from cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlistchild import AbstractFilterListChild", "with another user). \"\"\" if self.slug: return self.slug else: raise NotImplementedError('You must override", "currently stored in the filter. \"\"\" new_values = list(self.values) values_to_remove = values for", ":meth:`.get_angularjs_options_dict`. You use this with the angularjs directive for the filter to send", "checkboxes). You should not need to override this, but you will use it", "filters, but you should call ``super`` to get the default options. \"\"\" return", "copy.set_values(values) return self.filterlist.filtershandler.build_filter_url(changed_filterobject=copy) def build_clear_values_url(self): \"\"\" Get the URL that clears this filter", "this filter to the current url. This is not the same as :meth:`.build_set_values_url`.", "your template context to render urls for choices if your filter supports \"clear\".", "supports \"clear\". \"\"\" copy = self.copy() copy.clear_values() return self.filterlist.filtershandler.build_filter_url(changed_filterobject=copy) def build_add_values_url(self, values): \"\"\"", "input field. A label is optional, but highly recommended. Even if you do", "label for this filter. \"\"\" return '{}_label'.format(self.get_dom_id()) def get_inputfield_dom_id(self): \"\"\" Get the DOM", "options. \"\"\" return { 'loadingmessage': self.get_loadingmessage() } def get_angularjs_options_json(self): \"\"\" Returns a json", "Returns the first value returned by :meth:`.get_cleaned_values`, or ``None`` if there is no", "new_values = list(self.values) values_to_remove = values for value in new_values: if value in", "raise NotImplementedError() def get_dom_id(self): \"\"\" Get the DOM ID of this filter. The", "user can add valuess to the filter (typically via checkboxes). You should not", "as the ``<label>`` for the filter input field. A label is optional, but", "the filter (and most likely not add anything to the queryobject in :meth:`.filter`).", "Field ID to attach a labels to a form field). \"\"\" return '{}_{}'.format(self.filterlist.get_dom_id_prefix(),", "first field to make the label focus on the first field when it", "Get the DOM id of the target of the filter. 
This is just", "\"\"\" If this returns ``True``, the label will be styled to only make", "highly recommended. Even if you do not want to show the label, you", "filter. \"\"\" copy = self.__class__() copy.set_values(list(self.values)) copy.slug = self.slug copy.label = self.label return", "in the filter. \"\"\" new_values = list(self.values) values_to_remove = values for value in", "their language, this should not be translatable since that would make an URL", "``True``, the label will be styled to only make it visible to screenreaders.", "current values to the given ``queryobject``. This is always called unless the ``filter_string``", "label_is_screenreader_only super(AbstractFilter, self).__init__() def copy(self): \"\"\" Returns a copy of this filter. \"\"\"", "> 0: return clean_values[0] else: return None def build_set_values_url(self, values): \"\"\" Get the", "as :meth:`.build_set_values_url`. This method is for multiselect filters where the user can add", "if there is no values. Use this in :meth:`.filter` if you expect a", "is not the same as :meth:`.build_set_values_url`. This method is for multiselect filters where", "Add the given list of ``values`` to the values currently stored in the", "AbstractFilter(AbstractFilterListChild): \"\"\" Abstract base class for all filters. \"\"\" template_name = 'cradmin_legacy/viewhelpers/listfilter/base/abstractfilter.django.html' def", "values def clear_values(self): \"\"\" Clear the current values stored in the filter. \"\"\"", ":meth:`.build_clear_values_url`. This method is for multiselect filters where the user can add/remove valuess", "your filter allows the user to type in a values, or if you", "the label of the filter. This is typically set as the ``<label>`` for", "if your filter uses multiselect. \"\"\" copy = self.copy() copy.add_values(values) return self.filterlist.filtershandler.build_filter_url(changed_filterobject=copy) def", "label=None, label_is_screenreader_only=None): \"\"\" Parameters: slug: You can send the slug as a parameter,", "filter. The slug is used in the URL to identify the filter. If", "[self.clean_value(value) for value in self.values] def get_cleaned_value(self): \"\"\" Returns the first value returned", "\"\"\" Get the slug for this filter. The slug is used in the", "get_cleaned_values(self): \"\"\" Clean the values, to prepare them for usage in :meth:`.filter`. Defaults", "will typically want to override this if your filter allows the user to", "this if your filter allows the user to type in a values, or", "label is optional, but highly recommended. Even if you do not want to", "URL that clears this filter from the current url. You should not need", "method is for multiselect filters where the user can add/remove valuess to the", "uses multiselect. \"\"\" copy = self.copy() copy.remove_values(values) return self.filterlist.filtershandler.build_filter_url(changed_filterobject=copy) def get_base_css_classes_list(self): return ['cradmin-legacy-listfilter-filter']", "directive options dict. You can override this in your filters, but you should", "dict. You can override this in your filters, but you should call ``super``", "copy.set_values(list(self.values)) copy.slug = self.slug copy.label = self.label return copy def get_slug(self): \"\"\" Get", ":meth:`.build_set_values_url`. This method is for multiselect filters where the user can add valuess", "called unless the ``filter_string`` is None or empty string. Parameters: queryobject: The type", "that clears this filter from the current url. 
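    # Illustrative note (not from the original source): all the build_*_url
    # helpers work on a copy of the filter, so the filter attached to the
    # current request is never mutated. A hypothetical multiselect filter
    # could prepare the link target for each choice like this
    # (``get_possible_values()`` is an assumed helper, not part of this class)::
    #
    #     choice_urls = [
    #         (value, self.build_add_values_url([value]))
    #         for value in self.get_possible_values()
    #     ]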
    def filter(self, queryobject):
        """
        Add the current values to the given ``queryobject``.

        This is always called unless the ``filter_string`` is ``None`` or an
        empty string.

        Parameters:
            queryobject: The type of the queryobject depends on the query
                backend. If you are filtering against the Django ORM, this
                will typically be a QuerySet, but for other backends such as
                ElasticSearch, MongoDB, etc. this can be something completely
                different such as a dict.

        Returns:
            An object of the same type as the given ``queryobject``.
        """
        raise NotImplementedError()

    def get_dom_id(self):
        """
        Get the DOM ID of this filter. The base template adds this to the
        wrapping DIV, but you can also use this if you need DOM IDs for
        components of a filter (E.g.: Field ID to attach a label to a form
        field).
        """
        return '{}_{}'.format(self.filterlist.get_dom_id_prefix(), self.get_slug())

    def get_label_dom_id(self):
        """
        Get the DOM ID of the label for this filter.
        """
        return '{}_label'.format(self.get_dom_id())

    def get_inputfield_dom_id(self):
        """
        Get the DOM id of the input field. If the filter uses multiple input
        fields, this will most likely not be used, or it may be used as the
        ID of the first field to make the label focus on the first field
        when it is clicked.
        """
        return '{}_input'.format(self.get_dom_id())
    def get_target_dom_id(self):
        """
        Get the DOM id of the target of the filter. This is just a shortcut
        to access
        :meth:`cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlist.AbstractFilterList.get_target_dom_id`.
        """
        return self.filterlist.get_target_dom_id()

    def get_loadingmessage(self):
        """
        Get the loading message to show when the filter loads the updated
        target element content.
        """
        return pgettext('listfilter loading message', 'Loading')

    def get_angularjs_options_dict(self):
        """
        Get angularjs directive options dict.

        You can override this in your filters, but you should call ``super``
        to get the default options.
        """
        return {
            'loadingmessage': self.get_loadingmessage()
        }

    def get_angularjs_options_json(self):
        """
        Returns a json encoded and HTML attribute quoted version of
        :meth:`.get_angularjs_options_dict`.

        You use this with the angularjs directive for the filter to send
        options into the directive::

            <someelement my-filter-directive={{ me.get_angularjs_options_json|safe }}>

        Notice that we do not include any ``"`` or ``'`` around the
        directive's HTML attribute value - the quoting is added by this
        method.
        """
        return quoteattr(json.dumps(self.get_angularjs_options_dict()))
\"\"\" copy", "= self.copy() copy.remove_values(values) return self.filterlist.filtershandler.build_filter_url(changed_filterobject=copy) def get_base_css_classes_list(self): return ['cradmin-legacy-listfilter-filter'] def filter(self, queryobject): \"\"\"", "You can send the label as a parameter, or override :meth:`.get_label`. label_is_screenreader_only: You", "override :meth:`.get_label`. label_is_screenreader_only: You can set this as a parameter, or override :meth:`.get_label_is_screenreader_only`.", "components of a filter (E.g.: Field ID to attach a labels to a", "is used in the URL to identify the filter. If your users can", "An object of the same type as the given ``queryobject``. \"\"\" raise NotImplementedError()", "ID of the label for this filter. \"\"\" return '{}_label'.format(self.get_dom_id()) def get_inputfield_dom_id(self): \"\"\"", "this will most likely not be used, or it may be used as", "that would make an URL unusable by a user with a different language", "label of the filter. This is typically set as the ``<label>`` for the", "screenreaders. This is recommended over simply not setting a label since that would", "``queryobject``. This is always called unless the ``filter_string`` is None or empty string.", "the user to type in a values, or if you want to convert", "set this as a parameter, or override :meth:`.get_label_is_screenreader_only`. \"\"\" self.values = [] self.slug", "\"\"\" copy = self.copy() copy.remove_values(values) return self.filterlist.filtershandler.build_filter_url(changed_filterobject=copy) def get_base_css_classes_list(self): return ['cradmin-legacy-listfilter-filter'] def filter(self,", "where the user can add/remove valuess to the filter (typically via checkboxes). You", "filter uses multiple input fields, this will most likely not be used, or", "of the same type as the given ``queryobject``. \"\"\" raise NotImplementedError() def get_dom_id(self):", "get_dom_id(self): \"\"\" Get the DOM ID of this filter. The base template adds", "values stored in the filter to the given ``values``. \"\"\" self.values = values", "Even if you do not want to show the label, you should specify", "the Django ORM, this will typically be a QuerySet, but for other backends", "\"\"\" Get the DOM id of the target of the filter. This is", "value of the ``get_label_is_screenreader_only_by_default()`` method of :class:`cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlist.AbstractFilterList`. \"\"\" if self.label_is_screenreader_only is None: return", "(typically via checkboxes). You should not need to override this, but you will", "to identify the filter. If your users can change their language, this should", "\"\"\" Get the DOM ID of the label for this filter. \"\"\" return", "if value in values_to_remove: new_values.remove(value) self.values = new_values def clean_value(self, value): \"\"\" Called", "clean a single value. \"\"\" return value def get_cleaned_values(self): \"\"\" Clean the values,", "multiple input fields, this will most likely not be used, or it may", "url. This is not the same as :meth:`.build_set_values_url`. This method is for multiselect", "should handle that by setting some attribute on ``self``, and handle the error", "Get the DOM id of the input field. 
If the filter uses multiple", "template adds this to the wrapping DIV, but you can also use this", "self.slug: return self.slug else: raise NotImplementedError('You must override get_slug(), or send a slug", "want validation, you should handle that by setting some attribute on ``self``, and", "that removes the given values for this filter to the current url. This", "build_clear_values_url(self): \"\"\" Get the URL that clears this filter from the current url.", "unchanged, but you will typically want to override this if your filter allows", "field. A label is optional, but highly recommended. Even if you do not", "current url. This is not the same as :meth:`.build_clear_values_url`. This method is for", "accessibility. Defaults to the value of the ``label_is_screenreader_only`` parameter (see :class:`.AbstractFilter`). If ``label_is_screenreader_only``", "This method is for multiselect filters where the user can add/remove valuess to", "\"\"\" Parameters: slug: You can send the slug as a parameter, or override", "access :meth:`cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlist.AbstractFilterList.get_target_dom_id`. \"\"\" return self.filterlist.get_target_dom_id() def get_loadingmessage(self): \"\"\" Get the loading message to", "angularjs directive options dict. You can override this in your filters, but you", "for this filter. The slug is used in the URL to identify the", "return None def build_set_values_url(self, values): \"\"\" Get the URL that adds this filter", "URL that adds the given values for this filter to the current url.", "for this filter to the current url. This is not the same as", "backends such as ElasticSearch, MongoDB, etc. this can be something completely different such", "into the directive:: <someelement my-filter-directive={{ me.get_angularjs_options_json|safe }}> Notice that we do not include", "This is just a shortcut to access :meth:`cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlist.AbstractFilterList.get_target_dom_id`. \"\"\" return self.filterlist.get_target_dom_id() def get_loadingmessage(self):", "= slug self.label = label self.label_is_screenreader_only = label_is_screenreader_only super(AbstractFilter, self).__init__() def copy(self): \"\"\"", "def get_angularjs_options_json(self): \"\"\" Returns a json encoded and HTML attribute quoted version of", "self.label def get_label_is_screenreader_only(self): \"\"\" If this returns ``True``, the label will be styled", "clean_values[0] else: return None def build_set_values_url(self, values): \"\"\" Get the URL that adds", "anything to the queryobject in :meth:`.filter`). \"\"\" return [self.clean_value(value) for value in self.values]", "list of ``values`` to the values currently stored in the filter. \"\"\" self.values", "Clean the values, to prepare them for usage in :meth:`.filter`. Defaults to returning", "setting some attribute on ``self``, and handle the error in the template rendering", "get_inputfield_dom_id(self): \"\"\" Get the DOM id of the input field. If the filter", "Get the loading message to show when the filter loads the updated target", "the queryobject depends on the query backend. If you are filtering against the", "type as the given ``queryobject``. 
\"\"\" raise NotImplementedError() def get_dom_id(self): \"\"\" Get the", "self.copy() copy.remove_values(values) return self.filterlist.filtershandler.build_filter_url(changed_filterobject=copy) def get_base_css_classes_list(self): return ['cradmin-legacy-listfilter-filter'] def filter(self, queryobject): \"\"\" Add", "want to override this if your filter allows the user to type in", "since that would make an URL unusable by a user with a different", "if your filter supports \"clear\". \"\"\" copy = self.copy() copy.clear_values() return self.filterlist.filtershandler.build_filter_url(changed_filterobject=copy) def", "``queryobject``. \"\"\" raise NotImplementedError() def get_dom_id(self): \"\"\" Get the DOM ID of this", "return pgettext('listfilter loading message', 'Loading') def get_angularjs_options_dict(self): \"\"\" Get angularjs directive options dict.", "= label self.label_is_screenreader_only = label_is_screenreader_only super(AbstractFilter, self).__init__() def copy(self): \"\"\" Returns a copy", "Get the URL that adds this filter with the given values to the", "to get the default options. \"\"\" return { 'loadingmessage': self.get_loadingmessage() } def get_angularjs_options_json(self):", "def get_slug(self): \"\"\" Get the slug for this filter. The slug is used", "this filter. \"\"\" return '{}_label'.format(self.get_dom_id()) def get_inputfield_dom_id(self): \"\"\" Get the DOM id of", "``get_label_is_screenreader_only_by_default()`` method of :class:`cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlist.AbstractFilterList`. \"\"\" if self.label_is_screenreader_only is None: return self.filterlist.get_label_is_screenreader_only_by_default() else: return", "``None`` if there is no values. Use this in :meth:`.filter` if you expect", "user can add/remove valuess to the filter (typically via checkboxes). You should not", "xml.sax.saxutils import quoteattr from django.utils.translation import pgettext from cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlistchild import AbstractFilterListChild class AbstractFilter(AbstractFilterListChild):", "def build_set_values_url(self, values): \"\"\" Get the URL that adds this filter with the", "same type as the given ``queryobject``. \"\"\" raise NotImplementedError() def get_dom_id(self): \"\"\" Get", "self.filterlist.filtershandler.build_filter_url(changed_filterobject=copy) def build_add_values_url(self, values): \"\"\" Get the URL that adds the given values", "a json encoded and HTML attribute quoted version of :meth:`.get_angularjs_options_dict`. You use this", "you will typically want to override this if your filter allows the user", "quoted version of :meth:`.get_angularjs_options_dict`. You use this with the angularjs directive for the", "in new_values: if value in values_to_remove: new_values.remove(value) self.values = new_values def clean_value(self, value):", "validation, you should handle that by setting some attribute on ``self``, and handle", "be a QuerySet, but for other backends such as ElasticSearch, MongoDB, etc. this", "class AbstractFilter(AbstractFilterListChild): \"\"\" Abstract base class for all filters. \"\"\" template_name = 'cradmin_legacy/viewhelpers/listfilter/base/abstractfilter.django.html'", "type in a values, or if you want to convert the values from", "id of the target of the filter. This is just a shortcut to", "a single select filter. 
\"\"\" copy = self.copy() copy.set_values(values) return self.filterlist.filtershandler.build_filter_url(changed_filterobject=copy) def build_clear_values_url(self):", "value def get_cleaned_values(self): \"\"\" Clean the values, to prepare them for usage in", "if your filter is a single select filter. \"\"\" copy = self.copy() copy.set_values(values)", "current url. This is not the same as :meth:`.build_set_values_url`. This method is for", "queryobject depends on the query backend. If you are filtering against the Django", "send the slug as a parameter, or override :meth:`.get_slug`. label: You can send", "__init__(self, slug=None, label=None, label_is_screenreader_only=None): \"\"\" Parameters: slug: You can send the slug as", "can be something completely different such as a dict. Returns: An object of", "return copy def get_slug(self): \"\"\" Get the slug for this filter. The slug", "ORM, this will typically be a QuerySet, but for other backends such as", "in :meth:`.filter` if you expect a single value. \"\"\" clean_values = self.get_cleaned_values() if", "def get_cleaned_value(self): \"\"\" Returns the first value returned by :meth:`.get_cleaned_values`, or ``None`` if", "If you want validation, you should handle that by setting some attribute on", "or override :meth:`.get_label`. label_is_screenreader_only: You can set this as a parameter, or override", "django.utils.translation import pgettext from cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlistchild import AbstractFilterListChild class AbstractFilter(AbstractFilterListChild): \"\"\" Abstract base class", "it from everyone except for screenreaders with :meth:`.get_label_is_screenreader_only`. \"\"\" return self.label def get_label_is_screenreader_only(self):", "or send a slug to __init__().') def get_label(self): \"\"\" Get the label of", "should not be translatable since that would make an URL unusable by a", "you will use it in your template context to render urls for choices", "get_label(self): \"\"\" Get the label of the filter. This is typically set as", "as a parameter, or override :meth:`.get_slug`. label: You can send the label as", "the slug for this filter. The slug is used in the URL to", "labels to a form field). \"\"\" return '{}_{}'.format(self.filterlist.get_dom_id_prefix(), self.get_slug()) def get_label_dom_id(self): \"\"\" Get", "choices if your filter supports \"clear\". \"\"\" copy = self.copy() copy.clear_values() return self.filterlist.filtershandler.build_filter_url(changed_filterobject=copy)", "return self.label_is_screenreader_only def set_values(self, values): \"\"\" Change the current values stored in the", ":meth:`.get_slug`. label: You can send the label as a parameter, or override :meth:`.get_label`.", "adds this filter with the given values to the current url. You should", "'cradmin_legacy/viewhelpers/listfilter/base/abstractfilter.django.html' def __init__(self, slug=None, label=None, label_is_screenreader_only=None): \"\"\" Parameters: slug: You can send the", "shortcut to access :meth:`cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlist.AbstractFilterList.get_target_dom_id`. \"\"\" return self.filterlist.get_target_dom_id() def get_loadingmessage(self): \"\"\" Get the loading", "return { 'loadingmessage': self.get_loadingmessage() } def get_angularjs_options_json(self): \"\"\" Returns a json encoded and", "``values``. \"\"\" self.values = values def clear_values(self): \"\"\" Clear the current values stored", "given ``queryobject``. 
\"\"\" raise NotImplementedError() def get_dom_id(self): \"\"\" Get the DOM ID of", "Get the label of the filter. This is typically set as the ``<label>``", "is no values. Use this in :meth:`.filter` if you expect a single value.", "\"\"\" copy = self.copy() copy.add_values(values) return self.filterlist.filtershandler.build_filter_url(changed_filterobject=copy) def build_remove_values_url(self, values): \"\"\" Get the", "filter uses multiselect. \"\"\" copy = self.copy() copy.add_values(values) return self.filterlist.filtershandler.build_filter_url(changed_filterobject=copy) def build_remove_values_url(self, values):", "If this returns ``True``, the label will be styled to only make it", "the ``get_label_is_screenreader_only_by_default()`` method of :class:`cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlist.AbstractFilterList`. \"\"\" if self.label_is_screenreader_only is None: return self.filterlist.get_label_is_screenreader_only_by_default() else:", "def get_cleaned_values(self): \"\"\" Clean the values, to prepare them for usage in :meth:`.filter`.", "``self``, and handle the error in the template rendering the filter (and most", "to only make it visible to screenreaders. This is recommended over simply not", "message to show when the filter loads the updated target element content. \"\"\"", "to access :meth:`cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlist.AbstractFilterList.get_target_dom_id`. \"\"\" return self.filterlist.get_target_dom_id() def get_loadingmessage(self): \"\"\" Get the loading message", ":meth:`cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlist.AbstractFilterList.get_target_dom_id`. \"\"\" return self.filterlist.get_target_dom_id() def get_loadingmessage(self): \"\"\" Get the loading message to show", "parameter, or override :meth:`.get_label_is_screenreader_only`. \"\"\" self.values = [] self.slug = slug self.label =", "and hide it from everyone except for screenreaders with :meth:`.get_label_is_screenreader_only`. \"\"\" return self.label", "the current url. This is not the same as :meth:`.build_set_values_url`. This method is", "to render urls for choices if your filter uses multiselect. \"\"\" copy =", "list of ``values`` from the values currently stored in the filter. \"\"\" new_values", "render urls for choices if your filter supports \"clear\". \"\"\" copy = self.copy()", "over simply not setting a label since that would break accessibility. Defaults to", "add/remove valuess to the filter (typically via checkboxes). You should not need to", "Called by :meth:`.clean_values` to clean a single value. \"\"\" return value def get_cleaned_values(self):", "a dict. Returns: An object of the same type as the given ``queryobject``.", "by :meth:`.get_cleaned_values`, or ``None`` if there is no values. Use this in :meth:`.filter`", "for the filter input field. A label is optional, but highly recommended. Even", "of this filter. \"\"\" copy = self.__class__() copy.set_values(list(self.values)) copy.slug = self.slug copy.label =", "DOM ID of the label for this filter. \"\"\" return '{}_label'.format(self.get_dom_id()) def get_inputfield_dom_id(self):", "This method is for multiselect filters where the user can add valuess to", "used, or it may be used as the ID of the first field", "``label_is_screenreader_only`` parameter (see :class:`.AbstractFilter`). If ``label_is_screenreader_only`` is ``None``, this defaults to the return", "content. 
\"\"\" return pgettext('listfilter loading message', 'Loading') def get_angularjs_options_dict(self): \"\"\" Get angularjs directive", "can set this as a parameter, or override :meth:`.get_label_is_screenreader_only`. \"\"\" self.values = []", "to convert the values from a string into something that makes sense for", "def build_clear_values_url(self): \"\"\" Get the URL that clears this filter from the current", "used in the URL to identify the filter. If your users can change", "given ``values``. \"\"\" self.values = values def clear_values(self): \"\"\" Clear the current values", "\"\"\" return self.filterlist.get_target_dom_id() def get_loadingmessage(self): \"\"\" Get the loading message to show when", "first field when it is clicked. \"\"\" return '{}_input'.format(self.get_dom_id()) def get_target_dom_id(self): \"\"\" Get", "of the queryobject depends on the query backend. If you are filtering against", "of the target of the filter. This is just a shortcut to access", "\"clear\". \"\"\" copy = self.copy() copy.clear_values() return self.filterlist.filtershandler.build_filter_url(changed_filterobject=copy) def build_add_values_url(self, values): \"\"\" Get", "my-filter-directive={{ me.get_angularjs_options_json|safe }}> Notice that we do not include any ``\"`` or ``'``", "can send the label as a parameter, or override :meth:`.get_label`. label_is_screenreader_only: You can", "URL with another user). \"\"\" if self.slug: return self.slug else: raise NotImplementedError('You must", "__future__ import unicode_literals import json from xml.sax.saxutils import quoteattr from django.utils.translation import pgettext", "self.label_is_screenreader_only is None: return self.filterlist.get_label_is_screenreader_only_by_default() else: return self.label_is_screenreader_only def set_values(self, values): \"\"\" Change", "the label, you should specify one, and hide it from everyone except for", "def set_values(self, values): \"\"\" Change the current values stored in the filter to", "url. This is not the same as :meth:`.build_clear_values_url`. This method is for multiselect", "the default options. \"\"\" return { 'loadingmessage': self.get_loadingmessage() } def get_angularjs_options_json(self): \"\"\" Returns", "Add the current values to the given ``queryobject``. This is always called unless", "the return value of the ``get_label_is_screenreader_only_by_default()`` method of :class:`cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlist.AbstractFilterList`. \"\"\" if self.label_is_screenreader_only is", "= self.copy() copy.set_values(values) return self.filterlist.filtershandler.build_filter_url(changed_filterobject=copy) def build_clear_values_url(self): \"\"\" Get the URL that clears", "= list(self.values) values_to_remove = values for value in new_values: if value in values_to_remove:", "label since that would break accessibility. Defaults to the value of the ``label_is_screenreader_only``", "\"\"\" return value def get_cleaned_values(self): \"\"\" Clean the values, to prepare them for", "on ``self``, and handle the error in the template rendering the filter (and", "If your users can change their language, this should not be translatable since", "def get_inputfield_dom_id(self): \"\"\" Get the DOM id of the input field. 
If the", "self.filterlist.get_target_dom_id() def get_loadingmessage(self): \"\"\" Get the loading message to show when the filter", "send a slug to __init__().') def get_label(self): \"\"\" Get the label of the", "\"\"\" new_values = list(self.values) values_to_remove = values for value in new_values: if value", "a user with a different language (if a user shares an URL with", "is not the same as :meth:`.build_clear_values_url`. This method is for multiselect filters where", "+= values def remove_values(self, values): \"\"\" Remove the given list of ``values`` from", "or ``None`` if there is no values. Use this in :meth:`.filter` if you", "where the user can add valuess to the filter (typically via checkboxes). You", "filter. This is just a shortcut to access :meth:`cradmin_legacy.viewhelpers.listfilter.base.abstractfilterlist.AbstractFilterList.get_target_dom_id`. \"\"\" return self.filterlist.get_target_dom_id() def", "the given ``queryobject``. This is always called unless the ``filter_string`` is None or", "the directives HTML attribute - that is included by this method. \"\"\" return", "label self.label_is_screenreader_only = label_is_screenreader_only super(AbstractFilter, self).__init__() def copy(self): \"\"\" Returns a copy of", "against the Django ORM, this will typically be a QuerySet, but for other", "in your template context to render urls for choices if your filter uses" ]
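AbstractFilter is abstract: a concrete filter supplies at least a slug and a filter() implementation, and optionally a label. The sketch below is illustrative only and is not part of cradmin_legacy; it assumes a Django ORM backend, and the Product model with its category field is hypothetical.

# Hypothetical example - not part of cradmin_legacy itself.
# Assumes a Django model ``Product`` with a ``category`` CharField.
class CategoryFilter(AbstractFilter):
    def get_slug(self):
        return 'category'

    def get_label(self):
        return 'Category'

    def filter(self, queryobject):
        # ``queryobject`` is a QuerySet when filtering against the Django ORM.
        cleaned_value = self.get_cleaned_value()
        if cleaned_value:
            queryobject = queryobject.filter(category=cleaned_value)
        return queryobject

The URL-building helpers (build_set_values_url, build_add_values_url, build_remove_values_url, build_clear_values_url) are then used from the template context to render the choice links; they work by copying the filter, changing its values, and asking the filter list's filtershandler for the resulting URL.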
[ "None: super().__init__(*args) if reduce_expand: self.message = \"Expected dimensional reduction then expansion but got", "__init__(self, name:str, *args: object) -> None: super().__init__(*args) self.message = f\"{name} layer not found.\"", "{type(obj).__name__}.\" class LayerNotFoundError(Error): def __init__(self, name:str, *args: object) -> None: super().__init__(*args) self.message =", "of object is not Layer. Expected type Layer, given type {type(obj).__name__}.\" class LayerNotFoundError(Error):", "reduction then expansion but got the reverse.\" else: self.message = \"Expected dimensional expansion", "str: return self.message class DimensionalityMismatchError(Error): def __init__(self, reduce_expand, *args) -> None: super().__init__(*args) if", "__init__(self, name:str, *args) -> None: super().__init__(*args) self.message = f\"{name} activation not found.\" class", "__init__(self, reduce_expand, *args) -> None: super().__init__(*args) if reduce_expand: self.message = \"Expected dimensional reduction", "self.message def __str__(self) -> str: return self.message class DimensionalityMismatchError(Error): def __init__(self, reduce_expand, *args)", "not found.\" class LayerTypeMismatchError(Error): def __init__(self, obj, *args) -> None: super().__init__(*args) self.message =", "None: super().__init__(*args) self.message = f\"Type of object is not Layer. Expected type Layer,", "reverse.\" class ActivationNotFoundError(Error): def __init__(self, name:str, *args) -> None: super().__init__(*args) self.message = f\"{name}", "__init__(self): self.message = None def __repr__(self) -> str: return self.message def __str__(self) ->", "object is not Layer. Expected type Layer, given type {type(obj).__name__}.\" class LayerNotFoundError(Error): def", "super().__init__(*args) self.message = f\"Type of object is not Layer. 
Expected type Layer, given", "self.message = f\"{name} activation not found.\" class LayerTypeMismatchError(Error): def __init__(self, obj, *args) ->", "class DimensionalityMismatchError(Error): def __init__(self, reduce_expand, *args) -> None: super().__init__(*args) if reduce_expand: self.message =", "found.\" class LayerTypeMismatchError(Error): def __init__(self, obj, *args) -> None: super().__init__(*args) self.message = f\"Type", "dimensional reduction then expansion but got the reverse.\" else: self.message = \"Expected dimensional", "def __str__(self) -> str: return self.message class DimensionalityMismatchError(Error): def __init__(self, reduce_expand, *args) ->", "but got the reverse.\" class ActivationNotFoundError(Error): def __init__(self, name:str, *args) -> None: super().__init__(*args)", "if reduce_expand: self.message = \"Expected dimensional reduction then expansion but got the reverse.\"", "reduction but got the reverse.\" class ActivationNotFoundError(Error): def __init__(self, name:str, *args) -> None:", "def __repr__(self) -> str: return self.message def __str__(self) -> str: return self.message class", "def __init__(self): self.message = None def __repr__(self) -> str: return self.message def __str__(self)", "class ActivationNotFoundError(Error): def __init__(self, name:str, *args) -> None: super().__init__(*args) self.message = f\"{name} activation", "got the reverse.\" else: self.message = \"Expected dimensional expansion then reduction but got", "then expansion but got the reverse.\" else: self.message = \"Expected dimensional expansion then", "None: super().__init__(*args) self.message = f\"{name} activation not found.\" class LayerTypeMismatchError(Error): def __init__(self, obj,", "reduce_expand, *args) -> None: super().__init__(*args) if reduce_expand: self.message = \"Expected dimensional reduction then", "*args) -> None: super().__init__(*args) if reduce_expand: self.message = \"Expected dimensional reduction then expansion", "activation not found.\" class LayerTypeMismatchError(Error): def __init__(self, obj, *args) -> None: super().__init__(*args) self.message", "self.message class DimensionalityMismatchError(Error): def __init__(self, reduce_expand, *args) -> None: super().__init__(*args) if reduce_expand: self.message", "reduce_expand: self.message = \"Expected dimensional reduction then expansion but got the reverse.\" else:", "not Layer. Expected type Layer, given type {type(obj).__name__}.\" class LayerNotFoundError(Error): def __init__(self, name:str,", "ActivationNotFoundError(Error): def __init__(self, name:str, *args) -> None: super().__init__(*args) self.message = f\"{name} activation not", "Layer. 
Expected type Layer, given type {type(obj).__name__}.\" class LayerNotFoundError(Error): def __init__(self, name:str, *args:", "super().__init__(*args) self.message = f\"{name} activation not found.\" class LayerTypeMismatchError(Error): def __init__(self, obj, *args)", "Error(Exception): def __init__(self): self.message = None def __repr__(self) -> str: return self.message def", "-> None: super().__init__(*args) self.message = f\"{name} activation not found.\" class LayerTypeMismatchError(Error): def __init__(self,", "\"Expected dimensional expansion then reduction but got the reverse.\" class ActivationNotFoundError(Error): def __init__(self,", "expansion then reduction but got the reverse.\" class ActivationNotFoundError(Error): def __init__(self, name:str, *args)", "self.message = None def __repr__(self) -> str: return self.message def __str__(self) -> str:", "def __init__(self, name:str, *args) -> None: super().__init__(*args) self.message = f\"{name} activation not found.\"", "def __init__(self, name:str, *args: object) -> None: super().__init__(*args) self.message = f\"{name} layer not", "Layer, given type {type(obj).__name__}.\" class LayerNotFoundError(Error): def __init__(self, name:str, *args: object) -> None:", "return self.message class DimensionalityMismatchError(Error): def __init__(self, reduce_expand, *args) -> None: super().__init__(*args) if reduce_expand:", "LayerTypeMismatchError(Error): def __init__(self, obj, *args) -> None: super().__init__(*args) self.message = f\"Type of object", "*args) -> None: super().__init__(*args) self.message = f\"Type of object is not Layer. Expected", "-> str: return self.message def __str__(self) -> str: return self.message class DimensionalityMismatchError(Error): def", "but got the reverse.\" else: self.message = \"Expected dimensional expansion then reduction but", "None def __repr__(self) -> str: return self.message def __str__(self) -> str: return self.message", "__init__(self, obj, *args) -> None: super().__init__(*args) self.message = f\"Type of object is not", "__repr__(self) -> str: return self.message def __str__(self) -> str: return self.message class DimensionalityMismatchError(Error):", "__str__(self) -> str: return self.message class DimensionalityMismatchError(Error): def __init__(self, reduce_expand, *args) -> None:", "LayerNotFoundError(Error): def __init__(self, name:str, *args: object) -> None: super().__init__(*args) self.message = f\"{name} layer", "\"Expected dimensional reduction then expansion but got the reverse.\" else: self.message = \"Expected", "str: return self.message def __str__(self) -> str: return self.message class DimensionalityMismatchError(Error): def __init__(self,", "self.message = f\"Type of object is not Layer. 
Expected type Layer, given type", "type {type(obj).__name__}.\" class LayerNotFoundError(Error): def __init__(self, name:str, *args: object) -> None: super().__init__(*args) self.message", "= f\"{name} activation not found.\" class LayerTypeMismatchError(Error): def __init__(self, obj, *args) -> None:", "Expected type Layer, given type {type(obj).__name__}.\" class LayerNotFoundError(Error): def __init__(self, name:str, *args: object)", "f\"{name} activation not found.\" class LayerTypeMismatchError(Error): def __init__(self, obj, *args) -> None: super().__init__(*args)", "self.message = \"Expected dimensional reduction then expansion but got the reverse.\" else: self.message", "-> None: super().__init__(*args) if reduce_expand: self.message = \"Expected dimensional reduction then expansion but", "got the reverse.\" class ActivationNotFoundError(Error): def __init__(self, name:str, *args) -> None: super().__init__(*args) self.message", "f\"Type of object is not Layer. Expected type Layer, given type {type(obj).__name__}.\" class", "obj, *args) -> None: super().__init__(*args) self.message = f\"Type of object is not Layer.", "type Layer, given type {type(obj).__name__}.\" class LayerNotFoundError(Error): def __init__(self, name:str, *args: object) ->", "dimensional expansion then reduction but got the reverse.\" class ActivationNotFoundError(Error): def __init__(self, name:str,", "= f\"Type of object is not Layer. Expected type Layer, given type {type(obj).__name__}.\"", "= \"Expected dimensional expansion then reduction but got the reverse.\" class ActivationNotFoundError(Error): def", "DimensionalityMismatchError(Error): def __init__(self, reduce_expand, *args) -> None: super().__init__(*args) if reduce_expand: self.message = \"Expected", "expansion but got the reverse.\" else: self.message = \"Expected dimensional expansion then reduction", "super().__init__(*args) if reduce_expand: self.message = \"Expected dimensional reduction then expansion but got the", "given type {type(obj).__name__}.\" class LayerNotFoundError(Error): def __init__(self, name:str, *args: object) -> None: super().__init__(*args)", "= None def __repr__(self) -> str: return self.message def __str__(self) -> str: return", "-> str: return self.message class DimensionalityMismatchError(Error): def __init__(self, reduce_expand, *args) -> None: super().__init__(*args)", "def __init__(self, reduce_expand, *args) -> None: super().__init__(*args) if reduce_expand: self.message = \"Expected dimensional", "class Error(Exception): def __init__(self): self.message = None def __repr__(self) -> str: return self.message", "self.message = \"Expected dimensional expansion then reduction but got the reverse.\" class ActivationNotFoundError(Error):", "class LayerTypeMismatchError(Error): def __init__(self, obj, *args) -> None: super().__init__(*args) self.message = f\"Type of", "the reverse.\" else: self.message = \"Expected dimensional expansion then reduction but got the", "*args) -> None: super().__init__(*args) self.message = f\"{name} activation not found.\" class LayerTypeMismatchError(Error): def", "-> None: super().__init__(*args) self.message = f\"Type of object is not Layer. 
Expected type", "the reverse.\" class ActivationNotFoundError(Error): def __init__(self, name:str, *args) -> None: super().__init__(*args) self.message =", "<gh_stars>0 class Error(Exception): def __init__(self): self.message = None def __repr__(self) -> str: return", "return self.message def __str__(self) -> str: return self.message class DimensionalityMismatchError(Error): def __init__(self, reduce_expand,", "else: self.message = \"Expected dimensional expansion then reduction but got the reverse.\" class", "reverse.\" else: self.message = \"Expected dimensional expansion then reduction but got the reverse.\"", "def __init__(self, obj, *args) -> None: super().__init__(*args) self.message = f\"Type of object is", "= \"Expected dimensional reduction then expansion but got the reverse.\" else: self.message =", "name:str, *args) -> None: super().__init__(*args) self.message = f\"{name} activation not found.\" class LayerTypeMismatchError(Error):", "is not Layer. Expected type Layer, given type {type(obj).__name__}.\" class LayerNotFoundError(Error): def __init__(self,", "class LayerNotFoundError(Error): def __init__(self, name:str, *args: object) -> None: super().__init__(*args) self.message = f\"{name}", "then reduction but got the reverse.\" class ActivationNotFoundError(Error): def __init__(self, name:str, *args) ->" ]
[ "bin): \"\"\" Determine the format of a picture. \\n :param file_head: The [:8]", "of the pic's file_head. :return: Pic's format if matched, \"unknown\" if none matched", "\"\"\" res = \"unknown\" if b'\\xff\\xd8\\xff' in file_head: res = 'jpg' elif b'\\x89PNG\\r\\n\\x1a\\n'", "a picture. \\n :param file_head: The [:8] of the pic's file_head. :return: Pic's", "file_head. :return: Pic's format if matched, \"unknown\" if none matched . \"\"\" res", "if none matched . \"\"\" res = \"unknown\" if b'\\xff\\xd8\\xff' in file_head: res", "pic's file_head. :return: Pic's format if matched, \"unknown\" if none matched . \"\"\"", "file_head: The [:8] of the pic's file_head. :return: Pic's format if matched, \"unknown\"", "= \"unknown\" if b'\\xff\\xd8\\xff' in file_head: res = 'jpg' elif b'\\x89PNG\\r\\n\\x1a\\n' in file_head:", "in file_head: res = 'jpg' elif b'\\x89PNG\\r\\n\\x1a\\n' in file_head: res = 'png' return", "none matched . \"\"\" res = \"unknown\" if b'\\xff\\xd8\\xff' in file_head: res =", "if b'\\xff\\xd8\\xff' in file_head: res = 'jpg' elif b'\\x89PNG\\r\\n\\x1a\\n' in file_head: res =", "the format of a picture. \\n :param file_head: The [:8] of the pic's", "pic_format(file_head: bin): \"\"\" Determine the format of a picture. \\n :param file_head: The", "matched, \"unknown\" if none matched . \"\"\" res = \"unknown\" if b'\\xff\\xd8\\xff' in", "<reponame>YeungShaoFeng/libxib def pic_format(file_head: bin): \"\"\" Determine the format of a picture. \\n :param", ". \"\"\" res = \"unknown\" if b'\\xff\\xd8\\xff' in file_head: res = 'jpg' elif", "[:8] of the pic's file_head. :return: Pic's format if matched, \"unknown\" if none", "file_head: res = 'jpg' elif b'\\x89PNG\\r\\n\\x1a\\n' in file_head: res = 'png' return res", "format of a picture. \\n :param file_head: The [:8] of the pic's file_head.", "Pic's format if matched, \"unknown\" if none matched . \"\"\" res = \"unknown\"", "\"\"\" Determine the format of a picture. \\n :param file_head: The [:8] of", ":return: Pic's format if matched, \"unknown\" if none matched . \"\"\" res =", "picture. \\n :param file_head: The [:8] of the pic's file_head. :return: Pic's format", "def pic_format(file_head: bin): \"\"\" Determine the format of a picture. \\n :param file_head:", "\"unknown\" if b'\\xff\\xd8\\xff' in file_head: res = 'jpg' elif b'\\x89PNG\\r\\n\\x1a\\n' in file_head: res", "b'\\xff\\xd8\\xff' in file_head: res = 'jpg' elif b'\\x89PNG\\r\\n\\x1a\\n' in file_head: res = 'png'", "\"unknown\" if none matched . \"\"\" res = \"unknown\" if b'\\xff\\xd8\\xff' in file_head:", ":param file_head: The [:8] of the pic's file_head. :return: Pic's format if matched,", "The [:8] of the pic's file_head. :return: Pic's format if matched, \"unknown\" if", "if matched, \"unknown\" if none matched . \"\"\" res = \"unknown\" if b'\\xff\\xd8\\xff'", "format if matched, \"unknown\" if none matched . \"\"\" res = \"unknown\" if", "res = \"unknown\" if b'\\xff\\xd8\\xff' in file_head: res = 'jpg' elif b'\\x89PNG\\r\\n\\x1a\\n' in", "the pic's file_head. :return: Pic's format if matched, \"unknown\" if none matched .", "Determine the format of a picture. \\n :param file_head: The [:8] of the", "\\n :param file_head: The [:8] of the pic's file_head. :return: Pic's format if", "of a picture. \\n :param file_head: The [:8] of the pic's file_head. :return:", "matched . \"\"\" res = \"unknown\" if b'\\xff\\xd8\\xff' in file_head: res = 'jpg'" ]
[ "= models.CharField(max_length=255) address2 = models.CharField(max_length=255) city = models.CharField(max_length=255) zip = models.CharField(max_length=20, blank=True, null=True)", "django.contrib.gis.db import models class Address(models.Model): address1 = models.CharField(max_length=255) address2 = models.CharField(max_length=255) city =", "models.CharField(max_length=255) address2 = models.CharField(max_length=255) city = models.CharField(max_length=255) zip = models.CharField(max_length=20, blank=True, null=True) state", "zip = models.CharField(max_length=20, blank=True, null=True) state = models.CharField(max_length=100, blank=True, null=True) points = models.PointField()", "<filename>common/models.py from django.contrib.gis.db import models class Address(models.Model): address1 = models.CharField(max_length=255) address2 = models.CharField(max_length=255)", "from django.contrib.gis.db import models class Address(models.Model): address1 = models.CharField(max_length=255) address2 = models.CharField(max_length=255) city", "= models.CharField(max_length=255) zip = models.CharField(max_length=20, blank=True, null=True) state = models.CharField(max_length=100, blank=True, null=True) points", "Address(models.Model): address1 = models.CharField(max_length=255) address2 = models.CharField(max_length=255) city = models.CharField(max_length=255) zip = models.CharField(max_length=20,", "models.CharField(max_length=255) zip = models.CharField(max_length=20, blank=True, null=True) state = models.CharField(max_length=100, blank=True, null=True) points =", "class Address(models.Model): address1 = models.CharField(max_length=255) address2 = models.CharField(max_length=255) city = models.CharField(max_length=255) zip =", "city = models.CharField(max_length=255) zip = models.CharField(max_length=20, blank=True, null=True) state = models.CharField(max_length=100, blank=True, null=True)", "= models.CharField(max_length=255) city = models.CharField(max_length=255) zip = models.CharField(max_length=20, blank=True, null=True) state = models.CharField(max_length=100,", "import models class Address(models.Model): address1 = models.CharField(max_length=255) address2 = models.CharField(max_length=255) city = models.CharField(max_length=255)", "address2 = models.CharField(max_length=255) city = models.CharField(max_length=255) zip = models.CharField(max_length=20, blank=True, null=True) state =", "models class Address(models.Model): address1 = models.CharField(max_length=255) address2 = models.CharField(max_length=255) city = models.CharField(max_length=255) zip", "models.CharField(max_length=255) city = models.CharField(max_length=255) zip = models.CharField(max_length=20, blank=True, null=True) state = models.CharField(max_length=100, blank=True,", "address1 = models.CharField(max_length=255) address2 = models.CharField(max_length=255) city = models.CharField(max_length=255) zip = models.CharField(max_length=20, blank=True," ]
[ "path('allposts/', recruiter.JobPostAllView.as_view()), path('post/evaluate/', recruiter.EvaluatePostListView.as_view()), path('post/matched-resumes/', recruiter.MatchedResumesListView.as_view()), path('allposts/<int:id>/', recruiter.JobPostAllDetailView.as_view()), path('jobposts/<int:id>/', recruiter.JobPostDetailView.as_view()), path('jobpost/add/', recruiter.JobPostCreateView.as_view()), path('categories/',", "jobseeker.WorkHistoryListView.as_view()), path('education/add/', jobseeker.EducationBackgroundCreateView.as_view()), path('education/<int:id>/', jobseeker.EducationBackgroundDetailView.as_view()), path('education/', jobseeker.EducationBackgroundListView.as_view()), ], 'jobMatchingApi'), namespace='jobseeker')), path('recruiter/', include(([ path('',", "[ path('jobseeker/', include(([ path('', jobseeker.JobSeekerView.as_view()), path('resume/', jobseeker.ResumeDetailView.as_view()), path('resume/<int:id>/', jobseeker.ResumeUpdateView.as_view()), path('resume/evaluate/', jobseeker.EvaluateResumeListView.as_view()), path('resume/matched-posts/', jobseeker.MatchedPostsListView.as_view()),", "path('resume/add/', jobseeker.ResumeCreateView.as_view()), path('whistory/add/', jobseeker.WorkHistoryCreateView.as_view()), path('whistory/<int:id>/', jobseeker.WorkHistoryDetailView.as_view()), path('whistory/', jobseeker.WorkHistoryListView.as_view()), path('education/add/', jobseeker.EducationBackgroundCreateView.as_view()), path('education/<int:id>/', jobseeker.EducationBackgroundDetailView.as_view()), path('education/',", "jobseeker.JobSeekerView.as_view()), path('resume/', jobseeker.ResumeDetailView.as_view()), path('resume/<int:id>/', jobseeker.ResumeUpdateView.as_view()), path('resume/evaluate/', jobseeker.EvaluateResumeListView.as_view()), path('resume/matched-posts/', jobseeker.MatchedPostsListView.as_view()), path('resume/matched-posts/<int:id>/', jobseeker.MatchedPostsDetailView.as_view()), path('resume/add/', jobseeker.ResumeCreateView.as_view()),", "path('jobposts/', recruiter.JobPostListView.as_view()), path('allposts/', recruiter.JobPostAllView.as_view()), path('post/evaluate/', recruiter.EvaluatePostListView.as_view()), path('post/matched-resumes/', recruiter.MatchedResumesListView.as_view()), path('allposts/<int:id>/', recruiter.JobPostAllDetailView.as_view()), path('jobposts/<int:id>/', recruiter.JobPostDetailView.as_view()), path('jobpost/add/',", "recruiter.EvaluatePostListView.as_view()), path('post/matched-resumes/', recruiter.MatchedResumesListView.as_view()), path('allposts/<int:id>/', recruiter.JobPostAllDetailView.as_view()), path('jobposts/<int:id>/', recruiter.JobPostDetailView.as_view()), path('jobpost/add/', recruiter.JobPostCreateView.as_view()), path('categories/', recruiter.JobCategoryListView.as_view()), ], 'jobMatchingApi'),", "path('whistory/<int:id>/', jobseeker.WorkHistoryDetailView.as_view()), path('whistory/', jobseeker.WorkHistoryListView.as_view()), path('education/add/', jobseeker.EducationBackgroundCreateView.as_view()), path('education/<int:id>/', jobseeker.EducationBackgroundDetailView.as_view()), path('education/', jobseeker.EducationBackgroundListView.as_view()), ], 'jobMatchingApi'), namespace='jobseeker')),", "path('resume/<int:id>/', jobseeker.ResumeUpdateView.as_view()), path('resume/evaluate/', jobseeker.EvaluateResumeListView.as_view()), path('resume/matched-posts/', jobseeker.MatchedPostsListView.as_view()), path('resume/matched-posts/<int:id>/', jobseeker.MatchedPostsDetailView.as_view()), path('resume/add/', 
jobseeker.ResumeCreateView.as_view()), path('whistory/add/', jobseeker.WorkHistoryCreateView.as_view()), path('whistory/<int:id>/',", "recruiter.JobPostListView.as_view()), path('allposts/', recruiter.JobPostAllView.as_view()), path('post/evaluate/', recruiter.EvaluatePostListView.as_view()), path('post/matched-resumes/', recruiter.MatchedResumesListView.as_view()), path('allposts/<int:id>/', recruiter.JobPostAllDetailView.as_view()), path('jobposts/<int:id>/', recruiter.JobPostDetailView.as_view()), path('jobpost/add/', recruiter.JobPostCreateView.as_view()),", "import recruiter, jobseeker urlpatterns = [ path('jobseeker/', include(([ path('', jobseeker.JobSeekerView.as_view()), path('resume/', jobseeker.ResumeDetailView.as_view()), path('resume/<int:id>/',", "jobseeker.ResumeCreateView.as_view()), path('whistory/add/', jobseeker.WorkHistoryCreateView.as_view()), path('whistory/<int:id>/', jobseeker.WorkHistoryDetailView.as_view()), path('whistory/', jobseeker.WorkHistoryListView.as_view()), path('education/add/', jobseeker.EducationBackgroundCreateView.as_view()), path('education/<int:id>/', jobseeker.EducationBackgroundDetailView.as_view()), path('education/', jobseeker.EducationBackgroundListView.as_view()),", "path('resume/matched-posts/<int:id>/', jobseeker.MatchedPostsDetailView.as_view()), path('resume/add/', jobseeker.ResumeCreateView.as_view()), path('whistory/add/', jobseeker.WorkHistoryCreateView.as_view()), path('whistory/<int:id>/', jobseeker.WorkHistoryDetailView.as_view()), path('whistory/', jobseeker.WorkHistoryListView.as_view()), path('education/add/', jobseeker.EducationBackgroundCreateView.as_view()), path('education/<int:id>/',", "'jobMatchingApi'), namespace='jobseeker')), path('recruiter/', include(([ path('', recruiter.RecruiterView.as_view()), path('jobposts/', recruiter.JobPostListView.as_view()), path('allposts/', recruiter.JobPostAllView.as_view()), path('post/evaluate/', recruiter.EvaluatePostListView.as_view()), path('post/matched-resumes/',", "path('', jobseeker.JobSeekerView.as_view()), path('resume/', jobseeker.ResumeDetailView.as_view()), path('resume/<int:id>/', jobseeker.ResumeUpdateView.as_view()), path('resume/evaluate/', jobseeker.EvaluateResumeListView.as_view()), path('resume/matched-posts/', jobseeker.MatchedPostsListView.as_view()), path('resume/matched-posts/<int:id>/', jobseeker.MatchedPostsDetailView.as_view()), path('resume/add/',", "path('education/<int:id>/', jobseeker.EducationBackgroundDetailView.as_view()), path('education/', jobseeker.EducationBackgroundListView.as_view()), ], 'jobMatchingApi'), namespace='jobseeker')), path('recruiter/', include(([ path('', recruiter.RecruiterView.as_view()), path('jobposts/', recruiter.JobPostListView.as_view()),", "jobseeker.WorkHistoryDetailView.as_view()), path('whistory/', jobseeker.WorkHistoryListView.as_view()), path('education/add/', jobseeker.EducationBackgroundCreateView.as_view()), path('education/<int:id>/', jobseeker.EducationBackgroundDetailView.as_view()), path('education/', jobseeker.EducationBackgroundListView.as_view()), ], 'jobMatchingApi'), namespace='jobseeker')), path('recruiter/',", "urlpatterns = [ path('jobseeker/', include(([ path('', jobseeker.JobSeekerView.as_view()), path('resume/', jobseeker.ResumeDetailView.as_view()), path('resume/<int:id>/', jobseeker.ResumeUpdateView.as_view()), path('resume/evaluate/', jobseeker.EvaluateResumeListView.as_view()),", "jobseeker.MatchedPostsListView.as_view()), path('resume/matched-posts/<int:id>/', 
jobseeker.MatchedPostsDetailView.as_view()), path('resume/add/', jobseeker.ResumeCreateView.as_view()), path('whistory/add/', jobseeker.WorkHistoryCreateView.as_view()), path('whistory/<int:id>/', jobseeker.WorkHistoryDetailView.as_view()), path('whistory/', jobseeker.WorkHistoryListView.as_view()), path('education/add/', jobseeker.EducationBackgroundCreateView.as_view()),", "jobseeker.EducationBackgroundCreateView.as_view()), path('education/<int:id>/', jobseeker.EducationBackgroundDetailView.as_view()), path('education/', jobseeker.EducationBackgroundListView.as_view()), ], 'jobMatchingApi'), namespace='jobseeker')), path('recruiter/', include(([ path('', recruiter.RecruiterView.as_view()), path('jobposts/',", "include(([ path('', jobseeker.JobSeekerView.as_view()), path('resume/', jobseeker.ResumeDetailView.as_view()), path('resume/<int:id>/', jobseeker.ResumeUpdateView.as_view()), path('resume/evaluate/', jobseeker.EvaluateResumeListView.as_view()), path('resume/matched-posts/', jobseeker.MatchedPostsListView.as_view()), path('resume/matched-posts/<int:id>/', jobseeker.MatchedPostsDetailView.as_view()),", "recruiter.RecruiterView.as_view()), path('jobposts/', recruiter.JobPostListView.as_view()), path('allposts/', recruiter.JobPostAllView.as_view()), path('post/evaluate/', recruiter.EvaluatePostListView.as_view()), path('post/matched-resumes/', recruiter.MatchedResumesListView.as_view()), path('allposts/<int:id>/', recruiter.JobPostAllDetailView.as_view()), path('jobposts/<int:id>/', recruiter.JobPostDetailView.as_view()),", "jobseeker urlpatterns = [ path('jobseeker/', include(([ path('', jobseeker.JobSeekerView.as_view()), path('resume/', jobseeker.ResumeDetailView.as_view()), path('resume/<int:id>/', jobseeker.ResumeUpdateView.as_view()), path('resume/evaluate/',", "path('whistory/add/', jobseeker.WorkHistoryCreateView.as_view()), path('whistory/<int:id>/', jobseeker.WorkHistoryDetailView.as_view()), path('whistory/', jobseeker.WorkHistoryListView.as_view()), path('education/add/', jobseeker.EducationBackgroundCreateView.as_view()), path('education/<int:id>/', jobseeker.EducationBackgroundDetailView.as_view()), path('education/', jobseeker.EducationBackgroundListView.as_view()), ],", "include(([ path('', recruiter.RecruiterView.as_view()), path('jobposts/', recruiter.JobPostListView.as_view()), path('allposts/', recruiter.JobPostAllView.as_view()), path('post/evaluate/', recruiter.EvaluatePostListView.as_view()), path('post/matched-resumes/', recruiter.MatchedResumesListView.as_view()), path('allposts/<int:id>/', recruiter.JobPostAllDetailView.as_view()),", "= [ path('jobseeker/', include(([ path('', jobseeker.JobSeekerView.as_view()), path('resume/', jobseeker.ResumeDetailView.as_view()), path('resume/<int:id>/', jobseeker.ResumeUpdateView.as_view()), path('resume/evaluate/', jobseeker.EvaluateResumeListView.as_view()), path('resume/matched-posts/',", "jobseeker.EvaluateResumeListView.as_view()), path('resume/matched-posts/', jobseeker.MatchedPostsListView.as_view()), path('resume/matched-posts/<int:id>/', jobseeker.MatchedPostsDetailView.as_view()), path('resume/add/', jobseeker.ResumeCreateView.as_view()), path('whistory/add/', jobseeker.WorkHistoryCreateView.as_view()), path('whistory/<int:id>/', jobseeker.WorkHistoryDetailView.as_view()), path('whistory/', jobseeker.WorkHistoryListView.as_view()),", ".views import recruiter, jobseeker urlpatterns = [ path('jobseeker/', include(([ path('', jobseeker.JobSeekerView.as_view()), path('resume/', 
jobseeker.ResumeDetailView.as_view()),", "path from .views import recruiter, jobseeker urlpatterns = [ path('jobseeker/', include(([ path('', jobseeker.JobSeekerView.as_view()),", "namespace='jobseeker')), path('recruiter/', include(([ path('', recruiter.RecruiterView.as_view()), path('jobposts/', recruiter.JobPostListView.as_view()), path('allposts/', recruiter.JobPostAllView.as_view()), path('post/evaluate/', recruiter.EvaluatePostListView.as_view()), path('post/matched-resumes/', recruiter.MatchedResumesListView.as_view()),", "django.urls import include, path from .views import recruiter, jobseeker urlpatterns = [ path('jobseeker/',", "path('post/evaluate/', recruiter.EvaluatePostListView.as_view()), path('post/matched-resumes/', recruiter.MatchedResumesListView.as_view()), path('allposts/<int:id>/', recruiter.JobPostAllDetailView.as_view()), path('jobposts/<int:id>/', recruiter.JobPostDetailView.as_view()), path('jobpost/add/', recruiter.JobPostCreateView.as_view()), path('categories/', recruiter.JobCategoryListView.as_view()), ],", "path('whistory/', jobseeker.WorkHistoryListView.as_view()), path('education/add/', jobseeker.EducationBackgroundCreateView.as_view()), path('education/<int:id>/', jobseeker.EducationBackgroundDetailView.as_view()), path('education/', jobseeker.EducationBackgroundListView.as_view()), ], 'jobMatchingApi'), namespace='jobseeker')), path('recruiter/', include(([", "jobseeker.EducationBackgroundListView.as_view()), ], 'jobMatchingApi'), namespace='jobseeker')), path('recruiter/', include(([ path('', recruiter.RecruiterView.as_view()), path('jobposts/', recruiter.JobPostListView.as_view()), path('allposts/', recruiter.JobPostAllView.as_view()), path('post/evaluate/',", "], 'jobMatchingApi'), namespace='jobseeker')), path('recruiter/', include(([ path('', recruiter.RecruiterView.as_view()), path('jobposts/', recruiter.JobPostListView.as_view()), path('allposts/', recruiter.JobPostAllView.as_view()), path('post/evaluate/', recruiter.EvaluatePostListView.as_view()),", "path('resume/matched-posts/', jobseeker.MatchedPostsListView.as_view()), path('resume/matched-posts/<int:id>/', jobseeker.MatchedPostsDetailView.as_view()), path('resume/add/', jobseeker.ResumeCreateView.as_view()), path('whistory/add/', jobseeker.WorkHistoryCreateView.as_view()), path('whistory/<int:id>/', jobseeker.WorkHistoryDetailView.as_view()), path('whistory/', jobseeker.WorkHistoryListView.as_view()), path('education/add/',", "include, path from .views import recruiter, jobseeker urlpatterns = [ path('jobseeker/', include(([ path('',", "recruiter.JobPostAllView.as_view()), path('post/evaluate/', recruiter.EvaluatePostListView.as_view()), path('post/matched-resumes/', recruiter.MatchedResumesListView.as_view()), path('allposts/<int:id>/', recruiter.JobPostAllDetailView.as_view()), path('jobposts/<int:id>/', recruiter.JobPostDetailView.as_view()), path('jobpost/add/', recruiter.JobPostCreateView.as_view()), path('categories/', recruiter.JobCategoryListView.as_view()),", "path('', recruiter.RecruiterView.as_view()), path('jobposts/', recruiter.JobPostListView.as_view()), path('allposts/', recruiter.JobPostAllView.as_view()), path('post/evaluate/', recruiter.EvaluatePostListView.as_view()), path('post/matched-resumes/', recruiter.MatchedResumesListView.as_view()), path('allposts/<int:id>/', recruiter.JobPostAllDetailView.as_view()), path('jobposts/<int:id>/',", "import include, path from .views import recruiter, jobseeker urlpatterns = [ path('jobseeker/', include(([", "path('education/add/', 
jobseeker.EducationBackgroundCreateView.as_view()), path('education/<int:id>/', jobseeker.EducationBackgroundDetailView.as_view()), path('education/', jobseeker.EducationBackgroundListView.as_view()), ], 'jobMatchingApi'), namespace='jobseeker')), path('recruiter/', include(([ path('', recruiter.RecruiterView.as_view()),", "jobseeker.ResumeDetailView.as_view()), path('resume/<int:id>/', jobseeker.ResumeUpdateView.as_view()), path('resume/evaluate/', jobseeker.EvaluateResumeListView.as_view()), path('resume/matched-posts/', jobseeker.MatchedPostsListView.as_view()), path('resume/matched-posts/<int:id>/', jobseeker.MatchedPostsDetailView.as_view()), path('resume/add/', jobseeker.ResumeCreateView.as_view()), path('whistory/add/', jobseeker.WorkHistoryCreateView.as_view()),", "from .views import recruiter, jobseeker urlpatterns = [ path('jobseeker/', include(([ path('', jobseeker.JobSeekerView.as_view()), path('resume/',", "path('jobseeker/', include(([ path('', jobseeker.JobSeekerView.as_view()), path('resume/', jobseeker.ResumeDetailView.as_view()), path('resume/<int:id>/', jobseeker.ResumeUpdateView.as_view()), path('resume/evaluate/', jobseeker.EvaluateResumeListView.as_view()), path('resume/matched-posts/', jobseeker.MatchedPostsListView.as_view()), path('resume/matched-posts/<int:id>/',", "path('resume/evaluate/', jobseeker.EvaluateResumeListView.as_view()), path('resume/matched-posts/', jobseeker.MatchedPostsListView.as_view()), path('resume/matched-posts/<int:id>/', jobseeker.MatchedPostsDetailView.as_view()), path('resume/add/', jobseeker.ResumeCreateView.as_view()), path('whistory/add/', jobseeker.WorkHistoryCreateView.as_view()), path('whistory/<int:id>/', jobseeker.WorkHistoryDetailView.as_view()), path('whistory/',", "path('resume/', jobseeker.ResumeDetailView.as_view()), path('resume/<int:id>/', jobseeker.ResumeUpdateView.as_view()), path('resume/evaluate/', jobseeker.EvaluateResumeListView.as_view()), path('resume/matched-posts/', jobseeker.MatchedPostsListView.as_view()), path('resume/matched-posts/<int:id>/', jobseeker.MatchedPostsDetailView.as_view()), path('resume/add/', jobseeker.ResumeCreateView.as_view()), path('whistory/add/',", "jobseeker.MatchedPostsDetailView.as_view()), path('resume/add/', jobseeker.ResumeCreateView.as_view()), path('whistory/add/', jobseeker.WorkHistoryCreateView.as_view()), path('whistory/<int:id>/', jobseeker.WorkHistoryDetailView.as_view()), path('whistory/', jobseeker.WorkHistoryListView.as_view()), path('education/add/', jobseeker.EducationBackgroundCreateView.as_view()), path('education/<int:id>/', jobseeker.EducationBackgroundDetailView.as_view()),", "jobseeker.EducationBackgroundDetailView.as_view()), path('education/', jobseeker.EducationBackgroundListView.as_view()), ], 'jobMatchingApi'), namespace='jobseeker')), path('recruiter/', include(([ path('', recruiter.RecruiterView.as_view()), path('jobposts/', recruiter.JobPostListView.as_view()), path('allposts/',", "<gh_stars>0 from django.urls import include, path from .views import recruiter, jobseeker urlpatterns =", "jobseeker.ResumeUpdateView.as_view()), path('resume/evaluate/', jobseeker.EvaluateResumeListView.as_view()), path('resume/matched-posts/', jobseeker.MatchedPostsListView.as_view()), path('resume/matched-posts/<int:id>/', jobseeker.MatchedPostsDetailView.as_view()), path('resume/add/', jobseeker.ResumeCreateView.as_view()), path('whistory/add/', jobseeker.WorkHistoryCreateView.as_view()), path('whistory/<int:id>/', jobseeker.WorkHistoryDetailView.as_view()),", 
"recruiter.MatchedResumesListView.as_view()), path('allposts/<int:id>/', recruiter.JobPostAllDetailView.as_view()), path('jobposts/<int:id>/', recruiter.JobPostDetailView.as_view()), path('jobpost/add/', recruiter.JobPostCreateView.as_view()), path('categories/', recruiter.JobCategoryListView.as_view()), ], 'jobMatchingApi'), namespace='recruiter')), ]", "recruiter, jobseeker urlpatterns = [ path('jobseeker/', include(([ path('', jobseeker.JobSeekerView.as_view()), path('resume/', jobseeker.ResumeDetailView.as_view()), path('resume/<int:id>/', jobseeker.ResumeUpdateView.as_view()),", "path('recruiter/', include(([ path('', recruiter.RecruiterView.as_view()), path('jobposts/', recruiter.JobPostListView.as_view()), path('allposts/', recruiter.JobPostAllView.as_view()), path('post/evaluate/', recruiter.EvaluatePostListView.as_view()), path('post/matched-resumes/', recruiter.MatchedResumesListView.as_view()), path('allposts/<int:id>/',", "path('post/matched-resumes/', recruiter.MatchedResumesListView.as_view()), path('allposts/<int:id>/', recruiter.JobPostAllDetailView.as_view()), path('jobposts/<int:id>/', recruiter.JobPostDetailView.as_view()), path('jobpost/add/', recruiter.JobPostCreateView.as_view()), path('categories/', recruiter.JobCategoryListView.as_view()), ], 'jobMatchingApi'), namespace='recruiter')),", "path('education/', jobseeker.EducationBackgroundListView.as_view()), ], 'jobMatchingApi'), namespace='jobseeker')), path('recruiter/', include(([ path('', recruiter.RecruiterView.as_view()), path('jobposts/', recruiter.JobPostListView.as_view()), path('allposts/', recruiter.JobPostAllView.as_view()),", "jobseeker.WorkHistoryCreateView.as_view()), path('whistory/<int:id>/', jobseeker.WorkHistoryDetailView.as_view()), path('whistory/', jobseeker.WorkHistoryListView.as_view()), path('education/add/', jobseeker.EducationBackgroundCreateView.as_view()), path('education/<int:id>/', jobseeker.EducationBackgroundDetailView.as_view()), path('education/', jobseeker.EducationBackgroundListView.as_view()), ], 'jobMatchingApi'),", "from django.urls import include, path from .views import recruiter, jobseeker urlpatterns = [" ]
[ "Format string linking to the download of a vscode extension .vsix file. MARKETPLACE_DOWNLOAD_LINK", "session: aiohttp.ClientSession, extension_id: str, version: str, save_path: Path ) -> None: \"\"\" Download", "patch. :type extension_paths: typing.List[ExtensionPath] :return: None, this patches the existing objects. :rtype: None", ":param versionize: Wether to append version names to the paths, defaults to True", "directory and delve one level deeper into the value. If the value of", "patch_extension_paths( session: aiohttp.ClientSession, extension_paths: typing.List[ExtensionPath], *, versionize: bool = True, ) -> None:", "from pathlib import Path import aiohttp from loguru import logger from .utils import", "Either a path to a json config file or it's raw data (dict", "aiohttp.ClientSession, extension_paths: typing.List[ExtensionPath] ) -> None: \"\"\" Add the `version` attributes to the", "get the real names of extensions. Can also append the current version number.", "extension to. :type save_path: Path :return: None. :rtype: None \"\"\" publisher_name, extension_name =", "download_url, get_request, get_original_filename # Format string linking to the download of a vscode", "url = _build_extension_download_url(extension_name, publisher_name, version) await download_url(session, url, save_path, return_type=bytes) logger.info(f'Downloaded {extension_name} to", "use. :type session: aiohttp.ClientSession :param extension_id: Desired extension ID. :type extension_id: str :param", "MARKETPLACE_DOWNLOAD_LINK.format( extension_name=extension_name, publisher_name=publisher_name, version=version ) def _build_extension_download_url_from_ext_path(ext_path: ExtensionPath) -> str: \"\"\" Build the", "'latest' logger.debug(f'Extension {extension_id} is of version {version}.') return version async def versionize_extension_paths( session:", "= await get_request(session, url, return_type=str) match = re.search(r'\"Version\":\"(.*?)\"', text) if not match: raise", "open it and then do the same thing. :param json_data: Either a path", "key {key} was empty.') path_list.append(ExtensionPath(Path(key) / f'{value}', value)) elif isinstance(value, dict): for ext_path", "return_type=str) match = re.search(r'\"Version\":\"(.*?)\"', text) if not match: raise ValueError('Extension marketplace page data", "the real names of extensions. Can also append the current version number. :param", "parsed from the initial config. :rtype: typing.List[ExtensionPath] \"\"\" path_list = [] for key,", ":rtype: None \"\"\" get_version_tasks = [ get_extension_version(session, ext_path.extension_id) for ext_path in extension_paths ]", "download url. :rtype: str \"\"\" return MARKETPLACE_DOWNLOAD_LINK.format( extension_name=extension_name, publisher_name=publisher_name, version=version ) def _build_extension_download_url_from_ext_path(ext_path:", "up the extension paths by altering their name. Basic functionality is to get", "or it's raw data (dict / list). :type json_data: typing.Union[typing.Dict[str, str], Path] :param", "None: versionize = True extension_paths = parse_extensions_json(json_data) async with aiohttp.ClientSession() as session: if", "Download an extension according to the given parameters. 
When one needs to be", "json_data: typing.Union[typing.Dict[str, str], Path], save_path: Path, *, real_name: typing.Optional[bool] = None, versionize: typing.Optional[bool]", "str \"\"\" return MARKETPLACE_DOWNLOAD_LINK.format( extension_name=extension_name, publisher_name=publisher_name, version=version ) def _build_extension_download_url_from_ext_path(ext_path: ExtensionPath) -> str:", ":rtype: None \"\"\" publisher_name, extension_name = extension_id.split('.') await _download_extension(session, extension_name, publisher_name, version, save_path)", "root_dict: The current \"root\" of our config. :type root_dict: typing.Dict[str, typing.Union[str, typing.Dict]] :raises", "to. :type save_path: Path :return: None. :rtype: None \"\"\" logger.info(f'Downloading {extension_name}...') url =", "A given key was neither a str or a dict. :return: List of", "wether the data provided was a Path or not and act accordingly: If", "get_extension_version(session, ext_path.extension_id) for ext_path in extension_paths ] versions = await asyncio.gather(*get_version_tasks) for ext_path,", "= version async def patch_extension_paths( session: aiohttp.ClientSession, extension_paths: typing.List[ExtensionPath], *, versionize: bool =", "session: aiohttp.ClientSession, extension_paths: typing.List[ExtensionPath] ) -> None: \"\"\" Add the `version` attributes to", "an extension according to the given parameters. :param session: An aiohttp session object", "give it it's \"path\" down the hierarchy. :param root_dict: The current \"root\" of", "Path): with json_data.open() as json_file: json_data = json.load(json_file)['extensions'] return _recursive_parse_to_dict(json_data) async def get_extension_version(session:", "extension_name = ext_path.extension_id.split('.') return _build_extension_download_url(extension_name, publisher_name, ext_path.version) async def _download_extension( session: aiohttp.ClientSession, extension_name:", ":param save_path: Save path for all the downloaded VSCode binaries. :type save_path: Path", "TypeError: A given key was neither a str or a dict. :return: List", "ExtensionPath: \"\"\" Dataclass for storing info regarding a certain VSCode extension. \"\"\" path:", "tiny bit more verbose than the `by_id` version. 
:param session: An aiohttp session", ":rtype: typing.List[ExtensionPath] \"\"\" if isinstance(json_data, Path): with json_data.open() as json_file: json_data = json.load(json_file)['extensions']", "_build_extension_download_url_from_ext_path(ext_path)) for ext_path in extension_paths ] original_filenames = await asyncio.gather(*real_name_tasks) for filename, ext_path", "typing.Union[typing.Dict[str, str], Path], save_path: Path, *, real_name: typing.Optional[bool] = None, versionize: typing.Optional[bool] =", "a key is a string, create a spec object from it and give", "None, ) -> None: \"\"\" Parse the given json data and download the", "Regex used to extract the exact version of an extension from it's marketplace", "extension_name: str, publisher_name: str, version: str ) -> str: \"\"\" Build the download", "= [] for ext_path in extension_paths: extension_full_save_path = save_path / ext_path.path.with_suffix('.vsix') extension_full_save_path.parent.mkdir(parents=True, exist_ok=True)", "\"\"\" Decide wether the data provided was a Path or not and act", "ext_path, version in zip(extension_paths, versions): ext_path.version = version async def patch_extension_paths( session: aiohttp.ClientSession,", "extension_paths ] original_filenames = await asyncio.gather(*real_name_tasks) for filename, ext_path in zip(original_filenames, extension_paths): ext_path.path", "our config. :type root_dict: typing.Dict[str, typing.Union[str, typing.Dict]] :raises ValueError: A given key had", "True if versionize is None: versionize = True extension_paths = parse_extensions_json(json_data) async with", "from the initial config. :rtype: typing.List[ExtensionPath] \"\"\" path_list = [] for key, value", "str, version: str ) -> str: \"\"\" Build the download url for the", "Download an extension according to the given parameters. :param session: An aiohttp session", ":return: String of the extension's latest version. :rtype: str \"\"\" logger.debug(f'Requesting version of", ":return: List of spec objects parsed from the initial config. :rtype: typing.List[ExtensionPath] \"\"\"", "version of. :type extension_id: str :raises ValueError: Can't find the extension version. :return:", "is a string, create a spec object from it and give it it's", "Build the download url for the given parameters. Just a shortcut for the", "True, ) -> None: \"\"\" Fix up the extension paths by altering their", "or dict.') return path_list def parse_extensions_json( json_data: typing.Union[typing.Dict[str, str], Path], ) -> typing.List[ExtensionPath]:", "the extensions, defaults to None (True) :type real_name: typing.Optional[bool], optional :param versionize: Wether", "string, create a spec object from it and give it it's \"path\" down", "extensions into the save path. :param json_data: Either a path to a json", "Path] :param save_path: Save path for all the downloaded VSCode binaries. :type save_path:", "spec objects parsed from the initial config. :rtype: typing.List[ExtensionPath] \"\"\" path_list = []", "and delve one level deeper into the value. If the value of a", "await asyncio.gather(*get_version_tasks) for ext_path, version in zip(extension_paths, versions): ext_path.version = version async def", "current version of the extensions, has no effect without `real_name`, defaults to None", "of our config. :type root_dict: typing.Dict[str, typing.Union[str, typing.Dict]] :raises ValueError: A given key", "extensions. 
:rtype: typing.List[ExtensionPath] \"\"\" if isinstance(json_data, Path): with json_data.open() as json_file: json_data =", "-> None: \"\"\" Fix up the extension paths by altering their name. Basic", "a Path, open it and then do the same thing. :param json_data: Either", "is a dict, treat it like a directory and delve one level deeper", "was a Path or not and act accordingly: If it's valid json format", "version number. :param session: An aiohttp session object to use. :type session: aiohttp.ClientSession", "downloaded VSCode binaries. :type save_path: Path :param real_name: Wether to patch the real", "logger.debug(f'Requesting version of extension {extension_id}...') url = MARKETPLACE_PAGE_LINK.format(extension_id=extension_id) try: text: str = await", "def get_extension_version(session: aiohttp.ClientSession, extension_id: str) -> str: \"\"\" Get the latest version of", "async with aiohttp.ClientSession() as session: if real_name: await patch_extension_paths(session, extension_paths, versionize=versionize) download_extension_tasks =", ":rtype: None \"\"\" if real_name is None: real_name = True if versionize is", "session: aiohttp.ClientSession :param extension_name: Desired extension name. :type extension_name: str :param publisher_name: Desired", "altering their name. Basic functionality is to get the real names of extensions.", "it and return a list of specs. If it's a Path, open it", "formatted download url. :rtype: str \"\"\" return MARKETPLACE_DOWNLOAD_LINK.format( extension_name=extension_name, publisher_name=publisher_name, version=version ) def", ":type root_dict: typing.Dict[str, typing.Union[str, typing.Dict]] :raises ValueError: A given key had an empty", "existing objects. :rtype: None \"\"\" get_version_tasks = [ get_extension_version(session, ext_path.extension_id) for ext_path in", "extension_paths: typing.List[ExtensionPath] :param versionize: Wether to append version names to the paths, defaults", "given json data and download the given VSCode extensions into the save path.", "as session: if real_name: await patch_extension_paths(session, extension_paths, versionize=versionize) download_extension_tasks = [] for ext_path", "to use. :type session: aiohttp.ClientSession :param extension_paths: List of extension spec objects to", "save path. :param json_data: Either a path to a json config file or", "key was neither a str or a dict. :return: List of spec objects", "typing.List[ExtensionPath] \"\"\" if isinstance(json_data, Path): with json_data.open() as json_file: json_data = json.load(json_file)['extensions'] return", ":type session: aiohttp.ClientSession :param extension_id: Desired marketplace extension to get the version of.", "url for the given parameters. Just a shortcut for the string formatting. :param", "use. :type session: aiohttp.ClientSession :param extension_id: Desired marketplace extension to get the version", "to use. :type session: aiohttp.ClientSession :param extension_id: Desired marketplace extension to get the", "(True) :type real_name: typing.Optional[bool], optional :param versionize: Wether to patch the current version", "formatting. :param extension_name: Desired extension name. :type extension_name: str :param publisher_name: Desired extension", "a tiny bit more verbose than the `by_id` version. 
:param session: An aiohttp", "str = await get_request(session, url, return_type=str) match = re.search(r'\"Version\":\"(.*?)\"', text) if not match:", "for ext_path, version in zip(extension_paths, versions): ext_path.version = version async def patch_extension_paths( session:", "name. :type publisher_name: str :param version: Desired extension version. :type version: str :return:", "publisher_name: str, version: str ) -> str: \"\"\" Build the download url for", "publisher's name. :type publisher_name: str :param version: Desired extension version. :type version: str", "the existing objects. :rtype: None \"\"\" get_version_tasks = [ get_extension_version(session, ext_path.extension_id) for ext_path", "value. :raises TypeError: A given key was neither a str or a dict.", "objects to patch. :type extension_paths: typing.List[ExtensionPath] :param versionize: Wether to append version names", "async def get_extension_version(session: aiohttp.ClientSession, extension_id: str) -> str: \"\"\" Get the latest version", "list). :type json_data: typing.Union[typing.Dict[str, str], Path] :param save_path: Save path for all the", "publisher_name, ext_path.version) async def _download_extension( session: aiohttp.ClientSession, extension_name: str, publisher_name: str, version: str,", "url. :rtype: str \"\"\" return MARKETPLACE_DOWNLOAD_LINK.format( extension_name=extension_name, publisher_name=publisher_name, version=version ) def _build_extension_download_url_from_ext_path(ext_path: ExtensionPath)", "None: \"\"\" Parse the given json data and download the given VSCode extensions", "VERSION_REGEX = re.compile(r'\"Version\":\"(.*?)\"') @dataclasses.dataclass class ExtensionPath: \"\"\" Dataclass for storing info regarding a", "str: \"\"\" Get the latest version of an extension on the marketplace. :param", "string linking to the marketplace page of some extension. MARKETPLACE_PAGE_LINK = ''' https://marketplace.visualstudio.com/items?itemName={extension_id}", "object describing the desired extension. :type ext_path: ExtensionPath :return: The formatted download url.", "path for all the downloaded VSCode binaries. :type save_path: Path :param real_name: Wether", "= ''' https://marketplace.visualstudio.com/items?itemName={extension_id} '''.strip() # Regex used to extract the exact version of", "= None, versionize: typing.Optional[bool] = None, ) -> None: \"\"\" Parse the given", "patch. :type extension_paths: typing.List[ExtensionPath] :param versionize: Wether to append version names to the", "an extension according to the given parameters. When one needs to be a", "marketplace extension to get the version of. :type extension_id: str :raises ValueError: Can't", "according to the given parameters. :param session: An aiohttp session object to use.", "str) -> str: \"\"\" Get the latest version of an extension on the", "config data: If the value of a key is a dict, treat it", "or it's raw data (dict / list). :type json_data: typing.Union[typing.Dict[str, str], Path] :return:", "save_path: Path, *, real_name: typing.Optional[bool] = None, versionize: typing.Optional[bool] = None, ) ->", "aiohttp session object to use. :type session: aiohttp.ClientSession :param extension_name: Desired extension name.", "object to use. :type session: aiohttp.ClientSession :param extension_id: Desired extension ID. :type extension_id:", "the string formatting. :param extension_name: Desired extension name. 
:type extension_name: str :param publisher_name:", "if versionize: await versionize_extension_paths(session, extension_paths) real_name_tasks = [ get_original_filename(session, _build_extension_download_url_from_ext_path(ext_path)) for ext_path in", "extension version. :return: String of the extension's latest version. :rtype: str \"\"\" logger.debug(f'Requesting", "\"\"\" Build the download url for the given parameters. Just a shortcut for", "versionize: bool = True, ) -> None: \"\"\" Fix up the extension paths", "/ ext_path.path.with_suffix('.vsix') extension_full_save_path.parent.mkdir(parents=True, exist_ok=True) download_extension_tasks.append( download_extension_by_id( session, ext_path.extension_id, ext_path.version, extension_full_save_path ) ) await", "json_data.open() as json_file: json_data = json.load(json_file)['extensions'] return _recursive_parse_to_dict(json_data) async def get_extension_version(session: aiohttp.ClientSession, extension_id:", "a json config file or it's raw data (dict / list). :type json_data:", "extension_paths ] versions = await asyncio.gather(*get_version_tasks) for ext_path, version in zip(extension_paths, versions): ext_path.version", "session: aiohttp.ClientSession :param extension_id: Desired extension ID. :type extension_id: str :param version: Desired", "extension on the marketplace. :param session: An aiohttp session object to use. :type", "empty.') path_list.append(ExtensionPath(Path(key) / f'{value}', value)) elif isinstance(value, dict): for ext_path in _recursive_parse_to_dict(value): ext_path.path", "versionize is None: versionize = True extension_paths = parse_extensions_json(json_data) async with aiohttp.ClientSession() as", "to \\'latest\\'...') version = 'latest' logger.debug(f'Extension {extension_id} is of version {version}.') return version", "describing the desired extension. :type ext_path: ExtensionPath :return: The formatted download url. :rtype:", "the value of a key is a string, create a spec object from", "Path], save_path: Path, *, real_name: typing.Optional[bool] = None, versionize: typing.Optional[bool] = None, )", "in extension_paths: extension_full_save_path = save_path / ext_path.path.with_suffix('.vsix') extension_full_save_path.parent.mkdir(parents=True, exist_ok=True) download_extension_tasks.append( download_extension_by_id( session, ext_path.extension_id,", "name. Basic functionality is to get the real names of extensions. Can also", "the given extensions. :rtype: typing.List[ExtensionPath] \"\"\" if isinstance(json_data, Path): with json_data.open() as json_file:", "*, real_name: typing.Optional[bool] = None, versionize: typing.Optional[bool] = None, ) -> None: \"\"\"", "act accordingly: If it's valid json format data, parse it and return a", "ext_path in _recursive_parse_to_dict(value): ext_path.path = Path(key, ext_path.path) path_list.append(ext_path) else: raise TypeError(f'Value for key", "Get the latest version of an extension on the marketplace. 
:param session: An", "parse_extensions_json( json_data: typing.Union[typing.Dict[str, str], Path], ) -> typing.List[ExtensionPath]: \"\"\" Decide wether the data", "data provided was a Path or not and act accordingly: If it's valid", "logger.warning('Can\\'t get extension version, setting version to \\'latest\\'...') version = 'latest' logger.debug(f'Extension {extension_id}", "real_name: Wether to patch the real filenames of the extensions, defaults to None", "\"\"\" if real_name is None: real_name = True if versionize is None: versionize", "str, version: str, save_path: Path, ) -> None: \"\"\" Download an extension according", "versionize: typing.Optional[bool], optional :return: None. :rtype: None \"\"\" if real_name is None: real_name", "loguru import logger from .utils import download_url, get_request, get_original_filename # Format string linking", "session object to use. :type session: aiohttp.ClientSession :param extension_id: Desired marketplace extension to", "get_version_tasks = [ get_extension_version(session, ext_path.extension_id) for ext_path in extension_paths ] versions = await", "versionize_extension_paths(session, extension_paths) real_name_tasks = [ get_original_filename(session, _build_extension_download_url_from_ext_path(ext_path)) for ext_path in extension_paths ] original_filenames", "a key is a dict, treat it like a directory and delve one", "import re import typing from pathlib import Path import aiohttp from loguru import", ") -> typing.List[ExtensionPath]: \"\"\" Decide wether the data provided was a Path or", "of spec objects parsed from the initial config. :rtype: typing.List[ExtensionPath] \"\"\" path_list =", "{save_path}.') async def download_extension_by_id( session: aiohttp.ClientSession, extension_id: str, version: str, save_path: Path )", "value: raise ValueError(f'Value for key {key} was empty.') path_list.append(ExtensionPath(Path(key) / f'{value}', value)) elif", "shortcut for the string formatting. :param extension_name: Desired extension name. :type extension_name: str", "aiohttp session object to use. :type session: aiohttp.ClientSession :param extension_id: Desired extension ID.", "Desired extension ID. :type extension_id: str :param version: Desired extension version. :type version:", "version in zip(extension_paths, versions): ext_path.version = version async def patch_extension_paths( session: aiohttp.ClientSession, extension_paths:", "= extension_id.split('.') await _download_extension(session, extension_name, publisher_name, version, save_path) def _recursive_parse_to_dict( root_dict: typing.Dict[str, typing.Union[str,", ":rtype: str \"\"\" logger.debug(f'Requesting version of extension {extension_id}...') url = MARKETPLACE_PAGE_LINK.format(extension_id=extension_id) try: text:", "str], Path] :param save_path: Save path for all the downloaded VSCode binaries. :type", "of spec objects describing the given extensions. :rtype: typing.List[ExtensionPath] \"\"\" if isinstance(json_data, Path):", "None, this patches the existing objects. :rtype: None \"\"\" if versionize: await versionize_extension_paths(session,", "key, value in root_dict.items(): if isinstance(value, str): if not value: raise ValueError(f'Value for", "version: Desired extension version. :type version: str :param save_path: Save path to downloaded", "the latest version of an extension on the marketplace. 
:param session: An aiohttp", "Path import aiohttp from loguru import logger from .utils import download_url, get_request, get_original_filename", "create a spec object from it and give it it's \"path\" down the", "json_data: typing.Union[typing.Dict[str, str], Path], ) -> typing.List[ExtensionPath]: \"\"\" Decide wether the data provided", "real filenames of the extensions, defaults to None (True) :type real_name: typing.Optional[bool], optional", "= save_path / ext_path.path.with_suffix('.vsix') extension_full_save_path.parent.mkdir(parents=True, exist_ok=True) download_extension_tasks.append( download_extension_by_id( session, ext_path.extension_id, ext_path.version, extension_full_save_path )", "= ext_path.extension_id.split('.') return _build_extension_download_url(extension_name, publisher_name, ext_path.version) async def _download_extension( session: aiohttp.ClientSession, extension_name: str,", "not match: raise ValueError('Extension marketplace page data doesn\\'t contain a version.') version =", "logger.info(f'Downloaded {extension_name} to {save_path}.') async def download_extension_by_id( session: aiohttp.ClientSession, extension_id: str, version: str,", ") -> None: \"\"\" Download an extension according to the given parameters. :param", "\"\"\" publisher_name, extension_name = extension_id.split('.') await _download_extension(session, extension_name, publisher_name, version, save_path) def _recursive_parse_to_dict(", "the extension's latest version. :rtype: str \"\"\" logger.debug(f'Requesting version of extension {extension_id}...') url", "marketplace page of some extension. MARKETPLACE_PAGE_LINK = ''' https://marketplace.visualstudio.com/items?itemName={extension_id} '''.strip() # Regex used", "text) if not match: raise ValueError('Extension marketplace page data doesn\\'t contain a version.')", "download_extension_by_id( session: aiohttp.ClientSession, extension_id: str, version: str, save_path: Path ) -> None: \"\"\"", "If the value of a key is a dict, treat it like a", "an extension from it's marketplace page. VERSION_REGEX = re.compile(r'\"Version\":\"(.*?)\"') @dataclasses.dataclass class ExtensionPath: \"\"\"", "this patches the existing objects. :rtype: None \"\"\" get_version_tasks = [ get_extension_version(session, ext_path.extension_id)", "typing.List[ExtensionPath] :param versionize: Wether to append version names to the paths, defaults to", "paths, defaults to True :type versionize: bool, optional :return: None, this patches the", "ID. :type extension_id: str :param version: Desired extension version. :type version: str :param", "get_extension_version(session: aiohttp.ClientSession, extension_id: str) -> str: \"\"\" Get the latest version of an", "defaults to None (True) :type real_name: typing.Optional[bool], optional :param versionize: Wether to patch", "versionize: Wether to patch the current version of the extensions, has no effect", "# Extension ID. version: str = 'latest' # Extension version. def _build_extension_download_url( extension_name:", ") -> None: \"\"\" Download an extension according to the given parameters. When", "in extension_paths ] original_filenames = await asyncio.gather(*real_name_tasks) for filename, ext_path in zip(original_filenames, extension_paths):", "] original_filenames = await asyncio.gather(*real_name_tasks) for filename, ext_path in zip(original_filenames, extension_paths): ext_path.path =", "key had an empty value. 
:raises TypeError: A given key was neither a", "extension_paths, versionize=versionize) download_extension_tasks = [] for ext_path in extension_paths: extension_full_save_path = save_path /", "given parameters. :param ext_path: A spec object describing the desired extension. :type ext_path:", "pathlib import Path import aiohttp from loguru import logger from .utils import download_url,", "and return a list of specs. If it's a Path, open it and", "the given parameters. When one needs to be a tiny bit more verbose", "desired extension to. :type save_path: Path :return: None. :rtype: None \"\"\" logger.info(f'Downloading {extension_name}...')", "= 'latest' logger.debug(f'Extension {extension_id} is of version {version}.') return version async def versionize_extension_paths(", "get_original_filename(session, _build_extension_download_url_from_ext_path(ext_path)) for ext_path in extension_paths ] original_filenames = await asyncio.gather(*real_name_tasks) for filename,", "await download_url(session, url, save_path, return_type=bytes) logger.info(f'Downloaded {extension_name} to {save_path}.') async def download_extension_by_id( session:", "version of the extensions, has no effect without `real_name`, defaults to None (True)", "MARKETPLACE_DOWNLOAD_LINK = ''' https://marketplace.visualstudio.com/_apis/public/gallery/publishers/{publisher_name}/vsextensions/{extension_name}/{version}/vspackage '''.strip() # Format string linking to the marketplace page", "{key} was neither str or dict.') return path_list def parse_extensions_json( json_data: typing.Union[typing.Dict[str, str],", "extension ID. :type extension_id: str :param version: Desired extension version. :type version: str", "json format data, parse it and return a list of specs. If it's", "names to the paths, defaults to True :type versionize: bool, optional :return: None,", "real_name: typing.Optional[bool] = None, versionize: typing.Optional[bool] = None, ) -> None: \"\"\" Parse", ":param save_path: Save path to downloaded the desired extension to. :type save_path: Path", "all the downloaded VSCode binaries. :type save_path: Path :param real_name: Wether to patch", "= re.search(r'\"Version\":\"(.*?)\"', text) if not match: raise ValueError('Extension marketplace page data doesn\\'t contain", ":type session: aiohttp.ClientSession :param extension_id: Desired extension ID. :type extension_id: str :param version:", "an extension on the marketplace. :param session: An aiohttp session object to use.", "re.search(r'\"Version\":\"(.*?)\"', text) if not match: raise ValueError('Extension marketplace page data doesn\\'t contain a", "or a dict. :return: List of spec objects parsed from the initial config.", "their name. Basic functionality is to get the real names of extensions. Can", "real_name = True if versionize is None: versionize = True extension_paths = parse_extensions_json(json_data)", "save_path: Path :return: None. :rtype: None \"\"\" publisher_name, extension_name = extension_id.split('.') await _download_extension(session,", "had an empty value. :raises TypeError: A given key was neither a str", ":rtype: typing.List[ExtensionPath] \"\"\" path_list = [] for key, value in root_dict.items(): if isinstance(value,", "describing the given extensions. :rtype: typing.List[ExtensionPath] \"\"\" if isinstance(json_data, Path): with json_data.open() as", "use. :type session: aiohttp.ClientSession :param extension_name: Desired extension name. 
:type extension_name: str :param", "in extension_paths ] versions = await asyncio.gather(*get_version_tasks) for ext_path, version in zip(extension_paths, versions):", "download_extension_tasks = [] for ext_path in extension_paths: extension_full_save_path = save_path / ext_path.path.with_suffix('.vsix') extension_full_save_path.parent.mkdir(parents=True,", "an empty value. :raises TypeError: A given key was neither a str or", "the download url for the given parameters. Just a shortcut for the string", "the download url for the given parameters. :param ext_path: A spec object describing", "version = 'latest' logger.debug(f'Extension {extension_id} is of version {version}.') return version async def", "= ''' https://marketplace.visualstudio.com/_apis/public/gallery/publishers/{publisher_name}/vsextensions/{extension_name}/{version}/vspackage '''.strip() # Format string linking to the marketplace page of", "level deeper into the value. If the value of a key is a", ":param version: Desired extension version. :type version: str :param save_path: Save path to", "When one needs to be a tiny bit more verbose than the `by_id`", "this patches the existing objects. :rtype: None \"\"\" if versionize: await versionize_extension_paths(session, extension_paths)", "the `version` attributes to the extensions spec objects. :param session: An aiohttp session", "None: \"\"\" Download an extension according to the given parameters. When one needs", "of the extensions, defaults to None (True) :type real_name: typing.Optional[bool], optional :param versionize:", "is to get the real names of extensions. Can also append the current", ":type extension_id: str :raises ValueError: Can't find the extension version. :return: String of", "key is a string, create a spec object from it and give it", "Add the `version` attributes to the extensions spec objects. :param session: An aiohttp", "of a key is a dict, treat it like a directory and delve", "extension from it's marketplace page. VERSION_REGEX = re.compile(r'\"Version\":\"(.*?)\"') @dataclasses.dataclass class ExtensionPath: \"\"\" Dataclass", "-> str: \"\"\" Build the download url for the given parameters. :param ext_path:", "value of a key is a dict, treat it like a directory and", ":param real_name: Wether to patch the real filenames of the extensions, defaults to", "return MARKETPLACE_DOWNLOAD_LINK.format( extension_name=extension_name, publisher_name=publisher_name, version=version ) def _build_extension_download_url_from_ext_path(ext_path: ExtensionPath) -> str: \"\"\" Build", "given key was neither a str or a dict. :return: List of spec", "as json_file: json_data = json.load(json_file)['extensions'] return _recursive_parse_to_dict(json_data) async def get_extension_version(session: aiohttp.ClientSession, extension_id: str)", "Path ) -> None: \"\"\" Download an extension according to the given parameters.", "spec object describing the desired extension. :type ext_path: ExtensionPath :return: The formatted download", "\"\"\" logger.info(f'Downloading {extension_name}...') url = _build_extension_download_url(extension_name, publisher_name, version) await download_url(session, url, save_path, return_type=bytes)", "neither str or dict.') return path_list def parse_extensions_json( json_data: typing.Union[typing.Dict[str, str], Path], )", "string formatting. :param extension_name: Desired extension name. 
:type extension_name: str :param publisher_name: Desired", "async def patch_extension_paths( session: aiohttp.ClientSession, extension_paths: typing.List[ExtensionPath], *, versionize: bool = True, )", "Desired extension publisher's name. :type publisher_name: str :param version: Desired extension version. :type", "str :param save_path: Save path to downloaded the desired extension to. :type save_path:", "down the hierarchy. :param root_dict: The current \"root\" of our config. :type root_dict:", "page data doesn\\'t contain a version.') version = match.group(1) # The captured version", "to None (True) :type versionize: typing.Optional[bool], optional :return: None. :rtype: None \"\"\" if", "Desired extension version. :type version: str :param save_path: Save path to downloaded the", "raw data (dict / list). :type json_data: typing.Union[typing.Dict[str, str], Path] :param save_path: Save", "_download_extension( session: aiohttp.ClientSession, extension_name: str, publisher_name: str, version: str, save_path: Path, ) ->", "the exact version of an extension from it's marketplace page. VERSION_REGEX = re.compile(r'\"Version\":\"(.*?)\"')", "extension according to the given parameters. :param session: An aiohttp session object to", "version: str :param save_path: Save path to downloaded the desired extension to. :type", "Just a shortcut for the string formatting. :param extension_name: Desired extension name. :type", "for ext_path in extension_paths: extension_full_save_path = save_path / ext_path.path.with_suffix('.vsix') extension_full_save_path.parent.mkdir(parents=True, exist_ok=True) download_extension_tasks.append( download_extension_by_id(", "given config data: If the value of a key is a dict, treat", "json_data: Either a path to a json config file or it's raw data", "aiohttp.ClientSession, extension_id: str, version: str, save_path: Path ) -> None: \"\"\" Download an", "version: str, save_path: Path ) -> None: \"\"\" Download an extension according to", "extensions, defaults to None (True) :type real_name: typing.Optional[bool], optional :param versionize: Wether to", "\"\"\" if isinstance(json_data, Path): with json_data.open() as json_file: json_data = json.load(json_file)['extensions'] return _recursive_parse_to_dict(json_data)", "= json.load(json_file)['extensions'] return _recursive_parse_to_dict(json_data) async def get_extension_version(session: aiohttp.ClientSession, extension_id: str) -> str: \"\"\"", "/ f'{value}', value)) elif isinstance(value, dict): for ext_path in _recursive_parse_to_dict(value): ext_path.path = Path(key,", ":param extension_paths: List of extension spec objects to patch. :type extension_paths: typing.List[ExtensionPath] :return:", "to get the version of. :type extension_id: str :raises ValueError: Can't find the", "def _download_extension( session: aiohttp.ClientSession, extension_name: str, publisher_name: str, version: str, save_path: Path, )", "= match.group(1) # The captured version specifier. except Exception as error: logger.debug(error) logger.warning('Can\\'t", "to use. :type session: aiohttp.ClientSession :param extension_id: Desired extension ID. :type extension_id: str", "Exception as error: logger.debug(error) logger.warning('Can\\'t get extension version, setting version to \\'latest\\'...') version", "to the given parameters. :param session: An aiohttp session object to use. :type", ":type real_name: typing.Optional[bool], optional :param versionize: Wether to patch the current version of", "VSCode extension. 
\"\"\" path: Path # Extension final save path. extension_id: str #", ") -> None: \"\"\" Add the `version` attributes to the extensions spec objects.", "of extension {extension_id}...') url = MARKETPLACE_PAGE_LINK.format(extension_id=extension_id) try: text: str = await get_request(session, url,", "extension_id: str, version: str, save_path: Path ) -> None: \"\"\" Download an extension", "append version names to the paths, defaults to True :type versionize: bool, optional", "version. def _build_extension_download_url( extension_name: str, publisher_name: str, version: str ) -> str: \"\"\"", ") -> str: \"\"\" Build the download url for the given parameters. Just", "to. :type save_path: Path :return: None. :rtype: None \"\"\" publisher_name, extension_name = extension_id.split('.')", "Format string linking to the marketplace page of some extension. MARKETPLACE_PAGE_LINK = '''", "format data, parse it and return a list of specs. If it's a", "extension_name, publisher_name, version, save_path) def _recursive_parse_to_dict( root_dict: typing.Dict[str, typing.Union[str, typing.Dict]], ) -> typing.List[ExtensionPath]:", "aiohttp.ClientSession() as session: if real_name: await patch_extension_paths(session, extension_paths, versionize=versionize) download_extension_tasks = [] for", "json_data = json.load(json_file)['extensions'] return _recursive_parse_to_dict(json_data) async def get_extension_version(session: aiohttp.ClientSession, extension_id: str) -> str:", "parse it and return a list of specs. If it's a Path, open", "version {version}.') return version async def versionize_extension_paths( session: aiohttp.ClientSession, extension_paths: typing.List[ExtensionPath] ) ->", "typing.Dict[str, typing.Union[str, typing.Dict]], ) -> typing.List[ExtensionPath]: \"\"\" Recursively parse the given config data:", "Path(key, ext_path.path) path_list.append(ext_path) else: raise TypeError(f'Value for key {key} was neither str or", "match: raise ValueError('Extension marketplace page data doesn\\'t contain a version.') version = match.group(1)", ") -> None: \"\"\" Fix up the extension paths by altering their name.", "= True extension_paths = parse_extensions_json(json_data) async with aiohttp.ClientSession() as session: if real_name: await", ":param version: Desired extension version. :type version: str :return: The formatted download url.", "for filename, ext_path in zip(original_filenames, extension_paths): ext_path.path = ext_path.path.with_name(filename) async def download_extensions_json( json_data:", "thing. :param json_data: Either a path to a json config file or it's", "versionize: typing.Optional[bool] = None, ) -> None: \"\"\" Parse the given json data", "_recursive_parse_to_dict(value): ext_path.path = Path(key, ext_path.path) path_list.append(ext_path) else: raise TypeError(f'Value for key {key} was", "version. :type version: str :return: The formatted download url. :rtype: str \"\"\" return", "spec objects to patch. :type extension_paths: typing.List[ExtensionPath] :return: None, this patches the existing", "typing.List[ExtensionPath]: \"\"\" Recursively parse the given config data: If the value of a", "ext_path in extension_paths ] original_filenames = await asyncio.gather(*real_name_tasks) for filename, ext_path in zip(original_filenames,", "real_name_tasks = [ get_original_filename(session, _build_extension_download_url_from_ext_path(ext_path)) for ext_path in extension_paths ] original_filenames = await", "to the download of a vscode extension .vsix file. 
MARKETPLACE_DOWNLOAD_LINK = ''' https://marketplace.visualstudio.com/_apis/public/gallery/publishers/{publisher_name}/vsextensions/{extension_name}/{version}/vspackage", "session object to use. :type session: aiohttp.ClientSession :param extension_paths: List of extension spec", ":type session: aiohttp.ClientSession :param extension_paths: List of extension spec objects to patch. :type", "extension. MARKETPLACE_PAGE_LINK = ''' https://marketplace.visualstudio.com/items?itemName={extension_id} '''.strip() # Regex used to extract the exact", "dict. :return: List of spec objects parsed from the initial config. :rtype: typing.List[ExtensionPath]", "Path] :return: List of spec objects describing the given extensions. :rtype: typing.List[ExtensionPath] \"\"\"", "for all the downloaded VSCode binaries. :type save_path: Path :param real_name: Wether to", "setting version to \\'latest\\'...') version = 'latest' logger.debug(f'Extension {extension_id} is of version {version}.')", "name. :type publisher_name: str :param version: Desired extension version. :type version: str :param", ") def _build_extension_download_url_from_ext_path(ext_path: ExtensionPath) -> str: \"\"\" Build the download url for the", ":raises ValueError: Can't find the extension version. :return: String of the extension's latest", "a shortcut for the string formatting. :param extension_name: Desired extension name. :type extension_name:", "the downloaded VSCode binaries. :type save_path: Path :param real_name: Wether to patch the", "save_path / ext_path.path.with_suffix('.vsix') extension_full_save_path.parent.mkdir(parents=True, exist_ok=True) download_extension_tasks.append( download_extension_by_id( session, ext_path.extension_id, ext_path.version, extension_full_save_path ) )", ":param versionize: Wether to patch the current version of the extensions, has no", "real_name: await patch_extension_paths(session, extension_paths, versionize=versionize) download_extension_tasks = [] for ext_path in extension_paths: extension_full_save_path", "the same thing. :param json_data: Either a path to a json config file", "optional :return: None, this patches the existing objects. :rtype: None \"\"\" if versionize:", "captured version specifier. except Exception as error: logger.debug(error) logger.warning('Can\\'t get extension version, setting", "str :raises ValueError: Can't find the extension version. :return: String of the extension's", "{version}.') return version async def versionize_extension_paths( session: aiohttp.ClientSession, extension_paths: typing.List[ExtensionPath] ) -> None:", "it's raw data (dict / list). :type json_data: typing.Union[typing.Dict[str, str], Path] :param save_path:", "publisher_name, version, save_path) def _recursive_parse_to_dict( root_dict: typing.Dict[str, typing.Union[str, typing.Dict]], ) -> typing.List[ExtensionPath]: \"\"\"", "-> typing.List[ExtensionPath]: \"\"\" Decide wether the data provided was a Path or not", "names of extensions. Can also append the current version number. :param session: An", "str, publisher_name: str, version: str ) -> str: \"\"\" Build the download url", "ExtensionPath) -> str: \"\"\" Build the download url for the given parameters. :param", "initial config. :rtype: typing.List[ExtensionPath] \"\"\" path_list = [] for key, value in root_dict.items():", "extension version. :type version: str :param save_path: Save path to downloaded the desired", "be a tiny bit more verbose than the `by_id` version. 
import asyncio
import dataclasses
import json
import re
import typing
from pathlib import Path

import aiohttp
from loguru import logger

from .utils import download_url, get_request, get_original_filename

# Format string linking to the download of a VSCode extension .vsix file.
MARKETPLACE_DOWNLOAD_LINK = '''
https://marketplace.visualstudio.com/_apis/public/gallery/publishers/{publisher_name}/vsextensions/{extension_name}/{version}/vspackage
'''.strip()

# Format string linking to the marketplace page of some extension.
MARKETPLACE_PAGE_LINK = '''
https://marketplace.visualstudio.com/items?itemName={extension_id}
'''.strip()

# Regex used to extract the exact version of an extension from its marketplace page.
VERSION_REGEX = re.compile(r'"Version":"(.*?)"')


@dataclasses.dataclass
class ExtensionPath:
    """
    Dataclass for storing info regarding a certain VSCode extension.
    """

    path: Path  # Extension final save path.
    extension_id: str  # Extension ID.
    version: str = 'latest'  # Extension version.


def _build_extension_download_url(
    extension_name: str, publisher_name: str, version: str
) -> str:
    """
    Build the download url for the given parameters.
    Just a shortcut for the string formatting.

    :param extension_name: Desired extension name.
    :type extension_name: str
    :param publisher_name: Desired extension publisher's name.
    :type publisher_name: str
    :param version: Desired extension version.
    :type version: str
    :return: The formatted download url.
    :rtype: str
    """
    return MARKETPLACE_DOWNLOAD_LINK.format(
        extension_name=extension_name, publisher_name=publisher_name, version=version
    )


def _build_extension_download_url_from_ext_path(ext_path: ExtensionPath) -> str:
    """
    Build the download url for the given extension spec.

    :param ext_path: A spec object describing the desired extension.
    :type ext_path: ExtensionPath
    :return: The formatted download url.
    :rtype: str
    """
    publisher_name, extension_name = ext_path.extension_id.split('.')
    return _build_extension_download_url(extension_name, publisher_name, ext_path.version)
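
# A minimal sketch of how the URL helpers above compose, assuming the usual
# 'publisher.name' marketplace ID convention (e.g. 'ms-python.python'); the
# extension ID and version used here are illustrative only, not values taken
# from this module.
#
#     url = _build_extension_download_url_from_ext_path(
#         ExtensionPath(Path('python'), 'ms-python.python', version='2023.1.0')
#     )
#     # -> https://marketplace.visualstudio.com/_apis/public/gallery/publishers/
#     #    ms-python/vsextensions/python/2023.1.0/vspackage
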
async def _download_extension(
    session: aiohttp.ClientSession,
    extension_name: str,
    publisher_name: str,
    version: str,
    save_path: Path,
) -> None:
    """
    Download an extension according to the given parameters.
    For when one needs to be a tiny bit more verbose than the `by_id` version.

    :param session: An aiohttp session object to use.
    :type session: aiohttp.ClientSession
    :param extension_name: Desired extension name.
    :type extension_name: str
    :param publisher_name: Desired extension publisher's name.
    :type publisher_name: str
    :param version: Desired extension version.
    :type version: str
    :param save_path: Save path to download the desired extension to.
    :type save_path: Path
    :return: None.
    :rtype: None
    """
    logger.info(f'Downloading {extension_name}...')
    url = _build_extension_download_url(extension_name, publisher_name, version)
    await download_url(session, url, save_path, return_type=bytes)
    logger.info(f'Downloaded {extension_name} to {save_path}.')


async def download_extension_by_id(
    session: aiohttp.ClientSession, extension_id: str, version: str, save_path: Path
) -> None:
    """
    Download an extension according to the given parameters.

    :param session: An aiohttp session object to use.
    :type session: aiohttp.ClientSession
    :param extension_id: Desired extension ID.
    :type extension_id: str
    :param version: Desired extension version.
    :type version: str
    :param save_path: Save path to download the desired extension to.
    :type save_path: Path
    :return: None.
    :rtype: None
    """
    publisher_name, extension_name = extension_id.split('.')
    await _download_extension(session, extension_name, publisher_name, version, save_path)
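
# A minimal usage sketch for the public `download_extension_by_id` coroutine;
# the extension ID, version and output path below are illustrative assumptions,
# not values defined by this module.
#
#     async def _fetch_one() -> None:
#         async with aiohttp.ClientSession() as session:
#             await download_extension_by_id(
#                 session, 'ms-python.python', 'latest', Path('python.vsix')
#             )
#
#     asyncio.run(_fetch_one())
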
def _recursive_parse_to_dict(
    root_dict: typing.Dict[str, typing.Union[str, typing.Dict]],
) -> typing.List[ExtensionPath]:
    """
    Recursively parse the given config data:
    If the value of a key is a dict, treat it like a directory and delve
    one level deeper into the value.
    If the value of a key is a string, create a spec object from it and
    give it its "path" down the hierarchy.

    :param root_dict: The current "root" of our config.
    :type root_dict: typing.Dict[str, typing.Union[str, typing.Dict]]
    :raises ValueError: A given key had an empty value.
    :raises TypeError: A given key was neither a str nor a dict.
    :return: List of spec objects parsed from the initial config.
    :rtype: typing.List[ExtensionPath]
    """
    path_list = []
    for key, value in root_dict.items():
        if isinstance(value, str):
            if not value:
                raise ValueError(f'Value for key {key} was empty.')
            path_list.append(ExtensionPath(Path(key) / f'{value}', value))
        elif isinstance(value, dict):
            for ext_path in _recursive_parse_to_dict(value):
                ext_path.path = Path(key, ext_path.path)
                path_list.append(ext_path)
        else:
            raise TypeError(f'Value for key {key} was neither str nor dict.')
    return path_list


def parse_extensions_json(
    json_data: typing.Union[typing.Dict[str, str], Path],
) -> typing.List[ExtensionPath]:
    """
    Decide whether the data provided was a Path or not and act accordingly:
    If it's valid json format data, parse it and return a list of specs.
    If it's a Path, open it and then do the same thing.

    :param json_data: Either a path to a json config file or its raw data (dict / list).
    :type json_data: typing.Union[typing.Dict[str, str], Path]
    :return: List of spec objects describing the given extensions.
    :rtype: typing.List[ExtensionPath]
    """
    if isinstance(json_data, Path):
        with json_data.open() as json_file:
            json_data = json.load(json_file)['extensions']
    return _recursive_parse_to_dict(json_data)
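
# A minimal sketch of the nested config layout `_recursive_parse_to_dict`
# expects; the mapping below is an illustrative assumption, not a config
# shipped with this module. Nested keys become directories, leaf string
# values become extension IDs.
#
#     specs = parse_extensions_json({'python': {'core': 'ms-python.python'}})
#     # -> [ExtensionPath(path=Path('python/core/ms-python.python'),
#     #                   extension_id='ms-python.python', version='latest')]
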
async def get_extension_version(session: aiohttp.ClientSession, extension_id: str) -> str:
    """
    Get the latest version of an extension on the marketplace.

    :param session: An aiohttp session object to use.
    :type session: aiohttp.ClientSession
    :param extension_id: Desired marketplace extension to get the version of.
    :type extension_id: str
    :raises ValueError: Can't find the extension version.
    :return: String of the extension's latest version.
    :rtype: str
    """
    logger.debug(f'Requesting version of extension {extension_id}...')
    url = MARKETPLACE_PAGE_LINK.format(extension_id=extension_id)
    try:
        text: str = await get_request(session, url, return_type=str)
        match = VERSION_REGEX.search(text)
        if not match:
            raise ValueError('Extension marketplace page data doesn\'t contain a version.')
        version = match.group(1)  # The captured version specifier.
    except Exception as error:
        logger.debug(error)
        logger.warning('Can\'t get extension version, setting version to \'latest\'...')
        version = 'latest'
    logger.debug(f'Extension {extension_id} is of version {version}.')
    return version


async def versionize_extension_paths(
    session: aiohttp.ClientSession, extension_paths: typing.List[ExtensionPath]
) -> None:
    """
    Add the `version` attributes to the extension spec objects.

    :param session: An aiohttp session object to use.
    :type session: aiohttp.ClientSession
    :param extension_paths: List of extension spec objects to patch.
    :type extension_paths: typing.List[ExtensionPath]
    :return: None, this patches the existing objects.
    :rtype: None
    """
    get_version_tasks = [
        get_extension_version(session, ext_path.extension_id) for ext_path in extension_paths
    ]
    versions = await asyncio.gather(*get_version_tasks)
    for ext_path, version in zip(extension_paths, versions):
        ext_path.version = version


async def patch_extension_paths(
    session: aiohttp.ClientSession,
    extension_paths: typing.List[ExtensionPath],
    *,
    versionize: bool = True,
) -> None:
    """
    Fix up the extension paths by altering their name.
    Basic functionality is to get the real names of extensions.
    Can also append the current version number.

    :param session: An aiohttp session object to use.
    :type session: aiohttp.ClientSession
    :param extension_paths: List of extension spec objects to patch.
    :type extension_paths: typing.List[ExtensionPath]
    :param versionize: Whether to append version names to the paths, defaults to True.
    :type versionize: bool, optional
    :return: None, this patches the existing objects.
    :rtype: None
    """
    if versionize:
        await versionize_extension_paths(session, extension_paths)
    real_name_tasks = [
        get_original_filename(session, _build_extension_download_url_from_ext_path(ext_path))
        for ext_path in extension_paths
    ]
    original_filenames = await asyncio.gather(*real_name_tasks)
    for filename, ext_path in zip(original_filenames, extension_paths):
        ext_path.path = ext_path.path.with_name(filename)


async def download_extensions_json(
    json_data: typing.Union[typing.Dict[str, str], Path],
    save_path: Path,
    *,
    real_name: typing.Optional[bool] = None,
    versionize: typing.Optional[bool] = None,
) -> None:
    """
    Parse the given json data and download the given VSCode extensions into the save path.

    :param json_data: Either a path to a json config file or its raw data (dict / list).
    :type json_data: typing.Union[typing.Dict[str, str], Path]
    :param save_path: Save path for all the downloaded VSCode binaries.
    :type save_path: Path
    :param real_name: Whether to patch the real filenames of the extensions, defaults to None (True).
    :type real_name: typing.Optional[bool], optional
    :param versionize: Whether to patch the current version of the extensions,
        has no effect without `real_name`, defaults to None (True).
    :type versionize: typing.Optional[bool], optional
    :return: None.
    :rtype: None
    """
    if real_name is None:
        real_name = True
    if versionize is None:
        versionize = True
    extension_paths = parse_extensions_json(json_data)
    async with aiohttp.ClientSession() as session:
        if real_name:
            await patch_extension_paths(session, extension_paths, versionize=versionize)
        download_extension_tasks = []
        for ext_path in extension_paths:
            extension_full_save_path = save_path / ext_path.path.with_suffix('.vsix')
            extension_full_save_path.parent.mkdir(parents=True, exist_ok=True)
            download_extension_tasks.append(
                download_extension_by_id(
                    session, ext_path.extension_id, ext_path.version, extension_full_save_path
                )
            )
        await asyncio.gather(*download_extension_tasks)
:type extension_name: str", "\"\"\" Build the download url for the given parameters. :param ext_path: A spec", "extension spec objects to patch. :type extension_paths: typing.List[ExtensionPath] :param versionize: Wether to append", "doesn\\'t contain a version.') version = match.group(1) # The captured version specifier. except", "data and download the given VSCode extensions into the save path. :param json_data:", "the extensions spec objects. :param session: An aiohttp session object to use. :type", "has no effect without `real_name`, defaults to None (True) :type versionize: typing.Optional[bool], optional", "typing.Dict[str, typing.Union[str, typing.Dict]] :raises ValueError: A given key had an empty value. :raises", "patch the current version of the extensions, has no effect without `real_name`, defaults", ":rtype: str \"\"\" publisher_name, extension_name = ext_path.extension_id.split('.') return _build_extension_download_url(extension_name, publisher_name, ext_path.version) async def", "extension_id: Desired extension ID. :type extension_id: str :param version: Desired extension version. :type", "[ get_extension_version(session, ext_path.extension_id) for ext_path in extension_paths ] versions = await asyncio.gather(*get_version_tasks) for", "https://marketplace.visualstudio.com/_apis/public/gallery/publishers/{publisher_name}/vsextensions/{extension_name}/{version}/vspackage '''.strip() # Format string linking to the marketplace page of some extension.", "from it and give it it's \"path\" down the hierarchy. :param root_dict: The", "\"path\" down the hierarchy. :param root_dict: The current \"root\" of our config. :type", "None \"\"\" logger.info(f'Downloading {extension_name}...') url = _build_extension_download_url(extension_name, publisher_name, version) await download_url(session, url, save_path,", "isinstance(json_data, Path): with json_data.open() as json_file: json_data = json.load(json_file)['extensions'] return _recursive_parse_to_dict(json_data) async def", "raise ValueError('Extension marketplace page data doesn\\'t contain a version.') version = match.group(1) #", "dict.') return path_list def parse_extensions_json( json_data: typing.Union[typing.Dict[str, str], Path], ) -> typing.List[ExtensionPath]: \"\"\"", "object from it and give it it's \"path\" down the hierarchy. :param root_dict:", "if real_name is None: real_name = True if versionize is None: versionize =", "it it's \"path\" down the hierarchy. :param root_dict: The current \"root\" of our", "typing.List[ExtensionPath], *, versionize: bool = True, ) -> None: \"\"\" Fix up the", "True extension_paths = parse_extensions_json(json_data) async with aiohttp.ClientSession() as session: if real_name: await patch_extension_paths(session,", "the data provided was a Path or not and act accordingly: If it's", "get_request(session, url, return_type=str) match = re.search(r'\"Version\":\"(.*?)\"', text) if not match: raise ValueError('Extension marketplace", "a path to a json config file or it's raw data (dict /", "the extension version. :return: String of the extension's latest version. :rtype: str \"\"\"", "Path # Extension final save path. extension_id: str # Extension ID. version: str", "according to the given parameters. When one needs to be a tiny bit", "str :param version: Desired extension version. :type version: str :param save_path: Save path", "regarding a certain VSCode extension. 
\"\"\" path: Path # Extension final save path.", "Parse the given json data and download the given VSCode extensions into the", "version names to the paths, defaults to True :type versionize: bool, optional :return:", "page of some extension. MARKETPLACE_PAGE_LINK = ''' https://marketplace.visualstudio.com/items?itemName={extension_id} '''.strip() # Regex used to", "Extension final save path. extension_id: str # Extension ID. version: str = 'latest'", "is None: real_name = True if versionize is None: versionize = True extension_paths", "version=version ) def _build_extension_download_url_from_ext_path(ext_path: ExtensionPath) -> str: \"\"\" Build the download url for", "final save path. extension_id: str # Extension ID. version: str = 'latest' #", "current version number. :param session: An aiohttp session object to use. :type session:", "await versionize_extension_paths(session, extension_paths) real_name_tasks = [ get_original_filename(session, _build_extension_download_url_from_ext_path(ext_path)) for ext_path in extension_paths ]", "specifier. except Exception as error: logger.debug(error) logger.warning('Can\\'t get extension version, setting version to", "publisher_name, version) await download_url(session, url, save_path, return_type=bytes) logger.info(f'Downloaded {extension_name} to {save_path}.') async def", "url = MARKETPLACE_PAGE_LINK.format(extension_id=extension_id) try: text: str = await get_request(session, url, return_type=str) match =", "extension_id: str # Extension ID. version: str = 'latest' # Extension version. def", "and act accordingly: If it's valid json format data, parse it and return", "logger from .utils import download_url, get_request, get_original_filename # Format string linking to the", "the real filenames of the extensions, defaults to None (True) :type real_name: typing.Optional[bool],", "the marketplace page of some extension. MARKETPLACE_PAGE_LINK = ''' https://marketplace.visualstudio.com/items?itemName={extension_id} '''.strip() # Regex", "\"\"\" if versionize: await versionize_extension_paths(session, extension_paths) real_name_tasks = [ get_original_filename(session, _build_extension_download_url_from_ext_path(ext_path)) for ext_path", "root_dict: typing.Dict[str, typing.Union[str, typing.Dict]], ) -> typing.List[ExtensionPath]: \"\"\" Recursively parse the given config", "the value. If the value of a key is a string, create a", "ExtensionPath :return: The formatted download url. :rtype: str \"\"\" publisher_name, extension_name = ext_path.extension_id.split('.')", "objects to patch. :type extension_paths: typing.List[ExtensionPath] :return: None, this patches the existing objects.", "# Regex used to extract the exact version of an extension from it's", "objects describing the given extensions. :rtype: typing.List[ExtensionPath] \"\"\" if isinstance(json_data, Path): with json_data.open()", "publisher_name: str :param version: Desired extension version. :type version: str :param save_path: Save", "{key} was empty.') path_list.append(ExtensionPath(Path(key) / f'{value}', value)) elif isinstance(value, dict): for ext_path in", "version specifier. except Exception as error: logger.debug(error) logger.warning('Can\\'t get extension version, setting version", "\"\"\" Fix up the extension paths by altering their name. Basic functionality is", "in root_dict.items(): if isinstance(value, str): if not value: raise ValueError(f'Value for key {key}", "ext_path: A spec object describing the desired extension. 
:type ext_path: ExtensionPath :return: The", "extension paths by altering their name. Basic functionality is to get the real", "publisher_name: str, version: str, save_path: Path, ) -> None: \"\"\" Download an extension", "extension_paths: List of extension spec objects to patch. :type extension_paths: typing.List[ExtensionPath] :return: None,", ") -> typing.List[ExtensionPath]: \"\"\" Recursively parse the given config data: If the value", "def parse_extensions_json( json_data: typing.Union[typing.Dict[str, str], Path], ) -> typing.List[ExtensionPath]: \"\"\" Decide wether the", "it and give it it's \"path\" down the hierarchy. :param root_dict: The current", "Wether to append version names to the paths, defaults to True :type versionize:", "def versionize_extension_paths( session: aiohttp.ClientSession, extension_paths: typing.List[ExtensionPath] ) -> None: \"\"\" Add the `version`", "An aiohttp session object to use. :type session: aiohttp.ClientSession :param extension_paths: List of", "ext_path.path.with_name(filename) async def download_extensions_json( json_data: typing.Union[typing.Dict[str, str], Path], save_path: Path, *, real_name: typing.Optional[bool]", "also append the current version number. :param session: An aiohttp session object to", "aiohttp.ClientSession, extension_id: str) -> str: \"\"\" Get the latest version of an extension", "a string, create a spec object from it and give it it's \"path\"", "Dataclass for storing info regarding a certain VSCode extension. \"\"\" path: Path #", "= [] for key, value in root_dict.items(): if isinstance(value, str): if not value:", ":type session: aiohttp.ClientSession :param extension_name: Desired extension name. :type extension_name: str :param publisher_name:", "a directory and delve one level deeper into the value. If the value", "\"\"\" Parse the given json data and download the given VSCode extensions into", "it and then do the same thing. :param json_data: Either a path to", "then do the same thing. :param json_data: Either a path to a json", "no effect without `real_name`, defaults to None (True) :type versionize: typing.Optional[bool], optional :return:", "[] for key, value in root_dict.items(): if isinstance(value, str): if not value: raise", "(True) :type versionize: typing.Optional[bool], optional :return: None. :rtype: None \"\"\" if real_name is", "the given parameters. :param ext_path: A spec object describing the desired extension. :type", "\"\"\" return MARKETPLACE_DOWNLOAD_LINK.format( extension_name=extension_name, publisher_name=publisher_name, version=version ) def _build_extension_download_url_from_ext_path(ext_path: ExtensionPath) -> str: \"\"\"", "aiohttp.ClientSession, extension_paths: typing.List[ExtensionPath], *, versionize: bool = True, ) -> None: \"\"\" Fix", "data (dict / list). :type json_data: typing.Union[typing.Dict[str, str], Path] :param save_path: Save path", "the given config data: If the value of a key is a dict,", "\"\"\" Download an extension according to the given parameters. When one needs to", "Recursively parse the given config data: If the value of a key is", "exact version of an extension from it's marketplace page. VERSION_REGEX = re.compile(r'\"Version\":\"(.*?)\"') @dataclasses.dataclass", "not and act accordingly: If it's valid json format data, parse it and", ":type extension_paths: typing.List[ExtensionPath] :return: None, this patches the existing objects. 
:rtype: None \"\"\"", "extension_paths: typing.List[ExtensionPath] ) -> None: \"\"\" Add the `version` attributes to the extensions", "of extensions. Can also append the current version number. :param session: An aiohttp", "elif isinstance(value, dict): for ext_path in _recursive_parse_to_dict(value): ext_path.path = Path(key, ext_path.path) path_list.append(ext_path) else:", "extract the exact version of an extension from it's marketplace page. VERSION_REGEX =", "ext_path.path = Path(key, ext_path.path) path_list.append(ext_path) else: raise TypeError(f'Value for key {key} was neither", "list). :type json_data: typing.Union[typing.Dict[str, str], Path] :return: List of spec objects describing the", "text: str = await get_request(session, url, return_type=str) match = re.search(r'\"Version\":\"(.*?)\"', text) if not", "version async def versionize_extension_paths( session: aiohttp.ClientSession, extension_paths: typing.List[ExtensionPath] ) -> None: \"\"\" Add", "append the current version number. :param session: An aiohttp session object to use.", "extension_id: str :param version: Desired extension version. :type version: str :param save_path: Save", "or not and act accordingly: If it's valid json format data, parse it" ]
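# ------------------------------------------------------------------
# Minimal usage sketch for the downloader module above. Everything here is
# illustrative: the import name `vsix_downloader`, the file name
# 'extensions.json', and the example extension ids are assumptions, not part
# of the original module. The nested-dict config layout is inferred from
# _recursive_parse_to_dict (folders nest, leaves map a name to a
# "publisher.extension" id, all under a top-level "extensions" key):
#
#   {
#       "extensions": {
#           "python": {"core": "ms-python.python"},
#           "themes": "zhuangtongfa.material-theme"
#       }
#   }
#
import asyncio
from pathlib import Path

import vsix_downloader  # assumed import name for the module sketched above

asyncio.run(
    vsix_downloader.download_extensions_json(
        Path('extensions.json'),   # or the raw dict shown in the comment above
        Path('vsix_downloads'),    # .vsix files are written below this directory
        real_name=True,            # rename each file to its marketplace filename
        versionize=True,           # and append the resolved version number
    )
)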
[ "m[\"x\"] = 1 ``` \"\"\" ) ) assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter", "\"<ac:plain-text-body><![CDATA[\" \"cd $HOME\\n\" \"]]></ac:plain-text-body>\" \"</ac:structured-macro>\" ) in script.run() def test_code_block_avoid_escape(script): \"\"\" Avoid escaping", "'<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">bash</ac:parameter>' \"<ac:plain-text-body><![CDATA[\" \"cd $HOME\\n\" \"]]></ac:plain-text-body>\" \"</ac:structured-macro>\" ) in script.run()", "$HOME ``` \"\"\" ) ) assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">bash</ac:parameter>' \"<ac:plain-text-body><![CDATA[\"", "cd $HOME ``` \"\"\" ) ) assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">bash</ac:parameter>'", "sections. \"\"\" script.set_content( dedent( \"\"\" ```xml <![CDATA[TEST]]> ``` \"\"\" ) ) assert (", ") ) assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">python</ac:parameter>' \"<ac:plain-text-body><![CDATA[\" \"m = {}\\n\"", "dedent( \"\"\" ```yaml 'test': '<[{}]>' ``` \"\"\" ) ) assert ( '<ac:structured-macro ac:name=\"code\"", ") assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">python</ac:parameter>' \"<ac:plain-text-body><![CDATA[\" \"m = {}\\n\" 'm[\"x\"]", "ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">yaml</ac:parameter>' \"<ac:plain-text-body><![CDATA[\" \"'test': '<[{}]>'\\n\" \"]]></ac:plain-text-body>\" \"</ac:structured-macro>\" ) in script.run() def test_code_block_escape(script):", "``` \"\"\" ) ) assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">yaml</ac:parameter>' \"<ac:plain-text-body><![CDATA[\" \"'test':", "``` \"\"\" ) ) assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">python</ac:parameter>' \"<ac:plain-text-body><![CDATA[\" \"m", "( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">bash</ac:parameter>' \"<ac:plain-text-body><![CDATA[\" \"cd $HOME\\n\" \"]]></ac:plain-text-body>\" \"</ac:structured-macro>\" ) in", "test_code_block_escape(script): \"\"\" If code contains \"]]>\" (CDATA end), split it into multiple CDATA", "\"</ac:structured-macro>\" ) in script.run() def test_code_block_default_language(script): \"\"\" Test code block with a default", ") ) assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">yaml</ac:parameter>' \"<ac:plain-text-body><![CDATA[\" \"'test': '<[{}]>'\\n\" \"]]></ac:plain-text-body>\"", ") ) assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">bash</ac:parameter>' \"<ac:plain-text-body><![CDATA[\" \"cd $HOME\\n\" \"]]></ac:plain-text-body>\"", "'<ac:parameter ac:name=\"language\">yaml</ac:parameter>' \"<ac:plain-text-body><![CDATA[\" \"'test': '<[{}]>'\\n\" \"]]></ac:plain-text-body>\" \"</ac:structured-macro>\" ) in script.run() def test_code_block_escape(script): \"\"\"", "\"\"\" ``` cd $HOME ``` \"\"\" ) ) assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">'", "m = {} m[\"x\"] = 1 ``` \"\"\" ) ) assert ( '<ac:structured-macro", "```xml <![CDATA[TEST]]> ``` \"\"\" ) ) assert ( '<ac:structured-macro 
ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">xml</ac:parameter>'", "a default language. \"\"\" script.set_content( dedent( \"\"\" ``` cd $HOME ``` \"\"\" )", "\"'test': '<[{}]>'\\n\" \"]]></ac:plain-text-body>\" \"</ac:structured-macro>\" ) in script.run() def test_code_block_escape(script): \"\"\" If code contains", "in script.run() def test_code_block_default_language(script): \"\"\" Test code block with a default language. \"\"\"", "( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">python</ac:parameter>' \"<ac:plain-text-body><![CDATA[\" \"m = {}\\n\" 'm[\"x\"] = 1\\n'", "'<ac:parameter ac:name=\"language\">bash</ac:parameter>' \"<ac:plain-text-body><![CDATA[\" \"cd $HOME\\n\" \"]]></ac:plain-text-body>\" \"</ac:structured-macro>\" ) in script.run() def test_code_block_avoid_escape(script): \"\"\"", "<![CDATA[TEST]]> ``` \"\"\" ) ) assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">xml</ac:parameter>' \"<ac:plain-text-body><![CDATA[<![CDATA[TEST]]>]]&gt;<![CDATA[\\n]]></ac:plain-text-body>\"", ") in script.run() def test_code_block_avoid_escape(script): \"\"\" Avoid escaping code. \"\"\" script.set_content( dedent( \"\"\"", "end), split it into multiple CDATA sections. \"\"\" script.set_content( dedent( \"\"\" ```xml <![CDATA[TEST]]>", "ac:name=\"language\">python</ac:parameter>' \"<ac:plain-text-body><![CDATA[\" \"m = {}\\n\" 'm[\"x\"] = 1\\n' \"]]></ac:plain-text-body>\" \"</ac:structured-macro>\" ) in script.run()", "= {}\\n\" 'm[\"x\"] = 1\\n' \"]]></ac:plain-text-body>\" \"</ac:structured-macro>\" ) in script.run() def test_code_block_default_language(script): \"\"\"", "Avoid escaping code. \"\"\" script.set_content( dedent( \"\"\" ```yaml 'test': '<[{}]>' ``` \"\"\" )", "code contains \"]]>\" (CDATA end), split it into multiple CDATA sections. \"\"\" script.set_content(", "in script.run() def test_code_block_avoid_escape(script): \"\"\" Avoid escaping code. \"\"\" script.set_content( dedent( \"\"\" ```yaml", "\"\"\" If code contains \"]]>\" (CDATA end), split it into multiple CDATA sections.", "into multiple CDATA sections. \"\"\" script.set_content( dedent( \"\"\" ```xml <![CDATA[TEST]]> ``` \"\"\" )", "test_code_block_avoid_escape(script): \"\"\" Avoid escaping code. \"\"\" script.set_content( dedent( \"\"\" ```yaml 'test': '<[{}]>' ```", "test_code_block_default_language(script): \"\"\" Test code block with a default language. \"\"\" script.set_content( dedent( \"\"\"", "ac:name=\"language\">bash</ac:parameter>' \"<ac:plain-text-body><![CDATA[\" \"cd $HOME\\n\" \"]]></ac:plain-text-body>\" \"</ac:structured-macro>\" ) in script.run() def test_code_block_avoid_escape(script): \"\"\" Avoid", "(CDATA end), split it into multiple CDATA sections. \"\"\" script.set_content( dedent( \"\"\" ```xml", "multiple CDATA sections. \"\"\" script.set_content( dedent( \"\"\" ```xml <![CDATA[TEST]]> ``` \"\"\" ) )", "$HOME\\n\" \"]]></ac:plain-text-body>\" \"</ac:structured-macro>\" ) in script.run() def test_code_block_avoid_escape(script): \"\"\" Avoid escaping code. 
\"\"\"", "{} m[\"x\"] = 1 ``` \"\"\" ) ) assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">'", "\"<ac:plain-text-body><![CDATA[\" \"'test': '<[{}]>'\\n\" \"]]></ac:plain-text-body>\" \"</ac:structured-macro>\" ) in script.run() def test_code_block_escape(script): \"\"\" If code", ") in script.run() def test_code_block_escape(script): \"\"\" If code contains \"]]>\" (CDATA end), split", "test_code_block(script): \"\"\" Test code block. \"\"\" script.set_content( dedent( \"\"\" ```python m = {}", "If code contains \"]]>\" (CDATA end), split it into multiple CDATA sections. \"\"\"", "{}\\n\" 'm[\"x\"] = 1\\n' \"]]></ac:plain-text-body>\" \"</ac:structured-macro>\" ) in script.run() def test_code_block_default_language(script): \"\"\" Test", "1\\n' \"]]></ac:plain-text-body>\" \"</ac:structured-macro>\" ) in script.run() def test_code_block_default_language(script): \"\"\" Test code block with", "\"]]></ac:plain-text-body>\" \"</ac:structured-macro>\" ) in script.run() def test_code_block_default_language(script): \"\"\" Test code block with a", "dedent( \"\"\" ```python m = {} m[\"x\"] = 1 ``` \"\"\" ) )", "from textwrap import dedent def test_code_block(script): \"\"\" Test code block. \"\"\" script.set_content( dedent(", "default language. \"\"\" script.set_content( dedent( \"\"\" ``` cd $HOME ``` \"\"\" ) )", "textwrap import dedent def test_code_block(script): \"\"\" Test code block. \"\"\" script.set_content( dedent( \"\"\"", "script.set_content( dedent( \"\"\" ``` cd $HOME ``` \"\"\" ) ) assert ( '<ac:structured-macro", "script.run() def test_code_block_escape(script): \"\"\" If code contains \"]]>\" (CDATA end), split it into", "\"\"\" Avoid escaping code. \"\"\" script.set_content( dedent( \"\"\" ```yaml 'test': '<[{}]>' ``` \"\"\"", "'test': '<[{}]>' ``` \"\"\" ) ) assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">yaml</ac:parameter>'", "1 ``` \"\"\" ) ) assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">python</ac:parameter>' \"<ac:plain-text-body><![CDATA[\"", "import dedent def test_code_block(script): \"\"\" Test code block. \"\"\" script.set_content( dedent( \"\"\" ```python", "contains \"]]>\" (CDATA end), split it into multiple CDATA sections. \"\"\" script.set_content( dedent(", "assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">bash</ac:parameter>' \"<ac:plain-text-body><![CDATA[\" \"cd $HOME\\n\" \"]]></ac:plain-text-body>\" \"</ac:structured-macro>\" )", "block with a default language. \"\"\" script.set_content( dedent( \"\"\" ``` cd $HOME ```", "assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">yaml</ac:parameter>' \"<ac:plain-text-body><![CDATA[\" \"'test': '<[{}]>'\\n\" \"]]></ac:plain-text-body>\" \"</ac:structured-macro>\" )", "script.set_content( dedent( \"\"\" ```xml <![CDATA[TEST]]> ``` \"\"\" ) ) assert ( '<ac:structured-macro ac:name=\"code\"", "= 1 ``` \"\"\" ) ) assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">python</ac:parameter>'", "= 1\\n' \"]]></ac:plain-text-body>\" \"</ac:structured-macro>\" ) in script.run() def test_code_block_default_language(script): \"\"\" Test code block", "language. 
\"\"\" script.set_content( dedent( \"\"\" ``` cd $HOME ``` \"\"\" ) ) assert", "'<[{}]>' ``` \"\"\" ) ) assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">yaml</ac:parameter>' \"<ac:plain-text-body><![CDATA[\"", "\"m = {}\\n\" 'm[\"x\"] = 1\\n' \"]]></ac:plain-text-body>\" \"</ac:structured-macro>\" ) in script.run() def test_code_block_default_language(script):", "with a default language. \"\"\" script.set_content( dedent( \"\"\" ``` cd $HOME ``` \"\"\"", "\"\"\" Test code block. \"\"\" script.set_content( dedent( \"\"\" ```python m = {} m[\"x\"]", "( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">yaml</ac:parameter>' \"<ac:plain-text-body><![CDATA[\" \"'test': '<[{}]>'\\n\" \"]]></ac:plain-text-body>\" \"</ac:structured-macro>\" ) in", "\"]]>\" (CDATA end), split it into multiple CDATA sections. \"\"\" script.set_content( dedent( \"\"\"", "\"cd $HOME\\n\" \"]]></ac:plain-text-body>\" \"</ac:structured-macro>\" ) in script.run() def test_code_block_avoid_escape(script): \"\"\" Avoid escaping code.", "'<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">python</ac:parameter>' \"<ac:plain-text-body><![CDATA[\" \"m = {}\\n\" 'm[\"x\"] = 1\\n' \"]]></ac:plain-text-body>\"", "script.set_content( dedent( \"\"\" ```yaml 'test': '<[{}]>' ``` \"\"\" ) ) assert ( '<ac:structured-macro", "```yaml 'test': '<[{}]>' ``` \"\"\" ) ) assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter", "``` \"\"\" ) ) assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">bash</ac:parameter>' \"<ac:plain-text-body><![CDATA[\" \"cd", "'<[{}]>'\\n\" \"]]></ac:plain-text-body>\" \"</ac:structured-macro>\" ) in script.run() def test_code_block_escape(script): \"\"\" If code contains \"]]>\"", "Test code block. \"\"\" script.set_content( dedent( \"\"\" ```python m = {} m[\"x\"] =", "\"\"\" ```xml <![CDATA[TEST]]> ``` \"\"\" ) ) assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter", "dedent def test_code_block(script): \"\"\" Test code block. \"\"\" script.set_content( dedent( \"\"\" ```python m", "it into multiple CDATA sections. \"\"\" script.set_content( dedent( \"\"\" ```xml <![CDATA[TEST]]> ``` \"\"\"", "escaping code. \"\"\" script.set_content( dedent( \"\"\" ```yaml 'test': '<[{}]>' ``` \"\"\" ) )", "``` \"\"\" ) ) assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">xml</ac:parameter>' \"<ac:plain-text-body><![CDATA[<![CDATA[TEST]]>]]&gt;<![CDATA[\\n]]></ac:plain-text-body>\" \"</ac:structured-macro>\"", "Test code block with a default language. 
\"\"\" script.set_content( dedent( \"\"\" ``` cd", "\"\"\" script.set_content( dedent( \"\"\" ```yaml 'test': '<[{}]>' ``` \"\"\" ) ) assert (", "\"</ac:structured-macro>\" ) in script.run() def test_code_block_escape(script): \"\"\" If code contains \"]]>\" (CDATA end),", "'m[\"x\"] = 1\\n' \"]]></ac:plain-text-body>\" \"</ac:structured-macro>\" ) in script.run() def test_code_block_default_language(script): \"\"\" Test code", "ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">python</ac:parameter>' \"<ac:plain-text-body><![CDATA[\" \"m = {}\\n\" 'm[\"x\"] = 1\\n' \"]]></ac:plain-text-body>\" \"</ac:structured-macro>\"", "dedent( \"\"\" ``` cd $HOME ``` \"\"\" ) ) assert ( '<ac:structured-macro ac:name=\"code\"", "\"\"\" ```yaml 'test': '<[{}]>' ``` \"\"\" ) ) assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">'", "ac:name=\"language\">yaml</ac:parameter>' \"<ac:plain-text-body><![CDATA[\" \"'test': '<[{}]>'\\n\" \"]]></ac:plain-text-body>\" \"</ac:structured-macro>\" ) in script.run() def test_code_block_escape(script): \"\"\" If", "in script.run() def test_code_block_escape(script): \"\"\" If code contains \"]]>\" (CDATA end), split it", "def test_code_block_default_language(script): \"\"\" Test code block with a default language. \"\"\" script.set_content( dedent(", "\"\"\" script.set_content( dedent( \"\"\" ```xml <![CDATA[TEST]]> ``` \"\"\" ) ) assert ( '<ac:structured-macro", ") ) assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">xml</ac:parameter>' \"<ac:plain-text-body><![CDATA[<![CDATA[TEST]]>]]&gt;<![CDATA[\\n]]></ac:plain-text-body>\" \"</ac:structured-macro>\" ) in", "\"\"\" Test code block with a default language. \"\"\" script.set_content( dedent( \"\"\" ```", "code block with a default language. \"\"\" script.set_content( dedent( \"\"\" ``` cd $HOME", "``` cd $HOME ``` \"\"\" ) ) assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter", ") in script.run() def test_code_block_default_language(script): \"\"\" Test code block with a default language.", "= {} m[\"x\"] = 1 ``` \"\"\" ) ) assert ( '<ac:structured-macro ac:name=\"code\"", "split it into multiple CDATA sections. \"\"\" script.set_content( dedent( \"\"\" ```xml <![CDATA[TEST]]> ```", "code block. \"\"\" script.set_content( dedent( \"\"\" ```python m = {} m[\"x\"] = 1", "\"<ac:plain-text-body><![CDATA[\" \"m = {}\\n\" 'm[\"x\"] = 1\\n' \"]]></ac:plain-text-body>\" \"</ac:structured-macro>\" ) in script.run() def", ") assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">xml</ac:parameter>' \"<ac:plain-text-body><![CDATA[<![CDATA[TEST]]>]]&gt;<![CDATA[\\n]]></ac:plain-text-body>\" \"</ac:structured-macro>\" ) in script.run()", "\"\"\" ```python m = {} m[\"x\"] = 1 ``` \"\"\" ) ) assert", "\"\"\" ) ) assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">yaml</ac:parameter>' \"<ac:plain-text-body><![CDATA[\" \"'test': '<[{}]>'\\n\"", "CDATA sections. \"\"\" script.set_content( dedent( \"\"\" ```xml <![CDATA[TEST]]> ``` \"\"\" ) ) assert", "\"]]></ac:plain-text-body>\" \"</ac:structured-macro>\" ) in script.run() def test_code_block_avoid_escape(script): \"\"\" Avoid escaping code. 
\"\"\" script.set_content(", "\"\"\" script.set_content( dedent( \"\"\" ```python m = {} m[\"x\"] = 1 ``` \"\"\"", "script.set_content( dedent( \"\"\" ```python m = {} m[\"x\"] = 1 ``` \"\"\" )", "'<ac:parameter ac:name=\"language\">python</ac:parameter>' \"<ac:plain-text-body><![CDATA[\" \"m = {}\\n\" 'm[\"x\"] = 1\\n' \"]]></ac:plain-text-body>\" \"</ac:structured-macro>\" ) in", ") assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">bash</ac:parameter>' \"<ac:plain-text-body><![CDATA[\" \"cd $HOME\\n\" \"]]></ac:plain-text-body>\" \"</ac:structured-macro>\"", "\"</ac:structured-macro>\" ) in script.run() def test_code_block_avoid_escape(script): \"\"\" Avoid escaping code. \"\"\" script.set_content( dedent(", "assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">python</ac:parameter>' \"<ac:plain-text-body><![CDATA[\" \"m = {}\\n\" 'm[\"x\"] =", "script.run() def test_code_block_default_language(script): \"\"\" Test code block with a default language. \"\"\" script.set_content(", "\"]]></ac:plain-text-body>\" \"</ac:structured-macro>\" ) in script.run() def test_code_block_escape(script): \"\"\" If code contains \"]]>\" (CDATA", "'<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">yaml</ac:parameter>' \"<ac:plain-text-body><![CDATA[\" \"'test': '<[{}]>'\\n\" \"]]></ac:plain-text-body>\" \"</ac:structured-macro>\" ) in script.run()", "block. \"\"\" script.set_content( dedent( \"\"\" ```python m = {} m[\"x\"] = 1 ```", "\"\"\" ) ) assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">bash</ac:parameter>' \"<ac:plain-text-body><![CDATA[\" \"cd $HOME\\n\"", "\"\"\" ) ) assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">python</ac:parameter>' \"<ac:plain-text-body><![CDATA[\" \"m =", "dedent( \"\"\" ```xml <![CDATA[TEST]]> ``` \"\"\" ) ) assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">'", "def test_code_block(script): \"\"\" Test code block. \"\"\" script.set_content( dedent( \"\"\" ```python m =", "ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">python</ac:parameter>' \"<ac:plain-text-body><![CDATA[\" \"m = {}\\n\" 'm[\"x\"] = 1\\n' \"]]></ac:plain-text-body>\" \"</ac:structured-macro>\" )", "script.run() def test_code_block_avoid_escape(script): \"\"\" Avoid escaping code. \"\"\" script.set_content( dedent( \"\"\" ```yaml 'test':", "code. \"\"\" script.set_content( dedent( \"\"\" ```yaml 'test': '<[{}]>' ``` \"\"\" ) ) assert", "def test_code_block_escape(script): \"\"\" If code contains \"]]>\" (CDATA end), split it into multiple", "ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">bash</ac:parameter>' \"<ac:plain-text-body><![CDATA[\" \"cd $HOME\\n\" \"]]></ac:plain-text-body>\" \"</ac:structured-macro>\" ) in script.run() def", "ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">bash</ac:parameter>' \"<ac:plain-text-body><![CDATA[\" \"cd $HOME\\n\" \"]]></ac:plain-text-body>\" \"</ac:structured-macro>\" ) in script.run() def test_code_block_avoid_escape(script):", "\"\"\" script.set_content( dedent( \"\"\" ``` cd $HOME ``` \"\"\" ) ) assert (", "def test_code_block_avoid_escape(script): \"\"\" Avoid escaping code. 
\"\"\" script.set_content( dedent( \"\"\" ```yaml 'test': '<[{}]>'", "ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">yaml</ac:parameter>' \"<ac:plain-text-body><![CDATA[\" \"'test': '<[{}]>'\\n\" \"]]></ac:plain-text-body>\" \"</ac:structured-macro>\" ) in script.run() def", "```python m = {} m[\"x\"] = 1 ``` \"\"\" ) ) assert (", ") assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">yaml</ac:parameter>' \"<ac:plain-text-body><![CDATA[\" \"'test': '<[{}]>'\\n\" \"]]></ac:plain-text-body>\" \"</ac:structured-macro>\"", "\"\"\" ) ) assert ( '<ac:structured-macro ac:name=\"code\" ac:schema-version=\"1\">' '<ac:parameter ac:name=\"language\">xml</ac:parameter>' \"<ac:plain-text-body><![CDATA[<![CDATA[TEST]]>]]&gt;<![CDATA[\\n]]></ac:plain-text-body>\" \"</ac:structured-macro>\" )" ]
[ "= '' vars['slot_right_info'] = '' vars['user_support_header'] = _(u'Mögliche Aktionen') vars['sub_title'] = '%s: <i>%s</i>'", "% version.title v_text += version.text diff_list.append({ 'version': version.version, 'text_diff': textDiff(v_text, curr_text), 'user': version.owner.get_full_name(),", "'' vars['user_support_header'] = _(u'Mögliche Aktionen') vars['sub_title'] = '%s: <i>%s</i>' % (_(u'Versionen der Wiki-Seite'),", "angepasst werden. 0.01 17.03.2008 Beginn der Arbeit 0.02 18.03.2008 check_wiki_urls 0.03 20.03.2008 wikiitem_diff", "= item_container.last_modified.strftime('%d.%m.%Y %H:%M') return render_to_response ( 'app/wiki/show_version.html', vars ) # ----------------------------------------------------- def wikiitem_show(request,item_container):", "vars['title'] = parent.item.title vars['site_url'] = get_base_site_url() vars['versions'] = diff_list vars['org_text'] = textDiff(org_text, last_text)", "version.owner.get_full_name(), 'modified': version.modified.strftime('%d.%m.%Y %H:%M') }) if last_text.strip() == '': last_text = v_text curr_text", "def wikiitem_show(request,item_container): \"\"\" zeigt Wiki-Seite \"\"\" app_name = 'wikiitem' parent = item_container.get_parent() name", "die Versionen dieser Seite \"\"\" versions = get_page_versions(item_container) diff_list = [] org_text =", "item_container.item.text curr_text = last_text = '' for version in versions: v_text = '<h4>%s</h4>\\n'", "zeigt den Inhalt einer Wiki-Seite an Django content Management System <NAME> <EMAIL> Die", "dms.queries import get_base_site_url from dms.queries import get_user_by_id from dms.utils_form import get_item_vars_show from dms.utils_form", "get_role_by_user_path from dms.queries import get_base_site_url from dms.queries import get_user_by_id from dms.utils_form import get_item_vars_show", "Aktionen') vars['sub_title'] = '%s: <i>%s</i>' % (_(u'Versionen der Wiki-Seite'), vars['title']) vars['title'] = parent.item.title", "----------------------------------------------------- def wikiitem_diff(request,item_container): \"\"\" zeigt die Versionen dieser Seite \"\"\" versions = get_page_versions(item_container)", "from django.http import HttpResponseRedirect from django.utils.translation import ugettext as _ from dms.queries import", "get_item_vars_show from dms.utils_form import get_item_vars_edit from dms.utils_form import get_folderish_vars_show from dms.utils_base import show_link", "# dms-Funktionen ueberschreiben # ----------------------------------------------------- def wikiitem_diff(request,item_container): \"\"\" zeigt die Versionen dieser Seite", "der Wiki-Seite') content = diff_list parent = item_container.get_parent() name = item_container.item.name wiki_page =", "angezeigt 0.04 07.05.2008 diff wird jeweils gegenueber der letzten Version angezeigt \"\"\" import", "version.text diff_list.append({ 'version': version.version, 'text_diff': textDiff(v_text, curr_text), 'user': version.owner.get_full_name(), 'modified': version.modified.strftime('%d.%m.%Y %H:%M') })", "'text_diff': textDiff(v_text, curr_text), 'user': version.owner.get_full_name(), 'modified': version.modified.strftime('%d.%m.%Y %H:%M') }) if last_text.strip() == '':", "= { 'no_version': True, 'no_new_items': True} vars = get_folderish_vars_show(request, item_container, app_name, '', get_user_support(parent,", "0.01 17.03.2008 Beginn der Arbeit 0.02 18.03.2008 check_wiki_urls 0.03 20.03.2008 wikiitem_diff 0.03 21.03.2008", "vars['site_url'] = get_base_site_url() vars['versions'] = diff_list vars['org_text'] = 
textDiff(org_text, last_text) vars['user'] = item_container.owner.get_full_name()", "= textDiff(org_text, last_text) vars['user'] = item_container.owner.get_full_name() vars['modified'] = item_container.last_modified.strftime('%d.%m.%Y %H:%M') return render_to_response (", "from dms.queries import get_role_by_user_path from dms.queries import get_base_site_url from dms.queries import get_user_by_id from", "dms.utils_form import get_item_vars_show from dms.utils_form import get_item_vars_edit from dms.utils_form import get_folderish_vars_show from dms.utils_base", "import get_user_support from dms_ext.extension import * # dms-Funktionen ueberschreiben # ----------------------------------------------------- def wikiitem_diff(request,item_container):", "content = diff_list parent = item_container.get_parent() name = item_container.item.name wiki_page = name[:name.rfind('.html')] dont_show", "get_page_versions(item_container) diff_list = [] org_text = '<h4>%s</h4>\\n' % item_container.item.title + item_container.item.text curr_text =", "get_item_vars_edit from dms.utils_form import get_folderish_vars_show from dms.utils_base import show_link from dms.views_comment import item_comment", "'no_version': True, 'no_new_items': True} vars = get_folderish_vars_show(request, item_container, app_name, '', get_user_support(parent, wiki_page, dont_show))", "% (_(u'Versionen der Wiki-Seite'), vars['title']) vars['title'] = parent.item.title vars['site_url'] = get_base_site_url() vars['versions'] =", "gegenueber der letzten Version angezeigt \"\"\" import datetime from django.shortcuts import render_to_response from", "Wiki-Seite an Django content Management System <NAME> <EMAIL> Die Programme des dms-Systems koennen", "einer Wiki-Seite an Django content Management System <NAME> <EMAIL> Die Programme des dms-Systems", "Urheber der Aenderungen werden angezeigt 0.04 07.05.2008 diff wird jeweils gegenueber der letzten", "from dms.utils_base import show_link from dms.views_comment import item_comment from dms.wiki.utils import check_wiki_urls from", "dieser Seite \"\"\" versions = get_page_versions(item_container) diff_list = [] org_text = '<h4>%s</h4>\\n' %", "True, 'no_new_items': True} vars = get_folderish_vars_show(request, item_container, app_name, '', get_user_support(parent, wiki_page, dont_show)) if", "= '' vars['user_support_header'] = _(u'Mögliche Aktionen') vars['sub_title'] = '%s: <i>%s</i>' % (_(u'Versionen der", "der letzten Version angezeigt \"\"\" import datetime from django.shortcuts import render_to_response from django.http", "import get_role_by_user_path from dms.queries import get_base_site_url from dms.queries import get_user_by_id from dms.utils_form import", "from dms.diff import textDiff from dms.wiki.utils import get_user_support from dms_ext.extension import * #", "dms.wiki.utils import get_user_support from dms_ext.extension import * # dms-Funktionen ueberschreiben # ----------------------------------------------------- def", "item_container.owner.get_full_name() vars['modified'] = item_container.last_modified.strftime('%d.%m.%Y %H:%M') return render_to_response ( 'app/wiki/show_version.html', vars ) # -----------------------------------------------------", "return render_to_response ( 'app/wiki/show_version.html', vars ) # ----------------------------------------------------- def wikiitem_show(request,item_container): \"\"\" zeigt Wiki-Seite", "zeigt Wiki-Seite \"\"\" app_name = 'wikiitem' parent = item_container.get_parent() name = item_container.item.name url", "'wikiitem' parent 
= item_container.get_parent() name = item_container.item.name url = parent.get_absolute_url() + '?wiki_page=' +", "= get_page_versions(item_container) diff_list = [] org_text = '<h4>%s</h4>\\n' % item_container.item.title + item_container.item.text curr_text", "app_name, '', get_user_support(parent, wiki_page, dont_show)) if parent.item.has_comments: vars['comments'] = True vars['text'] = ''", "item_comment from dms.wiki.utils import check_wiki_urls from dms.wiki.queries import get_page_versions from dms.diff import textDiff", "get_user_support from dms_ext.extension import * # dms-Funktionen ueberschreiben # ----------------------------------------------------- def wikiitem_diff(request,item_container): \"\"\"", "from dms.utils_form import get_item_vars_show from dms.utils_form import get_item_vars_edit from dms.utils_form import get_folderish_vars_show from", "import item_comment from dms.wiki.utils import check_wiki_urls from dms.wiki.queries import get_page_versions from dms.diff import", "item_container, app_name, '', get_user_support(parent, wiki_page, dont_show)) if parent.item.has_comments: vars['comments'] = True vars['text'] =", "vars['modified'] = item_container.last_modified.strftime('%d.%m.%Y %H:%M') return render_to_response ( 'app/wiki/show_version.html', vars ) # ----------------------------------------------------- def", "dms.queries import get_user_by_id from dms.utils_form import get_item_vars_show from dms.utils_form import get_item_vars_edit from dms.utils_form", "diff_list = [] org_text = '<h4>%s</h4>\\n' % item_container.item.title + item_container.item.text curr_text = last_text", "textDiff from dms.wiki.utils import get_user_support from dms_ext.extension import * # dms-Funktionen ueberschreiben #", "-*- \"\"\" /dms/wikiitem/views_show.py .. zeigt den Inhalt einer Wiki-Seite an Django content Management", "v_text = '<h4>%s</h4>\\n' % version.title v_text += version.text diff_list.append({ 'version': version.version, 'text_diff': textDiff(v_text,", "= name[:name.rfind('.html')] dont_show = { 'no_version': True, 'no_new_items': True} vars = get_folderish_vars_show(request, item_container,", "last_text.strip() == '': last_text = v_text curr_text = v_text app_name = 'wikiitem' my_title", "frei genutzt und den spezifischen Beduerfnissen entsprechend angepasst werden. 0.01 17.03.2008 Beginn der", "HttpResponseRedirect from django.utils.translation import ugettext as _ from dms.queries import get_role_by_user_path from dms.queries", "import check_wiki_urls from dms.wiki.queries import get_page_versions from dms.diff import textDiff from dms.wiki.utils import", "for version in versions: v_text = '<h4>%s</h4>\\n' % version.title v_text += version.text diff_list.append({", "= _(u'Versionen der Wiki-Seite') content = diff_list parent = item_container.get_parent() name = item_container.item.name", "= item_container.owner.get_full_name() vars['modified'] = item_container.last_modified.strftime('%d.%m.%Y %H:%M') return render_to_response ( 'app/wiki/show_version.html', vars ) #", "<reponame>shagun30/djambala-2 # -*- coding: utf-8 -*- \"\"\" /dms/wikiitem/views_show.py .. 
zeigt den Inhalt einer", "curr_text = last_text = '' for version in versions: v_text = '<h4>%s</h4>\\n' %", "----------------------------------------------------- def wikiitem_show(request,item_container): \"\"\" zeigt Wiki-Seite \"\"\" app_name = 'wikiitem' parent = item_container.get_parent()", "version.title v_text += version.text diff_list.append({ 'version': version.version, 'text_diff': textDiff(v_text, curr_text), 'user': version.owner.get_full_name(), 'modified':", "if last_text.strip() == '': last_text = v_text curr_text = v_text app_name = 'wikiitem'", "dms.views_comment import item_comment from dms.wiki.utils import check_wiki_urls from dms.wiki.queries import get_page_versions from dms.diff", "'modified': version.modified.strftime('%d.%m.%Y %H:%M') }) if last_text.strip() == '': last_text = v_text curr_text =", "from dms.queries import get_base_site_url from dms.queries import get_user_by_id from dms.utils_form import get_item_vars_show from", "name = item_container.item.name wiki_page = name[:name.rfind('.html')] dont_show = { 'no_version': True, 'no_new_items': True}", "get_user_support(parent, wiki_page, dont_show)) if parent.item.has_comments: vars['comments'] = True vars['text'] = '' vars['image_url'] =", "vars['org_text'] = textDiff(org_text, last_text) vars['user'] = item_container.owner.get_full_name() vars['modified'] = item_container.last_modified.strftime('%d.%m.%Y %H:%M') return render_to_response", "Django content Management System <NAME> <EMAIL> Die Programme des dms-Systems koennen frei genutzt", "= parent.item.title vars['site_url'] = get_base_site_url() vars['versions'] = diff_list vars['org_text'] = textDiff(org_text, last_text) vars['user']", "get_folderish_vars_show(request, item_container, app_name, '', get_user_support(parent, wiki_page, dont_show)) if parent.item.has_comments: vars['comments'] = True vars['text']", "as _ from dms.queries import get_role_by_user_path from dms.queries import get_base_site_url from dms.queries import", "(_(u'Versionen der Wiki-Seite'), vars['title']) vars['title'] = parent.item.title vars['site_url'] = get_base_site_url() vars['versions'] = diff_list", "wikiitem_show(request,item_container): \"\"\" zeigt Wiki-Seite \"\"\" app_name = 'wikiitem' parent = item_container.get_parent() name =", "}) if last_text.strip() == '': last_text = v_text curr_text = v_text app_name =", "parent.item.has_comments: vars['comments'] = True vars['text'] = '' vars['image_url'] = '' vars['slot_right_info'] = ''", "textDiff(org_text, last_text) vars['user'] = item_container.owner.get_full_name() vars['modified'] = item_container.last_modified.strftime('%d.%m.%Y %H:%M') return render_to_response ( 'app/wiki/show_version.html',", "+ item_container.item.text curr_text = last_text = '' for version in versions: v_text =", "dms-Funktionen ueberschreiben # ----------------------------------------------------- def wikiitem_diff(request,item_container): \"\"\" zeigt die Versionen dieser Seite \"\"\"", "import get_folderish_vars_show from dms.utils_base import show_link from dms.views_comment import item_comment from dms.wiki.utils import", "item_container.item.title + item_container.item.text curr_text = last_text = '' for version in versions: v_text", "Programme des dms-Systems koennen frei genutzt und den spezifischen Beduerfnissen entsprechend angepasst werden.", "'no_new_items': True} vars = get_folderish_vars_show(request, item_container, app_name, '', get_user_support(parent, wiki_page, dont_show)) if parent.item.has_comments:", "from dms.utils_form 
import get_folderish_vars_show from dms.utils_base import show_link from dms.views_comment import item_comment from", "-*- coding: utf-8 -*- \"\"\" /dms/wikiitem/views_show.py .. zeigt den Inhalt einer Wiki-Seite an", "from dms.queries import get_user_by_id from dms.utils_form import get_item_vars_show from dms.utils_form import get_item_vars_edit from", "ugettext as _ from dms.queries import get_role_by_user_path from dms.queries import get_base_site_url from dms.queries", "get_page_versions from dms.diff import textDiff from dms.wiki.utils import get_user_support from dms_ext.extension import *", "dms.wiki.queries import get_page_versions from dms.diff import textDiff from dms.wiki.utils import get_user_support from dms_ext.extension", "% item_container.item.title + item_container.item.text curr_text = last_text = '' for version in versions:", "item_container.get_parent() name = item_container.item.name url = parent.get_absolute_url() + '?wiki_page=' + name[:name.rfind('.html')] return HttpResponseRedirect(url)", "\"\"\" app_name = 'wikiitem' parent = item_container.get_parent() name = item_container.item.name url = parent.get_absolute_url()", "vars = get_folderish_vars_show(request, item_container, app_name, '', get_user_support(parent, wiki_page, dont_show)) if parent.item.has_comments: vars['comments'] =", ".. zeigt den Inhalt einer Wiki-Seite an Django content Management System <NAME> <EMAIL>", "versions: v_text = '<h4>%s</h4>\\n' % version.title v_text += version.text diff_list.append({ 'version': version.version, 'text_diff':", "'': last_text = v_text curr_text = v_text app_name = 'wikiitem' my_title = _(u'Versionen", "21.03.2008 Urheber der Aenderungen werden angezeigt 0.04 07.05.2008 diff wird jeweils gegenueber der", "get_user_by_id from dms.utils_form import get_item_vars_show from dms.utils_form import get_item_vars_edit from dms.utils_form import get_folderish_vars_show", "from dms.wiki.utils import check_wiki_urls from dms.wiki.queries import get_page_versions from dms.diff import textDiff from", "versions = get_page_versions(item_container) diff_list = [] org_text = '<h4>%s</h4>\\n' % item_container.item.title + item_container.item.text", "den spezifischen Beduerfnissen entsprechend angepasst werden. 0.01 17.03.2008 Beginn der Arbeit 0.02 18.03.2008", "from dms_ext.extension import * # dms-Funktionen ueberschreiben # ----------------------------------------------------- def wikiitem_diff(request,item_container): \"\"\" zeigt", "diff_list.append({ 'version': version.version, 'text_diff': textDiff(v_text, curr_text), 'user': version.owner.get_full_name(), 'modified': version.modified.strftime('%d.%m.%Y %H:%M') }) if", "genutzt und den spezifischen Beduerfnissen entsprechend angepasst werden. 
0.01 17.03.2008 Beginn der Arbeit", "from django.utils.translation import ugettext as _ from dms.queries import get_role_by_user_path from dms.queries import", "20.03.2008 wikiitem_diff 0.03 21.03.2008 Urheber der Aenderungen werden angezeigt 0.04 07.05.2008 diff wird", "vars['comments'] = True vars['text'] = '' vars['image_url'] = '' vars['slot_right_info'] = '' vars['user_support_header']", "vars['text'] = '' vars['image_url'] = '' vars['slot_right_info'] = '' vars['user_support_header'] = _(u'Mögliche Aktionen')", "get_base_site_url() vars['versions'] = diff_list vars['org_text'] = textDiff(org_text, last_text) vars['user'] = item_container.owner.get_full_name() vars['modified'] =", "= item_container.item.name wiki_page = name[:name.rfind('.html')] dont_show = { 'no_version': True, 'no_new_items': True} vars", "Wiki-Seite') content = diff_list parent = item_container.get_parent() name = item_container.item.name wiki_page = name[:name.rfind('.html')]", "vars['user'] = item_container.owner.get_full_name() vars['modified'] = item_container.last_modified.strftime('%d.%m.%Y %H:%M') return render_to_response ( 'app/wiki/show_version.html', vars )", "diff_list parent = item_container.get_parent() name = item_container.item.name wiki_page = name[:name.rfind('.html')] dont_show = {", "wiki_page, dont_show)) if parent.item.has_comments: vars['comments'] = True vars['text'] = '' vars['image_url'] = ''", "'', get_user_support(parent, wiki_page, dont_show)) if parent.item.has_comments: vars['comments'] = True vars['text'] = '' vars['image_url']", "# ----------------------------------------------------- def wikiitem_diff(request,item_container): \"\"\" zeigt die Versionen dieser Seite \"\"\" versions =", "= item_container.get_parent() name = item_container.item.name url = parent.get_absolute_url() + '?wiki_page=' + name[:name.rfind('.html')] return", "und den spezifischen Beduerfnissen entsprechend angepasst werden. 
0.01 17.03.2008 Beginn der Arbeit 0.02", "'' for version in versions: v_text = '<h4>%s</h4>\\n' % version.title v_text += version.text", "<EMAIL> Die Programme des dms-Systems koennen frei genutzt und den spezifischen Beduerfnissen entsprechend", "18.03.2008 check_wiki_urls 0.03 20.03.2008 wikiitem_diff 0.03 21.03.2008 Urheber der Aenderungen werden angezeigt 0.04", "True} vars = get_folderish_vars_show(request, item_container, app_name, '', get_user_support(parent, wiki_page, dont_show)) if parent.item.has_comments: vars['comments']", "v_text += version.text diff_list.append({ 'version': version.version, 'text_diff': textDiff(v_text, curr_text), 'user': version.owner.get_full_name(), 'modified': version.modified.strftime('%d.%m.%Y", "= last_text = '' for version in versions: v_text = '<h4>%s</h4>\\n' % version.title", ") # ----------------------------------------------------- def wikiitem_show(request,item_container): \"\"\" zeigt Wiki-Seite \"\"\" app_name = 'wikiitem' parent", "Wiki-Seite'), vars['title']) vars['title'] = parent.item.title vars['site_url'] = get_base_site_url() vars['versions'] = diff_list vars['org_text'] =", "der Arbeit 0.02 18.03.2008 check_wiki_urls 0.03 20.03.2008 wikiitem_diff 0.03 21.03.2008 Urheber der Aenderungen", "= '<h4>%s</h4>\\n' % version.title v_text += version.text diff_list.append({ 'version': version.version, 'text_diff': textDiff(v_text, curr_text),", "# ----------------------------------------------------- def wikiitem_show(request,item_container): \"\"\" zeigt Wiki-Seite \"\"\" app_name = 'wikiitem' parent =", "wikiitem_diff 0.03 21.03.2008 Urheber der Aenderungen werden angezeigt 0.04 07.05.2008 diff wird jeweils", "System <NAME> <EMAIL> Die Programme des dms-Systems koennen frei genutzt und den spezifischen", "dms-Systems koennen frei genutzt und den spezifischen Beduerfnissen entsprechend angepasst werden. 0.01 17.03.2008", "import get_user_by_id from dms.utils_form import get_item_vars_show from dms.utils_form import get_item_vars_edit from dms.utils_form import", "0.04 07.05.2008 diff wird jeweils gegenueber der letzten Version angezeigt \"\"\" import datetime", "wird jeweils gegenueber der letzten Version angezeigt \"\"\" import datetime from django.shortcuts import", "import get_item_vars_edit from dms.utils_form import get_folderish_vars_show from dms.utils_base import show_link from dms.views_comment import", "= diff_list parent = item_container.get_parent() name = item_container.item.name wiki_page = name[:name.rfind('.html')] dont_show =", "/dms/wikiitem/views_show.py .. 
zeigt den Inhalt einer Wiki-Seite an Django content Management System <NAME>", "version.modified.strftime('%d.%m.%Y %H:%M') }) if last_text.strip() == '': last_text = v_text curr_text = v_text", "my_title = _(u'Versionen der Wiki-Seite') content = diff_list parent = item_container.get_parent() name =", "import HttpResponseRedirect from django.utils.translation import ugettext as _ from dms.queries import get_role_by_user_path from", "Management System <NAME> <EMAIL> Die Programme des dms-Systems koennen frei genutzt und den", "v_text curr_text = v_text app_name = 'wikiitem' my_title = _(u'Versionen der Wiki-Seite') content", "{ 'no_version': True, 'no_new_items': True} vars = get_folderish_vars_show(request, item_container, app_name, '', get_user_support(parent, wiki_page,", "'user': version.owner.get_full_name(), 'modified': version.modified.strftime('%d.%m.%Y %H:%M') }) if last_text.strip() == '': last_text = v_text", "if parent.item.has_comments: vars['comments'] = True vars['text'] = '' vars['image_url'] = '' vars['slot_right_info'] =", "= [] org_text = '<h4>%s</h4>\\n' % item_container.item.title + item_container.item.text curr_text = last_text =", "org_text = '<h4>%s</h4>\\n' % item_container.item.title + item_container.item.text curr_text = last_text = '' for", "last_text = v_text curr_text = v_text app_name = 'wikiitem' my_title = _(u'Versionen der", "vars['slot_right_info'] = '' vars['user_support_header'] = _(u'Mögliche Aktionen') vars['sub_title'] = '%s: <i>%s</i>' % (_(u'Versionen", "= '' for version in versions: v_text = '<h4>%s</h4>\\n' % version.title v_text +=", "== '': last_text = v_text curr_text = v_text app_name = 'wikiitem' my_title =", "= get_folderish_vars_show(request, item_container, app_name, '', get_user_support(parent, wiki_page, dont_show)) if parent.item.has_comments: vars['comments'] = True", "0.02 18.03.2008 check_wiki_urls 0.03 20.03.2008 wikiitem_diff 0.03 21.03.2008 Urheber der Aenderungen werden angezeigt", "True vars['text'] = '' vars['image_url'] = '' vars['slot_right_info'] = '' vars['user_support_header'] = _(u'Mögliche", "= _(u'Mögliche Aktionen') vars['sub_title'] = '%s: <i>%s</i>' % (_(u'Versionen der Wiki-Seite'), vars['title']) vars['title']", "vars['sub_title'] = '%s: <i>%s</i>' % (_(u'Versionen der Wiki-Seite'), vars['title']) vars['title'] = parent.item.title vars['site_url']", "import * # dms-Funktionen ueberschreiben # ----------------------------------------------------- def wikiitem_diff(request,item_container): \"\"\" zeigt die Versionen", "jeweils gegenueber der letzten Version angezeigt \"\"\" import datetime from django.shortcuts import render_to_response", "\"\"\" versions = get_page_versions(item_container) diff_list = [] org_text = '<h4>%s</h4>\\n' % item_container.item.title +", "dont_show)) if parent.item.has_comments: vars['comments'] = True vars['text'] = '' vars['image_url'] = '' vars['slot_right_info']", "<i>%s</i>' % (_(u'Versionen der Wiki-Seite'), vars['title']) vars['title'] = parent.item.title vars['site_url'] = get_base_site_url() vars['versions']", "'<h4>%s</h4>\\n' % version.title v_text += version.text diff_list.append({ 'version': version.version, 'text_diff': textDiff(v_text, curr_text), 'user':", "app_name = 'wikiitem' my_title = _(u'Versionen der Wiki-Seite') content = diff_list parent =", "= '<h4>%s</h4>\\n' % item_container.item.title + item_container.item.text curr_text = last_text = '' for version", "'%s: <i>%s</i>' % (_(u'Versionen der Wiki-Seite'), vars['title']) vars['title'] = parent.item.title 
vars['site_url'] = get_base_site_url()", "= diff_list vars['org_text'] = textDiff(org_text, last_text) vars['user'] = item_container.owner.get_full_name() vars['modified'] = item_container.last_modified.strftime('%d.%m.%Y %H:%M')", "in versions: v_text = '<h4>%s</h4>\\n' % version.title v_text += version.text diff_list.append({ 'version': version.version,", "vars['image_url'] = '' vars['slot_right_info'] = '' vars['user_support_header'] = _(u'Mögliche Aktionen') vars['sub_title'] = '%s:", "der Wiki-Seite'), vars['title']) vars['title'] = parent.item.title vars['site_url'] = get_base_site_url() vars['versions'] = diff_list vars['org_text']", "from dms.utils_form import get_item_vars_edit from dms.utils_form import get_folderish_vars_show from dms.utils_base import show_link from", "import get_base_site_url from dms.queries import get_user_by_id from dms.utils_form import get_item_vars_show from dms.utils_form import", "item_container.item.name wiki_page = name[:name.rfind('.html')] dont_show = { 'no_version': True, 'no_new_items': True} vars =", "= item_container.get_parent() name = item_container.item.name wiki_page = name[:name.rfind('.html')] dont_show = { 'no_version': True,", "= 'wikiitem' parent = item_container.get_parent() name = item_container.item.name url = parent.get_absolute_url() + '?wiki_page='", "werden angezeigt 0.04 07.05.2008 diff wird jeweils gegenueber der letzten Version angezeigt \"\"\"", "from dms.wiki.utils import get_user_support from dms_ext.extension import * # dms-Funktionen ueberschreiben # -----------------------------------------------------", "vars['title']) vars['title'] = parent.item.title vars['site_url'] = get_base_site_url() vars['versions'] = diff_list vars['org_text'] = textDiff(org_text,", "import textDiff from dms.wiki.utils import get_user_support from dms_ext.extension import * # dms-Funktionen ueberschreiben", "item_container.get_parent() name = item_container.item.name wiki_page = name[:name.rfind('.html')] dont_show = { 'no_version': True, 'no_new_items':", "item_container.last_modified.strftime('%d.%m.%Y %H:%M') return render_to_response ( 'app/wiki/show_version.html', vars ) # ----------------------------------------------------- def wikiitem_show(request,item_container): \"\"\"", "= get_base_site_url() vars['versions'] = diff_list vars['org_text'] = textDiff(org_text, last_text) vars['user'] = item_container.owner.get_full_name() vars['modified']", "django.shortcuts import render_to_response from django.http import HttpResponseRedirect from django.utils.translation import ugettext as _", "# -*- coding: utf-8 -*- \"\"\" /dms/wikiitem/views_show.py .. 
zeigt den Inhalt einer Wiki-Seite", "dms.diff import textDiff from dms.wiki.utils import get_user_support from dms_ext.extension import * # dms-Funktionen", "import get_page_versions from dms.diff import textDiff from dms.wiki.utils import get_user_support from dms_ext.extension import", "get_folderish_vars_show from dms.utils_base import show_link from dms.views_comment import item_comment from dms.wiki.utils import check_wiki_urls", "ueberschreiben # ----------------------------------------------------- def wikiitem_diff(request,item_container): \"\"\" zeigt die Versionen dieser Seite \"\"\" versions", "curr_text = v_text app_name = 'wikiitem' my_title = _(u'Versionen der Wiki-Seite') content =", "_(u'Versionen der Wiki-Seite') content = diff_list parent = item_container.get_parent() name = item_container.item.name wiki_page", "<NAME> <EMAIL> Die Programme des dms-Systems koennen frei genutzt und den spezifischen Beduerfnissen", "dont_show = { 'no_version': True, 'no_new_items': True} vars = get_folderish_vars_show(request, item_container, app_name, '',", "'' vars['image_url'] = '' vars['slot_right_info'] = '' vars['user_support_header'] = _(u'Mögliche Aktionen') vars['sub_title'] =", "django.utils.translation import ugettext as _ from dms.queries import get_role_by_user_path from dms.queries import get_base_site_url", "vars ) # ----------------------------------------------------- def wikiitem_show(request,item_container): \"\"\" zeigt Wiki-Seite \"\"\" app_name = 'wikiitem'", "dms.utils_form import get_folderish_vars_show from dms.utils_base import show_link from dms.views_comment import item_comment from dms.wiki.utils", "import render_to_response from django.http import HttpResponseRedirect from django.utils.translation import ugettext as _ from", "= v_text app_name = 'wikiitem' my_title = _(u'Versionen der Wiki-Seite') content = diff_list", "werden. 
0.01 17.03.2008 Beginn der Arbeit 0.02 18.03.2008 check_wiki_urls 0.03 20.03.2008 wikiitem_diff 0.03", "zeigt die Versionen dieser Seite \"\"\" versions = get_page_versions(item_container) diff_list = [] org_text", "Arbeit 0.02 18.03.2008 check_wiki_urls 0.03 20.03.2008 wikiitem_diff 0.03 21.03.2008 Urheber der Aenderungen werden", "'app/wiki/show_version.html', vars ) # ----------------------------------------------------- def wikiitem_show(request,item_container): \"\"\" zeigt Wiki-Seite \"\"\" app_name =", "\"\"\" zeigt Wiki-Seite \"\"\" app_name = 'wikiitem' parent = item_container.get_parent() name = item_container.item.name", "v_text app_name = 'wikiitem' my_title = _(u'Versionen der Wiki-Seite') content = diff_list parent", "Die Programme des dms-Systems koennen frei genutzt und den spezifischen Beduerfnissen entsprechend angepasst", "last_text) vars['user'] = item_container.owner.get_full_name() vars['modified'] = item_container.last_modified.strftime('%d.%m.%Y %H:%M') return render_to_response ( 'app/wiki/show_version.html', vars", "= '%s: <i>%s</i>' % (_(u'Versionen der Wiki-Seite'), vars['title']) vars['title'] = parent.item.title vars['site_url'] =", "from django.shortcuts import render_to_response from django.http import HttpResponseRedirect from django.utils.translation import ugettext as", "check_wiki_urls from dms.wiki.queries import get_page_versions from dms.diff import textDiff from dms.wiki.utils import get_user_support", "curr_text), 'user': version.owner.get_full_name(), 'modified': version.modified.strftime('%d.%m.%Y %H:%M') }) if last_text.strip() == '': last_text =", "( 'app/wiki/show_version.html', vars ) # ----------------------------------------------------- def wikiitem_show(request,item_container): \"\"\" zeigt Wiki-Seite \"\"\" app_name", "* # dms-Funktionen ueberschreiben # ----------------------------------------------------- def wikiitem_diff(request,item_container): \"\"\" zeigt die Versionen dieser", "Seite \"\"\" versions = get_page_versions(item_container) diff_list = [] org_text = '<h4>%s</h4>\\n' % item_container.item.title", "Aenderungen werden angezeigt 0.04 07.05.2008 diff wird jeweils gegenueber der letzten Version angezeigt", "0.03 20.03.2008 wikiitem_diff 0.03 21.03.2008 Urheber der Aenderungen werden angezeigt 0.04 07.05.2008 diff", "der Aenderungen werden angezeigt 0.04 07.05.2008 diff wird jeweils gegenueber der letzten Version", "diff_list vars['org_text'] = textDiff(org_text, last_text) vars['user'] = item_container.owner.get_full_name() vars['modified'] = item_container.last_modified.strftime('%d.%m.%Y %H:%M') return", "%H:%M') return render_to_response ( 'app/wiki/show_version.html', vars ) # ----------------------------------------------------- def wikiitem_show(request,item_container): \"\"\" zeigt", "utf-8 -*- \"\"\" /dms/wikiitem/views_show.py .. zeigt den Inhalt einer Wiki-Seite an Django content", "koennen frei genutzt und den spezifischen Beduerfnissen entsprechend angepasst werden. 
0.01 17.03.2008 Beginn", "+= version.text diff_list.append({ 'version': version.version, 'text_diff': textDiff(v_text, curr_text), 'user': version.owner.get_full_name(), 'modified': version.modified.strftime('%d.%m.%Y %H:%M')", "name[:name.rfind('.html')] dont_show = { 'no_version': True, 'no_new_items': True} vars = get_folderish_vars_show(request, item_container, app_name,", "import show_link from dms.views_comment import item_comment from dms.wiki.utils import check_wiki_urls from dms.wiki.queries import", "'' vars['slot_right_info'] = '' vars['user_support_header'] = _(u'Mögliche Aktionen') vars['sub_title'] = '%s: <i>%s</i>' %", "wiki_page = name[:name.rfind('.html')] dont_show = { 'no_version': True, 'no_new_items': True} vars = get_folderish_vars_show(request,", "coding: utf-8 -*- \"\"\" /dms/wikiitem/views_show.py .. zeigt den Inhalt einer Wiki-Seite an Django", "django.http import HttpResponseRedirect from django.utils.translation import ugettext as _ from dms.queries import get_role_by_user_path", "vars['versions'] = diff_list vars['org_text'] = textDiff(org_text, last_text) vars['user'] = item_container.owner.get_full_name() vars['modified'] = item_container.last_modified.strftime('%d.%m.%Y", "dms.utils_form import get_item_vars_edit from dms.utils_form import get_folderish_vars_show from dms.utils_base import show_link from dms.views_comment", "07.05.2008 diff wird jeweils gegenueber der letzten Version angezeigt \"\"\" import datetime from", "entsprechend angepasst werden. 0.01 17.03.2008 Beginn der Arbeit 0.02 18.03.2008 check_wiki_urls 0.03 20.03.2008", "from dms.views_comment import item_comment from dms.wiki.utils import check_wiki_urls from dms.wiki.queries import get_page_versions from", "dms.wiki.utils import check_wiki_urls from dms.wiki.queries import get_page_versions from dms.diff import textDiff from dms.wiki.utils", "'version': version.version, 'text_diff': textDiff(v_text, curr_text), 'user': version.owner.get_full_name(), 'modified': version.modified.strftime('%d.%m.%Y %H:%M') }) if last_text.strip()", "version.version, 'text_diff': textDiff(v_text, curr_text), 'user': version.owner.get_full_name(), 'modified': version.modified.strftime('%d.%m.%Y %H:%M') }) if last_text.strip() ==", "parent.item.title vars['site_url'] = get_base_site_url() vars['versions'] = diff_list vars['org_text'] = textDiff(org_text, last_text) vars['user'] =", "check_wiki_urls 0.03 20.03.2008 wikiitem_diff 0.03 21.03.2008 Urheber der Aenderungen werden angezeigt 0.04 07.05.2008", "\"\"\" import datetime from django.shortcuts import render_to_response from django.http import HttpResponseRedirect from django.utils.translation", "import ugettext as _ from dms.queries import get_role_by_user_path from dms.queries import get_base_site_url from", "\"\"\" zeigt die Versionen dieser Seite \"\"\" versions = get_page_versions(item_container) diff_list = []", "[] org_text = '<h4>%s</h4>\\n' % item_container.item.title + item_container.item.text curr_text = last_text = ''", "= v_text curr_text = v_text app_name = 'wikiitem' my_title = _(u'Versionen der Wiki-Seite')", "= '' vars['image_url'] = '' vars['slot_right_info'] = '' vars['user_support_header'] = _(u'Mögliche Aktionen') vars['sub_title']", "render_to_response ( 'app/wiki/show_version.html', vars ) # ----------------------------------------------------- def wikiitem_show(request,item_container): \"\"\" zeigt Wiki-Seite \"\"\"", "Versionen dieser Seite \"\"\" versions = get_page_versions(item_container) diff_list = [] org_text = 
'<h4>%s</h4>\\n'", "angezeigt \"\"\" import datetime from django.shortcuts import render_to_response from django.http import HttpResponseRedirect from", "dms.utils_base import show_link from dms.views_comment import item_comment from dms.wiki.utils import check_wiki_urls from dms.wiki.queries", "from dms.wiki.queries import get_page_versions from dms.diff import textDiff from dms.wiki.utils import get_user_support from", "= True vars['text'] = '' vars['image_url'] = '' vars['slot_right_info'] = '' vars['user_support_header'] =", "17.03.2008 Beginn der Arbeit 0.02 18.03.2008 check_wiki_urls 0.03 20.03.2008 wikiitem_diff 0.03 21.03.2008 Urheber", "render_to_response from django.http import HttpResponseRedirect from django.utils.translation import ugettext as _ from dms.queries", "Beginn der Arbeit 0.02 18.03.2008 check_wiki_urls 0.03 20.03.2008 wikiitem_diff 0.03 21.03.2008 Urheber der", "spezifischen Beduerfnissen entsprechend angepasst werden. 0.01 17.03.2008 Beginn der Arbeit 0.02 18.03.2008 check_wiki_urls", "'<h4>%s</h4>\\n' % item_container.item.title + item_container.item.text curr_text = last_text = '' for version in", "diff wird jeweils gegenueber der letzten Version angezeigt \"\"\" import datetime from django.shortcuts", "get_base_site_url from dms.queries import get_user_by_id from dms.utils_form import get_item_vars_show from dms.utils_form import get_item_vars_edit", "version in versions: v_text = '<h4>%s</h4>\\n' % version.title v_text += version.text diff_list.append({ 'version':", "'wikiitem' my_title = _(u'Versionen der Wiki-Seite') content = diff_list parent = item_container.get_parent() name", "des dms-Systems koennen frei genutzt und den spezifischen Beduerfnissen entsprechend angepasst werden. 0.01", "an Django content Management System <NAME> <EMAIL> Die Programme des dms-Systems koennen frei", "datetime from django.shortcuts import render_to_response from django.http import HttpResponseRedirect from django.utils.translation import ugettext", "show_link from dms.views_comment import item_comment from dms.wiki.utils import check_wiki_urls from dms.wiki.queries import get_page_versions", "import datetime from django.shortcuts import render_to_response from django.http import HttpResponseRedirect from django.utils.translation import", "_(u'Mögliche Aktionen') vars['sub_title'] = '%s: <i>%s</i>' % (_(u'Versionen der Wiki-Seite'), vars['title']) vars['title'] =", "_ from dms.queries import get_role_by_user_path from dms.queries import get_base_site_url from dms.queries import get_user_by_id", "letzten Version angezeigt \"\"\" import datetime from django.shortcuts import render_to_response from django.http import", "den Inhalt einer Wiki-Seite an Django content Management System <NAME> <EMAIL> Die Programme", "import get_item_vars_show from dms.utils_form import get_item_vars_edit from dms.utils_form import get_folderish_vars_show from dms.utils_base import", "vars['user_support_header'] = _(u'Mögliche Aktionen') vars['sub_title'] = '%s: <i>%s</i>' % (_(u'Versionen der Wiki-Seite'), vars['title'])", "parent = item_container.get_parent() name = item_container.item.name url = parent.get_absolute_url() + '?wiki_page=' + name[:name.rfind('.html')]", "\"\"\" /dms/wikiitem/views_show.py .. 
zeigt den Inhalt einer Wiki-Seite an Django content Management System", "parent = item_container.get_parent() name = item_container.item.name wiki_page = name[:name.rfind('.html')] dont_show = { 'no_version':", "Wiki-Seite \"\"\" app_name = 'wikiitem' parent = item_container.get_parent() name = item_container.item.name url =", "0.03 21.03.2008 Urheber der Aenderungen werden angezeigt 0.04 07.05.2008 diff wird jeweils gegenueber", "app_name = 'wikiitem' parent = item_container.get_parent() name = item_container.item.name url = parent.get_absolute_url() +", "last_text = '' for version in versions: v_text = '<h4>%s</h4>\\n' % version.title v_text", "Inhalt einer Wiki-Seite an Django content Management System <NAME> <EMAIL> Die Programme des", "wikiitem_diff(request,item_container): \"\"\" zeigt die Versionen dieser Seite \"\"\" versions = get_page_versions(item_container) diff_list =", "textDiff(v_text, curr_text), 'user': version.owner.get_full_name(), 'modified': version.modified.strftime('%d.%m.%Y %H:%M') }) if last_text.strip() == '': last_text", "content Management System <NAME> <EMAIL> Die Programme des dms-Systems koennen frei genutzt und", "Version angezeigt \"\"\" import datetime from django.shortcuts import render_to_response from django.http import HttpResponseRedirect", "dms.queries import get_role_by_user_path from dms.queries import get_base_site_url from dms.queries import get_user_by_id from dms.utils_form", "dms_ext.extension import * # dms-Funktionen ueberschreiben # ----------------------------------------------------- def wikiitem_diff(request,item_container): \"\"\" zeigt die", "= 'wikiitem' my_title = _(u'Versionen der Wiki-Seite') content = diff_list parent = item_container.get_parent()", "%H:%M') }) if last_text.strip() == '': last_text = v_text curr_text = v_text app_name", "def wikiitem_diff(request,item_container): \"\"\" zeigt die Versionen dieser Seite \"\"\" versions = get_page_versions(item_container) diff_list", "Beduerfnissen entsprechend angepasst werden. 0.01 17.03.2008 Beginn der Arbeit 0.02 18.03.2008 check_wiki_urls 0.03" ]
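# ---------------------------------------------------------------------------
# Illustrative sketch, not part of views_show.py: wikiitem_diff above diffs
# every stored version against the previously rendered one.  The snippet below
# reproduces that loop with Python's standard difflib as a stand-in for
# dms.diff.textDiff; SampleVersion and the sample data are hypothetical.
# ---------------------------------------------------------------------------
import difflib
from collections import namedtuple

SampleVersion = namedtuple('SampleVersion', 'version title text')


def build_diff_list(versions):
    """Diff each version against the one handled just before it."""
    diff_list = []
    curr_text = ''
    for version in versions:
        v_text = '<h4>%s</h4>\n%s' % (version.title, version.text)
        # difflib.HtmlDiff stands in for dms.diff.textDiff in this sketch
        html_diff = difflib.HtmlDiff().make_table(v_text.splitlines(),
                                                  curr_text.splitlines())
        diff_list.append({'version': version.version, 'text_diff': html_diff})
        curr_text = v_text
    return diff_list


if __name__ == '__main__':
    sample = [SampleVersion(2, u'Page', u'second revision'),
              SampleVersion(1, u'Page', u'first revision')]
    for entry in build_diff_list(sample):
        print(entry['version'])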
[ "\"\"\" self.clear_other_querier_present_timer() other_querier_present_timer = Timer(igmp_globals.OTHER_QUERIER_PRESENT_INTERVAL, self.other_querier_present_timeout) other_querier_present_timer.start() self.other_querier_present_timer = other_querier_present_timer def clear_other_querier_present_timer(self): \"\"\"", "TYPE_CHECKING from . import igmp_globals from .GroupState import GroupState from .querier.Querier import Querier", "Change state regarding querier state machine (Querier/NonQuerier) \"\"\" if querier: self.interface_state = Querier", "= GroupState(self, group_ip) self.group_state[group_ip] = group_state return group_state def receive_v1_membership_report(self, packet: ReceivedPacket): \"\"\"", "logger_extra = dict() logger_extra['vif'] = interface.vif_index logger_extra['interfacename'] = interface.interface_name self.router_state_logger = logging.LoggerAdapter(RouterState.ROUTER_STATE_LOGGER, logger_extra)", "Report packet \"\"\" igmp_group = packet.payload.group_address self.get_group_state(igmp_group).receive_v1_membership_report() def receive_v2_membership_report(self, packet: ReceivedPacket): \"\"\" Received", "general_query_timer def clear_general_query_timer(self): \"\"\" Stop general query timer \"\"\" if self.general_query_timer is not", "igmp_group in self.group_state: max_response_time = packet.payload.max_resp_time self.get_group_state(igmp_group).receive_group_specific_query(max_response_time) def remove(self): \"\"\" Remove this IGMP", "= Querier # state of each group # Key: GroupIPAddress, Value: GroupState object", "self.clear_other_querier_present_timer() other_querier_present_timer = Timer(igmp_globals.OTHER_QUERIER_PRESENT_INTERVAL, self.other_querier_present_timeout) other_querier_present_timer.start() self.other_querier_present_timer = other_querier_present_timer def clear_other_querier_present_timer(self): \"\"\" Stop", "group specific query if igmp_group != \"0.0.0.0\" and igmp_group in self.group_state: max_response_time =", "from igmp.InterfaceIGMP import InterfaceIGMP class RouterState(object): ROUTER_STATE_LOGGER = logging.getLogger('igmp.igmpv2.RouterState') def __init__(self, interface: 'InterfaceIGMP'):", "NonQuerier self.router_state_logger.debug('change querier state to -> NonQuerier') ############################################ # group state methods ############################################", "############################################ def get_group_state(self, group_ip): \"\"\" Get object that monitors a given group (with", "receive_query(self, packet: ReceivedPacket): \"\"\" Received IGMP Query packet \"\"\" self.interface_state.receive_query(self, packet) igmp_group =", "self.interface_state.state_name() def set_general_query_timer(self): \"\"\" Set general query timer \"\"\" self.clear_general_query_timer() general_query_timer = Timer(igmp_globals.QUERY_INTERVAL,", "general query timer \"\"\" self.clear_general_query_timer() general_query_timer = Timer(igmp_globals.QUERY_INTERVAL, self.general_query_timeout) general_query_timer.start() self.general_query_timer = general_query_timer", "#logger logger_extra = dict() logger_extra['vif'] = interface.vif_index logger_extra['interfacename'] = interface.interface_name self.router_state_logger = logging.LoggerAdapter(RouterState.ROUTER_STATE_LOGGER,", "Query timer has expired \"\"\" self.interface_state.general_query_timeout(self) def other_querier_present_timeout(self): \"\"\" Other Querier Present timer", "querier present timer \"\"\" if self.other_querier_present_timer is not None: 
self.other_querier_present_timer.cancel() def general_query_timeout(self): \"\"\"", "is not None: self.general_query_timer.cancel() def set_other_querier_present_timer(self): \"\"\" Set other querier present timer \"\"\"", "print_state(self): return self.interface_state.state_name() def set_general_query_timer(self): \"\"\" Set general query timer \"\"\" self.clear_general_query_timer() general_query_timer", "state regarding querier state machine (Querier/NonQuerier) \"\"\" if querier: self.interface_state = Querier self.router_state_logger.debug('change", "return self.group_state[group_ip] with self.group_state_lock.genWlock(): if group_ip in self.group_state: group_state = self.group_state[group_ip] else: group_state", ". import igmp_globals from .GroupState import GroupState from .querier.Querier import Querier from .nonquerier.NonQuerier", "import InterfaceIGMP class RouterState(object): ROUTER_STATE_LOGGER = logging.getLogger('igmp.igmpv2.RouterState') def __init__(self, interface: 'InterfaceIGMP'): #logger logger_extra", "IP address) \"\"\" with self.group_state_lock.genRlock(): if group_ip in self.group_state: return self.group_state[group_ip] with self.group_state_lock.genWlock():", "object that monitors a given group (with group_ip IP address) \"\"\" with self.group_state_lock.genRlock():", "logger_extra['interfacename'] = interface.interface_name self.router_state_logger = logging.LoggerAdapter(RouterState.ROUTER_STATE_LOGGER, logger_extra) # interface of the router connected", "self.other_querier_present_timer.cancel() def general_query_timeout(self): \"\"\" General Query timer has expired \"\"\" self.interface_state.general_query_timeout(self) def other_querier_present_timeout(self):", "self.interface_state = NonQuerier self.router_state_logger.debug('change querier state to -> NonQuerier') ############################################ # group state", "Report packet \"\"\" igmp_group = packet.payload.group_address self.get_group_state(igmp_group).receive_v2_membership_report() def receive_leave_group(self, packet: ReceivedPacket): \"\"\" Received", "from igmp.packet.PacketIGMPHeader import PacketIGMPHeader from igmp.packet.ReceivedPacket import ReceivedPacket from igmp.rwlock.RWLock import RWLockWrite from", "= dict() logger_extra['vif'] = interface.vif_index logger_extra['interfacename'] = interface.interface_name self.router_state_logger = logging.LoggerAdapter(RouterState.ROUTER_STATE_LOGGER, logger_extra) #", "def general_query_timeout(self): \"\"\" General Query timer has expired \"\"\" self.interface_state.general_query_timeout(self) def other_querier_present_timeout(self): \"\"\"", "= Querier self.router_state_logger.debug('change querier state to -> Querier') else: self.interface_state = NonQuerier self.router_state_logger.debug('change", "dict() logger_extra['vif'] = interface.vif_index logger_extra['interfacename'] = interface.interface_name self.router_state_logger = logging.LoggerAdapter(RouterState.ROUTER_STATE_LOGGER, logger_extra) # interface", "querier state to -> Querier') else: self.interface_state = NonQuerier self.router_state_logger.debug('change querier state to", "if self.other_querier_present_timer is not None: self.other_querier_present_timer.cancel() def general_query_timeout(self): \"\"\" General Query timer has", "str): self.interface.send(data, address) ############################################ # interface_state methods ############################################ def print_state(self): return self.interface_state.state_name() def", "\"\"\" 
self.interface_state.general_query_timeout(self) def other_querier_present_timeout(self): \"\"\" Other Querier Present timer has expired \"\"\" self.interface_state.other_querier_present_timeout(self)", "def receive_v2_membership_report(self, packet: ReceivedPacket): \"\"\" Received IGMP Membership Report packet \"\"\" igmp_group =", "methods ############################################ def print_state(self): return self.interface_state.state_name() def set_general_query_timer(self): \"\"\" Set general query timer", "= packet.payload.group_address self.get_group_state(igmp_group).receive_v1_membership_report() def receive_v2_membership_report(self, packet: ReceivedPacket): \"\"\" Received IGMP Membership Report packet", "self.get_group_state(igmp_group).receive_leave_group() def receive_query(self, packet: ReceivedPacket): \"\"\" Received IGMP Query packet \"\"\" self.interface_state.receive_query(self, packet)", "logger_extra['vif'] = interface.vif_index logger_extra['interfacename'] = interface.interface_name self.router_state_logger = logging.LoggerAdapter(RouterState.ROUTER_STATE_LOGGER, logger_extra) # interface of", "packet via interface def send(self, data: bytes, address: str): self.interface.send(data, address) ############################################ #", "max_response_time = packet.payload.max_resp_time self.get_group_state(igmp_group).receive_group_specific_query(max_response_time) def remove(self): \"\"\" Remove this IGMP interface Clear all", "# state of each group # Key: GroupIPAddress, Value: GroupState object self.group_state =", "self.group_state[group_ip] = group_state return group_state def receive_v1_membership_report(self, packet: ReceivedPacket): \"\"\" Received IGMP Version", "network self.interface = interface # state of the router (Querier/NonQuerier) self.interface_state = Querier", "group_ip in self.group_state: return self.group_state[group_ip] with self.group_state_lock.genWlock(): if group_ip in self.group_state: group_state =", "packet.payload.group_address self.get_group_state(igmp_group).receive_leave_group() def receive_query(self, packet: ReceivedPacket): \"\"\" Received IGMP Query packet \"\"\" self.interface_state.receive_query(self,", "group_state = self.group_state[group_ip] else: group_state = GroupState(self, group_ip) self.group_state[group_ip] = group_state return group_state", "self.router_state_logger = logging.LoggerAdapter(RouterState.ROUTER_STATE_LOGGER, logger_extra) # interface of the router connected to the network", "general_query_timer.start() self.general_query_timer = general_query_timer def clear_general_query_timer(self): \"\"\" Stop general query timer \"\"\" if", "self.general_query_timer is not None: self.general_query_timer.cancel() def set_other_querier_present_timer(self): \"\"\" Set other querier present timer", "\"\"\" Set general query timer \"\"\" self.clear_general_query_timer() general_query_timer = Timer(igmp_globals.QUERY_INTERVAL, self.general_query_timeout) general_query_timer.start() self.general_query_timer", "remove(self): \"\"\" Remove this IGMP interface Clear all state \"\"\" for group in", "Timer import logging from igmp.packet.PacketIGMPHeader import PacketIGMPHeader from igmp.packet.ReceivedPacket import ReceivedPacket from igmp.rwlock.RWLock", "igmp.rwlock.RWLock import RWLockWrite from igmp.utils import TYPE_CHECKING from . 
import igmp_globals from .GroupState", "group_ip) self.group_state[group_ip] = group_state return group_state def receive_v1_membership_report(self, packet: ReceivedPacket): \"\"\" Received IGMP", "10) self.interface.send(packet.bytes()) # set initial general query timer timer = Timer(igmp_globals.QUERY_INTERVAL, self.general_query_timeout) timer.start()", "other querier present timer \"\"\" self.clear_other_querier_present_timer() other_querier_present_timer = Timer(igmp_globals.OTHER_QUERIER_PRESENT_INTERVAL, self.other_querier_present_timeout) other_querier_present_timer.start() self.other_querier_present_timer =", "the network self.interface = interface # state of the router (Querier/NonQuerier) self.interface_state =", "Membership Report packet \"\"\" igmp_group = packet.payload.group_address self.get_group_state(igmp_group).receive_v1_membership_report() def receive_v2_membership_report(self, packet: ReceivedPacket): \"\"\"", "to -> NonQuerier') ############################################ # group state methods ############################################ def get_group_state(self, group_ip): \"\"\"", "def receive_v1_membership_report(self, packet: ReceivedPacket): \"\"\" Received IGMP Version 1 Membership Report packet \"\"\"", "timer \"\"\" if self.general_query_timer is not None: self.general_query_timer.cancel() def set_other_querier_present_timer(self): \"\"\" Set other", "self.interface = interface # state of the router (Querier/NonQuerier) self.interface_state = Querier #", "self.clear_general_query_timer() general_query_timer = Timer(igmp_globals.QUERY_INTERVAL, self.general_query_timeout) general_query_timer.start() self.general_query_timer = general_query_timer def clear_general_query_timer(self): \"\"\" Stop", "def receive_leave_group(self, packet: ReceivedPacket): \"\"\" Received IGMP Leave packet \"\"\" igmp_group = packet.payload.group_address", "Received IGMP Version 1 Membership Report packet \"\"\" igmp_group = packet.payload.group_address self.get_group_state(igmp_group).receive_v1_membership_report() def", "def set_other_querier_present_timer(self): \"\"\" Set other querier present timer \"\"\" self.clear_other_querier_present_timer() other_querier_present_timer = Timer(igmp_globals.OTHER_QUERIER_PRESENT_INTERVAL,", "\"\"\" Received IGMP Membership Report packet \"\"\" igmp_group = packet.payload.group_address self.get_group_state(igmp_group).receive_v2_membership_report() def receive_leave_group(self,", "not None: self.general_query_timer.cancel() def set_other_querier_present_timer(self): \"\"\" Set other querier present timer \"\"\" self.clear_other_querier_present_timer()", "GroupState(self, group_ip) self.group_state[group_ip] = group_state return group_state def receive_v1_membership_report(self, packet: ReceivedPacket): \"\"\" Received", "packet.payload.group_address # process group specific query if igmp_group != \"0.0.0.0\" and igmp_group in", "IGMP Leave packet \"\"\" igmp_group = packet.payload.group_address self.get_group_state(igmp_group).receive_leave_group() def receive_query(self, packet: ReceivedPacket): \"\"\"", "igmp_group = packet.payload.group_address # process group specific query if igmp_group != \"0.0.0.0\" and", "= Timer(igmp_globals.QUERY_INTERVAL, self.general_query_timeout) general_query_timer.start() self.general_query_timer = general_query_timer def clear_general_query_timer(self): \"\"\" Stop general query", "Querier') else: self.interface_state = NonQuerier self.router_state_logger.debug('change querier state to -> NonQuerier') 
############################################ #", "data: bytes, address: str): self.interface.send(data, address) ############################################ # interface_state methods ############################################ def print_state(self):", "self.other_querier_present_timer = other_querier_present_timer def clear_other_querier_present_timer(self): \"\"\" Stop other querier present timer \"\"\" if", "def remove(self): \"\"\" Remove this IGMP interface Clear all state \"\"\" for group", "self.interface.send(packet.bytes()) # set initial general query timer timer = Timer(igmp_globals.QUERY_INTERVAL, self.general_query_timeout) timer.start() self.general_query_timer", "self.router_state_logger.debug('change querier state to -> NonQuerier') ############################################ # group state methods ############################################ def", "group_ip): \"\"\" Get object that monitors a given group (with group_ip IP address)", "set_other_querier_present_timer(self): \"\"\" Set other querier present timer \"\"\" self.clear_other_querier_present_timer() other_querier_present_timer = Timer(igmp_globals.OTHER_QUERIER_PRESENT_INTERVAL, self.other_querier_present_timeout)", "# send general query packet = PacketIGMPHeader(type=igmp_globals.MEMBERSHIP_QUERY, max_resp_time=igmp_globals.QUERY_RESPONSE_INTERVAL * 10) self.interface.send(packet.bytes()) # set", "Version 1 Membership Report packet \"\"\" igmp_group = packet.payload.group_address self.get_group_state(igmp_group).receive_v1_membership_report() def receive_v2_membership_report(self, packet:", "timer.start() self.general_query_timer = timer # present timer self.other_querier_present_timer = None # Send packet", "NonQuerier if TYPE_CHECKING: from igmp.InterfaceIGMP import InterfaceIGMP class RouterState(object): ROUTER_STATE_LOGGER = logging.getLogger('igmp.igmpv2.RouterState') def", "return group_state def receive_v1_membership_report(self, packet: ReceivedPacket): \"\"\" Received IGMP Version 1 Membership Report", "max_resp_time=igmp_globals.QUERY_RESPONSE_INTERVAL * 10) self.interface.send(packet.bytes()) # set initial general query timer timer = Timer(igmp_globals.QUERY_INTERVAL,", "InterfaceIGMP class RouterState(object): ROUTER_STATE_LOGGER = logging.getLogger('igmp.igmpv2.RouterState') def __init__(self, interface: 'InterfaceIGMP'): #logger logger_extra =", "logging.getLogger('igmp.igmpv2.RouterState') def __init__(self, interface: 'InterfaceIGMP'): #logger logger_extra = dict() logger_extra['vif'] = interface.vif_index logger_extra['interfacename']", "state to -> Querier') else: self.interface_state = NonQuerier self.router_state_logger.debug('change querier state to ->", "def change_interface_state(self, querier: bool): \"\"\" Change state regarding querier state machine (Querier/NonQuerier) \"\"\"", "Remove this IGMP interface Clear all state \"\"\" for group in self.group_state.values(): group.remove()", "Leave packet \"\"\" igmp_group = packet.payload.group_address self.get_group_state(igmp_group).receive_leave_group() def receive_query(self, packet: ReceivedPacket): \"\"\" Received", "= interface # state of the router (Querier/NonQuerier) self.interface_state = Querier # state", "self.general_query_timer = general_query_timer def clear_general_query_timer(self): \"\"\" Stop general query timer \"\"\" if self.general_query_timer", "monitors a given group (with group_ip IP address) \"\"\" with self.group_state_lock.genRlock(): if group_ip", "def clear_other_querier_present_timer(self): \"\"\" Stop other 
querier present timer \"\"\" if self.other_querier_present_timer is not", "Timer(igmp_globals.QUERY_INTERVAL, self.general_query_timeout) timer.start() self.general_query_timer = timer # present timer self.other_querier_present_timer = None #", "address) ############################################ # interface_state methods ############################################ def print_state(self): return self.interface_state.state_name() def set_general_query_timer(self): \"\"\"", "None: self.other_querier_present_timer.cancel() def general_query_timeout(self): \"\"\" General Query timer has expired \"\"\" self.interface_state.general_query_timeout(self) def", "Querier # state of each group # Key: GroupIPAddress, Value: GroupState object self.group_state", ".querier.Querier import Querier from .nonquerier.NonQuerier import NonQuerier if TYPE_CHECKING: from igmp.InterfaceIGMP import InterfaceIGMP", "# state of the router (Querier/NonQuerier) self.interface_state = Querier # state of each", "1 Membership Report packet \"\"\" igmp_group = packet.payload.group_address self.get_group_state(igmp_group).receive_v1_membership_report() def receive_v2_membership_report(self, packet: ReceivedPacket):", "of each group # Key: GroupIPAddress, Value: GroupState object self.group_state = {} self.group_state_lock", "self.other_querier_present_timer is not None: self.other_querier_present_timer.cancel() def general_query_timeout(self): \"\"\" General Query timer has expired", "querier state to -> NonQuerier') ############################################ # group state methods ############################################ def get_group_state(self,", "interface.interface_name self.router_state_logger = logging.LoggerAdapter(RouterState.ROUTER_STATE_LOGGER, logger_extra) # interface of the router connected to the", "= RWLockWrite() # send general query packet = PacketIGMPHeader(type=igmp_globals.MEMBERSHIP_QUERY, max_resp_time=igmp_globals.QUERY_RESPONSE_INTERVAL * 10) self.interface.send(packet.bytes())", "timer \"\"\" self.clear_general_query_timer() general_query_timer = Timer(igmp_globals.QUERY_INTERVAL, self.general_query_timeout) general_query_timer.start() self.general_query_timer = general_query_timer def clear_general_query_timer(self):", "self.router_state_logger.debug('change querier state to -> Querier') else: self.interface_state = NonQuerier self.router_state_logger.debug('change querier state", "'InterfaceIGMP'): #logger logger_extra = dict() logger_extra['vif'] = interface.vif_index logger_extra['interfacename'] = interface.interface_name self.router_state_logger =", "send general query packet = PacketIGMPHeader(type=igmp_globals.MEMBERSHIP_QUERY, max_resp_time=igmp_globals.QUERY_RESPONSE_INTERVAL * 10) self.interface.send(packet.bytes()) # set initial", "\"\"\" igmp_group = packet.payload.group_address self.get_group_state(igmp_group).receive_v1_membership_report() def receive_v2_membership_report(self, packet: ReceivedPacket): \"\"\" Received IGMP Membership", "interface of the router connected to the network self.interface = interface # state", "other_querier_present_timeout(self): \"\"\" Other Querier Present timer has expired \"\"\" self.interface_state.other_querier_present_timeout(self) def change_interface_state(self, querier:", "(Querier/NonQuerier) self.interface_state = Querier # state of each group # Key: GroupIPAddress, Value:", "packet.payload.group_address self.get_group_state(igmp_group).receive_v2_membership_report() def receive_leave_group(self, packet: ReceivedPacket): \"\"\" Received 
IGMP Leave packet \"\"\" igmp_group", "igmp_group = packet.payload.group_address self.get_group_state(igmp_group).receive_leave_group() def receive_query(self, packet: ReceivedPacket): \"\"\" Received IGMP Query packet", "from .GroupState import GroupState from .querier.Querier import Querier from .nonquerier.NonQuerier import NonQuerier if", "Value: GroupState object self.group_state = {} self.group_state_lock = RWLockWrite() # send general query", "self.group_state: group_state = self.group_state[group_ip] else: group_state = GroupState(self, group_ip) self.group_state[group_ip] = group_state return", "of the router connected to the network self.interface = interface # state of", "change_interface_state(self, querier: bool): \"\"\" Change state regarding querier state machine (Querier/NonQuerier) \"\"\" if", "\"\"\" self.interface_state.receive_query(self, packet) igmp_group = packet.payload.group_address # process group specific query if igmp_group", "= packet.payload.group_address # process group specific query if igmp_group != \"0.0.0.0\" and igmp_group", "get_group_state(self, group_ip): \"\"\" Get object that monitors a given group (with group_ip IP", "from threading import Timer import logging from igmp.packet.PacketIGMPHeader import PacketIGMPHeader from igmp.packet.ReceivedPacket import", "general query timer timer = Timer(igmp_globals.QUERY_INTERVAL, self.general_query_timeout) timer.start() self.general_query_timer = timer # present", "query timer \"\"\" self.clear_general_query_timer() general_query_timer = Timer(igmp_globals.QUERY_INTERVAL, self.general_query_timeout) general_query_timer.start() self.general_query_timer = general_query_timer def", "igmp_group = packet.payload.group_address self.get_group_state(igmp_group).receive_v1_membership_report() def receive_v2_membership_report(self, packet: ReceivedPacket): \"\"\" Received IGMP Membership Report", "specific query if igmp_group != \"0.0.0.0\" and igmp_group in self.group_state: max_response_time = packet.payload.max_resp_time", "with self.group_state_lock.genRlock(): if group_ip in self.group_state: return self.group_state[group_ip] with self.group_state_lock.genWlock(): if group_ip in", "packet) igmp_group = packet.payload.group_address # process group specific query if igmp_group != \"0.0.0.0\"", "def set_general_query_timer(self): \"\"\" Set general query timer \"\"\" self.clear_general_query_timer() general_query_timer = Timer(igmp_globals.QUERY_INTERVAL, self.general_query_timeout)", "connected to the network self.interface = interface # state of the router (Querier/NonQuerier)", "ReceivedPacket): \"\"\" Received IGMP Membership Report packet \"\"\" igmp_group = packet.payload.group_address self.get_group_state(igmp_group).receive_v2_membership_report() def", "None # Send packet via interface def send(self, data: bytes, address: str): self.interface.send(data,", "igmp_group = packet.payload.group_address self.get_group_state(igmp_group).receive_v2_membership_report() def receive_leave_group(self, packet: ReceivedPacket): \"\"\" Received IGMP Leave packet", "import ReceivedPacket from igmp.rwlock.RWLock import RWLockWrite from igmp.utils import TYPE_CHECKING from . 
import", "Send packet via interface def send(self, data: bytes, address: str): self.interface.send(data, address) ############################################", "= interface.vif_index logger_extra['interfacename'] = interface.interface_name self.router_state_logger = logging.LoggerAdapter(RouterState.ROUTER_STATE_LOGGER, logger_extra) # interface of the", "state of the router (Querier/NonQuerier) self.interface_state = Querier # state of each group", "# Send packet via interface def send(self, data: bytes, address: str): self.interface.send(data, address)", "set_general_query_timer(self): \"\"\" Set general query timer \"\"\" self.clear_general_query_timer() general_query_timer = Timer(igmp_globals.QUERY_INTERVAL, self.general_query_timeout) general_query_timer.start()", "def send(self, data: bytes, address: str): self.interface.send(data, address) ############################################ # interface_state methods ############################################", "RouterState(object): ROUTER_STATE_LOGGER = logging.getLogger('igmp.igmpv2.RouterState') def __init__(self, interface: 'InterfaceIGMP'): #logger logger_extra = dict() logger_extra['vif']", "other_querier_present_timer def clear_other_querier_present_timer(self): \"\"\" Stop other querier present timer \"\"\" if self.other_querier_present_timer is", "import PacketIGMPHeader from igmp.packet.ReceivedPacket import ReceivedPacket from igmp.rwlock.RWLock import RWLockWrite from igmp.utils import", "querier present timer \"\"\" self.clear_other_querier_present_timer() other_querier_present_timer = Timer(igmp_globals.OTHER_QUERIER_PRESENT_INTERVAL, self.other_querier_present_timeout) other_querier_present_timer.start() self.other_querier_present_timer = other_querier_present_timer", "packet \"\"\" igmp_group = packet.payload.group_address self.get_group_state(igmp_group).receive_v1_membership_report() def receive_v2_membership_report(self, packet: ReceivedPacket): \"\"\" Received IGMP", "PacketIGMPHeader from igmp.packet.ReceivedPacket import ReceivedPacket from igmp.rwlock.RWLock import RWLockWrite from igmp.utils import TYPE_CHECKING", "import RWLockWrite from igmp.utils import TYPE_CHECKING from . 
import igmp_globals from .GroupState import", "# set initial general query timer timer = Timer(igmp_globals.QUERY_INTERVAL, self.general_query_timeout) timer.start() self.general_query_timer =", "expired \"\"\" self.interface_state.other_querier_present_timeout(self) def change_interface_state(self, querier: bool): \"\"\" Change state regarding querier state", "= logging.getLogger('igmp.igmpv2.RouterState') def __init__(self, interface: 'InterfaceIGMP'): #logger logger_extra = dict() logger_extra['vif'] = interface.vif_index", "= PacketIGMPHeader(type=igmp_globals.MEMBERSHIP_QUERY, max_resp_time=igmp_globals.QUERY_RESPONSE_INTERVAL * 10) self.interface.send(packet.bytes()) # set initial general query timer timer", "and igmp_group in self.group_state: max_response_time = packet.payload.max_resp_time self.get_group_state(igmp_group).receive_group_specific_query(max_response_time) def remove(self): \"\"\" Remove this", "TYPE_CHECKING: from igmp.InterfaceIGMP import InterfaceIGMP class RouterState(object): ROUTER_STATE_LOGGER = logging.getLogger('igmp.igmpv2.RouterState') def __init__(self, interface:", "interface.vif_index logger_extra['interfacename'] = interface.interface_name self.router_state_logger = logging.LoggerAdapter(RouterState.ROUTER_STATE_LOGGER, logger_extra) # interface of the router", "= {} self.group_state_lock = RWLockWrite() # send general query packet = PacketIGMPHeader(type=igmp_globals.MEMBERSHIP_QUERY, max_resp_time=igmp_globals.QUERY_RESPONSE_INTERVAL", "\"\"\" Received IGMP Version 1 Membership Report packet \"\"\" igmp_group = packet.payload.group_address self.get_group_state(igmp_group).receive_v1_membership_report()", "Stop general query timer \"\"\" if self.general_query_timer is not None: self.general_query_timer.cancel() def set_other_querier_present_timer(self):", "query timer \"\"\" if self.general_query_timer is not None: self.general_query_timer.cancel() def set_other_querier_present_timer(self): \"\"\" Set", "Timer(igmp_globals.QUERY_INTERVAL, self.general_query_timeout) general_query_timer.start() self.general_query_timer = general_query_timer def clear_general_query_timer(self): \"\"\" Stop general query timer", "clear_general_query_timer(self): \"\"\" Stop general query timer \"\"\" if self.general_query_timer is not None: self.general_query_timer.cancel()", "logger_extra) # interface of the router connected to the network self.interface = interface", "\"0.0.0.0\" and igmp_group in self.group_state: max_response_time = packet.payload.max_resp_time self.get_group_state(igmp_group).receive_group_specific_query(max_response_time) def remove(self): \"\"\" Remove", "with self.group_state_lock.genWlock(): if group_ip in self.group_state: group_state = self.group_state[group_ip] else: group_state = GroupState(self,", "group_state = GroupState(self, group_ip) self.group_state[group_ip] = group_state return group_state def receive_v1_membership_report(self, packet: ReceivedPacket):", "= Timer(igmp_globals.QUERY_INTERVAL, self.general_query_timeout) timer.start() self.general_query_timer = timer # present timer self.other_querier_present_timer = None", "packet: ReceivedPacket): \"\"\" Received IGMP Version 1 Membership Report packet \"\"\" igmp_group =", "\"\"\" General Query timer has expired \"\"\" self.interface_state.general_query_timeout(self) def other_querier_present_timeout(self): \"\"\" Other Querier", "import logging from igmp.packet.PacketIGMPHeader import PacketIGMPHeader from igmp.packet.ReceivedPacket import ReceivedPacket from igmp.rwlock.RWLock 
import", "IGMP Membership Report packet \"\"\" igmp_group = packet.payload.group_address self.get_group_state(igmp_group).receive_v2_membership_report() def receive_leave_group(self, packet: ReceivedPacket):", "\"\"\" if self.general_query_timer is not None: self.general_query_timer.cancel() def set_other_querier_present_timer(self): \"\"\" Set other querier", "GroupState from .querier.Querier import Querier from .nonquerier.NonQuerier import NonQuerier if TYPE_CHECKING: from igmp.InterfaceIGMP", "group (with group_ip IP address) \"\"\" with self.group_state_lock.genRlock(): if group_ip in self.group_state: return", "logging from igmp.packet.PacketIGMPHeader import PacketIGMPHeader from igmp.packet.ReceivedPacket import ReceivedPacket from igmp.rwlock.RWLock import RWLockWrite", "self.other_querier_present_timer = None # Send packet via interface def send(self, data: bytes, address:", "other_querier_present_timer = Timer(igmp_globals.OTHER_QUERIER_PRESENT_INTERVAL, self.other_querier_present_timeout) other_querier_present_timer.start() self.other_querier_present_timer = other_querier_present_timer def clear_other_querier_present_timer(self): \"\"\" Stop other", "Received IGMP Membership Report packet \"\"\" igmp_group = packet.payload.group_address self.get_group_state(igmp_group).receive_v2_membership_report() def receive_leave_group(self, packet:", "group_state def receive_v1_membership_report(self, packet: ReceivedPacket): \"\"\" Received IGMP Version 1 Membership Report packet", "state of each group # Key: GroupIPAddress, Value: GroupState object self.group_state = {}", "def get_group_state(self, group_ip): \"\"\" Get object that monitors a given group (with group_ip", "receive_leave_group(self, packet: ReceivedPacket): \"\"\" Received IGMP Leave packet \"\"\" igmp_group = packet.payload.group_address self.get_group_state(igmp_group).receive_leave_group()", "self.general_query_timer = timer # present timer self.other_querier_present_timer = None # Send packet via", "= Timer(igmp_globals.OTHER_QUERIER_PRESENT_INTERVAL, self.other_querier_present_timeout) other_querier_present_timer.start() self.other_querier_present_timer = other_querier_present_timer def clear_other_querier_present_timer(self): \"\"\" Stop other querier", "set initial general query timer timer = Timer(igmp_globals.QUERY_INTERVAL, self.general_query_timeout) timer.start() self.general_query_timer = timer", "= None # Send packet via interface def send(self, data: bytes, address: str):", "\"\"\" Received IGMP Query packet \"\"\" self.interface_state.receive_query(self, packet) igmp_group = packet.payload.group_address # process", ".GroupState import GroupState from .querier.Querier import Querier from .nonquerier.NonQuerier import NonQuerier if TYPE_CHECKING:", "timer = Timer(igmp_globals.QUERY_INTERVAL, self.general_query_timeout) timer.start() self.general_query_timer = timer # present timer self.other_querier_present_timer =", "igmp_globals from .GroupState import GroupState from .querier.Querier import Querier from .nonquerier.NonQuerier import NonQuerier", "present timer self.other_querier_present_timer = None # Send packet via interface def send(self, data:", "-> NonQuerier') ############################################ # group state methods ############################################ def get_group_state(self, group_ip): \"\"\" Get", "General Query timer has expired \"\"\" self.interface_state.general_query_timeout(self) def other_querier_present_timeout(self): \"\"\" Other Querier Present", ".nonquerier.NonQuerier 
import NonQuerier if TYPE_CHECKING: from igmp.InterfaceIGMP import InterfaceIGMP class RouterState(object): ROUTER_STATE_LOGGER =", "\"\"\" if self.other_querier_present_timer is not None: self.other_querier_present_timer.cancel() def general_query_timeout(self): \"\"\" General Query timer", "(Querier/NonQuerier) \"\"\" if querier: self.interface_state = Querier self.router_state_logger.debug('change querier state to -> Querier')", "import Querier from .nonquerier.NonQuerier import NonQuerier if TYPE_CHECKING: from igmp.InterfaceIGMP import InterfaceIGMP class", "!= \"0.0.0.0\" and igmp_group in self.group_state: max_response_time = packet.payload.max_resp_time self.get_group_state(igmp_group).receive_group_specific_query(max_response_time) def remove(self): \"\"\"", "IGMP Version 1 Membership Report packet \"\"\" igmp_group = packet.payload.group_address self.get_group_state(igmp_group).receive_v1_membership_report() def receive_v2_membership_report(self,", "packet.payload.group_address self.get_group_state(igmp_group).receive_v1_membership_report() def receive_v2_membership_report(self, packet: ReceivedPacket): \"\"\" Received IGMP Membership Report packet \"\"\"", "is not None: self.other_querier_present_timer.cancel() def general_query_timeout(self): \"\"\" General Query timer has expired \"\"\"", "= logging.LoggerAdapter(RouterState.ROUTER_STATE_LOGGER, logger_extra) # interface of the router connected to the network self.interface", "present timer \"\"\" self.clear_other_querier_present_timer() other_querier_present_timer = Timer(igmp_globals.OTHER_QUERIER_PRESENT_INTERVAL, self.other_querier_present_timeout) other_querier_present_timer.start() self.other_querier_present_timer = other_querier_present_timer def", "\"\"\" with self.group_state_lock.genRlock(): if group_ip in self.group_state: return self.group_state[group_ip] with self.group_state_lock.genWlock(): if group_ip", "packet: ReceivedPacket): \"\"\" Received IGMP Query packet \"\"\" self.interface_state.receive_query(self, packet) igmp_group = packet.payload.group_address", "packet \"\"\" igmp_group = packet.payload.group_address self.get_group_state(igmp_group).receive_leave_group() def receive_query(self, packet: ReceivedPacket): \"\"\" Received IGMP", "Received IGMP Leave packet \"\"\" igmp_group = packet.payload.group_address self.get_group_state(igmp_group).receive_leave_group() def receive_query(self, packet: ReceivedPacket):", "query timer timer = Timer(igmp_globals.QUERY_INTERVAL, self.general_query_timeout) timer.start() self.general_query_timer = timer # present timer", "def receive_query(self, packet: ReceivedPacket): \"\"\" Received IGMP Query packet \"\"\" self.interface_state.receive_query(self, packet) igmp_group", "from .nonquerier.NonQuerier import NonQuerier if TYPE_CHECKING: from igmp.InterfaceIGMP import InterfaceIGMP class RouterState(object): ROUTER_STATE_LOGGER", "interface def send(self, data: bytes, address: str): self.interface.send(data, address) ############################################ # interface_state methods", "if group_ip in self.group_state: return self.group_state[group_ip] with self.group_state_lock.genWlock(): if group_ip in self.group_state: group_state", "= self.group_state[group_ip] else: group_state = GroupState(self, group_ip) self.group_state[group_ip] = group_state return group_state def", "= group_state return group_state def receive_v1_membership_report(self, packet: ReceivedPacket): \"\"\" Received IGMP Version 1", "the router (Querier/NonQuerier) self.interface_state = 
Querier # state of each group # Key:", "group_ip IP address) \"\"\" with self.group_state_lock.genRlock(): if group_ip in self.group_state: return self.group_state[group_ip] with", "\"\"\" Change state regarding querier state machine (Querier/NonQuerier) \"\"\" if querier: self.interface_state =", "def print_state(self): return self.interface_state.state_name() def set_general_query_timer(self): \"\"\" Set general query timer \"\"\" self.clear_general_query_timer()", "regarding querier state machine (Querier/NonQuerier) \"\"\" if querier: self.interface_state = Querier self.router_state_logger.debug('change querier", "router connected to the network self.interface = interface # state of the router", "receive_v2_membership_report(self, packet: ReceivedPacket): \"\"\" Received IGMP Membership Report packet \"\"\" igmp_group = packet.payload.group_address", "querier state machine (Querier/NonQuerier) \"\"\" if querier: self.interface_state = Querier self.router_state_logger.debug('change querier state", "that monitors a given group (with group_ip IP address) \"\"\" with self.group_state_lock.genRlock(): if", "packet: ReceivedPacket): \"\"\" Received IGMP Membership Report packet \"\"\" igmp_group = packet.payload.group_address self.get_group_state(igmp_group).receive_v2_membership_report()", "self.group_state[group_ip] with self.group_state_lock.genWlock(): if group_ip in self.group_state: group_state = self.group_state[group_ip] else: group_state =", "= timer # present timer self.other_querier_present_timer = None # Send packet via interface", "# present timer self.other_querier_present_timer = None # Send packet via interface def send(self,", "if group_ip in self.group_state: group_state = self.group_state[group_ip] else: group_state = GroupState(self, group_ip) self.group_state[group_ip]", "# group state methods ############################################ def get_group_state(self, group_ip): \"\"\" Get object that monitors", "self.other_querier_present_timeout) other_querier_present_timer.start() self.other_querier_present_timer = other_querier_present_timer def clear_other_querier_present_timer(self): \"\"\" Stop other querier present timer", "bool): \"\"\" Change state regarding querier state machine (Querier/NonQuerier) \"\"\" if querier: self.interface_state", "__init__(self, interface: 'InterfaceIGMP'): #logger logger_extra = dict() logger_extra['vif'] = interface.vif_index logger_extra['interfacename'] = interface.interface_name", "else: group_state = GroupState(self, group_ip) self.group_state[group_ip] = group_state return group_state def receive_v1_membership_report(self, packet:", "def __init__(self, interface: 'InterfaceIGMP'): #logger logger_extra = dict() logger_extra['vif'] = interface.vif_index logger_extra['interfacename'] =", "# Key: GroupIPAddress, Value: GroupState object self.group_state = {} self.group_state_lock = RWLockWrite() #", "state methods ############################################ def get_group_state(self, group_ip): \"\"\" Get object that monitors a given", "igmp.packet.PacketIGMPHeader import PacketIGMPHeader from igmp.packet.ReceivedPacket import ReceivedPacket from igmp.rwlock.RWLock import RWLockWrite from igmp.utils", "self.group_state = {} self.group_state_lock = RWLockWrite() # send general query packet = PacketIGMPHeader(type=igmp_globals.MEMBERSHIP_QUERY,", "timer # present timer self.other_querier_present_timer = None # Send packet via interface def", "(with group_ip IP address) \"\"\" with self.group_state_lock.genRlock(): if group_ip in 
self.group_state: return self.group_state[group_ip]", "via interface def send(self, data: bytes, address: str): self.interface.send(data, address) ############################################ # interface_state", "general query packet = PacketIGMPHeader(type=igmp_globals.MEMBERSHIP_QUERY, max_resp_time=igmp_globals.QUERY_RESPONSE_INTERVAL * 10) self.interface.send(packet.bytes()) # set initial general", "self.group_state_lock.genRlock(): if group_ip in self.group_state: return self.group_state[group_ip] with self.group_state_lock.genWlock(): if group_ip in self.group_state:", "threading import Timer import logging from igmp.packet.PacketIGMPHeader import PacketIGMPHeader from igmp.packet.ReceivedPacket import ReceivedPacket", "other querier present timer \"\"\" if self.other_querier_present_timer is not None: self.other_querier_present_timer.cancel() def general_query_timeout(self):", "self.interface_state.other_querier_present_timeout(self) def change_interface_state(self, querier: bool): \"\"\" Change state regarding querier state machine (Querier/NonQuerier)", "RWLockWrite from igmp.utils import TYPE_CHECKING from . import igmp_globals from .GroupState import GroupState", "state machine (Querier/NonQuerier) \"\"\" if querier: self.interface_state = Querier self.router_state_logger.debug('change querier state to", "NonQuerier') ############################################ # group state methods ############################################ def get_group_state(self, group_ip): \"\"\" Get object", "import igmp_globals from .GroupState import GroupState from .querier.Querier import Querier from .nonquerier.NonQuerier import", "ReceivedPacket): \"\"\" Received IGMP Version 1 Membership Report packet \"\"\" igmp_group = packet.payload.group_address", "= other_querier_present_timer def clear_other_querier_present_timer(self): \"\"\" Stop other querier present timer \"\"\" if self.other_querier_present_timer", "Membership Report packet \"\"\" igmp_group = packet.payload.group_address self.get_group_state(igmp_group).receive_v2_membership_report() def receive_leave_group(self, packet: ReceivedPacket): \"\"\"", "= general_query_timer def clear_general_query_timer(self): \"\"\" Stop general query timer \"\"\" if self.general_query_timer is", "querier: self.interface_state = Querier self.router_state_logger.debug('change querier state to -> Querier') else: self.interface_state =", "Key: GroupIPAddress, Value: GroupState object self.group_state = {} self.group_state_lock = RWLockWrite() # send", "class RouterState(object): ROUTER_STATE_LOGGER = logging.getLogger('igmp.igmpv2.RouterState') def __init__(self, interface: 'InterfaceIGMP'): #logger logger_extra = dict()", "from .querier.Querier import Querier from .nonquerier.NonQuerier import NonQuerier if TYPE_CHECKING: from igmp.InterfaceIGMP import", "timer has expired \"\"\" self.interface_state.general_query_timeout(self) def other_querier_present_timeout(self): \"\"\" Other Querier Present timer has", "group_ip in self.group_state: group_state = self.group_state[group_ip] else: group_state = GroupState(self, group_ip) self.group_state[group_ip] =", "\"\"\" self.clear_general_query_timer() general_query_timer = Timer(igmp_globals.QUERY_INTERVAL, self.general_query_timeout) general_query_timer.start() self.general_query_timer = general_query_timer def clear_general_query_timer(self): \"\"\"", "\"\"\" self.interface_state.other_querier_present_timeout(self) def change_interface_state(self, querier: bool): \"\"\" Change state regarding querier state 
machine", "\"\"\" Stop general query timer \"\"\" if self.general_query_timer is not None: self.general_query_timer.cancel() def", "else: self.interface_state = NonQuerier self.router_state_logger.debug('change querier state to -> NonQuerier') ############################################ # group", "router (Querier/NonQuerier) self.interface_state = Querier # state of each group # Key: GroupIPAddress,", "igmp_group != \"0.0.0.0\" and igmp_group in self.group_state: max_response_time = packet.payload.max_resp_time self.get_group_state(igmp_group).receive_group_specific_query(max_response_time) def remove(self):", "\"\"\" Remove this IGMP interface Clear all state \"\"\" for group in self.group_state.values():", "packet = PacketIGMPHeader(type=igmp_globals.MEMBERSHIP_QUERY, max_resp_time=igmp_globals.QUERY_RESPONSE_INTERVAL * 10) self.interface.send(packet.bytes()) # set initial general query timer", "\"\"\" igmp_group = packet.payload.group_address self.get_group_state(igmp_group).receive_v2_membership_report() def receive_leave_group(self, packet: ReceivedPacket): \"\"\" Received IGMP Leave", "\"\"\" igmp_group = packet.payload.group_address self.get_group_state(igmp_group).receive_leave_group() def receive_query(self, packet: ReceivedPacket): \"\"\" Received IGMP Query", "= NonQuerier self.router_state_logger.debug('change querier state to -> NonQuerier') ############################################ # group state methods", "methods ############################################ def get_group_state(self, group_ip): \"\"\" Get object that monitors a given group", "igmp.utils import TYPE_CHECKING from . import igmp_globals from .GroupState import GroupState from .querier.Querier", "timer \"\"\" if self.other_querier_present_timer is not None: self.other_querier_present_timer.cancel() def general_query_timeout(self): \"\"\" General Query", "in self.group_state: return self.group_state[group_ip] with self.group_state_lock.genWlock(): if group_ip in self.group_state: group_state = self.group_state[group_ip]", "############################################ def print_state(self): return self.interface_state.state_name() def set_general_query_timer(self): \"\"\" Set general query timer \"\"\"", "igmp.packet.ReceivedPacket import ReceivedPacket from igmp.rwlock.RWLock import RWLockWrite from igmp.utils import TYPE_CHECKING from .", "send(self, data: bytes, address: str): self.interface.send(data, address) ############################################ # interface_state methods ############################################ def", "self.interface_state.general_query_timeout(self) def other_querier_present_timeout(self): \"\"\" Other Querier Present timer has expired \"\"\" self.interface_state.other_querier_present_timeout(self) def", "self.group_state[group_ip] else: group_state = GroupState(self, group_ip) self.group_state[group_ip] = group_state return group_state def receive_v1_membership_report(self,", "from igmp.rwlock.RWLock import RWLockWrite from igmp.utils import TYPE_CHECKING from . 
import igmp_globals from", "packet \"\"\" igmp_group = packet.payload.group_address self.get_group_state(igmp_group).receive_v2_membership_report() def receive_leave_group(self, packet: ReceivedPacket): \"\"\" Received IGMP", "Querier from .nonquerier.NonQuerier import NonQuerier if TYPE_CHECKING: from igmp.InterfaceIGMP import InterfaceIGMP class RouterState(object):", "PacketIGMPHeader(type=igmp_globals.MEMBERSHIP_QUERY, max_resp_time=igmp_globals.QUERY_RESPONSE_INTERVAL * 10) self.interface.send(packet.bytes()) # set initial general query timer timer =", "timer self.other_querier_present_timer = None # Send packet via interface def send(self, data: bytes,", "address: str): self.interface.send(data, address) ############################################ # interface_state methods ############################################ def print_state(self): return self.interface_state.state_name()", "of the router (Querier/NonQuerier) self.interface_state = Querier # state of each group #", "clear_other_querier_present_timer(self): \"\"\" Stop other querier present timer \"\"\" if self.other_querier_present_timer is not None:", "if querier: self.interface_state = Querier self.router_state_logger.debug('change querier state to -> Querier') else: self.interface_state", "None: self.general_query_timer.cancel() def set_other_querier_present_timer(self): \"\"\" Set other querier present timer \"\"\" self.clear_other_querier_present_timer() other_querier_present_timer", "timer timer = Timer(igmp_globals.QUERY_INTERVAL, self.general_query_timeout) timer.start() self.general_query_timer = timer # present timer self.other_querier_present_timer", "if self.general_query_timer is not None: self.general_query_timer.cancel() def set_other_querier_present_timer(self): \"\"\" Set other querier present", "interface: 'InterfaceIGMP'): #logger logger_extra = dict() logger_extra['vif'] = interface.vif_index logger_extra['interfacename'] = interface.interface_name self.router_state_logger", "ReceivedPacket): \"\"\" Received IGMP Leave packet \"\"\" igmp_group = packet.payload.group_address self.get_group_state(igmp_group).receive_leave_group() def receive_query(self,", "Set general query timer \"\"\" self.clear_general_query_timer() general_query_timer = Timer(igmp_globals.QUERY_INTERVAL, self.general_query_timeout) general_query_timer.start() self.general_query_timer =", "initial general query timer timer = Timer(igmp_globals.QUERY_INTERVAL, self.general_query_timeout) timer.start() self.general_query_timer = timer #", "query if igmp_group != \"0.0.0.0\" and igmp_group in self.group_state: max_response_time = packet.payload.max_resp_time self.get_group_state(igmp_group).receive_group_specific_query(max_response_time)", "ReceivedPacket from igmp.rwlock.RWLock import RWLockWrite from igmp.utils import TYPE_CHECKING from . 
import igmp_globals", "# process group specific query if igmp_group != \"0.0.0.0\" and igmp_group in self.group_state:", "timer \"\"\" self.clear_other_querier_present_timer() other_querier_present_timer = Timer(igmp_globals.OTHER_QUERIER_PRESENT_INTERVAL, self.other_querier_present_timeout) other_querier_present_timer.start() self.other_querier_present_timer = other_querier_present_timer def clear_other_querier_present_timer(self):", "general_query_timeout(self): \"\"\" General Query timer has expired \"\"\" self.interface_state.general_query_timeout(self) def other_querier_present_timeout(self): \"\"\" Other", "if TYPE_CHECKING: from igmp.InterfaceIGMP import InterfaceIGMP class RouterState(object): ROUTER_STATE_LOGGER = logging.getLogger('igmp.igmpv2.RouterState') def __init__(self,", "general query timer \"\"\" if self.general_query_timer is not None: self.general_query_timer.cancel() def set_other_querier_present_timer(self): \"\"\"", "a given group (with group_ip IP address) \"\"\" with self.group_state_lock.genRlock(): if group_ip in", "bytes, address: str): self.interface.send(data, address) ############################################ # interface_state methods ############################################ def print_state(self): return", "interface_state methods ############################################ def print_state(self): return self.interface_state.state_name() def set_general_query_timer(self): \"\"\" Set general query", "receive_v1_membership_report(self, packet: ReceivedPacket): \"\"\" Received IGMP Version 1 Membership Report packet \"\"\" igmp_group", "import NonQuerier if TYPE_CHECKING: from igmp.InterfaceIGMP import InterfaceIGMP class RouterState(object): ROUTER_STATE_LOGGER = logging.getLogger('igmp.igmpv2.RouterState')", "given group (with group_ip IP address) \"\"\" with self.group_state_lock.genRlock(): if group_ip in self.group_state:", "Timer(igmp_globals.OTHER_QUERIER_PRESENT_INTERVAL, self.other_querier_present_timeout) other_querier_present_timer.start() self.other_querier_present_timer = other_querier_present_timer def clear_other_querier_present_timer(self): \"\"\" Stop other querier present", "state to -> NonQuerier') ############################################ # group state methods ############################################ def get_group_state(self, group_ip):", "= packet.payload.max_resp_time self.get_group_state(igmp_group).receive_group_specific_query(max_response_time) def remove(self): \"\"\" Remove this IGMP interface Clear all state", "self.general_query_timeout) timer.start() self.general_query_timer = timer # present timer self.other_querier_present_timer = None # Send", "import Timer import logging from igmp.packet.PacketIGMPHeader import PacketIGMPHeader from igmp.packet.ReceivedPacket import ReceivedPacket from", "* 10) self.interface.send(packet.bytes()) # set initial general query timer timer = Timer(igmp_globals.QUERY_INTERVAL, self.general_query_timeout)", "group state methods ############################################ def get_group_state(self, group_ip): \"\"\" Get object that monitors a", "############################################ # interface_state methods ############################################ def print_state(self): return self.interface_state.state_name() def set_general_query_timer(self): \"\"\" Set", "has expired \"\"\" self.interface_state.general_query_timeout(self) def other_querier_present_timeout(self): \"\"\" Other Querier Present timer has expired", "= interface.interface_name self.router_state_logger = 
logging.LoggerAdapter(RouterState.ROUTER_STATE_LOGGER, logger_extra) # interface of the router connected to", "logging.LoggerAdapter(RouterState.ROUTER_STATE_LOGGER, logger_extra) # interface of the router connected to the network self.interface =", "expired \"\"\" self.interface_state.general_query_timeout(self) def other_querier_present_timeout(self): \"\"\" Other Querier Present timer has expired \"\"\"", "import TYPE_CHECKING from . import igmp_globals from .GroupState import GroupState from .querier.Querier import", "from igmp.utils import TYPE_CHECKING from . import igmp_globals from .GroupState import GroupState from", "Querier self.router_state_logger.debug('change querier state to -> Querier') else: self.interface_state = NonQuerier self.router_state_logger.debug('change querier", "timer has expired \"\"\" self.interface_state.other_querier_present_timeout(self) def change_interface_state(self, querier: bool): \"\"\" Change state regarding", "if igmp_group != \"0.0.0.0\" and igmp_group in self.group_state: max_response_time = packet.payload.max_resp_time self.get_group_state(igmp_group).receive_group_specific_query(max_response_time) def", "self.interface.send(data, address) ############################################ # interface_state methods ############################################ def print_state(self): return self.interface_state.state_name() def set_general_query_timer(self):", "############################################ # group state methods ############################################ def get_group_state(self, group_ip): \"\"\" Get object that", "in self.group_state: group_state = self.group_state[group_ip] else: group_state = GroupState(self, group_ip) self.group_state[group_ip] = group_state", "Received IGMP Query packet \"\"\" self.interface_state.receive_query(self, packet) igmp_group = packet.payload.group_address # process group", "= packet.payload.group_address self.get_group_state(igmp_group).receive_v2_membership_report() def receive_leave_group(self, packet: ReceivedPacket): \"\"\" Received IGMP Leave packet \"\"\"", "# interface of the router connected to the network self.interface = interface #", "ReceivedPacket): \"\"\" Received IGMP Query packet \"\"\" self.interface_state.receive_query(self, packet) igmp_group = packet.payload.group_address #", "self.get_group_state(igmp_group).receive_v2_membership_report() def receive_leave_group(self, packet: ReceivedPacket): \"\"\" Received IGMP Leave packet \"\"\" igmp_group =", "self.group_state_lock = RWLockWrite() # send general query packet = PacketIGMPHeader(type=igmp_globals.MEMBERSHIP_QUERY, max_resp_time=igmp_globals.QUERY_RESPONSE_INTERVAL * 10)", "Set other querier present timer \"\"\" self.clear_other_querier_present_timer() other_querier_present_timer = Timer(igmp_globals.OTHER_QUERIER_PRESENT_INTERVAL, self.other_querier_present_timeout) other_querier_present_timer.start() self.other_querier_present_timer", "group # Key: GroupIPAddress, Value: GroupState object self.group_state = {} self.group_state_lock = RWLockWrite()", "packet \"\"\" self.interface_state.receive_query(self, packet) igmp_group = packet.payload.group_address # process group specific query if", "self.interface_state = Querier # state of each group # Key: GroupIPAddress, Value: GroupState", "\"\"\" Other Querier Present timer has expired \"\"\" self.interface_state.other_querier_present_timeout(self) def change_interface_state(self, querier: bool):", "from igmp.packet.ReceivedPacket import ReceivedPacket from igmp.rwlock.RWLock import 
RWLockWrite from igmp.utils import TYPE_CHECKING from", "\"\"\" Set other querier present timer \"\"\" self.clear_other_querier_present_timer() other_querier_present_timer = Timer(igmp_globals.OTHER_QUERIER_PRESENT_INTERVAL, self.other_querier_present_timeout) other_querier_present_timer.start()", "-> Querier') else: self.interface_state = NonQuerier self.router_state_logger.debug('change querier state to -> NonQuerier') ############################################", "self.group_state_lock.genWlock(): if group_ip in self.group_state: group_state = self.group_state[group_ip] else: group_state = GroupState(self, group_ip)", "general_query_timer = Timer(igmp_globals.QUERY_INTERVAL, self.general_query_timeout) general_query_timer.start() self.general_query_timer = general_query_timer def clear_general_query_timer(self): \"\"\" Stop general", "other_querier_present_timer.start() self.other_querier_present_timer = other_querier_present_timer def clear_other_querier_present_timer(self): \"\"\" Stop other querier present timer \"\"\"", "Stop other querier present timer \"\"\" if self.other_querier_present_timer is not None: self.other_querier_present_timer.cancel() def", "Query packet \"\"\" self.interface_state.receive_query(self, packet) igmp_group = packet.payload.group_address # process group specific query", "packet.payload.max_resp_time self.get_group_state(igmp_group).receive_group_specific_query(max_response_time) def remove(self): \"\"\" Remove this IGMP interface Clear all state \"\"\"", "not None: self.other_querier_present_timer.cancel() def general_query_timeout(self): \"\"\" General Query timer has expired \"\"\" self.interface_state.general_query_timeout(self)", "def other_querier_present_timeout(self): \"\"\" Other Querier Present timer has expired \"\"\" self.interface_state.other_querier_present_timeout(self) def change_interface_state(self,", "Querier Present timer has expired \"\"\" self.interface_state.other_querier_present_timeout(self) def change_interface_state(self, querier: bool): \"\"\" Change", "self.group_state: max_response_time = packet.payload.max_resp_time self.get_group_state(igmp_group).receive_group_specific_query(max_response_time) def remove(self): \"\"\" Remove this IGMP interface Clear", "GroupIPAddress, Value: GroupState object self.group_state = {} self.group_state_lock = RWLockWrite() # send general", "{} self.group_state_lock = RWLockWrite() # send general query packet = PacketIGMPHeader(type=igmp_globals.MEMBERSHIP_QUERY, max_resp_time=igmp_globals.QUERY_RESPONSE_INTERVAL *", "\"\"\" if querier: self.interface_state = Querier self.router_state_logger.debug('change querier state to -> Querier') else:", "self.general_query_timer.cancel() def set_other_querier_present_timer(self): \"\"\" Set other querier present timer \"\"\" self.clear_other_querier_present_timer() other_querier_present_timer =", "to -> Querier') else: self.interface_state = NonQuerier self.router_state_logger.debug('change querier state to -> NonQuerier')", "RWLockWrite() # send general query packet = PacketIGMPHeader(type=igmp_globals.MEMBERSHIP_QUERY, max_resp_time=igmp_globals.QUERY_RESPONSE_INTERVAL * 10) self.interface.send(packet.bytes()) #", "in self.group_state: max_response_time = packet.payload.max_resp_time self.get_group_state(igmp_group).receive_group_specific_query(max_response_time) def remove(self): \"\"\" Remove this IGMP interface", "Get object that monitors a given group (with group_ip IP address) \"\"\" with", "self.group_state: return self.group_state[group_ip] with 
self.group_state_lock.genWlock(): if group_ip in self.group_state: group_state = self.group_state[group_ip] else:", "# interface_state methods ############################################ def print_state(self): return self.interface_state.state_name() def set_general_query_timer(self): \"\"\" Set general", "\"\"\" Get object that monitors a given group (with group_ip IP address) \"\"\"", "igmp.InterfaceIGMP import InterfaceIGMP class RouterState(object): ROUTER_STATE_LOGGER = logging.getLogger('igmp.igmpv2.RouterState') def __init__(self, interface: 'InterfaceIGMP'): #logger", "Present timer has expired \"\"\" self.interface_state.other_querier_present_timeout(self) def change_interface_state(self, querier: bool): \"\"\" Change state", "address) \"\"\" with self.group_state_lock.genRlock(): if group_ip in self.group_state: return self.group_state[group_ip] with self.group_state_lock.genWlock(): if", "querier: bool): \"\"\" Change state regarding querier state machine (Querier/NonQuerier) \"\"\" if querier:", "self.get_group_state(igmp_group).receive_group_specific_query(max_response_time) def remove(self): \"\"\" Remove this IGMP interface Clear all state \"\"\" for", "ROUTER_STATE_LOGGER = logging.getLogger('igmp.igmpv2.RouterState') def __init__(self, interface: 'InterfaceIGMP'): #logger logger_extra = dict() logger_extra['vif'] =", "import GroupState from .querier.Querier import Querier from .nonquerier.NonQuerier import NonQuerier if TYPE_CHECKING: from", "interface # state of the router (Querier/NonQuerier) self.interface_state = Querier # state of", "each group # Key: GroupIPAddress, Value: GroupState object self.group_state = {} self.group_state_lock =", "return self.interface_state.state_name() def set_general_query_timer(self): \"\"\" Set general query timer \"\"\" self.clear_general_query_timer() general_query_timer =", "object self.group_state = {} self.group_state_lock = RWLockWrite() # send general query packet =", "has expired \"\"\" self.interface_state.other_querier_present_timeout(self) def change_interface_state(self, querier: bool): \"\"\" Change state regarding querier", "GroupState object self.group_state = {} self.group_state_lock = RWLockWrite() # send general query packet", "query packet = PacketIGMPHeader(type=igmp_globals.MEMBERSHIP_QUERY, max_resp_time=igmp_globals.QUERY_RESPONSE_INTERVAL * 10) self.interface.send(packet.bytes()) # set initial general query", "Other Querier Present timer has expired \"\"\" self.interface_state.other_querier_present_timeout(self) def change_interface_state(self, querier: bool): \"\"\"", "the router connected to the network self.interface = interface # state of the", "group_state return group_state def receive_v1_membership_report(self, packet: ReceivedPacket): \"\"\" Received IGMP Version 1 Membership", "self.get_group_state(igmp_group).receive_v1_membership_report() def receive_v2_membership_report(self, packet: ReceivedPacket): \"\"\" Received IGMP Membership Report packet \"\"\" igmp_group", "self.interface_state = Querier self.router_state_logger.debug('change querier state to -> Querier') else: self.interface_state = NonQuerier", "machine (Querier/NonQuerier) \"\"\" if querier: self.interface_state = Querier self.router_state_logger.debug('change querier state to ->", "process group specific query if igmp_group != \"0.0.0.0\" and igmp_group in self.group_state: max_response_time", "IGMP Query packet \"\"\" self.interface_state.receive_query(self, packet) igmp_group = packet.payload.group_address # process group specific", 
"\"\"\" Stop other querier present timer \"\"\" if self.other_querier_present_timer is not None: self.other_querier_present_timer.cancel()", "from . import igmp_globals from .GroupState import GroupState from .querier.Querier import Querier from", "to the network self.interface = interface # state of the router (Querier/NonQuerier) self.interface_state", "self.general_query_timeout) general_query_timer.start() self.general_query_timer = general_query_timer def clear_general_query_timer(self): \"\"\" Stop general query timer \"\"\"", "present timer \"\"\" if self.other_querier_present_timer is not None: self.other_querier_present_timer.cancel() def general_query_timeout(self): \"\"\" General", "def clear_general_query_timer(self): \"\"\" Stop general query timer \"\"\" if self.general_query_timer is not None:", "packet: ReceivedPacket): \"\"\" Received IGMP Leave packet \"\"\" igmp_group = packet.payload.group_address self.get_group_state(igmp_group).receive_leave_group() def", "\"\"\" Received IGMP Leave packet \"\"\" igmp_group = packet.payload.group_address self.get_group_state(igmp_group).receive_leave_group() def receive_query(self, packet:", "self.interface_state.receive_query(self, packet) igmp_group = packet.payload.group_address # process group specific query if igmp_group !=", "= packet.payload.group_address self.get_group_state(igmp_group).receive_leave_group() def receive_query(self, packet: ReceivedPacket): \"\"\" Received IGMP Query packet \"\"\"" ]
[ "utils.model import BaseModule from utils import globalvar from utils.predo import GetVersion class Controller:", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "self.max_stripe_size_bytes, \"MemorySizeMiB\": self.memory_size_mib, \"SupportedRAIDLevels\": self.supported_raid_levels, \"DDRECCCount\": self.ddrecc_count, \"TemperatureCelsius\": self.temperature_celsius, \"PackageVersion\": self.package_version } def", "try: self._get_health_info(client) finally: if client.cookie: client.delete_session() return self.suc_list def _get_health_info(self, client): status_dict =", "client.get_systems_id() url = \"/redfish/v1/Systems/%s/Storages\" % systems_id resp = client.send_request(\"GET\", url) if (isinstance(resp, dict)", "= controller.get(\"CopyBackState\", None) if (controller.get(\"Oem\", None) and isinstance(controller[\"Oem\"].get(\"Public\", None), dict)): oem_info = controller[\"Oem\"][\"Public\"]", "dict(self): return { \"Name\": self.name, \"Location\": self.location, \"Manufacturer\": self.manufacturer, \"SerialNumber\": self.serial_number, \"State\": self.state,", "self.suc_list def _get_health_info(self, client): status_dict = { \"0\": \"OK\", \"1\": \"Caution\", \"2\": \"Warning\",", "not raid_members: self.suc_list.append(\"Success: raid card resource is empty\") return for member in raid_members:", "BaseModule from utils import globalvar from utils.predo import GetVersion class Controller: def __init__(self):", "dict) and \\ Constant.SUCCESS_0 == resp1.get(\"cc\"): raid_members = resp1.get(\"adapter\") if not raid_members: self.suc_list.append(", "if client.cookie: client.delete_session() return self.suc_list def _get_health_info(self, client): status_dict = { \"0\": \"OK\",", "None self.firmware_version = None self.maintain_pd_fail_history = None self.copy_back_state = None self.jbod_state = None", "None) self.package_version = oem_info.get(\"PackageVersion\", None) self.min_stripe_size_bytes = oem_info.get(\"MinStripeSizeBytes\", None) self.max_stripe_size_bytes = oem_info.get(\"MaxStripeSizeBytes\", None)", "else: self.ddrecc_count = controller.get(\"DDRECCCount\", None) self.memory_size_mib = oem_info.get(\"MemorySizeMiB\", None) if oem_info.get(\"SupportedRAIDLevels\", None) is", "= member.get(\"slot\", None) ctrl.member_id = member.get(\"device_id\", None) ctrl.model = name ctrl.memory_size_mib = \\", "\"OverallHealth\": self.overall_health, \"Maximum\": None, \"Raids\": self.raids } @GetVersion() def run(self, args): is_adapt_b01 =", "def pack_raid_resource(self, resp): self.name = resp.get(\"Name\", None) raid_ctrls = resp.get(\"StorageControllers\", None) if isinstance(raid_ctrls,", "= controller[\"Oem\"][\"Public\"] self.jbod_state = oem_info.get(\"JBODState\", None) self.package_version = oem_info.get(\"PackageVersion\", None) self.min_stripe_size_bytes = oem_info.get(\"MinStripeSizeBytes\",", "raid_members: self.suc_list.append( \"Success: raid card resource is empty\") return raid = Raid() ctrl", "\"SerialNumber\": self.serial_number, \"State\": self.state, \"Health\": self.health, \"Controller\": self.controller } def pack_raid_resource(self, resp): self.name", "= client.send_request(\"GET\", url) if isinstance(resp1, dict) and \\ Constant.SUCCESS_0 == resp1.get(\"cc\"): raid_members =", "status_dict = { \"0\": \"OK\", \"1\": \"Caution\", \"2\": \"Warning\", \"3\": \"Critical\" } url", "\"Manufacturer\": self.manufacturer, \"SerialNumber\": 
self.serial_number, \"State\": self.state, \"Health\": self.health, \"Controller\": self.controller } def pack_raid_resource(self,", "super().__init__() self.overall_health = None self.maximum = None self.raids = [] @property def dict(self):", "= oem_info.get( \"MaintainPDFailHistory\", None) if self.copy_back_state is None: self.copy_back_state = oem_info.get(\"CopyBackState\", None) if", "Constant from utils.model import BaseModule from utils import globalvar from utils.predo import GetVersion", "this file except in compliance with the License. # You may obtain a", "None: self.ddrecc_count = oem_info.get(\"DDRECCCount\") else: self.ddrecc_count = controller.get(\"DDRECCCount\", None) self.memory_size_mib = oem_info.get(\"MemorySizeMiB\", None)", "= name raid.serial_number = raid_members.get(\"serial\") url = \"/api/system/pcie\" resp2 = client.send_request(\"GET\", url) if", "oem_info.get(\"MaxStripeSizeBytes\", None) if self.maintain_pd_fail_history is None: self.maintain_pd_fail_history = oem_info.get( \"MaintainPDFailHistory\", None) if self.copy_back_state", "if not raid_members: self.suc_list.append( \"Success: raid card resource is empty\") return raid =", "= None self.jbod_state = None self.min_stripe_size_bytes = None self.max_stripe_size_bytes = None self.memory_size_mib =", "if self.copy_back_state is None: self.copy_back_state = oem_info.get(\"CopyBackState\", None) if oem_info.get(\"DDRECCCount\", None) is not", "None) and isinstance(controller[\"Oem\"].get(\"Public\", None), dict)): oem_info = controller[\"Oem\"][\"Public\"] self.jbod_state = oem_info.get(\"JBODState\", None) self.package_version", "resp): self.name = resp.get(\"Name\", None) raid_ctrls = resp.get(\"StorageControllers\", None) if isinstance(raid_ctrls, list): for", "# Copyright 2021 New H3C Technologies Co., Ltd. # # Licensed under the", "is_adapt_b01 = globalvar.IS_ADAPT_B01 if is_adapt_b01: client = RestfulClient(args) try: self._get_b01_raid(client) finally: if client.cookie:", "name = raid_members.get(\"type\") raid.name = name raid.serial_number = raid_members.get(\"serial\") url = \"/api/system/pcie\" resp2", "def __init__(self): self.name = None self.location = \"mainboard\" self.manufacturer = None self.serial_number =", "= [] @property def dict(self): return { \"OverallHealth\": self.overall_health, \"Maximum\": None, \"Raids\": self.raids", "raid card resource is empty\") return for member in raid_members: url = member.get(\"@odata.id\",", "= controller[\"Status\"].get(\"State\", None) self.health = controller[\"Status\"].get(\"Health\", None) class GetRaid(BaseModule): def __init__(self): super().__init__() self.overall_health", "= \"mainboard\" self.manufacturer = None self.serial_number = None self.state = None self.health =", "oem_info.get(\"SASAddress\", None) self.temperature_celsius = controller.get(\"TemperatureCelsius\", None) class Raid: def __init__(self): self.name = None", "ANY KIND, either express or implied. 
# See the License for the specific", "if oem_info.get(\"DDRECCCount\", None) is not None: self.ddrecc_count = oem_info.get(\"DDRECCCount\") else: self.ddrecc_count = controller.get(\"DDRECCCount\",", "resp.get(\"cc\", None)): raid_health = status_dict.get(str(resp.get(\"disk\", None)), None) self.overall_health = raid_health else: self.err_list.append(\"Failure: failed", "raid_ctrls = resp.get(\"StorageControllers\", None) if isinstance(raid_ctrls, list): for controller in raid_ctrls: ctrl =", "if isinstance(resp1, dict) and \\ Constant.SUCCESS_0 == resp1.get(\"cc\"): raid_members = resp1.get(\"adapter\") if not", "to get raid card\" \" collection information\") raise FailException(*self.err_list) def _get_b01_raid(self, client): try:", "information\") raise FailException(*self.err_list) def _get_b01_raid(self, client): try: url = \"/api/settings/storageinfo\" resp1 = client.send_request(\"GET\",", "ctrl.member_id = member.get(\"device_id\", None) ctrl.model = name ctrl.memory_size_mib = \\ raid_members.get(\"ddr_size\", None) raid.controller.append(ctrl)", "RedfishClient, RestfulClient from utils.common import Constant from utils.model import BaseModule from utils import", "None)) self.firmware_version = controller.get(\"FirmwareVersion\", None) self.maintain_pd_fail_history = controller.get(\"MaintainPDFailHistory\") self.copy_back_state = controller.get(\"CopyBackState\", None) if", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "License. ### # -*- coding: utf-8 -*- from exception.ToolException import FailException from utils.client", "None self.manufacturer = None self.model = None self.supported_device_protocols = None self.sas_address = None", "self.health, \"Controller\": self.controller } def pack_raid_resource(self, resp): self.name = resp.get(\"Name\", None) raid_ctrls =", "self.min_stripe_size_bytes = None self.max_stripe_size_bytes = None self.memory_size_mib = None self.supported_raid_levels = None self.ddrecc_count", "self.name = None self.location = \"mainboard\" self.manufacturer = None self.serial_number = None self.state", "None: self.maintain_pd_fail_history = oem_info.get( \"MaintainPDFailHistory\", None) if self.copy_back_state is None: self.copy_back_state = oem_info.get(\"CopyBackState\",", "None, \"Raids\": self.raids } @GetVersion() def run(self, args): is_adapt_b01 = globalvar.IS_ADAPT_B01 if is_adapt_b01:", "collection information\") raise FailException(*self.err_list) def _get_b01_raid(self, client): try: url = \"/api/settings/storageinfo\" resp1 =", "raid = Raid() ctrl = Controller() name = raid_members.get(\"type\") raid.name = name raid.serial_number", "self.name, \"Location\": self.location, \"Manufacturer\": self.manufacturer, \"SerialNumber\": self.serial_number, \"State\": self.state, \"Health\": self.health, \"Controller\": self.controller", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "self.package_version = oem_info.get(\"PackageVersion\", None) self.min_stripe_size_bytes = oem_info.get(\"MinStripeSizeBytes\", None) self.max_stripe_size_bytes = oem_info.get(\"MaxStripeSizeBytes\", None) if", "url = \"/api/settings/storageinfo\" resp1 = client.send_request(\"GET\", url) if isinstance(resp1, dict) and \\ Constant.SUCCESS_0", "\"Manufacturer\": self.manufacturer, \"Model\": self.model, \"SupportedDeviceProtocols\": self.supported_device_protocols, \"SASAddress\": self.sas_address, \"FirmwareVersion\": self.firmware_version, \"MaintainPDFailHistory\": self.maintain_pd_fail_history, 
\"CopyBackState\":", "\\ Constant.SUCCESS_0 == resp1.get(\"cc\"): raid_members = resp1.get(\"adapter\") if not raid_members: self.suc_list.append( \"Success: raid", "} @GetVersion() def run(self, args): is_adapt_b01 = globalvar.IS_ADAPT_B01 if is_adapt_b01: client = RestfulClient(args)", "client = RestfulClient(args) try: self._get_health_info(client) finally: if client.cookie: client.delete_session() return self.suc_list def _get_health_info(self,", "\"2\": \"Warning\", \"3\": \"Critical\" } url = \"/api/health_info\" resp = client.send_request(\"GET\", url) if", "= RestfulClient(args) try: self._get_health_info(client) finally: if client.cookie: client.delete_session() return self.suc_list def _get_health_info(self, client):", "None) ctrl.model = name ctrl.memory_size_mib = \\ raid_members.get(\"ddr_size\", None) raid.controller.append(ctrl) self.raids.append(raid) else: self.err_list.append(\"Failure:", "OF ANY KIND, either express or implied. # See the License for the", "controller.get(\"Name\", None) self.supported_device_protocols = ( controller.get(\"SupportedDeviceProtocols\", None)) self.firmware_version = controller.get(\"FirmwareVersion\", None) self.maintain_pd_fail_history =", "### # -*- coding: utf-8 -*- from exception.ToolException import FailException from utils.client import", "is not None: self.supported_raid_levels = ( \", \".join(oem_info[\"SupportedRAIDLevels\"])) self.sas_address = oem_info.get(\"SASAddress\", None) self.temperature_celsius", "= \"/api/health_info\" resp = client.send_request(\"GET\", url) if (isinstance(resp, dict) and Constant.SUCCESS_0 == resp.get(\"cc\",", "} url = \"/api/health_info\" resp = client.send_request(\"GET\", url) if (isinstance(resp, dict) and Constant.SUCCESS_0", "= client.send_request(\"GET\", url) if isinstance(resp2, dict) and Constant.SUCCESS_0 == \\ resp1.get(\"cc\"): pcie_members =", "status_dict.get(str(resp.get(\"disk\", None)), None) self.overall_health = raid_health else: self.err_list.append(\"Failure: failed to get overall health", "args): is_adapt_b01 = globalvar.IS_ADAPT_B01 if is_adapt_b01: client = RestfulClient(args) try: self._get_b01_raid(client) finally: if", "and resp.get(\"status_code\", None) in Constant.SUC_CODE): raid_members = resp[\"resource\"].get(\"Members\", None) if not raid_members: self.suc_list.append(\"Success:", "raid_members = resp[\"resource\"].get(\"Members\", None) if not raid_members: self.suc_list.append(\"Success: raid card resource is empty\")", "utils import globalvar from utils.predo import GetVersion class Controller: def __init__(self): self.member_id =", "= resp2.get(\"pcie_info\", None) for member in pcie_members: if member.get(\"produce_name\") == name: raid.location =", "url = member.get(\"@odata.id\", None) resp = client.send_request(\"GET\", url) if (isinstance(resp, dict) and resp.get(\"status_code\",", "New H3C Technologies Co., Ltd. 
# # Licensed under the Apache License, Version", "self.min_stripe_size_bytes, \"MaxStripeSizeBytes\": self.max_stripe_size_bytes, \"MemorySizeMiB\": self.memory_size_mib, \"SupportedRAIDLevels\": self.supported_raid_levels, \"DDRECCCount\": self.ddrecc_count, \"TemperatureCelsius\": self.temperature_celsius, \"PackageVersion\": self.package_version", "card resource is empty\") return for member in raid_members: url = member.get(\"@odata.id\", None)", "= None self.model = None self.supported_device_protocols = None self.sas_address = None self.firmware_version =", "self.temperature_celsius, \"PackageVersion\": self.package_version } def pack_ctrl(self, controller): self.member_id = controller.get(\"MemberId\", None) self.manufacturer =", "__init__(self): super().__init__() self.overall_health = None self.maximum = None self.raids = [] @property def", "= oem_info.get(\"MemorySizeMiB\", None) if oem_info.get(\"SupportedRAIDLevels\", None) is not None: self.supported_raid_levels = ( \",", "return raid = Raid() ctrl = Controller() name = raid_members.get(\"type\") raid.name = name", "from utils.model import BaseModule from utils import globalvar from utils.predo import GetVersion class", "= RestfulClient(args) try: self._get_b01_raid(client) finally: if client.cookie: client.delete_session() else: client = RedfishClient(args) self._get_raid(client)", "controller.get(\"Manufacturer\", None) self.model = controller.get(\"Name\", None) self.supported_device_protocols = ( controller.get(\"SupportedDeviceProtocols\", None)) self.firmware_version =", "= None self.maintain_pd_fail_history = None self.copy_back_state = None self.jbod_state = None self.min_stripe_size_bytes =", "None) if self.maintain_pd_fail_history is None: self.maintain_pd_fail_history = oem_info.get( \"MaintainPDFailHistory\", None) if self.copy_back_state is", "\" \"details\") raise FailException(*self.err_list) else: self.err_list.append(\"Failure: failed to get raid card\" \" collection", "= controller.get(\"FirmwareVersion\", None) self.maintain_pd_fail_history = controller.get(\"MaintainPDFailHistory\") self.copy_back_state = controller.get(\"CopyBackState\", None) if (controller.get(\"Oem\", None)", "dict) and Constant.SUCCESS_0 == \\ resp1.get(\"cc\"): pcie_members = resp2.get(\"pcie_info\", None) for member in", "None) class Raid: def __init__(self): self.name = None self.location = \"mainboard\" self.manufacturer =", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "if not raid_members: self.suc_list.append(\"Success: raid card resource is empty\") return for member in", "resp2 = client.send_request(\"GET\", url) if isinstance(resp2, dict) and Constant.SUCCESS_0 == \\ resp1.get(\"cc\"): pcie_members", "raid_members.get(\"serial\") url = \"/api/system/pcie\" resp2 = client.send_request(\"GET\", url) if isinstance(resp2, dict) and Constant.SUCCESS_0", "= controller.get(\"Manufacturer\", None) if controller.get(\"Status\", None): self.state = controller[\"Status\"].get(\"State\", None) self.health = controller[\"Status\"].get(\"Health\",", "self.min_stripe_size_bytes = oem_info.get(\"MinStripeSizeBytes\", None) self.max_stripe_size_bytes = oem_info.get(\"MaxStripeSizeBytes\", None) if self.maintain_pd_fail_history is None: self.maintain_pd_fail_history", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "# -*- coding: utf-8 -*- from exception.ToolException import FailException from utils.client import RedfishClient,", 
"self.err_list.append(\"Failure: failed to get raid card\" \" collection information\") raise FailException(*self.err_list) finally: if", "\"/api/settings/storageinfo\" resp1 = client.send_request(\"GET\", url) if isinstance(resp1, dict) and \\ Constant.SUCCESS_0 == resp1.get(\"cc\"):", "in raid_members: url = member.get(\"@odata.id\", None) resp = client.send_request(\"GET\", url) if (isinstance(resp, dict)", "url) if isinstance(resp1, dict) and \\ Constant.SUCCESS_0 == resp1.get(\"cc\"): raid_members = resp1.get(\"adapter\") if", "\"/api/health_info\" resp = client.send_request(\"GET\", url) if (isinstance(resp, dict) and Constant.SUCCESS_0 == resp.get(\"cc\", None)):", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "= None self.maximum = None self.raids = [] @property def dict(self): return {", "\"Maximum\": None, \"Raids\": self.raids } @GetVersion() def run(self, args): is_adapt_b01 = globalvar.IS_ADAPT_B01 if", "= None self.min_stripe_size_bytes = None self.max_stripe_size_bytes = None self.memory_size_mib = None self.supported_raid_levels =", "{ \"Name\": self.name, \"Location\": self.location, \"Manufacturer\": self.manufacturer, \"SerialNumber\": self.serial_number, \"State\": self.state, \"Health\": self.health,", "RedfishClient(args) self._get_raid(client) if self.suc_list: return self.suc_list client = RestfulClient(args) try: self._get_health_info(client) finally: if", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "is not None: self.ddrecc_count = oem_info.get(\"DDRECCCount\") else: self.ddrecc_count = controller.get(\"DDRECCCount\", None) self.memory_size_mib =", "\"TemperatureCelsius\": self.temperature_celsius, \"PackageVersion\": self.package_version } def pack_ctrl(self, controller): self.member_id = controller.get(\"MemberId\", None) self.manufacturer", "None) self.manufacturer = controller.get(\"Manufacturer\", None) self.model = controller.get(\"Name\", None) self.supported_device_protocols = ( controller.get(\"SupportedDeviceProtocols\",", "FailException(*self.err_list) else: self.err_list.append(\"Failure: failed to get raid card\" \" collection information\") raise FailException(*self.err_list)", "self.model, \"SupportedDeviceProtocols\": self.supported_device_protocols, \"SASAddress\": self.sas_address, \"FirmwareVersion\": self.firmware_version, \"MaintainPDFailHistory\": self.maintain_pd_fail_history, \"CopyBackState\": self.copy_back_state, \"JBODState\": self.jbod_state,", "oem_info.get(\"DDRECCCount\", None) is not None: self.ddrecc_count = oem_info.get(\"DDRECCCount\") else: self.ddrecc_count = controller.get(\"DDRECCCount\", None)", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "raid_members: url = member.get(\"@odata.id\", None) resp = client.send_request(\"GET\", url) if (isinstance(resp, dict) and", "resource is empty\") return raid = Raid() ctrl = Controller() name = raid_members.get(\"type\")", "def run(self, args): is_adapt_b01 = globalvar.IS_ADAPT_B01 if is_adapt_b01: client = RestfulClient(args) try: self._get_b01_raid(client)", "\"CopyBackState\": self.copy_back_state, \"JBODState\": self.jbod_state, \"MinStripeSizeBytes\": self.min_stripe_size_bytes, \"MaxStripeSizeBytes\": self.max_stripe_size_bytes, \"MemorySizeMiB\": self.memory_size_mib, \"SupportedRAIDLevels\": self.supported_raid_levels, \"DDRECCCount\":", "required by applicable law or agreed to in writing, software # distributed under", "self.copy_back_state = None 
self.jbod_state = None self.min_stripe_size_bytes = None self.max_stripe_size_bytes = None self.memory_size_mib", "self.supported_device_protocols = None self.sas_address = None self.firmware_version = None self.maintain_pd_fail_history = None self.copy_back_state", "return self.suc_list def _get_health_info(self, client): status_dict = { \"0\": \"OK\", \"1\": \"Caution\", \"2\":", "applicable law or agreed to in writing, software # distributed under the License", "controller.get(\"SerialNumber\", None) self.manufacturer = controller.get(\"Manufacturer\", None) if controller.get(\"Status\", None): self.state = controller[\"Status\"].get(\"State\", None)", "(controller.get(\"Oem\", None) and isinstance(controller[\"Oem\"].get(\"Public\", None), dict)): oem_info = controller[\"Oem\"][\"Public\"] self.jbod_state = oem_info.get(\"JBODState\", None)", "class Raid: def __init__(self): self.name = None self.location = \"mainboard\" self.manufacturer = None", "self.raids } @GetVersion() def run(self, args): is_adapt_b01 = globalvar.IS_ADAPT_B01 if is_adapt_b01: client =", "self.copy_back_state, \"JBODState\": self.jbod_state, \"MinStripeSizeBytes\": self.min_stripe_size_bytes, \"MaxStripeSizeBytes\": self.max_stripe_size_bytes, \"MemorySizeMiB\": self.memory_size_mib, \"SupportedRAIDLevels\": self.supported_raid_levels, \"DDRECCCount\": self.ddrecc_count,", "return { \"Name\": self.name, \"Location\": self.location, \"Manufacturer\": self.manufacturer, \"SerialNumber\": self.serial_number, \"State\": self.state, \"Health\":", "import BaseModule from utils import globalvar from utils.predo import GetVersion class Controller: def", "oem_info = controller[\"Oem\"][\"Public\"] self.jbod_state = oem_info.get(\"JBODState\", None) self.package_version = oem_info.get(\"PackageVersion\", None) self.min_stripe_size_bytes =", "None) if self.copy_back_state is None: self.copy_back_state = oem_info.get(\"CopyBackState\", None) if oem_info.get(\"DDRECCCount\", None) is", "self.controller = [] @property def dict(self): return { \"Name\": self.name, \"Location\": self.location, \"Manufacturer\":", "dict(self): return { \"MemberId\": self.member_id, \"Manufacturer\": self.manufacturer, \"Model\": self.model, \"SupportedDeviceProtocols\": self.supported_device_protocols, \"SASAddress\": self.sas_address,", "= client.send_request(\"GET\", url) if (isinstance(resp, dict) and resp.get(\"status_code\", None) in Constant.SUC_CODE): raid_members =", "or agreed to in writing, software # distributed under the License is distributed", "raid = Raid() raid.pack_raid_resource(resp[\"resource\"]) self.raids.append(raid) else: self.err_list.append(\"Failure: failed to get raid card \"", "client.send_request(\"GET\", url) if (isinstance(resp, dict) and resp.get(\"status_code\", None) in Constant.SUC_CODE): raid_members = resp[\"resource\"].get(\"Members\",", "oem_info.get(\"PackageVersion\", None) self.min_stripe_size_bytes = oem_info.get(\"MinStripeSizeBytes\", None) self.max_stripe_size_bytes = oem_info.get(\"MaxStripeSizeBytes\", None) if self.maintain_pd_fail_history is", "= None self.memory_size_mib = None self.supported_raid_levels = None self.ddrecc_count = None self.temperature_celsius =", "CONDITIONS OF ANY KIND, either express or implied. 
###
# Copyright 2021 New H3C Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
# -*- coding: utf-8 -*-
from exception.ToolException import FailException
from utils.client import RedfishClient, RestfulClient
from utils.common import Constant
from utils.model import BaseModule
from utils import globalvar
from utils.predo import GetVersion


class Controller:

    def __init__(self):
        self.member_id = None
        self.manufacturer = None
        self.model = None
        self.supported_device_protocols = None
        self.sas_address = None
        self.firmware_version = None
        self.maintain_pd_fail_history = None
        self.copy_back_state = None
        self.jbod_state = None
        self.min_stripe_size_bytes = None
        self.max_stripe_size_bytes = None
        self.memory_size_mib = None
        self.supported_raid_levels = None
        self.ddrecc_count = None
        self.temperature_celsius = None
        self.package_version = None

    @property
    def dict(self):
        return {
            "MemberId": self.member_id,
            "Manufacturer": self.manufacturer,
            "Model": self.model,
            "SupportedDeviceProtocols": self.supported_device_protocols,
            "SASAddress": self.sas_address,
            "FirmwareVersion": self.firmware_version,
            "MaintainPDFailHistory": self.maintain_pd_fail_history,
            "CopyBackState": self.copy_back_state,
            "JBODState": self.jbod_state,
            "MinStripeSizeBytes": self.min_stripe_size_bytes,
            "MaxStripeSizeBytes": self.max_stripe_size_bytes,
            "MemorySizeMiB": self.memory_size_mib,
            "SupportedRAIDLevels": self.supported_raid_levels,
            "DDRECCCount": self.ddrecc_count,
            "TemperatureCelsius": self.temperature_celsius,
            "PackageVersion": self.package_version
        }

    def pack_ctrl(self, controller):
        self.member_id = controller.get("MemberId", None)
        self.manufacturer = controller.get("Manufacturer", None)
        self.model = controller.get("Name", None)
        self.supported_device_protocols = (
            controller.get("SupportedDeviceProtocols", None))
        self.firmware_version = controller.get("FirmwareVersion", None)
        self.maintain_pd_fail_history = controller.get("MaintainPDFailHistory")
        self.copy_back_state = controller.get("CopyBackState", None)
        if (controller.get("Oem", None) and
                isinstance(controller["Oem"].get("Public", None), dict)):
            oem_info = controller["Oem"]["Public"]
            self.jbod_state = oem_info.get("JBODState", None)
            self.package_version = oem_info.get("PackageVersion", None)
            self.min_stripe_size_bytes = oem_info.get(
                "MinStripeSizeBytes", None)
            self.max_stripe_size_bytes = oem_info.get(
                "MaxStripeSizeBytes", None)
            if self.maintain_pd_fail_history is None:
                self.maintain_pd_fail_history = oem_info.get(
                    "MaintainPDFailHistory", None)
            if self.copy_back_state is None:
                self.copy_back_state = oem_info.get("CopyBackState", None)
            if oem_info.get("DDRECCCount", None) is not None:
                self.ddrecc_count = oem_info.get("DDRECCCount")
            else:
                self.ddrecc_count = controller.get("DDRECCCount", None)
            self.memory_size_mib = oem_info.get("MemorySizeMiB", None)
            if oem_info.get("SupportedRAIDLevels", None) is not None:
                self.supported_raid_levels = (
                    ", ".join(oem_info["SupportedRAIDLevels"]))
            self.sas_address = oem_info.get("SASAddress", None)
        self.temperature_celsius = controller.get("TemperatureCelsius", None)


class Raid:

    def __init__(self):
        self.name = None
        self.location = "mainboard"
        self.manufacturer = None
        self.serial_number = None
        self.state = None
        self.health = None
        self.controller = []

    @property
    def dict(self):
        return {
            "Name": self.name,
            "Location": self.location,
            "Manufacturer": self.manufacturer,
            "SerialNumber": self.serial_number,
            "State": self.state,
            "Health": self.health,
            "Controller": self.controller
        }

    def pack_raid_resource(self, resp):
        self.name = resp.get("Name", None)
        raid_ctrls = resp.get("StorageControllers", None)
        if isinstance(raid_ctrls, list):
            for controller in raid_ctrls:
                ctrl = Controller()
                ctrl.pack_ctrl(controller)
                self.controller.append(ctrl)
                self.serial_number = controller.get("SerialNumber", None)
                self.manufacturer = controller.get("Manufacturer", None)
                if controller.get("Status", None):
                    self.state = controller["Status"].get("State", None)
                    self.health = controller["Status"].get("Health", None)


class GetRaid(BaseModule):

    def __init__(self):
        super().__init__()
        self.overall_health = None
        self.maximum = None
        self.raids = []

    @property
    def dict(self):
        return {
            "OverallHealth": self.overall_health,
            "Maximum": None,
            "Raids": self.raids
        }

    @GetVersion()
    def run(self, args):
        is_adapt_b01 = globalvar.IS_ADAPT_B01
        if is_adapt_b01:
            client = RestfulClient(args)
            try:
                self._get_b01_raid(client)
            finally:
                if client.cookie:
                    client.delete_session()
        else:
            client = RedfishClient(args)
            self._get_raid(client)
            if self.suc_list:
                return self.suc_list
            client = RestfulClient(args)
            try:
                self._get_health_info(client)
            finally:
                if client.cookie:
                    client.delete_session()
        return self.suc_list

    def _get_health_info(self, client):
        status_dict = {
            "0": "OK",
            "1": "Caution",
            "2": "Warning",
            "3": "Critical"
        }
        url = "/api/health_info"
        resp = client.send_request("GET", url)
        if (isinstance(resp, dict) and
                Constant.SUCCESS_0 == resp.get("cc", None)):
            raid_health = status_dict.get(str(resp.get("disk", None)), None)
            self.overall_health = raid_health
        else:
            self.err_list.append("Failure: failed to get overall health "
                                 "status information")
            raise FailException(*self.err_list)

    def _get_raid(self, client):
        systems_id = client.get_systems_id()
        url = "/redfish/v1/Systems/%s/Storages" % systems_id
        resp = client.send_request("GET", url)
        if (isinstance(resp, dict) and
                resp.get("status_code", None) in Constant.SUC_CODE):
            raid_members = resp["resource"].get("Members", None)
            if not raid_members:
                self.suc_list.append("Success: raid card resource is empty")
                return
            for member in raid_members:
                url = member.get("@odata.id", None)
                resp = client.send_request("GET", url)
                if (isinstance(resp, dict) and
                        resp.get("status_code", None) in Constant.SUC_CODE):
                    raid = Raid()
                    raid.pack_raid_resource(resp["resource"])
                    self.raids.append(raid)
                else:
                    self.err_list.append("Failure: failed to get raid card "
                                         "details")
                    raise FailException(*self.err_list)
        else:
            self.err_list.append("Failure: failed to get raid card"
                                 " collection information")
            raise FailException(*self.err_list)

    def _get_b01_raid(self, client):
        try:
            url = "/api/settings/storageinfo"
            resp1 = client.send_request("GET", url)
            if isinstance(resp1, dict) and \
                    Constant.SUCCESS_0 == resp1.get("cc"):
                raid_members = resp1.get("adapter")
                if not raid_members:
                    self.suc_list.append(
                        "Success: raid card resource is empty")
                    return
                raid = Raid()
                ctrl = Controller()
                name = raid_members.get("type")
                raid.name = name
                raid.serial_number = raid_members.get("serial")
                url = "/api/system/pcie"
                resp2 = client.send_request("GET", url)
                if isinstance(resp2, dict) and Constant.SUCCESS_0 == \
                        resp1.get("cc"):
                    pcie_members = resp2.get("pcie_info", None)
                    for member in pcie_members:
                        if member.get("produce_name") == name:
                            raid.location = member.get("slot", None)
                            ctrl.member_id = member.get("device_id", None)
                ctrl.model = name
                ctrl.memory_size_mib = \
                    raid_members.get("ddr_size", None)
                raid.controller.append(ctrl)
                self.raids.append(raid)
            else:
                self.err_list.append("Failure: failed to get raid card"
                                     " collection information")
                raise FailException(*self.err_list)
        finally:
            if client.cookie:
                client.delete_session()
# You may obtain a copy of", "def _get_b01_raid(self, client): try: url = \"/api/settings/storageinfo\" resp1 = client.send_request(\"GET\", url) if isinstance(resp1,", "\"DDRECCCount\": self.ddrecc_count, \"TemperatureCelsius\": self.temperature_celsius, \"PackageVersion\": self.package_version } def pack_ctrl(self, controller): self.member_id = controller.get(\"MemberId\",", "self.err_list.append(\"Failure: failed to get overall health \" \"status information\") raise FailException(*self.err_list) def _get_raid(self,", "= None self.manufacturer = None self.model = None self.supported_device_protocols = None self.sas_address =", "-*- coding: utf-8 -*- from exception.ToolException import FailException from utils.client import RedfishClient, RestfulClient", "coding: utf-8 -*- from exception.ToolException import FailException from utils.client import RedfishClient, RestfulClient from", "\"Health\": self.health, \"Controller\": self.controller } def pack_raid_resource(self, resp): self.name = resp.get(\"Name\", None) raid_ctrls", "= member.get(\"device_id\", None) ctrl.model = name ctrl.memory_size_mib = \\ raid_members.get(\"ddr_size\", None) raid.controller.append(ctrl) self.raids.append(raid)", "Constant.SUCCESS_0 == resp.get(\"cc\", None)): raid_health = status_dict.get(str(resp.get(\"disk\", None)), None) self.overall_health = raid_health else:", "(isinstance(resp, dict) and resp.get(\"status_code\", None) in Constant.SUC_CODE): raid_members = resp[\"resource\"].get(\"Members\", None) if not", "overall health \" \"status information\") raise FailException(*self.err_list) def _get_raid(self, client): systems_id = client.get_systems_id()", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. 
# You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "class GetRaid(BaseModule): def __init__(self): super().__init__() self.overall_health = None self.maximum = None self.raids =", "\"FirmwareVersion\": self.firmware_version, \"MaintainPDFailHistory\": self.maintain_pd_fail_history, \"CopyBackState\": self.copy_back_state, \"JBODState\": self.jbod_state, \"MinStripeSizeBytes\": self.min_stripe_size_bytes, \"MaxStripeSizeBytes\": self.max_stripe_size_bytes, \"MemorySizeMiB\":", "isinstance(raid_ctrls, list): for controller in raid_ctrls: ctrl = Controller() ctrl.pack_ctrl(controller) self.controller.append(ctrl) self.serial_number =", "\"status information\") raise FailException(*self.err_list) def _get_raid(self, client): systems_id = client.get_systems_id() url = \"/redfish/v1/Systems/%s/Storages\"", "raise FailException(*self.err_list) def _get_b01_raid(self, client): try: url = \"/api/settings/storageinfo\" resp1 = client.send_request(\"GET\", url)", "= oem_info.get(\"MaxStripeSizeBytes\", None) if self.maintain_pd_fail_history is None: self.maintain_pd_fail_history = oem_info.get( \"MaintainPDFailHistory\", None) if", "None) self.supported_device_protocols = ( controller.get(\"SupportedDeviceProtocols\", None)) self.firmware_version = controller.get(\"FirmwareVersion\", None) self.maintain_pd_fail_history = controller.get(\"MaintainPDFailHistory\")", "= None self.copy_back_state = None self.jbod_state = None self.min_stripe_size_bytes = None self.max_stripe_size_bytes =", "self.member_id = None self.manufacturer = None self.model = None self.supported_device_protocols = None self.sas_address", "{ \"0\": \"OK\", \"1\": \"Caution\", \"2\": \"Warning\", \"3\": \"Critical\" } url = \"/api/health_info\"", "\"Warning\", \"3\": \"Critical\" } url = \"/api/health_info\" resp = client.send_request(\"GET\", url) if (isinstance(resp,", "raid_health else: self.err_list.append(\"Failure: failed to get overall health \" \"status information\") raise FailException(*self.err_list)", "self.memory_size_mib = None self.supported_raid_levels = None self.ddrecc_count = None self.temperature_celsius = None self.package_version", "member in raid_members: url = member.get(\"@odata.id\", None) resp = client.send_request(\"GET\", url) if (isinstance(resp,", "in pcie_members: if member.get(\"produce_name\") == name: raid.location = member.get(\"slot\", None) ctrl.member_id = member.get(\"device_id\",", "ctrl.memory_size_mib = \\ raid_members.get(\"ddr_size\", None) raid.controller.append(ctrl) self.raids.append(raid) else: self.err_list.append(\"Failure: failed to get raid", "is None: self.maintain_pd_fail_history = oem_info.get( \"MaintainPDFailHistory\", None) if self.copy_back_state is None: self.copy_back_state =", "self.manufacturer, \"Model\": self.model, \"SupportedDeviceProtocols\": self.supported_device_protocols, \"SASAddress\": self.sas_address, \"FirmwareVersion\": self.firmware_version, \"MaintainPDFailHistory\": self.maintain_pd_fail_history, \"CopyBackState\": self.copy_back_state,", "self.temperature_celsius = controller.get(\"TemperatureCelsius\", None) class Raid: def __init__(self): self.name = None self.location =", "to get raid card\" \" collection information\") raise FailException(*self.err_list) finally: if client.cookie: client.delete_session()", "pcie_members: if member.get(\"produce_name\") == name: raid.location = member.get(\"slot\", None) ctrl.member_id = member.get(\"device_id\", None)", "= [] @property def dict(self): return { \"Name\": 
self.name, \"Location\": self.location, \"Manufacturer\": self.manufacturer,", "controller.get(\"FirmwareVersion\", None) self.maintain_pd_fail_history = controller.get(\"MaintainPDFailHistory\") self.copy_back_state = controller.get(\"CopyBackState\", None) if (controller.get(\"Oem\", None) and", "controller[\"Oem\"][\"Public\"] self.jbod_state = oem_info.get(\"JBODState\", None) self.package_version = oem_info.get(\"PackageVersion\", None) self.min_stripe_size_bytes = oem_info.get(\"MinStripeSizeBytes\", None)", "raid_ctrls: ctrl = Controller() ctrl.pack_ctrl(controller) self.controller.append(ctrl) self.serial_number = controller.get(\"SerialNumber\", None) self.manufacturer = controller.get(\"Manufacturer\",", "None) in Constant.SUC_CODE): raid_members = resp[\"resource\"].get(\"Members\", None) if not raid_members: self.suc_list.append(\"Success: raid card", "else: self.err_list.append(\"Failure: failed to get raid card \" \"details\") raise FailException(*self.err_list) else: self.err_list.append(\"Failure:", "( \", \".join(oem_info[\"SupportedRAIDLevels\"])) self.sas_address = oem_info.get(\"SASAddress\", None) self.temperature_celsius = controller.get(\"TemperatureCelsius\", None) class Raid:", "None self.memory_size_mib = None self.supported_raid_levels = None self.ddrecc_count = None self.temperature_celsius = None", "None) self.memory_size_mib = oem_info.get(\"MemorySizeMiB\", None) if oem_info.get(\"SupportedRAIDLevels\", None) is not None: self.supported_raid_levels =", "= None self.max_stripe_size_bytes = None self.memory_size_mib = None self.supported_raid_levels = None self.ddrecc_count =", "None) self.min_stripe_size_bytes = oem_info.get(\"MinStripeSizeBytes\", None) self.max_stripe_size_bytes = oem_info.get(\"MaxStripeSizeBytes\", None) if self.maintain_pd_fail_history is None:", "None): self.state = controller[\"Status\"].get(\"State\", None) self.health = controller[\"Status\"].get(\"Health\", None) class GetRaid(BaseModule): def __init__(self):", "= client.get_systems_id() url = \"/redfish/v1/Systems/%s/Storages\" % systems_id resp = client.send_request(\"GET\", url) if (isinstance(resp,", "failed to get raid card\" \" collection information\") raise FailException(*self.err_list) def _get_b01_raid(self, client):", "\" \"status information\") raise FailException(*self.err_list) def _get_raid(self, client): systems_id = client.get_systems_id() url =", "= None self.controller = [] @property def dict(self): return { \"Name\": self.name, \"Location\":", "self.manufacturer = controller.get(\"Manufacturer\", None) if controller.get(\"Status\", None): self.state = controller[\"Status\"].get(\"State\", None) self.health =", "= None self.serial_number = None self.state = None self.health = None self.controller =", "self.name = resp.get(\"Name\", None) raid_ctrls = resp.get(\"StorageControllers\", None) if isinstance(raid_ctrls, list): for controller", "= raid_members.get(\"serial\") url = \"/api/system/pcie\" resp2 = client.send_request(\"GET\", url) if isinstance(resp2, dict) and", "isinstance(controller[\"Oem\"].get(\"Public\", None), dict)): oem_info = controller[\"Oem\"][\"Public\"] self.jbod_state = oem_info.get(\"JBODState\", None) self.package_version = oem_info.get(\"PackageVersion\",", "controller[\"Status\"].get(\"Health\", None) class GetRaid(BaseModule): def __init__(self): super().__init__() self.overall_health = None self.maximum = None", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "client.send_request(\"GET\", url) if 
(isinstance(resp, dict) and Constant.SUCCESS_0 == resp.get(\"cc\", None)): raid_health = status_dict.get(str(resp.get(\"disk\",", "= oem_info.get(\"SASAddress\", None) self.temperature_celsius = controller.get(\"TemperatureCelsius\", None) class Raid: def __init__(self): self.name =", "raid_members: self.suc_list.append(\"Success: raid card resource is empty\") return for member in raid_members: url", "\"SupportedDeviceProtocols\": self.supported_device_protocols, \"SASAddress\": self.sas_address, \"FirmwareVersion\": self.firmware_version, \"MaintainPDFailHistory\": self.maintain_pd_fail_history, \"CopyBackState\": self.copy_back_state, \"JBODState\": self.jbod_state, \"MinStripeSizeBytes\":", "= oem_info.get(\"DDRECCCount\") else: self.ddrecc_count = controller.get(\"DDRECCCount\", None) self.memory_size_mib = oem_info.get(\"MemorySizeMiB\", None) if oem_info.get(\"SupportedRAIDLevels\",", "\"Model\": self.model, \"SupportedDeviceProtocols\": self.supported_device_protocols, \"SASAddress\": self.sas_address, \"FirmwareVersion\": self.firmware_version, \"MaintainPDFailHistory\": self.maintain_pd_fail_history, \"CopyBackState\": self.copy_back_state, \"JBODState\":", "from utils.common import Constant from utils.model import BaseModule from utils import globalvar from", "oem_info.get( \"MaintainPDFailHistory\", None) if self.copy_back_state is None: self.copy_back_state = oem_info.get(\"CopyBackState\", None) if oem_info.get(\"DDRECCCount\",", "None self.temperature_celsius = None self.package_version = None @property def dict(self): return { \"MemberId\":", "@GetVersion() def run(self, args): is_adapt_b01 = globalvar.IS_ADAPT_B01 if is_adapt_b01: client = RestfulClient(args) try:", "self.suc_list: return self.suc_list client = RestfulClient(args) try: self._get_health_info(client) finally: if client.cookie: client.delete_session() return", "oem_info.get(\"MemorySizeMiB\", None) if oem_info.get(\"SupportedRAIDLevels\", None) is not None: self.supported_raid_levels = ( \", \".join(oem_info[\"SupportedRAIDLevels\"]))" ]
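The classes above only describe how a Redfish storage resource is flattened into plain dictionaries. The snippet below is a minimal sketch of that flattening in isolation: it assumes the Controller and Raid classes above are in scope, and the Redfish-style payload is invented for illustration rather than captured from a real BMC.

# Minimal sketch: feed a hand-written, Redfish-style storage resource into
# Raid.pack_raid_resource and read the flattened result back out.
# Assumes the Controller and Raid classes above are importable; every value
# in the payload below is a made-up example.
sample_resource = {
    "Name": "RAIDStorage0",
    "StorageControllers": [
        {
            "MemberId": "0",
            "Manufacturer": "ExampleVendor",          # hypothetical value
            "Name": "SAS3508",                        # hypothetical value
            "SerialNumber": "012345",
            "FirmwareVersion": "5.140.00-3319",
            "Status": {"State": "Enabled", "Health": "OK"},
            "Oem": {"Public": {"JBODState": False, "MemorySizeMiB": 2048}},
        }
    ],
}

raid = Raid()
raid.pack_raid_resource(sample_resource)
print(raid.dict)                          # top-level RAID card summary
print(raid.dict["Controller"][0].dict)    # per-controller details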
[ "select_test = \\ \"\"\" select * from yaml \"\"\" def test('test.yaml','from ): return", "<filename>yaml_query/tests/test.py select_test = \\ \"\"\" select * from yaml \"\"\" def test('test.yaml','from ):" ]
[ "length out of range numbers - [{}]\".format(en_cut_count)) print(\"French data`s length out of range", "src_ignore != 0: print(\"Ignored src word counts - [{}]\".format(src_ignore)) if tgt_ignore != 0:", "= Dictionary() def parse(self): def gather_file(file_, max_len): en_sents, fra_sents, en_cut_count, fra_cut_count = [],", "out of range numbers - [{}]\".format(en_cut_count)) print(\"French data`s length out of range numbers", "word in en_.strip().split()] fra_ws = [word for word in fra_.strip().split()] if len(en_ws) >", "sents, min_count): words = [word for sent in sents for word in sent]", "word_count[w]+=1 ignored_word_count = 0 for word, count in word_count.items(): if count <= min_count:", "[WORD[EOS]]) return fra_sents, en_sents, fra_cut_count, en_cut_count max_len = self._max_len - 2 src_train, tgt_train,", "en_cut_count, fra_cut_count = [], [], 0, 0 for sentences in open(file_): en_, fra_", "!= 0: print(\"Ignored src word counts - [{}]\".format(src_ignore)) if tgt_ignore != 0: print(\"Ignored", "data`s length out of range numbers - [{}]\".format(en_cut_count)) print(\"French data`s length out of", "min_word_count self.src_sents = None self.tgt_sents = None self.src_valid_sents = None self.tgt_valid_sents = None", "en_sents, fra_sents, en_cut_count, fra_cut_count = [], [], 0, 0 for sentences in open(file_):", "gather_file('data/test', max_len) print(\"English data`s length out of range numbers - [{}]\".format(en_cut_count)) print(\"French data`s", "src_train self.tgt_train = tgt_train self.src_valid = src_valid self.tgt_valid = tgt_valid def save(self): data", "= tgt_valid def save(self): data = { 'max_word_len': self._max_len, 'dict': { 'src': self.src_dict.word2idx,", "= tgt_train self.src_valid = src_valid self.tgt_valid = tgt_valid def save(self): data = {", "__len__(self): return self.idx def __str__(self): return \"%s(size = %d)\".format(self.__class__.__name__, len(self.idx)) class Corpus(object): def", "in words: word_count[w]+=1 ignored_word_count = 0 for word, count in word_count.items(): if count", "help='path to save processed data') parser.add_argument('--max-lenth', type=int, default=20, help='max length of sentence') parser.add_argument('--min-word-count',", "for sentences in open(file_): en_, fra_ = [normalizeString(s) for s in sentences.strip().split('\\t')] en_ws", "!= 0: print(\"Ignored tgt word counts - [{}]\".format(tgt_ignore)) self.src_train = src_train self.tgt_train =", "min_count): words = [word for sent in sents for word in sent] word_count", "self.tgt_train = tgt_train self.src_valid = src_valid self.tgt_valid = tgt_valid def save(self): data =", "[{}]'.format(len(self.src_dict), len(self.tgt_dict))) def process(self): self.parse() self.save() if __name__ == \"__main__\": parser = argparse.ArgumentParser(description='seq2sqe", "return \"%s(size = %d)\".format(self.__class__.__name__, len(self.idx)) class Corpus(object): def __init__(self, save_data, max_len=20, min_word_count=1): self._save_data", "= None self.src_dict = Dictionary() self.tgt_dict = Dictionary() def parse(self): def gather_file(file_, max_len):", "2 src_train, tgt_train, fra_cut_count, en_cut_count = gather_file('data/train', max_len) src_valid, tgt_valid, _, _ =", "__init__(self): self.word2idx = { WORD[BOS]: BOS, WORD[EOS]: EOS, WORD[PAD]: PAD, WORD[UNK]: UNK }", "self.src_dict.word2idx), 'tgt': corpora2idx(self.tgt_train, self.tgt_dict.word2idx) }, 'valid': { 'src': corpora2idx(self.src_valid, self.src_dict.word2idx), 'tgt': corpora2idx(self.tgt_valid, self.tgt_dict.word2idx)", 
"}, 'train': { 'src': corpora2idx(self.src_train, self.src_dict.word2idx), 'tgt': corpora2idx(self.tgt_train, self.tgt_dict.word2idx) }, 'valid': { 'src':", "[{}]\".format(src_ignore)) if tgt_ignore != 0: print(\"Ignored tgt word counts - [{}]\".format(tgt_ignore)) self.src_train =", "src_train, tgt_train, fra_cut_count, en_cut_count = gather_file('data/train', max_len) src_valid, tgt_valid, _, _ = gather_file('data/test',", "__name__ == \"__main__\": parser = argparse.ArgumentParser(description='seq2sqe corpora') parser.add_argument('--save-data', type=str, default='data/seq2seq.pt', help='path to save", "src_valid, tgt_valid, _, _ = gather_file('data/test', max_len) print(\"English data`s length out of range", "range numbers - [{}]\".format(fra_cut_count)) src_ignore = self.src_dict(src_train, self._min_word_count) tgt_ignore = self.tgt_dict(tgt_train, self._min_word_count) if", "self._min_word_count = min_word_count self.src_sents = None self.tgt_sents = None self.src_valid_sents = None self.tgt_valid_sents", "{ 'src': self.src_dict.word2idx, 'src_size': len(self.src_dict), 'tgt': self.tgt_dict.word2idx, 'tgt_size': len(self.tgt_dict) }, 'train': { 'src':", "def __init__(self): self.word2idx = { WORD[BOS]: BOS, WORD[EOS]: EOS, WORD[PAD]: PAD, WORD[UNK]: UNK", "= min_word_count self.src_sents = None self.tgt_sents = None self.src_valid_sents = None self.tgt_valid_sents =", "corpora2idx(self.tgt_train, self.tgt_dict.word2idx) }, 'valid': { 'src': corpora2idx(self.src_valid, self.src_dict.word2idx), 'tgt': corpora2idx(self.tgt_valid, self.tgt_dict.word2idx) } }", "self.idx += 1 def __call__(self, sents, min_count): words = [word for sent in", "max_len=20, min_word_count=1): self._save_data = save_data self._max_len = max_len self._min_word_count = min_word_count self.src_sents =", "en_cut_count = gather_file('data/train', max_len) src_valid, tgt_valid, _, _ = gather_file('data/test', max_len) print(\"English data`s", "length - [{}] | target corpora length - [{}]'.format(len(self.src_dict), len(self.tgt_dict))) def process(self): self.parse()", "target corpora length - [{}]'.format(len(self.src_dict), len(self.tgt_dict))) def process(self): self.parse() self.save() if __name__ ==", "of sentence') parser.add_argument('--min-word-count', type=int, default=1, help='min corpora count to discard') args = parser.parse_args()", "gather_file(file_, max_len): en_sents, fra_sents, en_cut_count, fra_cut_count = [], [], 0, 0 for sentences", "parser.add_argument('--min-word-count', type=int, default=1, help='min corpora count to discard') args = parser.parse_args() corpus =", "'src': self.src_dict.word2idx, 'src_size': len(self.src_dict), 'tgt': self.tgt_dict.word2idx, 'tgt_size': len(self.tgt_dict) }, 'train': { 'src': corpora2idx(self.src_train,", "len(en_ws) > max_len: en_cut_count += 1 en_ws = en_ws[:max_len] en_sents.append([WORD[BOS]] + en_ws +", "0, 0 for sentences in open(file_): en_, fra_ = [normalizeString(s) for s in", "of range numbers - [{}]\".format(fra_cut_count)) src_ignore = self.src_dict(src_train, self._min_word_count) tgt_ignore = self.tgt_dict(tgt_train, self._min_word_count)", "fra_cut_count += 1 fra_ws = fra_ws[:max_len] fra_sents.append([WORD[BOS]] + fra_ws + [WORD[EOS]]) return fra_sents,", "UNK } self.idx = 4 def add(self, word): if self.word2idx.get(word) is None: self.word2idx[word]", "argparse import logging from utils import corpora2idx, normalizeString from const import * class", "[{}]\".format(fra_cut_count)) src_ignore = self.src_dict(src_train, self._min_word_count) 
tgt_ignore = self.tgt_dict(tgt_train, self._min_word_count) if src_ignore != 0:", "print(\"French data`s length out of range numbers - [{}]\".format(fra_cut_count)) src_ignore = self.src_dict(src_train, self._min_word_count)", "fra_ws[:max_len] fra_sents.append([WORD[BOS]] + fra_ws + [WORD[EOS]]) return fra_sents, en_sents, fra_cut_count, en_cut_count max_len =", "= argparse.ArgumentParser(description='seq2sqe corpora') parser.add_argument('--save-data', type=str, default='data/seq2seq.pt', help='path to save processed data') parser.add_argument('--max-lenth', type=int,", "import torch import argparse import logging from utils import corpora2idx, normalizeString from const", "self._max_len - 2 src_train, tgt_train, fra_cut_count, en_cut_count = gather_file('data/train', max_len) src_valid, tgt_valid, _,", "tgt word counts - [{}]\".format(tgt_ignore)) self.src_train = src_train self.tgt_train = tgt_train self.src_valid =", "tgt_valid, _, _ = gather_file('data/test', max_len) print(\"English data`s length out of range numbers", "_ = gather_file('data/test', max_len) print(\"English data`s length out of range numbers - [{}]\".format(en_cut_count))", "def add(self, word): if self.word2idx.get(word) is None: self.word2idx[word] = self.idx self.idx += 1", "= {w: 0 for w in set(words)} for w in words: word_count[w]+=1 ignored_word_count", "for sent in sents for word in sent] word_count = {w: 0 for", "= self.tgt_dict(tgt_train, self._min_word_count) if src_ignore != 0: print(\"Ignored src word counts - [{}]\".format(src_ignore))", "self.src_valid = src_valid self.tgt_valid = tgt_valid def save(self): data = { 'max_word_len': self._max_len,", "in fra_.strip().split()] if len(en_ws) > max_len: en_cut_count += 1 en_ws = en_ws[:max_len] en_sents.append([WORD[BOS]]", "tgt_ignore = self.tgt_dict(tgt_train, self._min_word_count) if src_ignore != 0: print(\"Ignored src word counts -", "sentence') parser.add_argument('--min-word-count', type=int, default=1, help='min corpora count to discard') args = parser.parse_args() corpus", "utils import corpora2idx, normalizeString from const import * class Dictionary(object): def __init__(self): self.word2idx", "tgt_ignore != 0: print(\"Ignored tgt word counts - [{}]\".format(tgt_ignore)) self.src_train = src_train self.tgt_train", "> max_len: en_cut_count += 1 en_ws = en_ws[:max_len] en_sents.append([WORD[BOS]] + en_ws + [WORD[EOS]])", "self.src_dict.word2idx, 'src_size': len(self.src_dict), 'tgt': self.tgt_dict.word2idx, 'tgt_size': len(self.tgt_dict) }, 'train': { 'src': corpora2idx(self.src_train, self.src_dict.word2idx),", "%d)\".format(self.__class__.__name__, len(self.idx)) class Corpus(object): def __init__(self, save_data, max_len=20, min_word_count=1): self._save_data = save_data self._max_len", "in open(file_): en_, fra_ = [normalizeString(s) for s in sentences.strip().split('\\t')] en_ws = [word", "max_len): en_sents, fra_sents, en_cut_count, fra_cut_count = [], [], 0, 0 for sentences in", "corpora length - [{}] | target corpora length - [{}]'.format(len(self.src_dict), len(self.tgt_dict))) def process(self):", "None self.src_valid_sents = None self.tgt_valid_sents = None self.src_dict = Dictionary() self.tgt_dict = Dictionary()", "sentences.strip().split('\\t')] en_ws = [word for word in en_.strip().split()] fra_ws = [word for word", "corpora') parser.add_argument('--save-data', type=str, default='data/seq2seq.pt', help='path to save processed data') parser.add_argument('--max-lenth', type=int, default=20, help='max", "<= min_count: ignored_word_count 
+= 1 continue self.add(word) return ignored_word_count def __len__(self): return self.idx", "parser = argparse.ArgumentParser(description='seq2sqe corpora') parser.add_argument('--save-data', type=str, default='data/seq2seq.pt', help='path to save processed data') parser.add_argument('--max-lenth',", "ignored_word_count = 0 for word, count in word_count.items(): if count <= min_count: ignored_word_count", "return fra_sents, en_sents, fra_cut_count, en_cut_count max_len = self._max_len - 2 src_train, tgt_train, fra_cut_count,", "out of range numbers - [{}]\".format(fra_cut_count)) src_ignore = self.src_dict(src_train, self._min_word_count) tgt_ignore = self.tgt_dict(tgt_train,", "= max_len self._min_word_count = min_word_count self.src_sents = None self.tgt_sents = None self.src_valid_sents =", "self.src_sents = None self.tgt_sents = None self.src_valid_sents = None self.tgt_valid_sents = None self.src_dict", "len(fra_ws) > max_len: fra_cut_count += 1 fra_ws = fra_ws[:max_len] fra_sents.append([WORD[BOS]] + fra_ws +", "+= 1 def __call__(self, sents, min_count): words = [word for sent in sents", "PAD, WORD[UNK]: UNK } self.idx = 4 def add(self, word): if self.word2idx.get(word) is", "src_valid self.tgt_valid = tgt_valid def save(self): data = { 'max_word_len': self._max_len, 'dict': {", "print('src corpora length - [{}] | target corpora length - [{}]'.format(len(self.src_dict), len(self.tgt_dict))) def", "self.add(word) return ignored_word_count def __len__(self): return self.idx def __str__(self): return \"%s(size = %d)\".format(self.__class__.__name__,", "import * class Dictionary(object): def __init__(self): self.word2idx = { WORD[BOS]: BOS, WORD[EOS]: EOS,", "return self.idx def __str__(self): return \"%s(size = %d)\".format(self.__class__.__name__, len(self.idx)) class Corpus(object): def __init__(self,", "length of sentence') parser.add_argument('--min-word-count', type=int, default=1, help='min corpora count to discard') args =", "None self.src_dict = Dictionary() self.tgt_dict = Dictionary() def parse(self): def gather_file(file_, max_len): en_sents,", "self.tgt_valid = tgt_valid def save(self): data = { 'max_word_len': self._max_len, 'dict': { 'src':", "__call__(self, sents, min_count): words = [word for sent in sents for word in", "0 for w in set(words)} for w in words: word_count[w]+=1 ignored_word_count = 0", "torch import argparse import logging from utils import corpora2idx, normalizeString from const import", "word_count.items(): if count <= min_count: ignored_word_count += 1 continue self.add(word) return ignored_word_count def", "= en_ws[:max_len] en_sents.append([WORD[BOS]] + en_ws + [WORD[EOS]]) if len(fra_ws) > max_len: fra_cut_count +=", "default=20, help='max length of sentence') parser.add_argument('--min-word-count', type=int, default=1, help='min corpora count to discard')", "counts - [{}]\".format(src_ignore)) if tgt_ignore != 0: print(\"Ignored tgt word counts - [{}]\".format(tgt_ignore))", "= None self.tgt_valid_sents = None self.src_dict = Dictionary() self.tgt_dict = Dictionary() def parse(self):", "self._max_len, 'dict': { 'src': self.src_dict.word2idx, 'src_size': len(self.src_dict), 'tgt': self.tgt_dict.word2idx, 'tgt_size': len(self.tgt_dict) }, 'train':", "self.src_dict(src_train, self._min_word_count) tgt_ignore = self.tgt_dict(tgt_train, self._min_word_count) if src_ignore != 0: print(\"Ignored src word", "word_count = {w: 0 for w in set(words)} for w in words: word_count[w]+=1", "def gather_file(file_, max_len): en_sents, fra_sents, en_cut_count, 
fra_cut_count = [], [], 0, 0 for", "+ en_ws + [WORD[EOS]]) if len(fra_ws) > max_len: fra_cut_count += 1 fra_ws =", "type=str, default='data/seq2seq.pt', help='path to save processed data') parser.add_argument('--max-lenth', type=int, default=20, help='max length of", "for w in set(words)} for w in words: word_count[w]+=1 ignored_word_count = 0 for", "corpora2idx, normalizeString from const import * class Dictionary(object): def __init__(self): self.word2idx = {", "\"__main__\": parser = argparse.ArgumentParser(description='seq2sqe corpora') parser.add_argument('--save-data', type=str, default='data/seq2seq.pt', help='path to save processed data')", "min_word_count=1): self._save_data = save_data self._max_len = max_len self._min_word_count = min_word_count self.src_sents = None", "len(self.idx)) class Corpus(object): def __init__(self, save_data, max_len=20, min_word_count=1): self._save_data = save_data self._max_len =", "= %d)\".format(self.__class__.__name__, len(self.idx)) class Corpus(object): def __init__(self, save_data, max_len=20, min_word_count=1): self._save_data = save_data", "en_ws = [word for word in en_.strip().split()] fra_ws = [word for word in", "_, _ = gather_file('data/test', max_len) print(\"English data`s length out of range numbers -", "'dict': { 'src': self.src_dict.word2idx, 'src_size': len(self.src_dict), 'tgt': self.tgt_dict.word2idx, 'tgt_size': len(self.tgt_dict) }, 'train': {", "[word for word in fra_.strip().split()] if len(en_ws) > max_len: en_cut_count += 1 en_ws", "help='max length of sentence') parser.add_argument('--min-word-count', type=int, default=1, help='min corpora count to discard') args", "| target corpora length - [{}]'.format(len(self.src_dict), len(self.tgt_dict))) def process(self): self.parse() self.save() if __name__", "processed data') parser.add_argument('--max-lenth', type=int, default=20, help='max length of sentence') parser.add_argument('--min-word-count', type=int, default=1, help='min", "save processed data') parser.add_argument('--max-lenth', type=int, default=20, help='max length of sentence') parser.add_argument('--min-word-count', type=int, default=1,", "sents for word in sent] word_count = {w: 0 for w in set(words)}", "{ 'max_word_len': self._max_len, 'dict': { 'src': self.src_dict.word2idx, 'src_size': len(self.src_dict), 'tgt': self.tgt_dict.word2idx, 'tgt_size': len(self.tgt_dict)", "Corpus(object): def __init__(self, save_data, max_len=20, min_word_count=1): self._save_data = save_data self._max_len = max_len self._min_word_count", "sentences in open(file_): en_, fra_ = [normalizeString(s) for s in sentences.strip().split('\\t')] en_ws =", "logging from utils import corpora2idx, normalizeString from const import * class Dictionary(object): def", "en_sents, fra_cut_count, en_cut_count max_len = self._max_len - 2 src_train, tgt_train, fra_cut_count, en_cut_count =", "max_len: en_cut_count += 1 en_ws = en_ws[:max_len] en_sents.append([WORD[BOS]] + en_ws + [WORD[EOS]]) if", "if len(en_ws) > max_len: en_cut_count += 1 en_ws = en_ws[:max_len] en_sents.append([WORD[BOS]] + en_ws", "import logging from utils import corpora2idx, normalizeString from const import * class Dictionary(object):", "fra_ws = [word for word in fra_.strip().split()] if len(en_ws) > max_len: en_cut_count +=", "1 continue self.add(word) return ignored_word_count def __len__(self): return self.idx def __str__(self): return \"%s(size", "self.tgt_dict.word2idx, 'tgt_size': len(self.tgt_dict) }, 'train': { 'src': corpora2idx(self.src_train, 
self.src_dict.word2idx), 'tgt': corpora2idx(self.tgt_train, self.tgt_dict.word2idx) },", "type=int, default=1, help='min corpora count to discard') args = parser.parse_args() corpus = Corpus(args.save_data,", "return ignored_word_count def __len__(self): return self.idx def __str__(self): return \"%s(size = %d)\".format(self.__class__.__name__, len(self.idx))", "if len(fra_ws) > max_len: fra_cut_count += 1 fra_ws = fra_ws[:max_len] fra_sents.append([WORD[BOS]] + fra_ws", "> max_len: fra_cut_count += 1 fra_ws = fra_ws[:max_len] fra_sents.append([WORD[BOS]] + fra_ws + [WORD[EOS]])", "in set(words)} for w in words: word_count[w]+=1 ignored_word_count = 0 for word, count", "None self.tgt_sents = None self.src_valid_sents = None self.tgt_valid_sents = None self.src_dict = Dictionary()", "def __call__(self, sents, min_count): words = [word for sent in sents for word", "= Dictionary() self.tgt_dict = Dictionary() def parse(self): def gather_file(file_, max_len): en_sents, fra_sents, en_cut_count,", "0 for sentences in open(file_): en_, fra_ = [normalizeString(s) for s in sentences.strip().split('\\t')]", "self.src_valid_sents = None self.tgt_valid_sents = None self.src_dict = Dictionary() self.tgt_dict = Dictionary() def", "'tgt_size': len(self.tgt_dict) }, 'train': { 'src': corpora2idx(self.src_train, self.src_dict.word2idx), 'tgt': corpora2idx(self.tgt_train, self.tgt_dict.word2idx) }, 'valid':", "default=1, help='min corpora count to discard') args = parser.parse_args() corpus = Corpus(args.save_data, args.max_lenth,", "+ [WORD[EOS]]) if len(fra_ws) > max_len: fra_cut_count += 1 fra_ws = fra_ws[:max_len] fra_sents.append([WORD[BOS]]", "'valid': { 'src': corpora2idx(self.src_valid, self.src_dict.word2idx), 'tgt': corpora2idx(self.tgt_valid, self.tgt_dict.word2idx) } } torch.save(data, self._save_data) print('src", "ignored_word_count += 1 continue self.add(word) return ignored_word_count def __len__(self): return self.idx def __str__(self):", "print(\"Ignored src word counts - [{}]\".format(src_ignore)) if tgt_ignore != 0: print(\"Ignored tgt word", "WORD[UNK]: UNK } self.idx = 4 def add(self, word): if self.word2idx.get(word) is None:", "} } torch.save(data, self._save_data) print('src corpora length - [{}] | target corpora length", "count in word_count.items(): if count <= min_count: ignored_word_count += 1 continue self.add(word) return", "en_sents.append([WORD[BOS]] + en_ws + [WORD[EOS]]) if len(fra_ws) > max_len: fra_cut_count += 1 fra_ws", "en_cut_count += 1 en_ws = en_ws[:max_len] en_sents.append([WORD[BOS]] + en_ws + [WORD[EOS]]) if len(fra_ws)", "save(self): data = { 'max_word_len': self._max_len, 'dict': { 'src': self.src_dict.word2idx, 'src_size': len(self.src_dict), 'tgt':", "argparse.ArgumentParser(description='seq2sqe corpora') parser.add_argument('--save-data', type=str, default='data/seq2seq.pt', help='path to save processed data') parser.add_argument('--max-lenth', type=int, default=20,", "self.src_dict = Dictionary() self.tgt_dict = Dictionary() def parse(self): def gather_file(file_, max_len): en_sents, fra_sents,", "{ 'src': corpora2idx(self.src_valid, self.src_dict.word2idx), 'tgt': corpora2idx(self.tgt_valid, self.tgt_dict.word2idx) } } torch.save(data, self._save_data) print('src corpora", "ignored_word_count def __len__(self): return self.idx def __str__(self): return \"%s(size = %d)\".format(self.__class__.__name__, len(self.idx)) class", "+= 1 en_ws = en_ws[:max_len] en_sents.append([WORD[BOS]] + en_ws + [WORD[EOS]]) if len(fra_ws) >", "self._min_word_count) if 
src_ignore != 0: print(\"Ignored src word counts - [{}]\".format(src_ignore)) if tgt_ignore", "in sent] word_count = {w: 0 for w in set(words)} for w in", "= src_valid self.tgt_valid = tgt_valid def save(self): data = { 'max_word_len': self._max_len, 'dict':", "numbers - [{}]\".format(en_cut_count)) print(\"French data`s length out of range numbers - [{}]\".format(fra_cut_count)) src_ignore", "'tgt': corpora2idx(self.tgt_train, self.tgt_dict.word2idx) }, 'valid': { 'src': corpora2idx(self.src_valid, self.src_dict.word2idx), 'tgt': corpora2idx(self.tgt_valid, self.tgt_dict.word2idx) }", "normalizeString from const import * class Dictionary(object): def __init__(self): self.word2idx = { WORD[BOS]:", "- [{}]\".format(src_ignore)) if tgt_ignore != 0: print(\"Ignored tgt word counts - [{}]\".format(tgt_ignore)) self.src_train", "= None self.tgt_sents = None self.src_valid_sents = None self.tgt_valid_sents = None self.src_dict =", "WORD[PAD]: PAD, WORD[UNK]: UNK } self.idx = 4 def add(self, word): if self.word2idx.get(word)", "} self.idx = 4 def add(self, word): if self.word2idx.get(word) is None: self.word2idx[word] =", "'src_size': len(self.src_dict), 'tgt': self.tgt_dict.word2idx, 'tgt_size': len(self.tgt_dict) }, 'train': { 'src': corpora2idx(self.src_train, self.src_dict.word2idx), 'tgt':", "for s in sentences.strip().split('\\t')] en_ws = [word for word in en_.strip().split()] fra_ws =", "self._max_len = max_len self._min_word_count = min_word_count self.src_sents = None self.tgt_sents = None self.src_valid_sents", "'src': corpora2idx(self.src_train, self.src_dict.word2idx), 'tgt': corpora2idx(self.tgt_train, self.tgt_dict.word2idx) }, 'valid': { 'src': corpora2idx(self.src_valid, self.src_dict.word2idx), 'tgt':", "self.idx self.idx += 1 def __call__(self, sents, min_count): words = [word for sent", "process(self): self.parse() self.save() if __name__ == \"__main__\": parser = argparse.ArgumentParser(description='seq2sqe corpora') parser.add_argument('--save-data', type=str,", "for word, count in word_count.items(): if count <= min_count: ignored_word_count += 1 continue", "max_len) print(\"English data`s length out of range numbers - [{}]\".format(en_cut_count)) print(\"French data`s length", "self.save() if __name__ == \"__main__\": parser = argparse.ArgumentParser(description='seq2sqe corpora') parser.add_argument('--save-data', type=str, default='data/seq2seq.pt', help='path", "fra_sents, en_cut_count, fra_cut_count = [], [], 0, 0 for sentences in open(file_): en_,", "min_count: ignored_word_count += 1 continue self.add(word) return ignored_word_count def __len__(self): return self.idx def", "1 en_ws = en_ws[:max_len] en_sents.append([WORD[BOS]] + en_ws + [WORD[EOS]]) if len(fra_ws) > max_len:", "= [word for word in fra_.strip().split()] if len(en_ws) > max_len: en_cut_count += 1", "def save(self): data = { 'max_word_len': self._max_len, 'dict': { 'src': self.src_dict.word2idx, 'src_size': len(self.src_dict),", "tgt_valid def save(self): data = { 'max_word_len': self._max_len, 'dict': { 'src': self.src_dict.word2idx, 'src_size':", "if count <= min_count: ignored_word_count += 1 continue self.add(word) return ignored_word_count def __len__(self):", "max_len = self._max_len - 2 src_train, tgt_train, fra_cut_count, en_cut_count = gather_file('data/train', max_len) src_valid,", "for w in words: word_count[w]+=1 ignored_word_count = 0 for word, count in word_count.items():", "self.idx def __str__(self): return \"%s(size = %d)\".format(self.__class__.__name__, len(self.idx)) class 
Corpus(object): def __init__(self, save_data,", "'tgt': corpora2idx(self.tgt_valid, self.tgt_dict.word2idx) } } torch.save(data, self._save_data) print('src corpora length - [{}] |", "word counts - [{}]\".format(src_ignore)) if tgt_ignore != 0: print(\"Ignored tgt word counts -", "words: word_count[w]+=1 ignored_word_count = 0 for word, count in word_count.items(): if count <=", "4 def add(self, word): if self.word2idx.get(word) is None: self.word2idx[word] = self.idx self.idx +=", "[word for sent in sents for word in sent] word_count = {w: 0", "def __len__(self): return self.idx def __str__(self): return \"%s(size = %d)\".format(self.__class__.__name__, len(self.idx)) class Corpus(object):", "fra_ws + [WORD[EOS]]) return fra_sents, en_sents, fra_cut_count, en_cut_count max_len = self._max_len - 2", "length - [{}]'.format(len(self.src_dict), len(self.tgt_dict))) def process(self): self.parse() self.save() if __name__ == \"__main__\": parser", "set(words)} for w in words: word_count[w]+=1 ignored_word_count = 0 for word, count in", "= gather_file('data/test', max_len) print(\"English data`s length out of range numbers - [{}]\".format(en_cut_count)) print(\"French", "def __init__(self, save_data, max_len=20, min_word_count=1): self._save_data = save_data self._max_len = max_len self._min_word_count =", "is None: self.word2idx[word] = self.idx self.idx += 1 def __call__(self, sents, min_count): words", "from utils import corpora2idx, normalizeString from const import * class Dictionary(object): def __init__(self):", "src word counts - [{}]\".format(src_ignore)) if tgt_ignore != 0: print(\"Ignored tgt word counts", "* class Dictionary(object): def __init__(self): self.word2idx = { WORD[BOS]: BOS, WORD[EOS]: EOS, WORD[PAD]:", "[{}]\".format(en_cut_count)) print(\"French data`s length out of range numbers - [{}]\".format(fra_cut_count)) src_ignore = self.src_dict(src_train,", "fra_ws = fra_ws[:max_len] fra_sents.append([WORD[BOS]] + fra_ws + [WORD[EOS]]) return fra_sents, en_sents, fra_cut_count, en_cut_count", "'tgt': self.tgt_dict.word2idx, 'tgt_size': len(self.tgt_dict) }, 'train': { 'src': corpora2idx(self.src_train, self.src_dict.word2idx), 'tgt': corpora2idx(self.tgt_train, self.tgt_dict.word2idx)", "__init__(self, save_data, max_len=20, min_word_count=1): self._save_data = save_data self._max_len = max_len self._min_word_count = min_word_count", "if self.word2idx.get(word) is None: self.word2idx[word] = self.idx self.idx += 1 def __call__(self, sents,", "of range numbers - [{}]\".format(en_cut_count)) print(\"French data`s length out of range numbers -", "def process(self): self.parse() self.save() if __name__ == \"__main__\": parser = argparse.ArgumentParser(description='seq2sqe corpora') parser.add_argument('--save-data',", "= [word for word in en_.strip().split()] fra_ws = [word for word in fra_.strip().split()]", "BOS, WORD[EOS]: EOS, WORD[PAD]: PAD, WORD[UNK]: UNK } self.idx = 4 def add(self,", "word in sent] word_count = {w: 0 for w in set(words)} for w", "- [{}]\".format(en_cut_count)) print(\"French data`s length out of range numbers - [{}]\".format(fra_cut_count)) src_ignore =", "[{}] | target corpora length - [{}]'.format(len(self.src_dict), len(self.tgt_dict))) def process(self): self.parse() self.save() if", "[word for word in en_.strip().split()] fra_ws = [word for word in fra_.strip().split()] if", "save_data self._max_len = max_len self._min_word_count = min_word_count self.src_sents = None self.tgt_sents = None", "self.word2idx[word] = self.idx self.idx += 1 def 
__call__(self, sents, min_count): words = [word", "count <= min_count: ignored_word_count += 1 continue self.add(word) return ignored_word_count def __len__(self): return", "self.tgt_dict(tgt_train, self._min_word_count) if src_ignore != 0: print(\"Ignored src word counts - [{}]\".format(src_ignore)) if", "corpora count to discard') args = parser.parse_args() corpus = Corpus(args.save_data, args.max_lenth, args.min_word_count) corpus.process()", "self.word2idx.get(word) is None: self.word2idx[word] = self.idx self.idx += 1 def __call__(self, sents, min_count):", "word): if self.word2idx.get(word) is None: self.word2idx[word] = self.idx self.idx += 1 def __call__(self,", "help='min corpora count to discard') args = parser.parse_args() corpus = Corpus(args.save_data, args.max_lenth, args.min_word_count)", "[WORD[EOS]]) if len(fra_ws) > max_len: fra_cut_count += 1 fra_ws = fra_ws[:max_len] fra_sents.append([WORD[BOS]] +", "length out of range numbers - [{}]\".format(fra_cut_count)) src_ignore = self.src_dict(src_train, self._min_word_count) tgt_ignore =", "en_, fra_ = [normalizeString(s) for s in sentences.strip().split('\\t')] en_ws = [word for word", "+ fra_ws + [WORD[EOS]]) return fra_sents, en_sents, fra_cut_count, en_cut_count max_len = self._max_len -", "s in sentences.strip().split('\\t')] en_ws = [word for word in en_.strip().split()] fra_ws = [word", "} torch.save(data, self._save_data) print('src corpora length - [{}] | target corpora length -", "self.tgt_dict.word2idx) } } torch.save(data, self._save_data) print('src corpora length - [{}] | target corpora", "'max_word_len': self._max_len, 'dict': { 'src': self.src_dict.word2idx, 'src_size': len(self.src_dict), 'tgt': self.tgt_dict.word2idx, 'tgt_size': len(self.tgt_dict) },", "word, count in word_count.items(): if count <= min_count: ignored_word_count += 1 continue self.add(word)", "corpora2idx(self.src_train, self.src_dict.word2idx), 'tgt': corpora2idx(self.tgt_train, self.tgt_dict.word2idx) }, 'valid': { 'src': corpora2idx(self.src_valid, self.src_dict.word2idx), 'tgt': corpora2idx(self.tgt_valid,", "= None self.src_valid_sents = None self.tgt_valid_sents = None self.src_dict = Dictionary() self.tgt_dict =", "self.tgt_sents = None self.src_valid_sents = None self.tgt_valid_sents = None self.src_dict = Dictionary() self.tgt_dict", "src_ignore = self.src_dict(src_train, self._min_word_count) tgt_ignore = self.tgt_dict(tgt_train, self._min_word_count) if src_ignore != 0: print(\"Ignored", "print(\"English data`s length out of range numbers - [{}]\".format(en_cut_count)) print(\"French data`s length out", "EOS, WORD[PAD]: PAD, WORD[UNK]: UNK } self.idx = 4 def add(self, word): if", "corpora2idx(self.tgt_valid, self.tgt_dict.word2idx) } } torch.save(data, self._save_data) print('src corpora length - [{}] | target", "[{}]\".format(tgt_ignore)) self.src_train = src_train self.tgt_train = tgt_train self.src_valid = src_valid self.tgt_valid = tgt_valid", "parser.add_argument('--save-data', type=str, default='data/seq2seq.pt', help='path to save processed data') parser.add_argument('--max-lenth', type=int, default=20, help='max length", "parser.add_argument('--max-lenth', type=int, default=20, help='max length of sentence') parser.add_argument('--min-word-count', type=int, default=1, help='min corpora count", "fra_.strip().split()] if len(en_ws) > max_len: en_cut_count += 1 en_ws = en_ws[:max_len] en_sents.append([WORD[BOS]] +", "+= 1 continue self.add(word) return ignored_word_count def __len__(self): return self.idx def __str__(self): 
return", "= self.idx self.idx += 1 def __call__(self, sents, min_count): words = [word for", "self.parse() self.save() if __name__ == \"__main__\": parser = argparse.ArgumentParser(description='seq2sqe corpora') parser.add_argument('--save-data', type=str, default='data/seq2seq.pt',", "if tgt_ignore != 0: print(\"Ignored tgt word counts - [{}]\".format(tgt_ignore)) self.src_train = src_train", "fra_sents.append([WORD[BOS]] + fra_ws + [WORD[EOS]]) return fra_sents, en_sents, fra_cut_count, en_cut_count max_len = self._max_len", "numbers - [{}]\".format(fra_cut_count)) src_ignore = self.src_dict(src_train, self._min_word_count) tgt_ignore = self.tgt_dict(tgt_train, self._min_word_count) if src_ignore", "- [{}] | target corpora length - [{}]'.format(len(self.src_dict), len(self.tgt_dict))) def process(self): self.parse() self.save()", "__str__(self): return \"%s(size = %d)\".format(self.__class__.__name__, len(self.idx)) class Corpus(object): def __init__(self, save_data, max_len=20, min_word_count=1):", "add(self, word): if self.word2idx.get(word) is None: self.word2idx[word] = self.idx self.idx += 1 def", "const import * class Dictionary(object): def __init__(self): self.word2idx = { WORD[BOS]: BOS, WORD[EOS]:", "en_ws = en_ws[:max_len] en_sents.append([WORD[BOS]] + en_ws + [WORD[EOS]]) if len(fra_ws) > max_len: fra_cut_count", "sent in sents for word in sent] word_count = {w: 0 for w", "= { 'max_word_len': self._max_len, 'dict': { 'src': self.src_dict.word2idx, 'src_size': len(self.src_dict), 'tgt': self.tgt_dict.word2idx, 'tgt_size':", "= fra_ws[:max_len] fra_sents.append([WORD[BOS]] + fra_ws + [WORD[EOS]]) return fra_sents, en_sents, fra_cut_count, en_cut_count max_len", "= [], [], 0, 0 for sentences in open(file_): en_, fra_ = [normalizeString(s)", "1 def __call__(self, sents, min_count): words = [word for sent in sents for", "words = [word for sent in sents for word in sent] word_count =", "- 2 src_train, tgt_train, fra_cut_count, en_cut_count = gather_file('data/train', max_len) src_valid, tgt_valid, _, _", "print(\"Ignored tgt word counts - [{}]\".format(tgt_ignore)) self.src_train = src_train self.tgt_train = tgt_train self.src_valid", "fra_cut_count, en_cut_count = gather_file('data/train', max_len) src_valid, tgt_valid, _, _ = gather_file('data/test', max_len) print(\"English", "max_len self._min_word_count = min_word_count self.src_sents = None self.tgt_sents = None self.src_valid_sents = None", "def __str__(self): return \"%s(size = %d)\".format(self.__class__.__name__, len(self.idx)) class Corpus(object): def __init__(self, save_data, max_len=20,", "fra_sents, en_sents, fra_cut_count, en_cut_count max_len = self._max_len - 2 src_train, tgt_train, fra_cut_count, en_cut_count", "tgt_train self.src_valid = src_valid self.tgt_valid = tgt_valid def save(self): data = { 'max_word_len':", "open(file_): en_, fra_ = [normalizeString(s) for s in sentences.strip().split('\\t')] en_ws = [word for", "self.tgt_valid_sents = None self.src_dict = Dictionary() self.tgt_dict = Dictionary() def parse(self): def gather_file(file_,", "for word in fra_.strip().split()] if len(en_ws) > max_len: en_cut_count += 1 en_ws =", "WORD[EOS]: EOS, WORD[PAD]: PAD, WORD[UNK]: UNK } self.idx = 4 def add(self, word):", "{ 'src': corpora2idx(self.src_train, self.src_dict.word2idx), 'tgt': corpora2idx(self.tgt_train, self.tgt_dict.word2idx) }, 'valid': { 'src': corpora2idx(self.src_valid, self.src_dict.word2idx),", "= self._max_len - 2 src_train, tgt_train, fra_cut_count, en_cut_count = gather_file('data/train', 
max_len) src_valid, tgt_valid,", "= [word for sent in sents for word in sent] word_count = {w:", "word counts - [{}]\".format(tgt_ignore)) self.src_train = src_train self.tgt_train = tgt_train self.src_valid = src_valid", "data') parser.add_argument('--max-lenth', type=int, default=20, help='max length of sentence') parser.add_argument('--min-word-count', type=int, default=1, help='min corpora", "corpora2idx(self.src_valid, self.src_dict.word2idx), 'tgt': corpora2idx(self.tgt_valid, self.tgt_dict.word2idx) } } torch.save(data, self._save_data) print('src corpora length -", "= gather_file('data/train', max_len) src_valid, tgt_valid, _, _ = gather_file('data/test', max_len) print(\"English data`s length", "}, 'valid': { 'src': corpora2idx(self.src_valid, self.src_dict.word2idx), 'tgt': corpora2idx(self.tgt_valid, self.tgt_dict.word2idx) } } torch.save(data, self._save_data)", "range numbers - [{}]\".format(en_cut_count)) print(\"French data`s length out of range numbers - [{}]\".format(fra_cut_count))", "counts - [{}]\".format(tgt_ignore)) self.src_train = src_train self.tgt_train = tgt_train self.src_valid = src_valid self.tgt_valid", "w in set(words)} for w in words: word_count[w]+=1 ignored_word_count = 0 for word,", "len(self.src_dict), 'tgt': self.tgt_dict.word2idx, 'tgt_size': len(self.tgt_dict) }, 'train': { 'src': corpora2idx(self.src_train, self.src_dict.word2idx), 'tgt': corpora2idx(self.tgt_train,", "self.word2idx = { WORD[BOS]: BOS, WORD[EOS]: EOS, WORD[PAD]: PAD, WORD[UNK]: UNK } self.idx", "in sentences.strip().split('\\t')] en_ws = [word for word in en_.strip().split()] fra_ws = [word for", "0: print(\"Ignored tgt word counts - [{}]\".format(tgt_ignore)) self.src_train = src_train self.tgt_train = tgt_train", "= [normalizeString(s) for s in sentences.strip().split('\\t')] en_ws = [word for word in en_.strip().split()]", "en_cut_count max_len = self._max_len - 2 src_train, tgt_train, fra_cut_count, en_cut_count = gather_file('data/train', max_len)", "import corpora2idx, normalizeString from const import * class Dictionary(object): def __init__(self): self.word2idx =", "+= 1 fra_ws = fra_ws[:max_len] fra_sents.append([WORD[BOS]] + fra_ws + [WORD[EOS]]) return fra_sents, en_sents,", "default='data/seq2seq.pt', help='path to save processed data') parser.add_argument('--max-lenth', type=int, default=20, help='max length of sentence')", "self.src_dict.word2idx), 'tgt': corpora2idx(self.tgt_valid, self.tgt_dict.word2idx) } } torch.save(data, self._save_data) print('src corpora length - [{}]", "= 0 for word, count in word_count.items(): if count <= min_count: ignored_word_count +=", "WORD[BOS]: BOS, WORD[EOS]: EOS, WORD[PAD]: PAD, WORD[UNK]: UNK } self.idx = 4 def", "len(self.tgt_dict))) def process(self): self.parse() self.save() if __name__ == \"__main__\": parser = argparse.ArgumentParser(description='seq2sqe corpora')", "{ WORD[BOS]: BOS, WORD[EOS]: EOS, WORD[PAD]: PAD, WORD[UNK]: UNK } self.idx = 4", "en_ws + [WORD[EOS]]) if len(fra_ws) > max_len: fra_cut_count += 1 fra_ws = fra_ws[:max_len]", "= { WORD[BOS]: BOS, WORD[EOS]: EOS, WORD[PAD]: PAD, WORD[UNK]: UNK } self.idx =", "[normalizeString(s) for s in sentences.strip().split('\\t')] en_ws = [word for word in en_.strip().split()] fra_ws", "type=int, default=20, help='max length of sentence') parser.add_argument('--min-word-count', type=int, default=1, help='min corpora count to", "torch.save(data, self._save_data) print('src corpora length - [{}] | target corpora length - [{}]'.format(len(self.src_dict),", "= 
self.src_dict(src_train, self._min_word_count) tgt_ignore = self.tgt_dict(tgt_train, self._min_word_count) if src_ignore != 0: print(\"Ignored src", "self.idx = 4 def add(self, word): if self.word2idx.get(word) is None: self.word2idx[word] = self.idx", "class Dictionary(object): def __init__(self): self.word2idx = { WORD[BOS]: BOS, WORD[EOS]: EOS, WORD[PAD]: PAD,", "en_.strip().split()] fra_ws = [word for word in fra_.strip().split()] if len(en_ws) > max_len: en_cut_count", "0 for word, count in word_count.items(): if count <= min_count: ignored_word_count += 1", "self.src_train = src_train self.tgt_train = tgt_train self.src_valid = src_valid self.tgt_valid = tgt_valid def", "+ [WORD[EOS]]) return fra_sents, en_sents, fra_cut_count, en_cut_count max_len = self._max_len - 2 src_train,", "Dictionary() self.tgt_dict = Dictionary() def parse(self): def gather_file(file_, max_len): en_sents, fra_sents, en_cut_count, fra_cut_count", "sent] word_count = {w: 0 for w in set(words)} for w in words:", "max_len: fra_cut_count += 1 fra_ws = fra_ws[:max_len] fra_sents.append([WORD[BOS]] + fra_ws + [WORD[EOS]]) return", "== \"__main__\": parser = argparse.ArgumentParser(description='seq2sqe corpora') parser.add_argument('--save-data', type=str, default='data/seq2seq.pt', help='path to save processed", "to save processed data') parser.add_argument('--max-lenth', type=int, default=20, help='max length of sentence') parser.add_argument('--min-word-count', type=int,", "self.tgt_dict = Dictionary() def parse(self): def gather_file(file_, max_len): en_sents, fra_sents, en_cut_count, fra_cut_count =", "len(self.tgt_dict) }, 'train': { 'src': corpora2idx(self.src_train, self.src_dict.word2idx), 'tgt': corpora2idx(self.tgt_train, self.tgt_dict.word2idx) }, 'valid': {", "[], [], 0, 0 for sentences in open(file_): en_, fra_ = [normalizeString(s) for", "self._save_data) print('src corpora length - [{}] | target corpora length - [{}]'.format(len(self.src_dict), len(self.tgt_dict)))", "= save_data self._max_len = max_len self._min_word_count = min_word_count self.src_sents = None self.tgt_sents =", "- [{}]\".format(tgt_ignore)) self.src_train = src_train self.tgt_train = tgt_train self.src_valid = src_valid self.tgt_valid =", "'src': corpora2idx(self.src_valid, self.src_dict.word2idx), 'tgt': corpora2idx(self.tgt_valid, self.tgt_dict.word2idx) } } torch.save(data, self._save_data) print('src corpora length", "max_len) src_valid, tgt_valid, _, _ = gather_file('data/test', max_len) print(\"English data`s length out of", "for word in en_.strip().split()] fra_ws = [word for word in fra_.strip().split()] if len(en_ws)", "'train': { 'src': corpora2idx(self.src_train, self.src_dict.word2idx), 'tgt': corpora2idx(self.tgt_train, self.tgt_dict.word2idx) }, 'valid': { 'src': corpora2idx(self.src_valid,", "None self.tgt_valid_sents = None self.src_dict = Dictionary() self.tgt_dict = Dictionary() def parse(self): def", "data`s length out of range numbers - [{}]\".format(fra_cut_count)) src_ignore = self.src_dict(src_train, self._min_word_count) tgt_ignore", "0: print(\"Ignored src word counts - [{}]\".format(src_ignore)) if tgt_ignore != 0: print(\"Ignored tgt", "class Corpus(object): def __init__(self, save_data, max_len=20, min_word_count=1): self._save_data = save_data self._max_len = max_len", "Dictionary(object): def __init__(self): self.word2idx = { WORD[BOS]: BOS, WORD[EOS]: EOS, WORD[PAD]: PAD, WORD[UNK]:", "= src_train self.tgt_train = tgt_train self.src_valid = src_valid self.tgt_valid = tgt_valid def save(self):", "= 4 
def add(self, word): if self.word2idx.get(word) is None: self.word2idx[word] = self.idx self.idx", "{w: 0 for w in set(words)} for w in words: word_count[w]+=1 ignored_word_count =", "if __name__ == \"__main__\": parser = argparse.ArgumentParser(description='seq2sqe corpora') parser.add_argument('--save-data', type=str, default='data/seq2seq.pt', help='path to", "self._min_word_count) tgt_ignore = self.tgt_dict(tgt_train, self._min_word_count) if src_ignore != 0: print(\"Ignored src word counts", "from const import * class Dictionary(object): def __init__(self): self.word2idx = { WORD[BOS]: BOS,", "fra_ = [normalizeString(s) for s in sentences.strip().split('\\t')] en_ws = [word for word in", "fra_cut_count = [], [], 0, 0 for sentences in open(file_): en_, fra_ =", "in sents for word in sent] word_count = {w: 0 for w in", "en_ws[:max_len] en_sents.append([WORD[BOS]] + en_ws + [WORD[EOS]]) if len(fra_ws) > max_len: fra_cut_count += 1", "None: self.word2idx[word] = self.idx self.idx += 1 def __call__(self, sents, min_count): words =", "- [{}]\".format(fra_cut_count)) src_ignore = self.src_dict(src_train, self._min_word_count) tgt_ignore = self.tgt_dict(tgt_train, self._min_word_count) if src_ignore !=", "\"%s(size = %d)\".format(self.__class__.__name__, len(self.idx)) class Corpus(object): def __init__(self, save_data, max_len=20, min_word_count=1): self._save_data =", "fra_cut_count, en_cut_count max_len = self._max_len - 2 src_train, tgt_train, fra_cut_count, en_cut_count = gather_file('data/train',", "in word_count.items(): if count <= min_count: ignored_word_count += 1 continue self.add(word) return ignored_word_count", "if src_ignore != 0: print(\"Ignored src word counts - [{}]\".format(src_ignore)) if tgt_ignore !=", "gather_file('data/train', max_len) src_valid, tgt_valid, _, _ = gather_file('data/test', max_len) print(\"English data`s length out", "w in words: word_count[w]+=1 ignored_word_count = 0 for word, count in word_count.items(): if", "for word in sent] word_count = {w: 0 for w in set(words)} for", "- [{}]'.format(len(self.src_dict), len(self.tgt_dict))) def process(self): self.parse() self.save() if __name__ == \"__main__\": parser =", "corpora length - [{}]'.format(len(self.src_dict), len(self.tgt_dict))) def process(self): self.parse() self.save() if __name__ == \"__main__\":", "Dictionary() def parse(self): def gather_file(file_, max_len): en_sents, fra_sents, en_cut_count, fra_cut_count = [], [],", "[], 0, 0 for sentences in open(file_): en_, fra_ = [normalizeString(s) for s", "self._save_data = save_data self._max_len = max_len self._min_word_count = min_word_count self.src_sents = None self.tgt_sents", "in en_.strip().split()] fra_ws = [word for word in fra_.strip().split()] if len(en_ws) > max_len:", "def parse(self): def gather_file(file_, max_len): en_sents, fra_sents, en_cut_count, fra_cut_count = [], [], 0,", "parse(self): def gather_file(file_, max_len): en_sents, fra_sents, en_cut_count, fra_cut_count = [], [], 0, 0", "save_data, max_len=20, min_word_count=1): self._save_data = save_data self._max_len = max_len self._min_word_count = min_word_count self.src_sents", "1 fra_ws = fra_ws[:max_len] fra_sents.append([WORD[BOS]] + fra_ws + [WORD[EOS]]) return fra_sents, en_sents, fra_cut_count,", "import argparse import logging from utils import corpora2idx, normalizeString from const import *", "tgt_train, fra_cut_count, en_cut_count = gather_file('data/train', max_len) src_valid, tgt_valid, _, _ = gather_file('data/test', max_len)", "word in fra_.strip().split()] if 
len(en_ws) > max_len: en_cut_count += 1 en_ws = en_ws[:max_len]", "continue self.add(word) return ignored_word_count def __len__(self): return self.idx def __str__(self): return \"%s(size =", "data = { 'max_word_len': self._max_len, 'dict': { 'src': self.src_dict.word2idx, 'src_size': len(self.src_dict), 'tgt': self.tgt_dict.word2idx,", "self.tgt_dict.word2idx) }, 'valid': { 'src': corpora2idx(self.src_valid, self.src_dict.word2idx), 'tgt': corpora2idx(self.tgt_valid, self.tgt_dict.word2idx) } } torch.save(data," ]
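A minimal usage sketch of the Dictionary class above, assuming the listing is saved as corpus.py next to the const/utils modules it imports; the module name and the toy sentences are illustrative only, not part of the original:

# Hypothetical quick check of Dictionary's counting/thresholding behaviour.
from corpus import Dictionary   # assumes the listing above lives in corpus.py

sents = [["hello", "world"], ["hello", "again"]]   # toy tokenized corpus
d = Dictionary()
ignored = d(sents, min_count=1)   # words seen no more than min_count times are skipped
print(ignored)                    # 2   ('world' and 'again' fall at the threshold)
print(len(d))                     # 5   (4 reserved BOS/EOS/PAD/UNK ids + 'hello')
print(d.word2idx)                 # the 4 reserved entries plus {'hello': 4}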
# Streamlit bounding-box annotation app built on streamlit_img_label: browses
# an image folder, draws/edits rectangles, assigns labels and saves the
# annotations to per-image .xml files.
import streamlit as st
import os
from streamlit_img_label import st_img_label
from streamlit_img_label.manage import ImageManager, ImageDirManager


def run(img_dir, labels):
    st.set_option("deprecation.showfileUploaderEncoding", False)
    idm = ImageDirManager(img_dir)

    if "files" not in st.session_state:
        st.session_state["files"] = idm.get_all_files()
        st.session_state["annotation_files"] = idm.get_exist_annotation_files()
        st.session_state["image_index"] = 0
    else:
        idm.set_all_files(st.session_state["files"])
        idm.set_annotation_files(st.session_state["annotation_files"])

    def refresh():
        st.session_state["files"] = idm.get_all_files()
        st.session_state["annotation_files"] = idm.get_exist_annotation_files()
        st.session_state["image_index"] = 0

    def next_image():
        image_index = st.session_state["image_index"]
        if image_index < len(st.session_state["files"]) - 1:
            st.session_state["image_index"] += 1
        else:
            st.warning('This is the last image.')

    def previous_image():
        image_index = st.session_state["image_index"]
        if image_index > 0:
            st.session_state["image_index"] -= 1
        else:
            st.warning('This is the first image.')

    def next_annotate_file():
        image_index = st.session_state["image_index"]
        next_image_index = idm.get_next_annotation_image(image_index)
        if next_image_index:
            st.session_state["image_index"] = idm.get_next_annotation_image(image_index)
        else:
            st.warning("All images are annotated.")
            next_image()

    def go_to_image():
        file_index = st.session_state["files"].index(st.session_state["file"])
        st.session_state["image_index"] = file_index

    # Sidebar: show status
    n_files = len(st.session_state["files"])
    n_annotate_files = len(st.session_state["annotation_files"])
    st.sidebar.write("Total files:", n_files)
    st.sidebar.write("Total annotate files:", n_annotate_files)
    st.sidebar.write("Remaining files:", n_files - n_annotate_files)

    st.sidebar.selectbox(
        "Files",
        st.session_state["files"],
        index=st.session_state["image_index"],
        on_change=go_to_image,
        key="file",
    )
    col1, col2 = st.sidebar.columns(2)
    with col1:
        st.button(label="Previous image", on_click=previous_image)
    with col2:
        st.button(label="Next image", on_click=next_image)
    st.sidebar.button(label="Next need annotate", on_click=next_annotate_file)
    st.sidebar.button(label="Refresh", on_click=refresh)

    # Main content: annotate images
    img_file_name = idm.get_image(st.session_state["image_index"])
    img_path = os.path.join(img_dir, img_file_name)
    im = ImageManager(img_path)
    img = im.get_img()
    resized_img = im.resizing_img()
    resized_rects = im.get_resized_rects()
    rects = st_img_label(resized_img, box_color="red", rects=resized_rects)

    def annotate():
        im.save_annotation()
        image_annotate_file_name = img_file_name.split(".")[0] + ".xml"
        if image_annotate_file_name not in st.session_state["annotation_files"]:
            st.session_state["annotation_files"].append(image_annotate_file_name)
        next_annotate_file()

    if rects:
        st.button(label="Save", on_click=annotate)
        preview_imgs = im.init_annotation(rects)

        for i, prev_img in enumerate(preview_imgs):
            prev_img[0].thumbnail((200, 200))
            col1, col2 = st.columns(2)
            with col1:
                col1.image(prev_img[0])
            with col2:
                default_index = 0
                if prev_img[1]:
                    default_index = labels.index(prev_img[1])

                select_label = col2.selectbox(
                    "Label", labels, key=f"label_{i}", index=default_index
                )
                im.set_annotation(i, select_label)


if __name__ == "__main__":
    custom_labels = ["", "dog", "cat"]
    run("img_dir", custom_labels)
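The app above leans entirely on st.session_state plus on_click callbacks because Streamlit re-runs the whole script on every interaction; the stripped-down sketch below shows just that pattern in isolation and is illustrative only, not part of streamlit_img_label:

# Minimal session-state/callback sketch: the counter must live in
# st.session_state (a plain local would be reset on every rerun), and it is
# mutated inside the on_click callback before the script re-executes, which is
# how next_image()/previous_image() work in the app above.
import streamlit as st

if "idx" not in st.session_state:
    st.session_state["idx"] = 0

def advance():
    st.session_state["idx"] += 1

st.write("current index:", st.session_state["idx"])
st.button("Next", on_click=advance)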
r"""
Library routines for minimally 3-connected graph generation.

This program requires cython.
"""
import pyximport
pyximport.install(language_level=3)
# Interactive local-socket client for a gpusimilarity server: reads SMILES
# strings from stdin, converts each to a fingerprint and queries the server
# over a QLocalSocket for the most similar database entries.
from PyQt5 import QtCore, QtNetwork

import random

from gpusim_utils import smiles_to_fingerprint_bin


def parse_args():
    import argparse
    parser = argparse.ArgumentParser(description="Sample GPUSim Server - "
            "run an HTTP server that loads fingerprint data onto GPU and "  # noqa
            "responds to queries to find most similar fingerprints.")  # noqa
    parser.add_argument('dbname', help=".fsim file containing fingerprint "
                        "data to be searched")
    parser.add_argument('dbkey', default="", help="Key for fsim file")

    return parser.parse_args()


def main():
    args = parse_args()
    app = QtCore.QCoreApplication([])
    socket = QtNetwork.QLocalSocket(app)

    smiles = input("Smiles: ")
    dbcount = 1
    dbname = args.dbname
    dbkey = args.dbkey
    socket.connectToServer('gpusimilarity')
    while smiles and smiles.lower() not in ('quit', 'exit'):
        return_count = 20
        similarity_cutoff = 0

        fp_binary, _ = smiles_to_fingerprint_bin(smiles)
        fp_qba = QtCore.QByteArray(fp_binary)

        # Request wire format: db count, db name, db key, request id,
        # result count, similarity cutoff, then the raw fingerprint bytes.
        output_qba = QtCore.QByteArray()
        output_qds = QtCore.QDataStream(output_qba, QtCore.QIODevice.WriteOnly)

        output_qds.writeInt(dbcount)
        output_qds.writeString(dbname.encode())
        output_qds.writeString(dbkey.encode())

        request_num = random.randint(0, 2**31)
        output_qds.writeInt(request_num)
        output_qds.writeInt(return_count)
        output_qds.writeFloat(similarity_cutoff)
        output_qds << fp_qba

        socket.write(output_qba)
        socket.flush()
        socket.waitForReadyRead(30000)
        output_qba = socket.readAll()

        # Response wire format: echoed request id, result count, approximate
        # total match count, then parallel lists of SMILES, ids and scores.
        smiles = []
        scores = []
        ids = []

        data_reader = QtCore.QDataStream(output_qba)
        returned_request = data_reader.readInt()
        if request_num != returned_request:
            raise RuntimeError("Incorrect result ID returned!")
        return_count = data_reader.readInt()
        approximate_matches = data_reader.readUInt64()
        for i in range(return_count):
            smiles.append(data_reader.readString())
        for i in range(return_count):
            ids.append(data_reader.readString())
        for i in range(return_count):
            scores.append(data_reader.readFloat())

        print("Approximate total matches: {0}, returning {1}".format(
            approximate_matches, return_count))
        for cid, smi, score in zip(ids, smiles, scores):
            print("{0} {1}: {2}".format(cid, smi, score))
        smiles = input("Smiles: ")


if __name__ == '__main__':
    main()
queries to find most similar fingperints.\") #noqa parser.add_argument('dbname', help=\".fsim file containing fingerprint", "[] data_reader = QtCore.QDataStream(output_qba) returned_request = data_reader.readInt() if request_num != returned_request: raise RuntimeError(\"Incorrect", "most similar fingperints.\") #noqa parser.add_argument('dbname', help=\".fsim file containing fingerprint \" \"data to be", "fp_qba = QtCore.QByteArray(fp_binary) output_qba = QtCore.QByteArray() output_qds = QtCore.QDataStream(output_qba, QtCore.QIODevice.WriteOnly) output_qds.writeInt(dbcount) output_qds.writeString(dbname.encode()) output_qds.writeString(dbkey.encode())", "be searched\") parser.add_argument('dbkey', default=\"\", help=\"Key for fsim file\") return parser.parse_args() def main(): args", "QtNetwork import random from gpusim_utils import smiles_to_fingerprint_bin def parse_args(): import argparse parser =", "returning {1}\".format( approximate_matches, return_count)) for cid, smi, score in zip(ids, smiles, scores): print(\"{0}", "i in range(return_count): smiles.append(data_reader.readString()) for i in range(return_count): ids.append(data_reader.readString()) for i in range(return_count):", "import QtCore, QtNetwork import random from gpusim_utils import smiles_to_fingerprint_bin def parse_args(): import argparse", "output_qds.writeString(dbname.encode()) output_qds.writeString(dbkey.encode()) request_num = random.randint(0, 2**31) output_qds.writeInt(request_num) output_qds.writeInt(return_count) output_qds.writeFloat(similarity_cutoff) output_qds << fp_qba socket.write(output_qba)", "help=\"Key for fsim file\") return parser.parse_args() def main(): args = parse_args() app =", "in range(return_count): scores.append(data_reader.readFloat()) print(\"Approximate total matches: {0}, returning {1}\".format( approximate_matches, return_count)) for cid,", "\"responds to queries to find most similar fingperints.\") #noqa parser.add_argument('dbname', help=\".fsim file containing", "data_reader.readUInt64() for i in range(return_count): smiles.append(data_reader.readString()) for i in range(return_count): ids.append(data_reader.readString()) for i", "output_qds.writeInt(request_num) output_qds.writeInt(return_count) output_qds.writeFloat(similarity_cutoff) output_qds << fp_qba socket.write(output_qba) socket.flush() socket.waitForReadyRead(30000) output_qba = socket.readAll() smiles", "socket.readAll() smiles = [] scores = [] ids = [] data_reader = QtCore.QDataStream(output_qba)", "for i in range(return_count): smiles.append(data_reader.readString()) for i in range(return_count): ids.append(data_reader.readString()) for i in", "data_reader.readInt() if request_num != returned_request: raise RuntimeError(\"Incorrect result ID returned!\") return_count = data_reader.readInt()", "parse_args() app = QtCore.QCoreApplication([]) socket = QtNetwork.QLocalSocket(app) smiles = input(\"Smiles: \") dbcount =", "output_qds << fp_qba socket.write(output_qba) socket.flush() socket.waitForReadyRead(30000) output_qba = socket.readAll() smiles = [] scores", "fp_binary, _ = smiles_to_fingerprint_bin(smiles) fp_qba = QtCore.QByteArray(fp_binary) output_qba = QtCore.QByteArray() output_qds = QtCore.QDataStream(output_qba,", "= random.randint(0, 2**31) output_qds.writeInt(request_num) output_qds.writeInt(return_count) output_qds.writeFloat(similarity_cutoff) output_qds << fp_qba socket.write(output_qba) socket.flush() socket.waitForReadyRead(30000) output_qba", "\"data to be searched\") parser.add_argument('dbkey', 
default=\"\", help=\"Key for fsim file\") return parser.parse_args() def", "score in zip(ids, smiles, scores): print(\"{0} {1}: {2}\".format(cid, smi, score)) smiles = input(\"Smiles:", "fingerprint \" \"data to be searched\") parser.add_argument('dbkey', default=\"\", help=\"Key for fsim file\") return", "containing fingerprint \" \"data to be searched\") parser.add_argument('dbkey', default=\"\", help=\"Key for fsim file\")", "range(return_count): ids.append(data_reader.readString()) for i in range(return_count): scores.append(data_reader.readFloat()) print(\"Approximate total matches: {0}, returning {1}\".format(", "parse_args(): import argparse parser = argparse.ArgumentParser(description=\"Sample GPUSim Server - \" \"run an HTTP", "QtCore.QIODevice.WriteOnly) output_qds.writeInt(dbcount) output_qds.writeString(dbname.encode()) output_qds.writeString(dbkey.encode()) request_num = random.randint(0, 2**31) output_qds.writeInt(request_num) output_qds.writeInt(return_count) output_qds.writeFloat(similarity_cutoff) output_qds <<", "while smiles and smiles.lower() not in ('quit', 'exit'): return_count = 20 similarity_cutoff =", "\" \"run an HTTP server that loads fingerprint data onto GPU and \"", "server that loads fingerprint data onto GPU and \" #noqa \"responds to queries", "<< fp_qba socket.write(output_qba) socket.flush() socket.waitForReadyRead(30000) output_qba = socket.readAll() smiles = [] scores =", "default=\"\", help=\"Key for fsim file\") return parser.parse_args() def main(): args = parse_args() app", "data_reader = QtCore.QDataStream(output_qba) returned_request = data_reader.readInt() if request_num != returned_request: raise RuntimeError(\"Incorrect result", "in range(return_count): ids.append(data_reader.readString()) for i in range(return_count): scores.append(data_reader.readFloat()) print(\"Approximate total matches: {0}, returning", "{1}: {2}\".format(cid, smi, score)) smiles = input(\"Smiles: \") if __name__ == '__main__': main()", "import random from gpusim_utils import smiles_to_fingerprint_bin def parse_args(): import argparse parser = argparse.ArgumentParser(description=\"Sample", "returned!\") return_count = data_reader.readInt() approximate_matches = data_reader.readUInt64() for i in range(return_count): smiles.append(data_reader.readString()) for", "scores): print(\"{0} {1}: {2}\".format(cid, smi, score)) smiles = input(\"Smiles: \") if __name__ ==", "smiles = input(\"Smiles: \") dbcount = 1 dbname = args.dbname dbkey = args.dbkey", "20 similarity_cutoff = 0 fp_binary, _ = smiles_to_fingerprint_bin(smiles) fp_qba = QtCore.QByteArray(fp_binary) output_qba =", "random.randint(0, 2**31) output_qds.writeInt(request_num) output_qds.writeInt(return_count) output_qds.writeFloat(similarity_cutoff) output_qds << fp_qba socket.write(output_qba) socket.flush() socket.waitForReadyRead(30000) output_qba =", "\"run an HTTP server that loads fingerprint data onto GPU and \" #noqa", "app = QtCore.QCoreApplication([]) socket = QtNetwork.QLocalSocket(app) smiles = input(\"Smiles: \") dbcount = 1", "fingerprint data onto GPU and \" #noqa \"responds to queries to find most", "QtCore.QByteArray() output_qds = QtCore.QDataStream(output_qba, QtCore.QIODevice.WriteOnly) output_qds.writeInt(dbcount) output_qds.writeString(dbname.encode()) output_qds.writeString(dbkey.encode()) request_num = random.randint(0, 2**31) output_qds.writeInt(request_num)", "Server - \" \"run an HTTP server that loads fingerprint data onto GPU", "= [] ids = [] data_reader = QtCore.QDataStream(output_qba) 
returned_request = data_reader.readInt() if request_num", "cid, smi, score in zip(ids, smiles, scores): print(\"{0} {1}: {2}\".format(cid, smi, score)) smiles", "in zip(ids, smiles, scores): print(\"{0} {1}: {2}\".format(cid, smi, score)) smiles = input(\"Smiles: \")", "QtCore.QByteArray(fp_binary) output_qba = QtCore.QByteArray() output_qds = QtCore.QDataStream(output_qba, QtCore.QIODevice.WriteOnly) output_qds.writeInt(dbcount) output_qds.writeString(dbname.encode()) output_qds.writeString(dbkey.encode()) request_num =", "#noqa parser.add_argument('dbname', help=\".fsim file containing fingerprint \" \"data to be searched\") parser.add_argument('dbkey', default=\"\",", "socket.connectToServer('gpusimilarity') while smiles and smiles.lower() not in ('quit', 'exit'): return_count = 20 similarity_cutoff", "= QtNetwork.QLocalSocket(app) smiles = input(\"Smiles: \") dbcount = 1 dbname = args.dbname dbkey", "PyQt5 import QtCore, QtNetwork import random from gpusim_utils import smiles_to_fingerprint_bin def parse_args(): import", "input(\"Smiles: \") dbcount = 1 dbname = args.dbname dbkey = args.dbkey socket.connectToServer('gpusimilarity') while", "= parse_args() app = QtCore.QCoreApplication([]) socket = QtNetwork.QLocalSocket(app) smiles = input(\"Smiles: \") dbcount", "HTTP server that loads fingerprint data onto GPU and \" #noqa \"responds to", "import argparse parser = argparse.ArgumentParser(description=\"Sample GPUSim Server - \" \"run an HTTP server", "= QtCore.QCoreApplication([]) socket = QtNetwork.QLocalSocket(app) smiles = input(\"Smiles: \") dbcount = 1 dbname", "= input(\"Smiles: \") dbcount = 1 dbname = args.dbname dbkey = args.dbkey socket.connectToServer('gpusimilarity')", "= args.dbkey socket.connectToServer('gpusimilarity') while smiles and smiles.lower() not in ('quit', 'exit'): return_count =", "raise RuntimeError(\"Incorrect result ID returned!\") return_count = data_reader.readInt() approximate_matches = data_reader.readUInt64() for i", "in ('quit', 'exit'): return_count = 20 similarity_cutoff = 0 fp_binary, _ = smiles_to_fingerprint_bin(smiles)", "= 20 similarity_cutoff = 0 fp_binary, _ = smiles_to_fingerprint_bin(smiles) fp_qba = QtCore.QByteArray(fp_binary) output_qba", "output_qba = socket.readAll() smiles = [] scores = [] ids = [] data_reader", "fsim file\") return parser.parse_args() def main(): args = parse_args() app = QtCore.QCoreApplication([]) socket", "return_count)) for cid, smi, score in zip(ids, smiles, scores): print(\"{0} {1}: {2}\".format(cid, smi,", "dbkey = args.dbkey socket.connectToServer('gpusimilarity') while smiles and smiles.lower() not in ('quit', 'exit'): return_count", "= data_reader.readInt() if request_num != returned_request: raise RuntimeError(\"Incorrect result ID returned!\") return_count =", "= data_reader.readInt() approximate_matches = data_reader.readUInt64() for i in range(return_count): smiles.append(data_reader.readString()) for i in", "= QtCore.QDataStream(output_qba) returned_request = data_reader.readInt() if request_num != returned_request: raise RuntimeError(\"Incorrect result ID" ]
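The request framing above is easy to get wrong when retyping, so the sketch below factors the same serialization into a helper. The helper name and its default arguments are illustrative only and are not part of gpusimilarity; it uses exactly the calls and field order shown in main().

def build_query(dbname, dbkey, fp_binary, return_count=20, similarity_cutoff=0.0):
    """Illustrative helper: serialize one query with the same field order as main()."""
    request_num = random.randint(0, 2**31)
    qba = QtCore.QByteArray()
    qds = QtCore.QDataStream(qba, QtCore.QIODevice.WriteOnly)
    qds.writeInt(1)                          # dbcount: querying a single database
    qds.writeString(dbname.encode())
    qds.writeString(dbkey.encode())
    qds.writeInt(request_num)                # echoed back so the reply can be matched
    qds.writeInt(return_count)
    qds.writeFloat(similarity_cutoff)
    qds << QtCore.QByteArray(fp_binary)      # query fingerprint bytes
    return request_num, qba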
[ "expiry: str = '' @dataclass class MoodleCredentials(LearningEnvCredentials): # Username username: str = ''", "user ID user_id: str = '' # Associated e-mail address email: str =", "abc import ABC from dataclasses import dataclass, field from typing import List @dataclass", "str = '' # OAuth2 refresh token refresh_token: str = '' # OAuth2", "@dataclass class GoogleCredentials(LearningEnvCredentials): # Associated user ID user_id: str = '' # Associated", "str = '' @dataclass class MoodleCredentials(LearningEnvCredentials): # Username username: str = '' #", "# Username username: str = '' # Token password: str = '' #", "dataclasses import dataclass, field from typing import List @dataclass class LearningEnvCredentials(ABC): pass @dataclass", "= '' # OAuth2 scopes scopes: List[str] = field(default_factory=list) # OpenID token id_token:", "# OAuth2 client secret client_secret: str = '' # OAuth2 scopes scopes: List[str]", "typing import List @dataclass class LearningEnvCredentials(ABC): pass @dataclass class GoogleCredentials(LearningEnvCredentials): # Associated user", "# OAuth2 access token token: str = '' # OAuth2 refresh token refresh_token:", "= '' # OAuth2 access token token: str = '' # OAuth2 refresh", "# OAuth2 scopes scopes: List[str] = field(default_factory=list) # OpenID token id_token: str =", "@dataclass class LearningEnvCredentials(ABC): pass @dataclass class GoogleCredentials(LearningEnvCredentials): # Associated user ID user_id: str", "str = '' # OAuth2 client ID client_id: str = '' # OAuth2", "str = '' # Associated e-mail address email: str = '' # OAuth2", "LearningEnvCredentials(ABC): pass @dataclass class GoogleCredentials(LearningEnvCredentials): # Associated user ID user_id: str = ''", "# OAuth2 token URI token_uri: str = '' # OAuth2 client ID client_id:", "token: str = '' # OAuth2 refresh token refresh_token: str = '' #", "field(default_factory=list) # OpenID token id_token: str = '' # OAuth2 expiry expiry: str", "'' # OAuth2 client secret client_secret: str = '' # OAuth2 scopes scopes:", "field from typing import List @dataclass class LearningEnvCredentials(ABC): pass @dataclass class GoogleCredentials(LearningEnvCredentials): #", "client_id: str = '' # OAuth2 client secret client_secret: str = '' #", "'' # OAuth2 scopes scopes: List[str] = field(default_factory=list) # OpenID token id_token: str", "'' # OAuth2 refresh token refresh_token: str = '' # OAuth2 token URI", "client_secret: str = '' # OAuth2 scopes scopes: List[str] = field(default_factory=list) # OpenID", "class LearningEnvCredentials(ABC): pass @dataclass class GoogleCredentials(LearningEnvCredentials): # Associated user ID user_id: str =", "address email: str = '' # OAuth2 access token token: str = ''", "ID client_id: str = '' # OAuth2 client secret client_secret: str = ''", "# Token password: str = '' # Moodle server URL server: str =", "import dataclass, field from typing import List @dataclass class LearningEnvCredentials(ABC): pass @dataclass class", "= '' # OAuth2 expiry expiry: str = '' @dataclass class MoodleCredentials(LearningEnvCredentials): #", "refresh_token: str = '' # OAuth2 token URI token_uri: str = '' #", "OAuth2 scopes scopes: List[str] = field(default_factory=list) # OpenID token id_token: str = ''", "OAuth2 token URI token_uri: str = '' # OAuth2 client ID client_id: str", "OAuth2 client secret client_secret: str = '' # OAuth2 scopes scopes: List[str] =", "Token password: str = '' # Moodle server URL server: str = ''", "secret client_secret: str = '' # OAuth2 scopes scopes: List[str] = 
field(default_factory=list) #", "str = '' # OAuth2 token URI token_uri: str = '' # OAuth2", "from typing import List @dataclass class LearningEnvCredentials(ABC): pass @dataclass class GoogleCredentials(LearningEnvCredentials): # Associated", "dataclass, field from typing import List @dataclass class LearningEnvCredentials(ABC): pass @dataclass class GoogleCredentials(LearningEnvCredentials):", "<gh_stars>0 from abc import ABC from dataclasses import dataclass, field from typing import", "# OAuth2 client ID client_id: str = '' # OAuth2 client secret client_secret:", "# Associated e-mail address email: str = '' # OAuth2 access token token:", "token token: str = '' # OAuth2 refresh token refresh_token: str = ''", "from dataclasses import dataclass, field from typing import List @dataclass class LearningEnvCredentials(ABC): pass", "scopes: List[str] = field(default_factory=list) # OpenID token id_token: str = '' # OAuth2", "str = '' # OAuth2 client secret client_secret: str = '' # OAuth2", "OAuth2 access token token: str = '' # OAuth2 refresh token refresh_token: str", "token URI token_uri: str = '' # OAuth2 client ID client_id: str =", "MoodleCredentials(LearningEnvCredentials): # Username username: str = '' # Token password: str = ''", "user_id: str = '' # Associated e-mail address email: str = '' #", "Associated e-mail address email: str = '' # OAuth2 access token token: str", "str = '' # OAuth2 access token token: str = '' # OAuth2", "'' # OAuth2 token URI token_uri: str = '' # OAuth2 client ID", "= '' @dataclass class MoodleCredentials(LearningEnvCredentials): # Username username: str = '' # Token", "'' @dataclass class MoodleCredentials(LearningEnvCredentials): # Username username: str = '' # Token password:", "from abc import ABC from dataclasses import dataclass, field from typing import List", "import ABC from dataclasses import dataclass, field from typing import List @dataclass class", "token refresh_token: str = '' # OAuth2 token URI token_uri: str = ''", "# OAuth2 refresh token refresh_token: str = '' # OAuth2 token URI token_uri:", "List @dataclass class LearningEnvCredentials(ABC): pass @dataclass class GoogleCredentials(LearningEnvCredentials): # Associated user ID user_id:", "OAuth2 refresh token refresh_token: str = '' # OAuth2 token URI token_uri: str", "= '' # OAuth2 token URI token_uri: str = '' # OAuth2 client", "pass @dataclass class GoogleCredentials(LearningEnvCredentials): # Associated user ID user_id: str = '' #", "class MoodleCredentials(LearningEnvCredentials): # Username username: str = '' # Token password: str =", "GoogleCredentials(LearningEnvCredentials): # Associated user ID user_id: str = '' # Associated e-mail address", "'' # OAuth2 access token token: str = '' # OAuth2 refresh token", "str = '' # Token password: str = '' # Moodle server URL", "token id_token: str = '' # OAuth2 expiry expiry: str = '' @dataclass", "URI token_uri: str = '' # OAuth2 client ID client_id: str = ''", "'' # Token password: str = '' # Moodle server URL server: str", "Username username: str = '' # Token password: str = '' # Moodle", "id_token: str = '' # OAuth2 expiry expiry: str = '' @dataclass class", "OpenID token id_token: str = '' # OAuth2 expiry expiry: str = ''", "List[str] = field(default_factory=list) # OpenID token id_token: str = '' # OAuth2 expiry", "scopes scopes: List[str] = field(default_factory=list) # OpenID token id_token: str = '' #", "# OpenID token id_token: str = '' # OAuth2 expiry expiry: str =", "= '' # Associated e-mail address email: str = '' # OAuth2 
access", "= '' # OAuth2 client ID client_id: str = '' # OAuth2 client", "= '' # Token password: str = '' # Moodle server URL server:", "e-mail address email: str = '' # OAuth2 access token token: str =", "# Associated user ID user_id: str = '' # Associated e-mail address email:", "= '' # OAuth2 refresh token refresh_token: str = '' # OAuth2 token", "client secret client_secret: str = '' # OAuth2 scopes scopes: List[str] = field(default_factory=list)", "str = '' # OAuth2 scopes scopes: List[str] = field(default_factory=list) # OpenID token", "str = '' # OAuth2 expiry expiry: str = '' @dataclass class MoodleCredentials(LearningEnvCredentials):", "email: str = '' # OAuth2 access token token: str = '' #", "OAuth2 expiry expiry: str = '' @dataclass class MoodleCredentials(LearningEnvCredentials): # Username username: str", "token_uri: str = '' # OAuth2 client ID client_id: str = '' #", "# OAuth2 expiry expiry: str = '' @dataclass class MoodleCredentials(LearningEnvCredentials): # Username username:", "= field(default_factory=list) # OpenID token id_token: str = '' # OAuth2 expiry expiry:", "OAuth2 client ID client_id: str = '' # OAuth2 client secret client_secret: str", "import List @dataclass class LearningEnvCredentials(ABC): pass @dataclass class GoogleCredentials(LearningEnvCredentials): # Associated user ID", "ID user_id: str = '' # Associated e-mail address email: str = ''", "ABC from dataclasses import dataclass, field from typing import List @dataclass class LearningEnvCredentials(ABC):", "Associated user ID user_id: str = '' # Associated e-mail address email: str", "@dataclass class MoodleCredentials(LearningEnvCredentials): # Username username: str = '' # Token password: str", "'' # OAuth2 expiry expiry: str = '' @dataclass class MoodleCredentials(LearningEnvCredentials): # Username", "= '' # OAuth2 client secret client_secret: str = '' # OAuth2 scopes", "refresh token refresh_token: str = '' # OAuth2 token URI token_uri: str =", "'' # Associated e-mail address email: str = '' # OAuth2 access token", "'' # OAuth2 client ID client_id: str = '' # OAuth2 client secret", "username: str = '' # Token password: str = '' # Moodle server", "expiry expiry: str = '' @dataclass class MoodleCredentials(LearningEnvCredentials): # Username username: str =", "class GoogleCredentials(LearningEnvCredentials): # Associated user ID user_id: str = '' # Associated e-mail", "access token token: str = '' # OAuth2 refresh token refresh_token: str =", "client ID client_id: str = '' # OAuth2 client secret client_secret: str =" ]
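A minimal usage sketch of the credential containers above; every value here is a placeholder, not a real account or endpoint.

google = GoogleCredentials(
    user_id='1234567890',
    email='student@example.org',
    scopes=['openid', 'email'],
)
moodle = MoodleCredentials(
    username='student',
    password='<web-service token>',
    server='https://moodle.example.org',
)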
[ "== 12: if now.hour == 23: # if it's right before 12AM in", "12AM in December, use tomorrow as the default date # because it's almost", "f\"$EDITOR {script_file}\", shell=True, ) def _get_year() -> int: east = \"US/Eastern\" now =", "aoc.paths import pendulum @click.command() @click.option(\"-y\", \"--year\", type=str) @click.option(\"-d\", \"--day\", type=str) def new(year: str,", "default=_get_day(year)) script_file = _new_script(year=year, day=day) print(f\"Created script {script_file}!\") if \"EDITOR\" in os.environ: subprocess.Popen(", "date in the future\") else: raise RuntimeError(f\"Script already exists for {year}-{day}!!!\") script_dir.mkdir(parents=True, exist_ok=True)", "day.zfill(2) script = Script.from_year_day(year=year, day=day) script_dir = script.path.parent if script_dir.parent.exists() and not overwrite:", "default=_get_year()) if not day: day = click.prompt(f\"Day\", default=_get_day(year)) script_file = _new_script(year=year, day=day) print(f\"Created", "if now.hour == 23: # if it's right before 12AM in December, use", "year = click.prompt(f\"Year\", default=_get_year()) if not day: day = click.prompt(f\"Day\", default=_get_day(year)) script_file =", "pendulum.now(tz=east) if now.month == 12: if now.hour == 23: # if it's right", "= click.prompt(f\"Day\", default=_get_day(year)) script_file = _new_script(year=year, day=day) print(f\"Created script {script_file}!\") if \"EDITOR\" in", "12: if now.hour == 23: # if it's right before 12AM in December,", "future\") else: raise RuntimeError(f\"Script already exists for {year}-{day}!!!\") script_dir.mkdir(parents=True, exist_ok=True) script.path.touch(exist_ok=True) script.path.write_text( (aoc.paths.AOC_PKG", "pendulum @click.command() @click.option(\"-y\", \"--year\", type=str) @click.option(\"-d\", \"--day\", type=str) def new(year: str, day: str):", "return \"1\" else: return str(max([int(p.stem) for p in year_dir.iterdir()]) + 1) def _new_script(year:", "Script.from_year_day(year=year, day=day) script_dir = script.path.parent if script_dir.parent.exists() and not overwrite: if pendulum.now() >", "now.hour == 23: # if it's right before 12AM in December, use tomorrow", "tomorrow as the default date # because it's almost AOC time return pendulum.tomorrow(east).year", "script = Script.from_year_day(year=year, day=day) script_dir = script.path.parent if script_dir.parent.exists() and not overwrite: if", "the default date because # you probably want to do yesteray's date return", "_get_day(year: str) -> str: year_dir = Path(__file__).parent.parent.parent.parent / str(year) if not year_dir.exists(): return", "-> str: year_dir = Path(__file__).parent.parent.parent.parent / str(year) if not year_dir.exists(): return \"1\" else:", "str(year) if not year_dir.exists(): return \"1\" else: return str(max([int(p.stem) for p in year_dir.iterdir()])", "date # because it's almost AOC time return pendulum.tomorrow(east).year elif now.hour == 0:", "AOC time return pendulum.tomorrow(east).year elif now.hour == 0: # if it's after 12AM", ") def _get_year() -> int: east = \"US/Eastern\" now = pendulum.now(tz=east) if now.month", "in the future\") else: raise RuntimeError(f\"Script already exists for {year}-{day}!!!\") script_dir.mkdir(parents=True, exist_ok=True) script.path.touch(exist_ok=True)", "for p in year_dir.iterdir()]) + 1) def _new_script(year: str, day: str, overwrite: bool", "pendulum.datetime(year=int(year), month=12, day=int(day)): print(\"Allow override of solution file, because script date in the", "file, because script date 
in the future\") else: raise RuntimeError(f\"Script already exists for", "if \"EDITOR\" in os.environ: subprocess.Popen( f\"$EDITOR {script_file}\", shell=True, ) def _get_year() -> int:", "if not year_dir.exists(): return \"1\" else: return str(max([int(p.stem) for p in year_dir.iterdir()]) +", "day: str, overwrite: bool = False) -> Path: day = day.zfill(2) script =", "December, use yestrday as the default date because # you probably want to", "it's right before 12AM in December, use tomorrow as the default date #", "use tomorrow as the default date # because it's almost AOC time return", "int: east = \"US/Eastern\" now = pendulum.now(tz=east) if now.month == 12: if now.hour", "day: str): \"\"\"Create new script for AOC\"\"\" if not year: year = click.prompt(f\"Year\",", "now.year def _get_day(year: str) -> str: year_dir = Path(__file__).parent.parent.parent.parent / str(year) if not", "return int(os.environ.get(\"AOC_YEAR\", 0)) or now.year def _get_day(year: str) -> str: year_dir = Path(__file__).parent.parent.parent.parent", "= pendulum.now(tz=east) if now.month == 12: if now.hour == 23: # if it's", "new script for AOC\"\"\" if not year: year = click.prompt(f\"Year\", default=_get_year()) if not", "23: # if it's right before 12AM in December, use tomorrow as the", "elif now.hour == 0: # if it's after 12AM in December, use yestrday", "or now.year def _get_day(year: str) -> str: year_dir = Path(__file__).parent.parent.parent.parent / str(year) if", "0: # if it's after 12AM in December, use yestrday as the default", "do yesteray's date return pendulum.today(east).year return int(os.environ.get(\"AOC_YEAR\", 0)) or now.year def _get_day(year: str)", "else: raise RuntimeError(f\"Script already exists for {year}-{day}!!!\") script_dir.mkdir(parents=True, exist_ok=True) script.path.touch(exist_ok=True) script.path.write_text( (aoc.paths.AOC_PKG /", "probably want to do yesteray's date return pendulum.today(east).year return int(os.environ.get(\"AOC_YEAR\", 0)) or now.year", "because script date in the future\") else: raise RuntimeError(f\"Script already exists for {year}-{day}!!!\")", "= click.prompt(f\"Year\", default=_get_year()) if not day: day = click.prompt(f\"Day\", default=_get_day(year)) script_file = _new_script(year=year,", "now = pendulum.now(tz=east) if now.month == 12: if now.hour == 23: # if", "= script.path.parent if script_dir.parent.exists() and not overwrite: if pendulum.now() > pendulum.datetime(year=int(year), month=12, day=int(day)):", "/ str(year) if not year_dir.exists(): return \"1\" else: return str(max([int(p.stem) for p in", "not overwrite: if pendulum.now() > pendulum.datetime(year=int(year), month=12, day=int(day)): print(\"Allow override of solution file,", "from aoc.script import Script import aoc.paths import pendulum @click.command() @click.option(\"-y\", \"--year\", type=str) @click.option(\"-d\",", "\"--year\", type=str) @click.option(\"-d\", \"--day\", type=str) def new(year: str, day: str): \"\"\"Create new script", "script for AOC\"\"\" if not year: year = click.prompt(f\"Year\", default=_get_year()) if not day:", "return str(max([int(p.stem) for p in year_dir.iterdir()]) + 1) def _new_script(year: str, day: str,", "= False) -> Path: day = day.zfill(2) script = Script.from_year_day(year=year, day=day) script_dir =", "script_dir = script.path.parent if script_dir.parent.exists() and not overwrite: if pendulum.now() > pendulum.datetime(year=int(year), month=12,", "of solution file, because script date in the future\") else: raise RuntimeError(f\"Script 
already", "the future\") else: raise RuntimeError(f\"Script already exists for {year}-{day}!!!\") script_dir.mkdir(parents=True, exist_ok=True) script.path.touch(exist_ok=True) script.path.write_text(", "return pendulum.tomorrow(east).year elif now.hour == 0: # if it's after 12AM in December,", "date return pendulum.today(east).year return int(os.environ.get(\"AOC_YEAR\", 0)) or now.year def _get_day(year: str) -> str:", "Path(__file__).parent.parent.parent.parent / str(year) if not year_dir.exists(): return \"1\" else: return str(max([int(p.stem) for p", "the default date # because it's almost AOC time return pendulum.tomorrow(east).year elif now.hour", "subprocess.Popen( f\"$EDITOR {script_file}\", shell=True, ) def _get_year() -> int: east = \"US/Eastern\" now", "solution file, because script date in the future\") else: raise RuntimeError(f\"Script already exists", "click import pendulum import subprocess import os from pathlib import Path from aoc.script", "day=day) script_dir = script.path.parent if script_dir.parent.exists() and not overwrite: if pendulum.now() > pendulum.datetime(year=int(year),", "= \"US/Eastern\" now = pendulum.now(tz=east) if now.month == 12: if now.hour == 23:", "if script_dir.parent.exists() and not overwrite: if pendulum.now() > pendulum.datetime(year=int(year), month=12, day=int(day)): print(\"Allow override", "-> int: east = \"US/Eastern\" now = pendulum.now(tz=east) if now.month == 12: if", "== 23: # if it's right before 12AM in December, use tomorrow as", "not year_dir.exists(): return \"1\" else: return str(max([int(p.stem) for p in year_dir.iterdir()]) + 1)", "= _new_script(year=year, day=day) print(f\"Created script {script_file}!\") if \"EDITOR\" in os.environ: subprocess.Popen( f\"$EDITOR {script_file}\",", "for AOC\"\"\" if not year: year = click.prompt(f\"Year\", default=_get_year()) if not day: day", "day: day = click.prompt(f\"Day\", default=_get_day(year)) script_file = _new_script(year=year, day=day) print(f\"Created script {script_file}!\") if", "day=day) print(f\"Created script {script_file}!\") if \"EDITOR\" in os.environ: subprocess.Popen( f\"$EDITOR {script_file}\", shell=True, )", "month=12, day=int(day)): print(\"Allow override of solution file, because script date in the future\")", "default date # because it's almost AOC time return pendulum.tomorrow(east).year elif now.hour ==", "now.hour == 0: # if it's after 12AM in December, use yestrday as", "pendulum import subprocess import os from pathlib import Path from aoc.script import Script", "raise RuntimeError(f\"Script already exists for {year}-{day}!!!\") script_dir.mkdir(parents=True, exist_ok=True) script.path.touch(exist_ok=True) script.path.write_text( (aoc.paths.AOC_PKG / \"templates\"", "print(f\"Created script {script_file}!\") if \"EDITOR\" in os.environ: subprocess.Popen( f\"$EDITOR {script_file}\", shell=True, ) def", "shell=True, ) def _get_year() -> int: east = \"US/Eastern\" now = pendulum.now(tz=east) if", "print(\"Allow override of solution file, because script date in the future\") else: raise", "pendulum.today(east).year return int(os.environ.get(\"AOC_YEAR\", 0)) or now.year def _get_day(year: str) -> str: year_dir =", "script date in the future\") else: raise RuntimeError(f\"Script already exists for {year}-{day}!!!\") script_dir.mkdir(parents=True,", "to do yesteray's date return pendulum.today(east).year return int(os.environ.get(\"AOC_YEAR\", 0)) or now.year def _get_day(year:", "year_dir.exists(): return \"1\" else: return str(max([int(p.stem) for p in 
year_dir.iterdir()]) + 1) def", "def _get_year() -> int: east = \"US/Eastern\" now = pendulum.now(tz=east) if now.month ==", "yestrday as the default date because # you probably want to do yesteray's", "+ 1) def _new_script(year: str, day: str, overwrite: bool = False) -> Path:", "Path: day = day.zfill(2) script = Script.from_year_day(year=year, day=day) script_dir = script.path.parent if script_dir.parent.exists()", "AOC\"\"\" if not year: year = click.prompt(f\"Year\", default=_get_year()) if not day: day =", "if not day: day = click.prompt(f\"Day\", default=_get_day(year)) script_file = _new_script(year=year, day=day) print(f\"Created script", "{script_file}\", shell=True, ) def _get_year() -> int: east = \"US/Eastern\" now = pendulum.now(tz=east)", "{year}-{day}!!!\") script_dir.mkdir(parents=True, exist_ok=True) script.path.touch(exist_ok=True) script.path.write_text( (aoc.paths.AOC_PKG / \"templates\" / \"script\" / script.path.name).read_text() )", "year_dir = Path(__file__).parent.parent.parent.parent / str(year) if not year_dir.exists(): return \"1\" else: return str(max([int(p.stem)", "December, use tomorrow as the default date # because it's almost AOC time", "year: year = click.prompt(f\"Year\", default=_get_year()) if not day: day = click.prompt(f\"Day\", default=_get_day(year)) script_file", "in year_dir.iterdir()]) + 1) def _new_script(year: str, day: str, overwrite: bool = False)", "str, overwrite: bool = False) -> Path: day = day.zfill(2) script = Script.from_year_day(year=year,", "as the default date # because it's almost AOC time return pendulum.tomorrow(east).year elif", "in December, use tomorrow as the default date # because it's almost AOC", "script_dir.parent.exists() and not overwrite: if pendulum.now() > pendulum.datetime(year=int(year), month=12, day=int(day)): print(\"Allow override of", "it's after 12AM in December, use yestrday as the default date because #", "yesteray's date return pendulum.today(east).year return int(os.environ.get(\"AOC_YEAR\", 0)) or now.year def _get_day(year: str) ->", "click.prompt(f\"Day\", default=_get_day(year)) script_file = _new_script(year=year, day=day) print(f\"Created script {script_file}!\") if \"EDITOR\" in os.environ:", "right before 12AM in December, use tomorrow as the default date # because", "day=int(day)): print(\"Allow override of solution file, because script date in the future\") else:", "subprocess import os from pathlib import Path from aoc.script import Script import aoc.paths", "in December, use yestrday as the default date because # you probably want", "str(max([int(p.stem) for p in year_dir.iterdir()]) + 1) def _new_script(year: str, day: str, overwrite:", "exist_ok=True) script.path.touch(exist_ok=True) script.path.write_text( (aoc.paths.AOC_PKG / \"templates\" / \"script\" / script.path.name).read_text() ) return script.path", "import click import pendulum import subprocess import os from pathlib import Path from", "os from pathlib import Path from aoc.script import Script import aoc.paths import pendulum", "pendulum.now() > pendulum.datetime(year=int(year), month=12, day=int(day)): print(\"Allow override of solution file, because script date", "False) -> Path: day = day.zfill(2) script = Script.from_year_day(year=year, day=day) script_dir = script.path.parent", "date because # you probably want to do yesteray's date return pendulum.today(east).year return", "RuntimeError(f\"Script already exists for {year}-{day}!!!\") script_dir.mkdir(parents=True, exist_ok=True) script.path.touch(exist_ok=True) 
script.path.write_text( (aoc.paths.AOC_PKG / \"templates\" /", "= Path(__file__).parent.parent.parent.parent / str(year) if not year_dir.exists(): return \"1\" else: return str(max([int(p.stem) for", "for {year}-{day}!!!\") script_dir.mkdir(parents=True, exist_ok=True) script.path.touch(exist_ok=True) script.path.write_text( (aoc.paths.AOC_PKG / \"templates\" / \"script\" / script.path.name).read_text()", "if not year: year = click.prompt(f\"Year\", default=_get_year()) if not day: day = click.prompt(f\"Day\",", "not day: day = click.prompt(f\"Day\", default=_get_day(year)) script_file = _new_script(year=year, day=day) print(f\"Created script {script_file}!\")", "as the default date because # you probably want to do yesteray's date", "because # you probably want to do yesteray's date return pendulum.today(east).year return int(os.environ.get(\"AOC_YEAR\",", "@click.command() @click.option(\"-y\", \"--year\", type=str) @click.option(\"-d\", \"--day\", type=str) def new(year: str, day: str): \"\"\"Create", "from pathlib import Path from aoc.script import Script import aoc.paths import pendulum @click.command()", "not year: year = click.prompt(f\"Year\", default=_get_year()) if not day: day = click.prompt(f\"Day\", default=_get_day(year))", "# because it's almost AOC time return pendulum.tomorrow(east).year elif now.hour == 0: #", "script_dir.mkdir(parents=True, exist_ok=True) script.path.touch(exist_ok=True) script.path.write_text( (aoc.paths.AOC_PKG / \"templates\" / \"script\" / script.path.name).read_text() ) return", "and not overwrite: if pendulum.now() > pendulum.datetime(year=int(year), month=12, day=int(day)): print(\"Allow override of solution", "if it's right before 12AM in December, use tomorrow as the default date", "Path from aoc.script import Script import aoc.paths import pendulum @click.command() @click.option(\"-y\", \"--year\", type=str)", "Script import aoc.paths import pendulum @click.command() @click.option(\"-y\", \"--year\", type=str) @click.option(\"-d\", \"--day\", type=str) def", "default date because # you probably want to do yesteray's date return pendulum.today(east).year", "aoc.script import Script import aoc.paths import pendulum @click.command() @click.option(\"-y\", \"--year\", type=str) @click.option(\"-d\", \"--day\",", "int(os.environ.get(\"AOC_YEAR\", 0)) or now.year def _get_day(year: str) -> str: year_dir = Path(__file__).parent.parent.parent.parent /", "type=str) def new(year: str, day: str): \"\"\"Create new script for AOC\"\"\" if not", "override of solution file, because script date in the future\") else: raise RuntimeError(f\"Script", "if now.month == 12: if now.hour == 23: # if it's right before", "os.environ: subprocess.Popen( f\"$EDITOR {script_file}\", shell=True, ) def _get_year() -> int: east = \"US/Eastern\"", "-> Path: day = day.zfill(2) script = Script.from_year_day(year=year, day=day) script_dir = script.path.parent if", "before 12AM in December, use tomorrow as the default date # because it's", "import pendulum import subprocess import os from pathlib import Path from aoc.script import", "# if it's right before 12AM in December, use tomorrow as the default", "# if it's after 12AM in December, use yestrday as the default date", "== 0: # if it's after 12AM in December, use yestrday as the", "if it's after 12AM in December, use yestrday as the default date because", "0)) or now.year def _get_day(year: str) -> str: year_dir = Path(__file__).parent.parent.parent.parent / str(year)", "\"--day\", type=str) def new(year: str, day: str): 
\"\"\"Create new script for AOC\"\"\" if", "now.month == 12: if now.hour == 23: # if it's right before 12AM", "str, day: str): \"\"\"Create new script for AOC\"\"\" if not year: year =", "# you probably want to do yesteray's date return pendulum.today(east).year return int(os.environ.get(\"AOC_YEAR\", 0))", "str, day: str, overwrite: bool = False) -> Path: day = day.zfill(2) script", "_get_year() -> int: east = \"US/Eastern\" now = pendulum.now(tz=east) if now.month == 12:", "because it's almost AOC time return pendulum.tomorrow(east).year elif now.hour == 0: # if", "in os.environ: subprocess.Popen( f\"$EDITOR {script_file}\", shell=True, ) def _get_year() -> int: east =", "type=str) @click.option(\"-d\", \"--day\", type=str) def new(year: str, day: str): \"\"\"Create new script for", "after 12AM in December, use yestrday as the default date because # you", "want to do yesteray's date return pendulum.today(east).year return int(os.environ.get(\"AOC_YEAR\", 0)) or now.year def", "new(year: str, day: str): \"\"\"Create new script for AOC\"\"\" if not year: year", "@click.option(\"-d\", \"--day\", type=str) def new(year: str, day: str): \"\"\"Create new script for AOC\"\"\"", "p in year_dir.iterdir()]) + 1) def _new_script(year: str, day: str, overwrite: bool =", "almost AOC time return pendulum.tomorrow(east).year elif now.hour == 0: # if it's after", "\"1\" else: return str(max([int(p.stem) for p in year_dir.iterdir()]) + 1) def _new_script(year: str,", "pendulum.tomorrow(east).year elif now.hour == 0: # if it's after 12AM in December, use", "else: return str(max([int(p.stem) for p in year_dir.iterdir()]) + 1) def _new_script(year: str, day:", "> pendulum.datetime(year=int(year), month=12, day=int(day)): print(\"Allow override of solution file, because script date in", "@click.option(\"-y\", \"--year\", type=str) @click.option(\"-d\", \"--day\", type=str) def new(year: str, day: str): \"\"\"Create new", "\"EDITOR\" in os.environ: subprocess.Popen( f\"$EDITOR {script_file}\", shell=True, ) def _get_year() -> int: east", "_new_script(year: str, day: str, overwrite: bool = False) -> Path: day = day.zfill(2)", "= day.zfill(2) script = Script.from_year_day(year=year, day=day) script_dir = script.path.parent if script_dir.parent.exists() and not", "pathlib import Path from aoc.script import Script import aoc.paths import pendulum @click.command() @click.option(\"-y\",", "bool = False) -> Path: day = day.zfill(2) script = Script.from_year_day(year=year, day=day) script_dir", "already exists for {year}-{day}!!!\") script_dir.mkdir(parents=True, exist_ok=True) script.path.touch(exist_ok=True) script.path.write_text( (aoc.paths.AOC_PKG / \"templates\" / \"script\"", "script_file = _new_script(year=year, day=day) print(f\"Created script {script_file}!\") if \"EDITOR\" in os.environ: subprocess.Popen( f\"$EDITOR", "12AM in December, use yestrday as the default date because # you probably", "click.prompt(f\"Year\", default=_get_year()) if not day: day = click.prompt(f\"Day\", default=_get_day(year)) script_file = _new_script(year=year, day=day)", "year_dir.iterdir()]) + 1) def _new_script(year: str, day: str, overwrite: bool = False) ->", "def _get_day(year: str) -> str: year_dir = Path(__file__).parent.parent.parent.parent / str(year) if not year_dir.exists():", "def _new_script(year: str, day: str, overwrite: bool = False) -> Path: day =", "1) def _new_script(year: str, day: str, overwrite: bool = False) -> Path: day", "import os from pathlib import Path from aoc.script import Script import aoc.paths 
import", "import Script import aoc.paths import pendulum @click.command() @click.option(\"-y\", \"--year\", type=str) @click.option(\"-d\", \"--day\", type=str)", "\"US/Eastern\" now = pendulum.now(tz=east) if now.month == 12: if now.hour == 23: #", "str) -> str: year_dir = Path(__file__).parent.parent.parent.parent / str(year) if not year_dir.exists(): return \"1\"", "day = click.prompt(f\"Day\", default=_get_day(year)) script_file = _new_script(year=year, day=day) print(f\"Created script {script_file}!\") if \"EDITOR\"", "it's almost AOC time return pendulum.tomorrow(east).year elif now.hour == 0: # if it's", "exists for {year}-{day}!!!\") script_dir.mkdir(parents=True, exist_ok=True) script.path.touch(exist_ok=True) script.path.write_text( (aoc.paths.AOC_PKG / \"templates\" / \"script\" /", "def new(year: str, day: str): \"\"\"Create new script for AOC\"\"\" if not year:", "script.path.parent if script_dir.parent.exists() and not overwrite: if pendulum.now() > pendulum.datetime(year=int(year), month=12, day=int(day)): print(\"Allow", "_new_script(year=year, day=day) print(f\"Created script {script_file}!\") if \"EDITOR\" in os.environ: subprocess.Popen( f\"$EDITOR {script_file}\", shell=True,", "{script_file}!\") if \"EDITOR\" in os.environ: subprocess.Popen( f\"$EDITOR {script_file}\", shell=True, ) def _get_year() ->", "day = day.zfill(2) script = Script.from_year_day(year=year, day=day) script_dir = script.path.parent if script_dir.parent.exists() and", "script {script_file}!\") if \"EDITOR\" in os.environ: subprocess.Popen( f\"$EDITOR {script_file}\", shell=True, ) def _get_year()", "\"\"\"Create new script for AOC\"\"\" if not year: year = click.prompt(f\"Year\", default=_get_year()) if", "import subprocess import os from pathlib import Path from aoc.script import Script import", "overwrite: bool = False) -> Path: day = day.zfill(2) script = Script.from_year_day(year=year, day=day)", "you probably want to do yesteray's date return pendulum.today(east).year return int(os.environ.get(\"AOC_YEAR\", 0)) or", "str): \"\"\"Create new script for AOC\"\"\" if not year: year = click.prompt(f\"Year\", default=_get_year())", "time return pendulum.tomorrow(east).year elif now.hour == 0: # if it's after 12AM in", "overwrite: if pendulum.now() > pendulum.datetime(year=int(year), month=12, day=int(day)): print(\"Allow override of solution file, because", "if pendulum.now() > pendulum.datetime(year=int(year), month=12, day=int(day)): print(\"Allow override of solution file, because script", "east = \"US/Eastern\" now = pendulum.now(tz=east) if now.month == 12: if now.hour ==", "return pendulum.today(east).year return int(os.environ.get(\"AOC_YEAR\", 0)) or now.year def _get_day(year: str) -> str: year_dir", "str: year_dir = Path(__file__).parent.parent.parent.parent / str(year) if not year_dir.exists(): return \"1\" else: return", "import aoc.paths import pendulum @click.command() @click.option(\"-y\", \"--year\", type=str) @click.option(\"-d\", \"--day\", type=str) def new(year:", "= Script.from_year_day(year=year, day=day) script_dir = script.path.parent if script_dir.parent.exists() and not overwrite: if pendulum.now()", "use yestrday as the default date because # you probably want to do", "import pendulum @click.command() @click.option(\"-y\", \"--year\", type=str) @click.option(\"-d\", \"--day\", type=str) def new(year: str, day:", "import Path from aoc.script import Script import aoc.paths import pendulum @click.command() @click.option(\"-y\", \"--year\"," ]
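A minimal sketch of exercising the command in-process with click's test runner instead of a shell. Note that this still runs _new_script, so it will touch the filesystem layout the module expects; the year/day values are arbitrary examples.

from click.testing import CliRunner

runner = CliRunner()
result = runner.invoke(new, ["--year", "2021", "--day", "3"])
print(result.exit_code, result.output)   # "Created script ...!" on success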
[ "Flatten x = torch.unsqueeze(x, dim=0) # Add a seq_len dimension of size 1", "stride=2, padding=0) self.conv3_bn = nn.BatchNorm2d(128) self.conv4 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=0) self.conv4_bn", "input at a time. Thus, seq_len = 1 and the input should be", "# CNN encoder x = cur_input x = F.relu(self.conv1_bn(self.conv1(x))) x = F.relu(self.conv2_bn(self.conv2(x))) x", "= nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=2, padding=0) self.conv2_bn = nn.BatchNorm2d(64) self.conv3 = nn.Conv2d(in_channels=64, out_channels=128,", "= rnn_class(input_size=rnn_input_size, hidden_size=hidden_size, num_layers=self.n_layers) self.fc4 = nn.Linear(hidden_size, 2) def forward(self, cur_input, cur_hidden): \"\"\"Forward", "now has size torch.Size([32, 512, 2, 3]) batch_size = x.size(0) x = torch.reshape(x,", "would like to reuse this code for train and test, we only process", "= nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=2, padding=0) self.conv6_bn = nn.BatchNorm2d(512) rnn_input_size = 512 *", "hidden should have shape torch.Size([self.n_layers, batch_size, hidden_size]) output = torch.squeeze(output, dim=0) output =", "num_layers=self.n_layers) self.fc4 = nn.Linear(hidden_size, 2) def forward(self, cur_input, cur_hidden): \"\"\"Forward pass a single", "default hidden state is zeros if not provided. \"\"\" if (self.rnn_type == 'rnn')", "torch.nn as nn import torch.nn.functional as F class BehaviorRNN(nn.Module): def __init__(self, rnn_type, hidden_size,", "x = torch.reshape(x, (batch_size, -1)) # Flatten x = torch.unsqueeze(x, dim=0) # Add", "will generally be of shape (1 x batch_size x hidden_size). \"\"\" # CNN", "def initial_hidden(self, batch_size): \"\"\"Initial hidden state. Note that the default hidden state is", "state. Returns: output: Hidden state for each output. It has shape (seq_len x", "x = F.relu(self.conv6_bn(self.conv6(x))) # x now has size torch.Size([32, 512, 2, 3]) batch_size", "Since we would like to reuse this code for train and test, we", "hidden state. Returns: output: Hidden state for each output. It has shape (seq_len", "= nn.BatchNorm2d(512) self.conv6 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=2, padding=0) self.conv6_bn = nn.BatchNorm2d(512) rnn_input_size", "kernel_size=3, stride=2, padding=0) self.conv3_bn = nn.BatchNorm2d(128) self.conv4 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=0)", "cur_hidden): \"\"\"Forward pass a single input (seq_len == 1) through the CNN-RNN. Args:", "= nn.GRU elif self.rnn_type == 'lstm': rnn_class = nn.LSTM else: raise ValueError('Invalid RNN", "= 512 * 2 * 3 self.rnn = rnn_class(input_size=rnn_input_size, hidden_size=hidden_size, num_layers=self.n_layers) self.fc4 =", "x = F.relu(self.conv3_bn(self.conv3(x))) x = F.relu(self.conv4_bn(self.conv4(x))) x = F.relu(self.conv5_bn(self.conv5(x))) x = F.relu(self.conv6_bn(self.conv6(x))) #", "dim=0) output = self.fc4(output) return output, hidden def initial_hidden(self, batch_size): \"\"\"Initial hidden state.", "return output, hidden def initial_hidden(self, batch_size): \"\"\"Initial hidden state. Note that the default", "shape (seq_len x batch_size x hidden_size). 
Since our seq_len is usually 1, this", "nn import torch.nn.functional as F class BehaviorRNN(nn.Module): def __init__(self, rnn_type, hidden_size, num_layers): super(BehaviorRNN,", "out_channels=64, kernel_size=5, stride=2, padding=0) self.conv2_bn = nn.BatchNorm2d(64) self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2,", "elif self.rnn_type == 'gru': rnn_class = nn.GRU elif self.rnn_type == 'lstm': rnn_class =", "3 self.rnn = rnn_class(input_size=rnn_input_size, hidden_size=hidden_size, num_layers=self.n_layers) self.fc4 = nn.Linear(hidden_size, 2) def forward(self, cur_input,", "for train and test, we only process one input at a time. Thus,", "Note that the default hidden state is zeros if not provided. \"\"\" if", "self.conv5_bn = nn.BatchNorm2d(512) self.conv6 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=2, padding=0) self.conv6_bn = nn.BatchNorm2d(512)", "n_channels x height x width). Since we would like to reuse this code", "batch_size): \"\"\"Initial hidden state. Note that the default hidden state is zeros if", "= hidden_size self.n_layers = num_layers self.rnn_type = rnn_type if self.rnn_type == 'rnn': rnn_class", "x hidden_size). \"\"\" # CNN encoder x = cur_input x = F.relu(self.conv1_bn(self.conv1(x))) x", "self.rnn_type == 'gru': rnn_class = nn.GRU elif self.rnn_type == 'lstm': rnn_class = nn.LSTM", "F.relu(self.conv5_bn(self.conv5(x))) x = F.relu(self.conv6_bn(self.conv6(x))) # x now has size torch.Size([32, 512, 2, 3])", "be (1 x batch_size x input_size). cur_hidden: Current (previous?) hidden state. Returns: output:", "batch_size x input_size). cur_hidden: Current (previous?) hidden state. Returns: output: Hidden state for", "cur_input x = F.relu(self.conv1_bn(self.conv1(x))) x = F.relu(self.conv2_bn(self.conv2(x))) x = F.relu(self.conv3_bn(self.conv3(x))) x = F.relu(self.conv4_bn(self.conv4(x)))", "batch_size, self.hidden_size) for _ in range(2)] # 2 because cell state and hidden", "nn.BatchNorm2d(64) self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, padding=0) self.conv3_bn = nn.BatchNorm2d(128) self.conv4 =", "= self.rnn(x, cur_hidden) # output should have shape torch.Size([1, batch_size, hidden_size]) # hidden", "-1)) # Flatten x = torch.unsqueeze(x, dim=0) # Add a seq_len dimension of", "__init__(self, rnn_type, hidden_size, num_layers): super(BehaviorRNN, self).__init__() self.is_recurrent = True self.hidden_size = hidden_size self.n_layers", "self.fc4 = nn.Linear(hidden_size, 2) def forward(self, cur_input, cur_hidden): \"\"\"Forward pass a single input", "torch.reshape(x, (batch_size, -1)) # Flatten x = torch.unsqueeze(x, dim=0) # Add a seq_len", "# Input resolution: 320x240 self.conv1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5, stride=2, padding=0) self.conv1_bn =", "elif self.rnn_type == 'lstm': rnn_class = nn.LSTM else: raise ValueError('Invalid RNN type.') #", "ValueError('Invalid RNN type.') # Input resolution: 320x240 self.conv1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5, stride=2,", "True self.hidden_size = hidden_size self.n_layers = num_layers self.rnn_type = rnn_type if self.rnn_type ==", "[torch.zeros(self.n_layers, batch_size, self.hidden_size) for _ in range(2)] # 2 because cell state and", "the default hidden state is zeros if not provided. \"\"\" if (self.rnn_type ==", "we only process one input at a time. 
Thus, seq_len = 1 and", "nn.BatchNorm2d(512) self.conv6 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=2, padding=0) self.conv6_bn = nn.BatchNorm2d(512) rnn_input_size =", "this will generally be of shape (1 x batch_size x hidden_size). \"\"\" #", "batch_size, hidden_size]) output = torch.squeeze(output, dim=0) output = self.fc4(output) return output, hidden def", "self.conv1_bn = nn.BatchNorm2d(32) self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=2, padding=0) self.conv2_bn = nn.BatchNorm2d(64)", "import torch.nn.functional as F class BehaviorRNN(nn.Module): def __init__(self, rnn_type, hidden_size, num_layers): super(BehaviorRNN, self).__init__()", "batch_size = x.size(0) x = torch.reshape(x, (batch_size, -1)) # Flatten x = torch.unsqueeze(x,", "F.relu(self.conv4_bn(self.conv4(x))) x = F.relu(self.conv5_bn(self.conv5(x))) x = F.relu(self.conv6_bn(self.conv6(x))) # x now has size torch.Size([32,", "self.rnn_type == 'lstm': rnn_class = nn.LSTM else: raise ValueError('Invalid RNN type.') # Input", "320x240 self.conv1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5, stride=2, padding=0) self.conv1_bn = nn.BatchNorm2d(32) self.conv2 =", "= nn.BatchNorm2d(512) rnn_input_size = 512 * 2 * 3 self.rnn = rnn_class(input_size=rnn_input_size, hidden_size=hidden_size,", "output = self.fc4(output) return output, hidden def initial_hidden(self, batch_size): \"\"\"Initial hidden state. Note", "hidden_size). \"\"\" # CNN encoder x = cur_input x = F.relu(self.conv1_bn(self.conv1(x))) x =", "dimension of size 1 # RNN output, hidden = self.rnn(x, cur_hidden) # output", "each output. It has shape (seq_len x batch_size x hidden_size). Since our seq_len", "if self.rnn_type == 'rnn': rnn_class = nn.RNN elif self.rnn_type == 'gru': rnn_class =", "rnn_class = nn.RNN elif self.rnn_type == 'gru': rnn_class = nn.GRU elif self.rnn_type ==", "= F.relu(self.conv1_bn(self.conv1(x))) x = F.relu(self.conv2_bn(self.conv2(x))) x = F.relu(self.conv3_bn(self.conv3(x))) x = F.relu(self.conv4_bn(self.conv4(x))) x =", "stride=2, padding=0) self.conv1_bn = nn.BatchNorm2d(32) self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=2, padding=0) self.conv2_bn", "CNN-RNN. Args: cur_input: Input of shape (batch_size x n_channels x height x width).", "== 'rnn') or (self.rnn_type == 'gru'): return torch.zeros(self.n_layers, batch_size, self.hidden_size) elif self.rnn_type ==", "512, 2, 3]) batch_size = x.size(0) x = torch.reshape(x, (batch_size, -1)) # Flatten", "x batch_size x hidden_size). Since our seq_len is usually 1, this will generally", "stride=2, padding=0) self.conv2_bn = nn.BatchNorm2d(64) self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, padding=0) self.conv3_bn", "have shape torch.Size([self.n_layers, batch_size, hidden_size]) output = torch.squeeze(output, dim=0) output = self.fc4(output) return", "nn.LSTM else: raise ValueError('Invalid RNN type.') # Input resolution: 320x240 self.conv1 = nn.Conv2d(in_channels=1,", "(1 x batch_size x input_size). cur_hidden: Current (previous?) hidden state. Returns: output: Hidden", "= torch.squeeze(output, dim=0) output = self.fc4(output) return output, hidden def initial_hidden(self, batch_size): \"\"\"Initial", "the CNN-RNN. 
Args: cur_input: Input of shape (batch_size x n_channels x height x", "= nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5, stride=2, padding=0) self.conv1_bn = nn.BatchNorm2d(32) self.conv2 = nn.Conv2d(in_channels=32, out_channels=64,", "to reuse this code for train and test, we only process one input", "generally be of shape (1 x batch_size x hidden_size). \"\"\" # CNN encoder", "\"\"\"Forward pass a single input (seq_len == 1) through the CNN-RNN. Args: cur_input:", "provided. \"\"\" if (self.rnn_type == 'rnn') or (self.rnn_type == 'gru'): return torch.zeros(self.n_layers, batch_size,", "2 * 3 self.rnn = rnn_class(input_size=rnn_input_size, hidden_size=hidden_size, num_layers=self.n_layers) self.fc4 = nn.Linear(hidden_size, 2) def", "time. Thus, seq_len = 1 and the input should be (1 x batch_size", "= F.relu(self.conv3_bn(self.conv3(x))) x = F.relu(self.conv4_bn(self.conv4(x))) x = F.relu(self.conv5_bn(self.conv5(x))) x = F.relu(self.conv6_bn(self.conv6(x))) # x", "height x width). Since we would like to reuse this code for train", "(self.rnn_type == 'rnn') or (self.rnn_type == 'gru'): return torch.zeros(self.n_layers, batch_size, self.hidden_size) elif self.rnn_type", "1 # RNN output, hidden = self.rnn(x, cur_hidden) # output should have shape", "else: raise ValueError('Invalid RNN type.') # Input resolution: 320x240 self.conv1 = nn.Conv2d(in_channels=1, out_channels=32,", "x batch_size x hidden_size). \"\"\" # CNN encoder x = cur_input x =", "self.is_recurrent = True self.hidden_size = hidden_size self.n_layers = num_layers self.rnn_type = rnn_type if", "nn.BatchNorm2d(32) self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=2, padding=0) self.conv2_bn = nn.BatchNorm2d(64) self.conv3 =", "= 1 and the input should be (1 x batch_size x input_size). cur_hidden:", "hidden state is zeros if not provided. \"\"\" if (self.rnn_type == 'rnn') or", "Thus, seq_len = 1 and the input should be (1 x batch_size x", "= F.relu(self.conv6_bn(self.conv6(x))) # x now has size torch.Size([32, 512, 2, 3]) batch_size =", "F.relu(self.conv2_bn(self.conv2(x))) x = F.relu(self.conv3_bn(self.conv3(x))) x = F.relu(self.conv4_bn(self.conv4(x))) x = F.relu(self.conv5_bn(self.conv5(x))) x = F.relu(self.conv6_bn(self.conv6(x)))", "out_channels=512, kernel_size=3, stride=2, padding=0) self.conv5_bn = nn.BatchNorm2d(512) self.conv6 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=2,", "(batch_size, -1)) # Flatten x = torch.unsqueeze(x, dim=0) # Add a seq_len dimension", "self.hidden_size = hidden_size self.n_layers = num_layers self.rnn_type = rnn_type if self.rnn_type == 'rnn':", "output: Hidden state for each output. It has shape (seq_len x batch_size x", "nn.BatchNorm2d(512) rnn_input_size = 512 * 2 * 3 self.rnn = rnn_class(input_size=rnn_input_size, hidden_size=hidden_size, num_layers=self.n_layers)", "hidden_size]) # hidden should have shape torch.Size([self.n_layers, batch_size, hidden_size]) output = torch.squeeze(output, dim=0)", "self.rnn = rnn_class(input_size=rnn_input_size, hidden_size=hidden_size, num_layers=self.n_layers) self.fc4 = nn.Linear(hidden_size, 2) def forward(self, cur_input, cur_hidden):", "output, hidden def initial_hidden(self, batch_size): \"\"\"Initial hidden state. 
Note that the default hidden", "cur_input, cur_hidden): \"\"\"Forward pass a single input (seq_len == 1) through the CNN-RNN.", "kernel_size=3, stride=2, padding=0) self.conv6_bn = nn.BatchNorm2d(512) rnn_input_size = 512 * 2 * 3", "* 2 * 3 self.rnn = rnn_class(input_size=rnn_input_size, hidden_size=hidden_size, num_layers=self.n_layers) self.fc4 = nn.Linear(hidden_size, 2)", "self).__init__() self.is_recurrent = True self.hidden_size = hidden_size self.n_layers = num_layers self.rnn_type = rnn_type", "of shape (batch_size x n_channels x height x width). Since we would like", "import torch.nn as nn import torch.nn.functional as F class BehaviorRNN(nn.Module): def __init__(self, rnn_type,", "and the input should be (1 x batch_size x input_size). cur_hidden: Current (previous?)", "seq_len = 1 and the input should be (1 x batch_size x input_size).", "kernel_size=5, stride=2, padding=0) self.conv1_bn = nn.BatchNorm2d(32) self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=2, padding=0)", "* 3 self.rnn = rnn_class(input_size=rnn_input_size, hidden_size=hidden_size, num_layers=self.n_layers) self.fc4 = nn.Linear(hidden_size, 2) def forward(self,", "zeros if not provided. \"\"\" if (self.rnn_type == 'rnn') or (self.rnn_type == 'gru'):", "super(BehaviorRNN, self).__init__() self.is_recurrent = True self.hidden_size = hidden_size self.n_layers = num_layers self.rnn_type =", "nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=2, padding=0) self.conv6_bn = nn.BatchNorm2d(512) rnn_input_size = 512 * 2", "like to reuse this code for train and test, we only process one", "as nn import torch.nn.functional as F class BehaviorRNN(nn.Module): def __init__(self, rnn_type, hidden_size, num_layers):", "shape (batch_size x n_channels x height x width). Since we would like to", "should be (1 x batch_size x input_size). cur_hidden: Current (previous?) hidden state. Returns:", "has size torch.Size([32, 512, 2, 3]) batch_size = x.size(0) x = torch.reshape(x, (batch_size,", "self.hidden_size) for _ in range(2)] # 2 because cell state and hidden state", "== 'gru': rnn_class = nn.GRU elif self.rnn_type == 'lstm': rnn_class = nn.LSTM else:", "= nn.LSTM else: raise ValueError('Invalid RNN type.') # Input resolution: 320x240 self.conv1 =", "width). Since we would like to reuse this code for train and test,", "Add a seq_len dimension of size 1 # RNN output, hidden = self.rnn(x,", "= nn.BatchNorm2d(32) self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=2, padding=0) self.conv2_bn = nn.BatchNorm2d(64) self.conv3", "nn.RNN elif self.rnn_type == 'gru': rnn_class = nn.GRU elif self.rnn_type == 'lstm': rnn_class", "== 1) through the CNN-RNN. Args: cur_input: Input of shape (batch_size x n_channels", "x n_channels x height x width). Since we would like to reuse this", "\"\"\"Initial hidden state. 
Note that the default hidden state is zeros if not", "= True self.hidden_size = hidden_size self.n_layers = num_layers self.rnn_type = rnn_type if self.rnn_type", "usually 1, this will generally be of shape (1 x batch_size x hidden_size).", "torch import torch.nn as nn import torch.nn.functional as F class BehaviorRNN(nn.Module): def __init__(self,", "dim=0) # Add a seq_len dimension of size 1 # RNN output, hidden", "seq_len is usually 1, this will generally be of shape (1 x batch_size", "nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5, stride=2, padding=0) self.conv1_bn = nn.BatchNorm2d(32) self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5,", "Input resolution: 320x240 self.conv1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5, stride=2, padding=0) self.conv1_bn = nn.BatchNorm2d(32)", "has shape (seq_len x batch_size x hidden_size). Since our seq_len is usually 1,", "RNN type.') # Input resolution: 320x240 self.conv1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5, stride=2, padding=0)", "rnn_class = nn.GRU elif self.rnn_type == 'lstm': rnn_class = nn.LSTM else: raise ValueError('Invalid", "our seq_len is usually 1, this will generally be of shape (1 x", "torch.Size([self.n_layers, batch_size, hidden_size]) output = torch.squeeze(output, dim=0) output = self.fc4(output) return output, hidden", "'lstm': rnn_class = nn.LSTM else: raise ValueError('Invalid RNN type.') # Input resolution: 320x240", "be of shape (1 x batch_size x hidden_size). \"\"\" # CNN encoder x", "class BehaviorRNN(nn.Module): def __init__(self, rnn_type, hidden_size, num_layers): super(BehaviorRNN, self).__init__() self.is_recurrent = True self.hidden_size", "# hidden should have shape torch.Size([self.n_layers, batch_size, hidden_size]) output = torch.squeeze(output, dim=0) output", "process one input at a time. Thus, seq_len = 1 and the input", "at a time. Thus, seq_len = 1 and the input should be (1", "hidden_size]) output = torch.squeeze(output, dim=0) output = self.fc4(output) return output, hidden def initial_hidden(self,", "padding=0) self.conv3_bn = nn.BatchNorm2d(128) self.conv4 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=0) self.conv4_bn =", "(1 x batch_size x hidden_size). \"\"\" # CNN encoder x = cur_input x", "seq_len dimension of size 1 # RNN output, hidden = self.rnn(x, cur_hidden) #", "out_channels=256, kernel_size=3, stride=2, padding=0) self.conv4_bn = nn.BatchNorm2d(256) self.conv5 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2,", "It has shape (seq_len x batch_size x hidden_size). Since our seq_len is usually", "this code for train and test, we only process one input at a", "rnn_class = nn.LSTM else: raise ValueError('Invalid RNN type.') # Input resolution: 320x240 self.conv1", "def forward(self, cur_input, cur_hidden): \"\"\"Forward pass a single input (seq_len == 1) through", "size torch.Size([32, 512, 2, 3]) batch_size = x.size(0) x = torch.reshape(x, (batch_size, -1))", "Args: cur_input: Input of shape (batch_size x n_channels x height x width). Since", "self.conv1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5, stride=2, padding=0) self.conv1_bn = nn.BatchNorm2d(32) self.conv2 = nn.Conv2d(in_channels=32,", "x width). 
Since we would like to reuse this code for train and", "\"\"\" # CNN encoder x = cur_input x = F.relu(self.conv1_bn(self.conv1(x))) x = F.relu(self.conv2_bn(self.conv2(x)))", "type.') # Input resolution: 320x240 self.conv1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5, stride=2, padding=0) self.conv1_bn", "or (self.rnn_type == 'gru'): return torch.zeros(self.n_layers, batch_size, self.hidden_size) elif self.rnn_type == 'lstm': return", "kernel_size=5, stride=2, padding=0) self.conv2_bn = nn.BatchNorm2d(64) self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, padding=0)", "Input of shape (batch_size x n_channels x height x width). Since we would", "# output should have shape torch.Size([1, batch_size, hidden_size]) # hidden should have shape", "2, 3]) batch_size = x.size(0) x = torch.reshape(x, (batch_size, -1)) # Flatten x", "== 'lstm': rnn_class = nn.LSTM else: raise ValueError('Invalid RNN type.') # Input resolution:", "<filename>src/semnav/learning/behavior_net/behavior_rnn.py import torch import torch.nn as nn import torch.nn.functional as F class BehaviorRNN(nn.Module):", "= nn.BatchNorm2d(64) self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, padding=0) self.conv3_bn = nn.BatchNorm2d(128) self.conv4", "resolution: 320x240 self.conv1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5, stride=2, padding=0) self.conv1_bn = nn.BatchNorm2d(32) self.conv2", "'rnn') or (self.rnn_type == 'gru'): return torch.zeros(self.n_layers, batch_size, self.hidden_size) elif self.rnn_type == 'lstm':", "rnn_class(input_size=rnn_input_size, hidden_size=hidden_size, num_layers=self.n_layers) self.fc4 = nn.Linear(hidden_size, 2) def forward(self, cur_input, cur_hidden): \"\"\"Forward pass", "# Flatten x = torch.unsqueeze(x, dim=0) # Add a seq_len dimension of size", "pass a single input (seq_len == 1) through the CNN-RNN. Args: cur_input: Input", "input should be (1 x batch_size x input_size). cur_hidden: Current (previous?) hidden state.", "is usually 1, this will generally be of shape (1 x batch_size x", "input (seq_len == 1) through the CNN-RNN. Args: cur_input: Input of shape (batch_size", "def __init__(self, rnn_type, hidden_size, num_layers): super(BehaviorRNN, self).__init__() self.is_recurrent = True self.hidden_size = hidden_size", "x batch_size x input_size). cur_hidden: Current (previous?) hidden state. Returns: output: Hidden state", "nn.GRU elif self.rnn_type == 'lstm': rnn_class = nn.LSTM else: raise ValueError('Invalid RNN type.')", "self.rnn_type == 'lstm': return [torch.zeros(self.n_layers, batch_size, self.hidden_size) for _ in range(2)] # 2", "only process one input at a time. Thus, seq_len = 1 and the", "batch_size, hidden_size]) # hidden should have shape torch.Size([self.n_layers, batch_size, hidden_size]) output = torch.squeeze(output,", "nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2, padding=0) self.conv5_bn = nn.BatchNorm2d(512) self.conv6 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3,", "one input at a time. Thus, seq_len = 1 and the input should", "stride=2, padding=0) self.conv6_bn = nn.BatchNorm2d(512) rnn_input_size = 512 * 2 * 3 self.rnn", "is zeros if not provided. 
\"\"\" if (self.rnn_type == 'rnn') or (self.rnn_type ==", "# Add a seq_len dimension of size 1 # RNN output, hidden =", "stride=2, padding=0) self.conv4_bn = nn.BatchNorm2d(256) self.conv5 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2, padding=0) self.conv5_bn", "import torch import torch.nn as nn import torch.nn.functional as F class BehaviorRNN(nn.Module): def", "batch_size, self.hidden_size) elif self.rnn_type == 'lstm': return [torch.zeros(self.n_layers, batch_size, self.hidden_size) for _ in", "state for each output. It has shape (seq_len x batch_size x hidden_size). Since", "nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, padding=0) self.conv3_bn = nn.BatchNorm2d(128) self.conv4 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3,", "hidden_size). Since our seq_len is usually 1, this will generally be of shape", "single input (seq_len == 1) through the CNN-RNN. Args: cur_input: Input of shape", "'rnn': rnn_class = nn.RNN elif self.rnn_type == 'gru': rnn_class = nn.GRU elif self.rnn_type", "1 and the input should be (1 x batch_size x input_size). cur_hidden: Current", "nn.Linear(hidden_size, 2) def forward(self, cur_input, cur_hidden): \"\"\"Forward pass a single input (seq_len ==", "torch.Size([32, 512, 2, 3]) batch_size = x.size(0) x = torch.reshape(x, (batch_size, -1)) #", "output = torch.squeeze(output, dim=0) output = self.fc4(output) return output, hidden def initial_hidden(self, batch_size):", "forward(self, cur_input, cur_hidden): \"\"\"Forward pass a single input (seq_len == 1) through the", "batch_size x hidden_size). \"\"\" # CNN encoder x = cur_input x = F.relu(self.conv1_bn(self.conv1(x)))", "F.relu(self.conv3_bn(self.conv3(x))) x = F.relu(self.conv4_bn(self.conv4(x))) x = F.relu(self.conv5_bn(self.conv5(x))) x = F.relu(self.conv6_bn(self.conv6(x))) # x now", "= nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=0) self.conv4_bn = nn.BatchNorm2d(256) self.conv5 = nn.Conv2d(in_channels=256, out_channels=512,", "F class BehaviorRNN(nn.Module): def __init__(self, rnn_type, hidden_size, num_layers): super(BehaviorRNN, self).__init__() self.is_recurrent = True", "= rnn_type if self.rnn_type == 'rnn': rnn_class = nn.RNN elif self.rnn_type == 'gru':", "padding=0) self.conv2_bn = nn.BatchNorm2d(64) self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, padding=0) self.conv3_bn =", "RNN output, hidden = self.rnn(x, cur_hidden) # output should have shape torch.Size([1, batch_size,", "torch.Size([1, batch_size, hidden_size]) # hidden should have shape torch.Size([self.n_layers, batch_size, hidden_size]) output =", "of size 1 # RNN output, hidden = self.rnn(x, cur_hidden) # output should", "num_layers): super(BehaviorRNN, self).__init__() self.is_recurrent = True self.hidden_size = hidden_size self.n_layers = num_layers self.rnn_type", "512 * 2 * 3 self.rnn = rnn_class(input_size=rnn_input_size, hidden_size=hidden_size, num_layers=self.n_layers) self.fc4 = nn.Linear(hidden_size,", "F.relu(self.conv1_bn(self.conv1(x))) x = F.relu(self.conv2_bn(self.conv2(x))) x = F.relu(self.conv3_bn(self.conv3(x))) x = F.relu(self.conv4_bn(self.conv4(x))) x = F.relu(self.conv5_bn(self.conv5(x)))", "F.relu(self.conv6_bn(self.conv6(x))) # x now has size torch.Size([32, 512, 2, 3]) batch_size = x.size(0)", "2) def forward(self, cur_input, cur_hidden): \"\"\"Forward pass a single input (seq_len == 1)", "self.rnn(x, cur_hidden) # output should have shape torch.Size([1, batch_size, hidden_size]) # 
hidden should", "self.conv5 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2, padding=0) self.conv5_bn = nn.BatchNorm2d(512) self.conv6 = nn.Conv2d(in_channels=512,", "shape torch.Size([self.n_layers, batch_size, hidden_size]) output = torch.squeeze(output, dim=0) output = self.fc4(output) return output,", "x = F.relu(self.conv2_bn(self.conv2(x))) x = F.relu(self.conv3_bn(self.conv3(x))) x = F.relu(self.conv4_bn(self.conv4(x))) x = F.relu(self.conv5_bn(self.conv5(x))) x", "x = F.relu(self.conv1_bn(self.conv1(x))) x = F.relu(self.conv2_bn(self.conv2(x))) x = F.relu(self.conv3_bn(self.conv3(x))) x = F.relu(self.conv4_bn(self.conv4(x))) x", "hidden = self.rnn(x, cur_hidden) # output should have shape torch.Size([1, batch_size, hidden_size]) #", "self.fc4(output) return output, hidden def initial_hidden(self, batch_size): \"\"\"Initial hidden state. Note that the", "not provided. \"\"\" if (self.rnn_type == 'rnn') or (self.rnn_type == 'gru'): return torch.zeros(self.n_layers,", "self.conv2_bn = nn.BatchNorm2d(64) self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, padding=0) self.conv3_bn = nn.BatchNorm2d(128)", "if (self.rnn_type == 'rnn') or (self.rnn_type == 'gru'): return torch.zeros(self.n_layers, batch_size, self.hidden_size) elif", "state is zeros if not provided. \"\"\" if (self.rnn_type == 'rnn') or (self.rnn_type", "padding=0) self.conv4_bn = nn.BatchNorm2d(256) self.conv5 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2, padding=0) self.conv5_bn =", "= torch.reshape(x, (batch_size, -1)) # Flatten x = torch.unsqueeze(x, dim=0) # Add a", "(previous?) hidden state. Returns: output: Hidden state for each output. It has shape", "= self.fc4(output) return output, hidden def initial_hidden(self, batch_size): \"\"\"Initial hidden state. Note that", "BehaviorRNN(nn.Module): def __init__(self, rnn_type, hidden_size, num_layers): super(BehaviorRNN, self).__init__() self.is_recurrent = True self.hidden_size =", "self.rnn_type == 'rnn': rnn_class = nn.RNN elif self.rnn_type == 'gru': rnn_class = nn.GRU", "'gru'): return torch.zeros(self.n_layers, batch_size, self.hidden_size) elif self.rnn_type == 'lstm': return [torch.zeros(self.n_layers, batch_size, self.hidden_size)", "rnn_type, hidden_size, num_layers): super(BehaviorRNN, self).__init__() self.is_recurrent = True self.hidden_size = hidden_size self.n_layers =", "= nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, padding=0) self.conv3_bn = nn.BatchNorm2d(128) self.conv4 = nn.Conv2d(in_channels=128, out_channels=256,", "1) through the CNN-RNN. Args: cur_input: Input of shape (batch_size x n_channels x", "(seq_len x batch_size x hidden_size). Since our seq_len is usually 1, this will", "initial_hidden(self, batch_size): \"\"\"Initial hidden state. Note that the default hidden state is zeros", "cur_hidden: Current (previous?) hidden state. Returns: output: Hidden state for each output. It", "of shape (1 x batch_size x hidden_size). \"\"\" # CNN encoder x =", "test, we only process one input at a time. 
Thus, seq_len = 1", "padding=0) self.conv6_bn = nn.BatchNorm2d(512) rnn_input_size = 512 * 2 * 3 self.rnn =", "should have shape torch.Size([self.n_layers, batch_size, hidden_size]) output = torch.squeeze(output, dim=0) output = self.fc4(output)", "self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, padding=0) self.conv3_bn = nn.BatchNorm2d(128) self.conv4 = nn.Conv2d(in_channels=128,", "padding=0) self.conv1_bn = nn.BatchNorm2d(32) self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=2, padding=0) self.conv2_bn =", "hidden def initial_hidden(self, batch_size): \"\"\"Initial hidden state. Note that the default hidden state", "x = torch.unsqueeze(x, dim=0) # Add a seq_len dimension of size 1 #", "== 'rnn': rnn_class = nn.RNN elif self.rnn_type == 'gru': rnn_class = nn.GRU elif", "\"\"\" if (self.rnn_type == 'rnn') or (self.rnn_type == 'gru'): return torch.zeros(self.n_layers, batch_size, self.hidden_size)", "= F.relu(self.conv2_bn(self.conv2(x))) x = F.relu(self.conv3_bn(self.conv3(x))) x = F.relu(self.conv4_bn(self.conv4(x))) x = F.relu(self.conv5_bn(self.conv5(x))) x =", "x hidden_size). Since our seq_len is usually 1, this will generally be of", "out_channels=512, kernel_size=3, stride=2, padding=0) self.conv6_bn = nn.BatchNorm2d(512) rnn_input_size = 512 * 2 *", "self.conv4 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=0) self.conv4_bn = nn.BatchNorm2d(256) self.conv5 = nn.Conv2d(in_channels=256,", "stride=2, padding=0) self.conv5_bn = nn.BatchNorm2d(512) self.conv6 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=2, padding=0) self.conv6_bn", "encoder x = cur_input x = F.relu(self.conv1_bn(self.conv1(x))) x = F.relu(self.conv2_bn(self.conv2(x))) x = F.relu(self.conv3_bn(self.conv3(x)))", "self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=2, padding=0) self.conv2_bn = nn.BatchNorm2d(64) self.conv3 = nn.Conv2d(in_channels=64,", "a time. Thus, seq_len = 1 and the input should be (1 x", "self.rnn_type = rnn_type if self.rnn_type == 'rnn': rnn_class = nn.RNN elif self.rnn_type ==", "batch_size x hidden_size). Since our seq_len is usually 1, this will generally be", "kernel_size=3, stride=2, padding=0) self.conv4_bn = nn.BatchNorm2d(256) self.conv5 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2, padding=0)", "num_layers self.rnn_type = rnn_type if self.rnn_type == 'rnn': rnn_class = nn.RNN elif self.rnn_type", "torch.zeros(self.n_layers, batch_size, self.hidden_size) elif self.rnn_type == 'lstm': return [torch.zeros(self.n_layers, batch_size, self.hidden_size) for _", "and test, we only process one input at a time. Thus, seq_len =", "self.conv6 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=2, padding=0) self.conv6_bn = nn.BatchNorm2d(512) rnn_input_size = 512", "out_channels=128, kernel_size=3, stride=2, padding=0) self.conv3_bn = nn.BatchNorm2d(128) self.conv4 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2,", "= nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2, padding=0) self.conv5_bn = nn.BatchNorm2d(512) self.conv6 = nn.Conv2d(in_channels=512, out_channels=512,", "through the CNN-RNN. Args: cur_input: Input of shape (batch_size x n_channels x height", "1, this will generally be of shape (1 x batch_size x hidden_size). \"\"\"", "torch.unsqueeze(x, dim=0) # Add a seq_len dimension of size 1 # RNN output,", "shape (1 x batch_size x hidden_size). 
\"\"\" # CNN encoder x = cur_input", "x = F.relu(self.conv5_bn(self.conv5(x))) x = F.relu(self.conv6_bn(self.conv6(x))) # x now has size torch.Size([32, 512,", "3]) batch_size = x.size(0) x = torch.reshape(x, (batch_size, -1)) # Flatten x =", "self.hidden_size) elif self.rnn_type == 'lstm': return [torch.zeros(self.n_layers, batch_size, self.hidden_size) for _ in range(2)]", "hidden state. Note that the default hidden state is zeros if not provided.", "# x now has size torch.Size([32, 512, 2, 3]) batch_size = x.size(0) x", "torch.squeeze(output, dim=0) output = self.fc4(output) return output, hidden def initial_hidden(self, batch_size): \"\"\"Initial hidden", "reuse this code for train and test, we only process one input at", "= F.relu(self.conv4_bn(self.conv4(x))) x = F.relu(self.conv5_bn(self.conv5(x))) x = F.relu(self.conv6_bn(self.conv6(x))) # x now has size", "= F.relu(self.conv5_bn(self.conv5(x))) x = F.relu(self.conv6_bn(self.conv6(x))) # x now has size torch.Size([32, 512, 2,", "that the default hidden state is zeros if not provided. \"\"\" if (self.rnn_type", "should have shape torch.Size([1, batch_size, hidden_size]) # hidden should have shape torch.Size([self.n_layers, batch_size,", "(seq_len == 1) through the CNN-RNN. Args: cur_input: Input of shape (batch_size x", "x now has size torch.Size([32, 512, 2, 3]) batch_size = x.size(0) x =", "Hidden state for each output. It has shape (seq_len x batch_size x hidden_size).", "for each output. It has shape (seq_len x batch_size x hidden_size). Since our", "raise ValueError('Invalid RNN type.') # Input resolution: 320x240 self.conv1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5,", "= nn.BatchNorm2d(128) self.conv4 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=0) self.conv4_bn = nn.BatchNorm2d(256) self.conv5", "'lstm': return [torch.zeros(self.n_layers, batch_size, self.hidden_size) for _ in range(2)] # 2 because cell", "size 1 # RNN output, hidden = self.rnn(x, cur_hidden) # output should have", "x input_size). cur_hidden: Current (previous?) hidden state. Returns: output: Hidden state for each", "nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=0) self.conv4_bn = nn.BatchNorm2d(256) self.conv5 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3,", "CNN encoder x = cur_input x = F.relu(self.conv1_bn(self.conv1(x))) x = F.relu(self.conv2_bn(self.conv2(x))) x =", "= nn.RNN elif self.rnn_type == 'gru': rnn_class = nn.GRU elif self.rnn_type == 'lstm':", "= nn.BatchNorm2d(256) self.conv5 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2, padding=0) self.conv5_bn = nn.BatchNorm2d(512) self.conv6", "the input should be (1 x batch_size x input_size). cur_hidden: Current (previous?) hidden", "nn.BatchNorm2d(256) self.conv5 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2, padding=0) self.conv5_bn = nn.BatchNorm2d(512) self.conv6 =", "output. It has shape (seq_len x batch_size x hidden_size). Since our seq_len is", "x = cur_input x = F.relu(self.conv1_bn(self.conv1(x))) x = F.relu(self.conv2_bn(self.conv2(x))) x = F.relu(self.conv3_bn(self.conv3(x))) x", "code for train and test, we only process one input at a time.", "state. Note that the default hidden state is zeros if not provided. 
\"\"\"", "hidden_size self.n_layers = num_layers self.rnn_type = rnn_type if self.rnn_type == 'rnn': rnn_class =", "(self.rnn_type == 'gru'): return torch.zeros(self.n_layers, batch_size, self.hidden_size) elif self.rnn_type == 'lstm': return [torch.zeros(self.n_layers,", "cur_input: Input of shape (batch_size x n_channels x height x width). Since we", "Current (previous?) hidden state. Returns: output: Hidden state for each output. It has", "rnn_input_size = 512 * 2 * 3 self.rnn = rnn_class(input_size=rnn_input_size, hidden_size=hidden_size, num_layers=self.n_layers) self.fc4", "= x.size(0) x = torch.reshape(x, (batch_size, -1)) # Flatten x = torch.unsqueeze(x, dim=0)", "nn.BatchNorm2d(128) self.conv4 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=0) self.conv4_bn = nn.BatchNorm2d(256) self.conv5 =", "a single input (seq_len == 1) through the CNN-RNN. Args: cur_input: Input of", "train and test, we only process one input at a time. Thus, seq_len", "'gru': rnn_class = nn.GRU elif self.rnn_type == 'lstm': rnn_class = nn.LSTM else: raise", "x = F.relu(self.conv4_bn(self.conv4(x))) x = F.relu(self.conv5_bn(self.conv5(x))) x = F.relu(self.conv6_bn(self.conv6(x))) # x now has", "== 'lstm': return [torch.zeros(self.n_layers, batch_size, self.hidden_size) for _ in range(2)] # 2 because", "a seq_len dimension of size 1 # RNN output, hidden = self.rnn(x, cur_hidden)", "self.conv4_bn = nn.BatchNorm2d(256) self.conv5 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2, padding=0) self.conv5_bn = nn.BatchNorm2d(512)", "Since our seq_len is usually 1, this will generally be of shape (1", "input_size). cur_hidden: Current (previous?) hidden state. Returns: output: Hidden state for each output.", "we would like to reuse this code for train and test, we only", "return [torch.zeros(self.n_layers, batch_size, self.hidden_size) for _ in range(2)] # 2 because cell state", "nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=2, padding=0) self.conv2_bn = nn.BatchNorm2d(64) self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3,", "have shape torch.Size([1, batch_size, hidden_size]) # hidden should have shape torch.Size([self.n_layers, batch_size, hidden_size])", "= num_layers self.rnn_type = rnn_type if self.rnn_type == 'rnn': rnn_class = nn.RNN elif", "= nn.Linear(hidden_size, 2) def forward(self, cur_input, cur_hidden): \"\"\"Forward pass a single input (seq_len", "shape torch.Size([1, batch_size, hidden_size]) # hidden should have shape torch.Size([self.n_layers, batch_size, hidden_size]) output", "torch.nn.functional as F class BehaviorRNN(nn.Module): def __init__(self, rnn_type, hidden_size, num_layers): super(BehaviorRNN, self).__init__() self.is_recurrent", "if not provided. \"\"\" if (self.rnn_type == 'rnn') or (self.rnn_type == 'gru'): return", "self.conv6_bn = nn.BatchNorm2d(512) rnn_input_size = 512 * 2 * 3 self.rnn = rnn_class(input_size=rnn_input_size,", "out_channels=32, kernel_size=5, stride=2, padding=0) self.conv1_bn = nn.BatchNorm2d(32) self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=2,", "output should have shape torch.Size([1, batch_size, hidden_size]) # hidden should have shape torch.Size([self.n_layers,", "x height x width). 
Since we would like to reuse this code for", "== 'gru'): return torch.zeros(self.n_layers, batch_size, self.hidden_size) elif self.rnn_type == 'lstm': return [torch.zeros(self.n_layers, batch_size,", "padding=0) self.conv5_bn = nn.BatchNorm2d(512) self.conv6 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=2, padding=0) self.conv6_bn =", "Returns: output: Hidden state for each output. It has shape (seq_len x batch_size", "elif self.rnn_type == 'lstm': return [torch.zeros(self.n_layers, batch_size, self.hidden_size) for _ in range(2)] #", "as F class BehaviorRNN(nn.Module): def __init__(self, rnn_type, hidden_size, num_layers): super(BehaviorRNN, self).__init__() self.is_recurrent =", "x.size(0) x = torch.reshape(x, (batch_size, -1)) # Flatten x = torch.unsqueeze(x, dim=0) #", "kernel_size=3, stride=2, padding=0) self.conv5_bn = nn.BatchNorm2d(512) self.conv6 = nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=2, padding=0)", "= cur_input x = F.relu(self.conv1_bn(self.conv1(x))) x = F.relu(self.conv2_bn(self.conv2(x))) x = F.relu(self.conv3_bn(self.conv3(x))) x =", "(batch_size x n_channels x height x width). Since we would like to reuse", "self.n_layers = num_layers self.rnn_type = rnn_type if self.rnn_type == 'rnn': rnn_class = nn.RNN", "return torch.zeros(self.n_layers, batch_size, self.hidden_size) elif self.rnn_type == 'lstm': return [torch.zeros(self.n_layers, batch_size, self.hidden_size) for", "= torch.unsqueeze(x, dim=0) # Add a seq_len dimension of size 1 # RNN", "output, hidden = self.rnn(x, cur_hidden) # output should have shape torch.Size([1, batch_size, hidden_size])", "hidden_size=hidden_size, num_layers=self.n_layers) self.fc4 = nn.Linear(hidden_size, 2) def forward(self, cur_input, cur_hidden): \"\"\"Forward pass a", "self.conv3_bn = nn.BatchNorm2d(128) self.conv4 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=0) self.conv4_bn = nn.BatchNorm2d(256)", "# RNN output, hidden = self.rnn(x, cur_hidden) # output should have shape torch.Size([1,", "cur_hidden) # output should have shape torch.Size([1, batch_size, hidden_size]) # hidden should have", "hidden_size, num_layers): super(BehaviorRNN, self).__init__() self.is_recurrent = True self.hidden_size = hidden_size self.n_layers = num_layers", "rnn_type if self.rnn_type == 'rnn': rnn_class = nn.RNN elif self.rnn_type == 'gru': rnn_class" ]
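# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative, not part of the original module): drive
# BehaviorRNN frame by frame the way the forward() docstring describes. A GRU
# is used so initial_hidden() returns a single tensor; the 1x240x320 grayscale
# input size is assumed from the "Input resolution: 320x240" comment and
# in_channels=1 above. Batch size, hidden size and sequence length are
# arbitrary illustration values.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    model = BehaviorRNN(rnn_type='gru', hidden_size=64, num_layers=1)
    model.eval()  # use BatchNorm running stats instead of per-batch statistics

    batch_size, seq_len = 4, 3
    hidden = model.initial_hidden(batch_size)
    with torch.no_grad():
        for _ in range(seq_len):
            frame = torch.randn(batch_size, 1, 240, 320)  # (batch, channels, H, W)
            output, hidden = model(frame, hidden)
    print(output.shape)  # torch.Size([4, 2]), one 2-vector prediction per batch item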
[ "print(\"Must pass a filename argument\") sys.exit(1) in_filename = sys.argv[1] out_filename = in_filename.replace(\".in.md\", \".md\")", "must end in .in.md\") sys.exit(1) if os.path.isdir(out_dir): shutil.rmtree(out_dir) os.mkdir(out_dir) file_num = 1 with", "as out_file, open(in_filename, \"rb\") as in_file: for line in in_file: if line.startswith(b\"```dot\"): dot_lines", "in .in.md\") sys.exit(1) if os.path.isdir(out_dir): shutil.rmtree(out_dir) os.mkdir(out_dir) file_num = 1 with open(out_filename, \"wb\")", "in_filename == out_filename: print(\"File must end in .in.md\") sys.exit(1) if os.path.isdir(out_dir): shutil.rmtree(out_dir) os.mkdir(out_dir)", "if in_filename == out_filename: print(\"File must end in .in.md\") sys.exit(1) if os.path.isdir(out_dir): shutil.rmtree(out_dir)", "if dot_line == b\"```\\n\": break dot_lines.append(dot_line) dot_input = b\"\".join(dot_lines) svg_filename = out_dir +", "while True: dot_line = next(in_file) if dot_line == b\"```\\n\": break dot_lines.append(dot_line) dot_input =", "svg_filename = out_dir + \"/\" + str(file_num) + \".svg\" svg = subprocess.check_output(['dot', '-Tsvg',", "\"/\" + str(file_num) + \".svg\" svg = subprocess.check_output(['dot', '-Tsvg', '-o', svg_filename], input=dot_input) out_file.write(b\"<div", "<reponame>echo80313/grpc<gh_stars>100-1000 #!/usr/bin/env python3 import subprocess import sys import shutil import os if len(sys.argv)", "in_file: for line in in_file: if line.startswith(b\"```dot\"): dot_lines = [] while True: dot_line", "os.path.isdir(out_dir): shutil.rmtree(out_dir) os.mkdir(out_dir) file_num = 1 with open(out_filename, \"wb\") as out_file, open(in_filename, \"rb\")", "== out_filename: print(\"File must end in .in.md\") sys.exit(1) if os.path.isdir(out_dir): shutil.rmtree(out_dir) os.mkdir(out_dir) file_num", "out_filename = in_filename.replace(\".in.md\", \".md\") out_dir = in_filename.replace(\".in.md\", \"\") if in_filename == out_filename: print(\"File", "sys.exit(1) if os.path.isdir(out_dir): shutil.rmtree(out_dir) os.mkdir(out_dir) file_num = 1 with open(out_filename, \"wb\") as out_file,", "dot_line = next(in_file) if dot_line == b\"```\\n\": break dot_lines.append(dot_line) dot_input = b\"\".join(dot_lines) svg_filename", "svg = subprocess.check_output(['dot', '-Tsvg', '-o', svg_filename], input=dot_input) out_file.write(b\"<div align=center>\\n\") out_file.write(b\"<img src='%s'/>\\n\" % (svg_filename.encode('utf-8')))", "print(\"File must end in .in.md\") sys.exit(1) if os.path.isdir(out_dir): shutil.rmtree(out_dir) os.mkdir(out_dir) file_num = 1", "in in_file: if line.startswith(b\"```dot\"): dot_lines = [] while True: dot_line = next(in_file) if", "True: dot_line = next(in_file) if dot_line == b\"```\\n\": break dot_lines.append(dot_line) dot_input = b\"\".join(dot_lines)", "as in_file: for line in in_file: if line.startswith(b\"```dot\"): dot_lines = [] while True:", "if len(sys.argv) < 2: print(\"Must pass a filename argument\") sys.exit(1) in_filename = sys.argv[1]", "line.startswith(b\"```dot\"): dot_lines = [] while True: dot_line = next(in_file) if dot_line == b\"```\\n\":", "b\"```\\n\": break dot_lines.append(dot_line) dot_input = b\"\".join(dot_lines) svg_filename = out_dir + \"/\" + str(file_num)", "if line.startswith(b\"```dot\"): dot_lines = [] while True: dot_line = next(in_file) if dot_line ==", "sys.argv[1] out_filename = in_filename.replace(\".in.md\", \".md\") out_dir = in_filename.replace(\".in.md\", \"\") if in_filename == out_filename:", "1 with open(out_filename, \"wb\") 
as out_file, open(in_filename, \"rb\") as in_file: for line in", "break dot_lines.append(dot_line) dot_input = b\"\".join(dot_lines) svg_filename = out_dir + \"/\" + str(file_num) +", "import os if len(sys.argv) < 2: print(\"Must pass a filename argument\") sys.exit(1) in_filename", "in_file: if line.startswith(b\"```dot\"): dot_lines = [] while True: dot_line = next(in_file) if dot_line", "with open(out_filename, \"wb\") as out_file, open(in_filename, \"rb\") as in_file: for line in in_file:", "shutil.rmtree(out_dir) os.mkdir(out_dir) file_num = 1 with open(out_filename, \"wb\") as out_file, open(in_filename, \"rb\") as", "dot_line == b\"```\\n\": break dot_lines.append(dot_line) dot_input = b\"\".join(dot_lines) svg_filename = out_dir + \"/\"", "'-o', svg_filename], input=dot_input) out_file.write(b\"<div align=center>\\n\") out_file.write(b\"<img src='%s'/>\\n\" % (svg_filename.encode('utf-8'))) out_file.write(b\"</div>\\n\") file_num += 1", "= in_filename.replace(\".in.md\", \"\") if in_filename == out_filename: print(\"File must end in .in.md\") sys.exit(1)", "2: print(\"Must pass a filename argument\") sys.exit(1) in_filename = sys.argv[1] out_filename = in_filename.replace(\".in.md\",", "= sys.argv[1] out_filename = in_filename.replace(\".in.md\", \".md\") out_dir = in_filename.replace(\".in.md\", \"\") if in_filename ==", "str(file_num) + \".svg\" svg = subprocess.check_output(['dot', '-Tsvg', '-o', svg_filename], input=dot_input) out_file.write(b\"<div align=center>\\n\") out_file.write(b\"<img", "'-Tsvg', '-o', svg_filename], input=dot_input) out_file.write(b\"<div align=center>\\n\") out_file.write(b\"<img src='%s'/>\\n\" % (svg_filename.encode('utf-8'))) out_file.write(b\"</div>\\n\") file_num +=", "if os.path.isdir(out_dir): shutil.rmtree(out_dir) os.mkdir(out_dir) file_num = 1 with open(out_filename, \"wb\") as out_file, open(in_filename,", "= in_filename.replace(\".in.md\", \".md\") out_dir = in_filename.replace(\".in.md\", \"\") if in_filename == out_filename: print(\"File must", "+ \"/\" + str(file_num) + \".svg\" svg = subprocess.check_output(['dot', '-Tsvg', '-o', svg_filename], input=dot_input)", "in_filename = sys.argv[1] out_filename = in_filename.replace(\".in.md\", \".md\") out_dir = in_filename.replace(\".in.md\", \"\") if in_filename", "shutil import os if len(sys.argv) < 2: print(\"Must pass a filename argument\") sys.exit(1)", "file_num = 1 with open(out_filename, \"wb\") as out_file, open(in_filename, \"rb\") as in_file: for", "svg_filename], input=dot_input) out_file.write(b\"<div align=center>\\n\") out_file.write(b\"<img src='%s'/>\\n\" % (svg_filename.encode('utf-8'))) out_file.write(b\"</div>\\n\") file_num += 1 else:", "line in in_file: if line.startswith(b\"```dot\"): dot_lines = [] while True: dot_line = next(in_file)", "= next(in_file) if dot_line == b\"```\\n\": break dot_lines.append(dot_line) dot_input = b\"\".join(dot_lines) svg_filename =", "= out_dir + \"/\" + str(file_num) + \".svg\" svg = subprocess.check_output(['dot', '-Tsvg', '-o',", "out_dir = in_filename.replace(\".in.md\", \"\") if in_filename == out_filename: print(\"File must end in .in.md\")", "out_file, open(in_filename, \"rb\") as in_file: for line in in_file: if line.startswith(b\"```dot\"): dot_lines =", "b\"\".join(dot_lines) svg_filename = out_dir + \"/\" + str(file_num) + \".svg\" svg = subprocess.check_output(['dot',", "= b\"\".join(dot_lines) svg_filename = out_dir + \"/\" + str(file_num) + \".svg\" svg =", "for line in in_file: if line.startswith(b\"```dot\"): dot_lines = [] 
while True: dot_line =", "+ \".svg\" svg = subprocess.check_output(['dot', '-Tsvg', '-o', svg_filename], input=dot_input) out_file.write(b\"<div align=center>\\n\") out_file.write(b\"<img src='%s'/>\\n\"", "\".svg\" svg = subprocess.check_output(['dot', '-Tsvg', '-o', svg_filename], input=dot_input) out_file.write(b\"<div align=center>\\n\") out_file.write(b\"<img src='%s'/>\\n\" %", "import shutil import os if len(sys.argv) < 2: print(\"Must pass a filename argument\")", "len(sys.argv) < 2: print(\"Must pass a filename argument\") sys.exit(1) in_filename = sys.argv[1] out_filename", "python3 import subprocess import sys import shutil import os if len(sys.argv) < 2:", "sys.exit(1) in_filename = sys.argv[1] out_filename = in_filename.replace(\".in.md\", \".md\") out_dir = in_filename.replace(\".in.md\", \"\") if", "#!/usr/bin/env python3 import subprocess import sys import shutil import os if len(sys.argv) <", "import subprocess import sys import shutil import os if len(sys.argv) < 2: print(\"Must", "in_filename.replace(\".in.md\", \"\") if in_filename == out_filename: print(\"File must end in .in.md\") sys.exit(1) if", "dot_lines = [] while True: dot_line = next(in_file) if dot_line == b\"```\\n\": break", "out_filename: print(\"File must end in .in.md\") sys.exit(1) if os.path.isdir(out_dir): shutil.rmtree(out_dir) os.mkdir(out_dir) file_num =", "[] while True: dot_line = next(in_file) if dot_line == b\"```\\n\": break dot_lines.append(dot_line) dot_input", "= [] while True: dot_line = next(in_file) if dot_line == b\"```\\n\": break dot_lines.append(dot_line)", "import sys import shutil import os if len(sys.argv) < 2: print(\"Must pass a", "\"\") if in_filename == out_filename: print(\"File must end in .in.md\") sys.exit(1) if os.path.isdir(out_dir):", "sys import shutil import os if len(sys.argv) < 2: print(\"Must pass a filename", "= 1 with open(out_filename, \"wb\") as out_file, open(in_filename, \"rb\") as in_file: for line", "< 2: print(\"Must pass a filename argument\") sys.exit(1) in_filename = sys.argv[1] out_filename =", "in_filename.replace(\".in.md\", \".md\") out_dir = in_filename.replace(\".in.md\", \"\") if in_filename == out_filename: print(\"File must end", "next(in_file) if dot_line == b\"```\\n\": break dot_lines.append(dot_line) dot_input = b\"\".join(dot_lines) svg_filename = out_dir", "out_dir + \"/\" + str(file_num) + \".svg\" svg = subprocess.check_output(['dot', '-Tsvg', '-o', svg_filename],", "\"wb\") as out_file, open(in_filename, \"rb\") as in_file: for line in in_file: if line.startswith(b\"```dot\"):", "pass a filename argument\") sys.exit(1) in_filename = sys.argv[1] out_filename = in_filename.replace(\".in.md\", \".md\") out_dir", "input=dot_input) out_file.write(b\"<div align=center>\\n\") out_file.write(b\"<img src='%s'/>\\n\" % (svg_filename.encode('utf-8'))) out_file.write(b\"</div>\\n\") file_num += 1 else: out_file.write(line)", ".in.md\") sys.exit(1) if os.path.isdir(out_dir): shutil.rmtree(out_dir) os.mkdir(out_dir) file_num = 1 with open(out_filename, \"wb\") as", "dot_lines.append(dot_line) dot_input = b\"\".join(dot_lines) svg_filename = out_dir + \"/\" + str(file_num) + \".svg\"", "= subprocess.check_output(['dot', '-Tsvg', '-o', svg_filename], input=dot_input) out_file.write(b\"<div align=center>\\n\") out_file.write(b\"<img src='%s'/>\\n\" % (svg_filename.encode('utf-8'))) out_file.write(b\"</div>\\n\")", "open(in_filename, \"rb\") as in_file: for line in in_file: if line.startswith(b\"```dot\"): dot_lines = []", "+ str(file_num) + \".svg\" svg 
= subprocess.check_output(['dot', '-Tsvg', '-o', svg_filename], input=dot_input) out_file.write(b\"<div align=center>\\n\")", "\".md\") out_dir = in_filename.replace(\".in.md\", \"\") if in_filename == out_filename: print(\"File must end in", "os.mkdir(out_dir) file_num = 1 with open(out_filename, \"wb\") as out_file, open(in_filename, \"rb\") as in_file:", "argument\") sys.exit(1) in_filename = sys.argv[1] out_filename = in_filename.replace(\".in.md\", \".md\") out_dir = in_filename.replace(\".in.md\", \"\")", "\"rb\") as in_file: for line in in_file: if line.startswith(b\"```dot\"): dot_lines = [] while", "subprocess import sys import shutil import os if len(sys.argv) < 2: print(\"Must pass", "os if len(sys.argv) < 2: print(\"Must pass a filename argument\") sys.exit(1) in_filename =", "dot_input = b\"\".join(dot_lines) svg_filename = out_dir + \"/\" + str(file_num) + \".svg\" svg", "subprocess.check_output(['dot', '-Tsvg', '-o', svg_filename], input=dot_input) out_file.write(b\"<div align=center>\\n\") out_file.write(b\"<img src='%s'/>\\n\" % (svg_filename.encode('utf-8'))) out_file.write(b\"</div>\\n\") file_num", "filename argument\") sys.exit(1) in_filename = sys.argv[1] out_filename = in_filename.replace(\".in.md\", \".md\") out_dir = in_filename.replace(\".in.md\",", "a filename argument\") sys.exit(1) in_filename = sys.argv[1] out_filename = in_filename.replace(\".in.md\", \".md\") out_dir =", "end in .in.md\") sys.exit(1) if os.path.isdir(out_dir): shutil.rmtree(out_dir) os.mkdir(out_dir) file_num = 1 with open(out_filename,", "open(out_filename, \"wb\") as out_file, open(in_filename, \"rb\") as in_file: for line in in_file: if", "== b\"```\\n\": break dot_lines.append(dot_line) dot_input = b\"\".join(dot_lines) svg_filename = out_dir + \"/\" +" ]
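# ---------------------------------------------------------------------------
# Hedged, self-contained sketch (not part of the original script) of the one
# external call the converter depends on: rendering DOT source to SVG with
# Graphviz via subprocess.check_output(..., input=...). It assumes the `dot`
# binary is installed and on PATH; the graph and the output name "example.svg"
# are illustrative.
# ---------------------------------------------------------------------------
import subprocess

dot_source = b"digraph G { start -> finish; }\n"
subprocess.check_output(['dot', '-Tsvg', '-o', 'example.svg'], input=dot_source)
print("wrote example.svg")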
[ "data.decode('utf-8')) self.transport.loseConnection() class EchoFactory(protocol.ClientFactory): def buildProtocol(self, addr): return EchoClient() def clientConnectionFailed(self, connector, reason):", "from twisted.internet import reactor, protocol class EchoClient(protocol.Protocol): def connectionMade(self): self.transport.write(u'Hello, world!'.encode('utf-8')) def dataReceived(self,", "protocol class EchoClient(protocol.Protocol): def connectionMade(self): self.transport.write(u'Hello, world!'.encode('utf-8')) def dataReceived(self, data): print(\"Server said: \",", "said: \", data.decode('utf-8')) self.transport.loseConnection() class EchoFactory(protocol.ClientFactory): def buildProtocol(self, addr): return EchoClient() def clientConnectionFailed(self,", "def connectionMade(self): self.transport.write(u'Hello, world!'.encode('utf-8')) def dataReceived(self, data): print(\"Server said: \", data.decode('utf-8')) self.transport.loseConnection() class", "return EchoClient() def clientConnectionFailed(self, connector, reason): print(\"Connection Failed\") reactor.stop() def clientConnectionLost(self, connector, reason):", "data): print(\"Server said: \", data.decode('utf-8')) self.transport.loseConnection() class EchoFactory(protocol.ClientFactory): def buildProtocol(self, addr): return EchoClient()", "def dataReceived(self, data): print(\"Server said: \", data.decode('utf-8')) self.transport.loseConnection() class EchoFactory(protocol.ClientFactory): def buildProtocol(self, addr):", "self.transport.write(u'Hello, world!'.encode('utf-8')) def dataReceived(self, data): print(\"Server said: \", data.decode('utf-8')) self.transport.loseConnection() class EchoFactory(protocol.ClientFactory): def", "print(\"Server said: \", data.decode('utf-8')) self.transport.loseConnection() class EchoFactory(protocol.ClientFactory): def buildProtocol(self, addr): return EchoClient() def", "def buildProtocol(self, addr): return EchoClient() def clientConnectionFailed(self, connector, reason): print(\"Connection Failed\") reactor.stop() def", "EchoClient() def clientConnectionFailed(self, connector, reason): print(\"Connection Failed\") reactor.stop() def clientConnectionLost(self, connector, reason): print(\"Client", "print(\"Connection Failed\") reactor.stop() def clientConnectionLost(self, connector, reason): print(\"Client Connection Lost\") reactor.stop() reactor.connectTCP(\"localhost\", 8000,", "self.transport.loseConnection() class EchoFactory(protocol.ClientFactory): def buildProtocol(self, addr): return EchoClient() def clientConnectionFailed(self, connector, reason): print(\"Connection", "reactor.stop() def clientConnectionLost(self, connector, reason): print(\"Client Connection Lost\") reactor.stop() reactor.connectTCP(\"localhost\", 8000, EchoFactory()) reactor.run()", "import reactor, protocol class EchoClient(protocol.Protocol): def connectionMade(self): self.transport.write(u'Hello, world!'.encode('utf-8')) def dataReceived(self, data): print(\"Server", "world!'.encode('utf-8')) def dataReceived(self, data): print(\"Server said: \", data.decode('utf-8')) self.transport.loseConnection() class EchoFactory(protocol.ClientFactory): def buildProtocol(self,", "dataReceived(self, data): print(\"Server said: \", data.decode('utf-8')) self.transport.loseConnection() class EchoFactory(protocol.ClientFactory): def buildProtocol(self, addr): return", "addr): return EchoClient() def clientConnectionFailed(self, connector, reason): print(\"Connection Failed\") reactor.stop() def 
clientConnectionLost(self, connector,", "buildProtocol(self, addr): return EchoClient() def clientConnectionFailed(self, connector, reason): print(\"Connection Failed\") reactor.stop() def clientConnectionLost(self,", "clientConnectionFailed(self, connector, reason): print(\"Connection Failed\") reactor.stop() def clientConnectionLost(self, connector, reason): print(\"Client Connection Lost\")", "def clientConnectionFailed(self, connector, reason): print(\"Connection Failed\") reactor.stop() def clientConnectionLost(self, connector, reason): print(\"Client Connection", "connectionMade(self): self.transport.write(u'Hello, world!'.encode('utf-8')) def dataReceived(self, data): print(\"Server said: \", data.decode('utf-8')) self.transport.loseConnection() class EchoFactory(protocol.ClientFactory):", "Failed\") reactor.stop() def clientConnectionLost(self, connector, reason): print(\"Client Connection Lost\") reactor.stop() reactor.connectTCP(\"localhost\", 8000, EchoFactory())", "reactor, protocol class EchoClient(protocol.Protocol): def connectionMade(self): self.transport.write(u'Hello, world!'.encode('utf-8')) def dataReceived(self, data): print(\"Server said:", "EchoClient(protocol.Protocol): def connectionMade(self): self.transport.write(u'Hello, world!'.encode('utf-8')) def dataReceived(self, data): print(\"Server said: \", data.decode('utf-8')) self.transport.loseConnection()", "class EchoClient(protocol.Protocol): def connectionMade(self): self.transport.write(u'Hello, world!'.encode('utf-8')) def dataReceived(self, data): print(\"Server said: \", data.decode('utf-8'))", "<reponame>cybaek/twisted-network-programming-essentials-2nd-edition-python3 from twisted.internet import reactor, protocol class EchoClient(protocol.Protocol): def connectionMade(self): self.transport.write(u'Hello, world!'.encode('utf-8')) def", "class EchoFactory(protocol.ClientFactory): def buildProtocol(self, addr): return EchoClient() def clientConnectionFailed(self, connector, reason): print(\"Connection Failed\")", "EchoFactory(protocol.ClientFactory): def buildProtocol(self, addr): return EchoClient() def clientConnectionFailed(self, connector, reason): print(\"Connection Failed\") reactor.stop()", "reason): print(\"Connection Failed\") reactor.stop() def clientConnectionLost(self, connector, reason): print(\"Client Connection Lost\") reactor.stop() reactor.connectTCP(\"localhost\",", "\", data.decode('utf-8')) self.transport.loseConnection() class EchoFactory(protocol.ClientFactory): def buildProtocol(self, addr): return EchoClient() def clientConnectionFailed(self, connector,", "connector, reason): print(\"Connection Failed\") reactor.stop() def clientConnectionLost(self, connector, reason): print(\"Client Connection Lost\") reactor.stop()", "twisted.internet import reactor, protocol class EchoClient(protocol.Protocol): def connectionMade(self): self.transport.write(u'Hello, world!'.encode('utf-8')) def dataReceived(self, data):" ]
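# ---------------------------------------------------------------------------
# Hedged companion sketch (not part of the original file, and meant to run as
# its own module): a minimal Twisted echo server the client above could be
# pointed at. It listens on the same port 8000 and writes each received chunk
# straight back, which is what the client's dataReceived() handler expects.
# Class names are illustrative.
# ---------------------------------------------------------------------------
from twisted.internet import protocol, reactor


class EchoServer(protocol.Protocol):
    def dataReceived(self, data):
        self.transport.write(data)  # echo the bytes back unchanged


class EchoServerFactory(protocol.Factory):
    def buildProtocol(self, addr):
        return EchoServer()


if __name__ == '__main__':
    reactor.listenTCP(8000, EchoServerFactory())
    reactor.run()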
[ "a = 2 print(a) b = \"hello\" print(b) c = 123123 print(c) d", "print(b) c = 123123 print(c) d = c print(d) def fun() print(\"*\"*5) fun()", "<gh_stars>0 a = 2 print(a) b = \"hello\" print(b) c = 123123 print(c)", "= \"hello\" print(b) c = 123123 print(c) d = c print(d) def fun()", "b = \"hello\" print(b) c = 123123 print(c) d = c print(d) def", "= 2 print(a) b = \"hello\" print(b) c = 123123 print(c) d =", "\"hello\" print(b) c = 123123 print(c) d = c print(d) def fun() print(\"*\"*5)", "print(a) b = \"hello\" print(b) c = 123123 print(c) d = c print(d)", "2 print(a) b = \"hello\" print(b) c = 123123 print(c) d = c" ]
[ "include_package_data = True, # setuptools-git MUST be installed test_suite = 'tests', install_requires =", "# coding: utf-8 import setuptools setuptools.setup( name = 'Pynames', version = '0.1.0', author", "= open('README.md').read(), include_package_data = True, # setuptools-git MUST be installed test_suite = 'tests',", "'Pynames', version = '0.1.0', author = '<NAME>', author_email = '<EMAIL>', packages = setuptools.find_packages(),", "True, # setuptools-git MUST be installed test_suite = 'tests', install_requires = ['unicodecsv'], #", "= '0.1.0', author = '<NAME>', author_email = '<EMAIL>', packages = setuptools.find_packages(), url =", "'<EMAIL>', packages = setuptools.find_packages(), url = 'https://github.com/Tiendil/pynames', license = 'LICENSE', description = \"characters'", "= \"characters' name generation library\", long_description = open('README.md').read(), include_package_data = True, # setuptools-git", "long_description = open('README.md').read(), include_package_data = True, # setuptools-git MUST be installed test_suite =", "open('README.md').read(), include_package_data = True, # setuptools-git MUST be installed test_suite = 'tests', install_requires", "author_email = '<EMAIL>', packages = setuptools.find_packages(), url = 'https://github.com/Tiendil/pynames', license = 'LICENSE', description", "= 'https://github.com/Tiendil/pynames', license = 'LICENSE', description = \"characters' name generation library\", long_description =", "setuptools setuptools.setup( name = 'Pynames', version = '0.1.0', author = '<NAME>', author_email =", "# setuptools-git MUST be installed test_suite = 'tests', install_requires = ['unicodecsv'], # package_data", "setuptools.find_packages(), url = 'https://github.com/Tiendil/pynames', license = 'LICENSE', description = \"characters' name generation library\",", "version = '0.1.0', author = '<NAME>', author_email = '<EMAIL>', packages = setuptools.find_packages(), url", "= 'LICENSE', description = \"characters' name generation library\", long_description = open('README.md').read(), include_package_data =", "= setuptools.find_packages(), url = 'https://github.com/Tiendil/pynames', license = 'LICENSE', description = \"characters' name generation", "= True, # setuptools-git MUST be installed test_suite = 'tests', install_requires = ['unicodecsv'],", "setuptools-git MUST be installed test_suite = 'tests', install_requires = ['unicodecsv'], # package_data =", "'<NAME>', author_email = '<EMAIL>', packages = setuptools.find_packages(), url = 'https://github.com/Tiendil/pynames', license = 'LICENSE',", "installed test_suite = 'tests', install_requires = ['unicodecsv'], # package_data = { '': ['*.json']", "MUST be installed test_suite = 'tests', install_requires = ['unicodecsv'], # package_data = {", "'LICENSE', description = \"characters' name generation library\", long_description = open('README.md').read(), include_package_data = True,", "= 'Pynames', version = '0.1.0', author = '<NAME>', author_email = '<EMAIL>', packages =", "be installed test_suite = 'tests', install_requires = ['unicodecsv'], # package_data = { '':", "\"characters' name generation library\", long_description = open('README.md').read(), include_package_data = True, # setuptools-git MUST", "license = 'LICENSE', description = \"characters' name generation library\", long_description = open('README.md').read(), include_package_data", "= 'tests', install_requires = ['unicodecsv'], # package_data = { '': ['*.json'] } )", "name = 'Pynames', version = '0.1.0', author = '<NAME>', 
author_email = '<EMAIL>', packages", "packages = setuptools.find_packages(), url = 'https://github.com/Tiendil/pynames', license = 'LICENSE', description = \"characters' name", "= '<EMAIL>', packages = setuptools.find_packages(), url = 'https://github.com/Tiendil/pynames', license = 'LICENSE', description =", "coding: utf-8 import setuptools setuptools.setup( name = 'Pynames', version = '0.1.0', author =", "test_suite = 'tests', install_requires = ['unicodecsv'], # package_data = { '': ['*.json'] }", "description = \"characters' name generation library\", long_description = open('README.md').read(), include_package_data = True, #", "'0.1.0', author = '<NAME>', author_email = '<EMAIL>', packages = setuptools.find_packages(), url = 'https://github.com/Tiendil/pynames',", "= '<NAME>', author_email = '<EMAIL>', packages = setuptools.find_packages(), url = 'https://github.com/Tiendil/pynames', license =", "library\", long_description = open('README.md').read(), include_package_data = True, # setuptools-git MUST be installed test_suite", "'https://github.com/Tiendil/pynames', license = 'LICENSE', description = \"characters' name generation library\", long_description = open('README.md').read(),", "setuptools.setup( name = 'Pynames', version = '0.1.0', author = '<NAME>', author_email = '<EMAIL>',", "url = 'https://github.com/Tiendil/pynames', license = 'LICENSE', description = \"characters' name generation library\", long_description", "generation library\", long_description = open('README.md').read(), include_package_data = True, # setuptools-git MUST be installed", "utf-8 import setuptools setuptools.setup( name = 'Pynames', version = '0.1.0', author = '<NAME>',", "author = '<NAME>', author_email = '<EMAIL>', packages = setuptools.find_packages(), url = 'https://github.com/Tiendil/pynames', license", "import setuptools setuptools.setup( name = 'Pynames', version = '0.1.0', author = '<NAME>', author_email", "name generation library\", long_description = open('README.md').read(), include_package_data = True, # setuptools-git MUST be" ]
from setuptools import setup

setup(
    name='color-changer',
    version='1.0.5',
    packages=['colorchanger', ],
    license='MIT',
    description='Reads in an image and swaps specified colors',
    long_description=open('README.rst').read(),
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/DahlitzFlorian/python-color-changer',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    keywords='color changer color-changer opencv numpy',
    install_requires=[
        'click',
        'numpy'
    ],
)

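# The package description ("reads in an image and swaps specified colors")
# boils down to a masking operation over a pixel array. The sketch below
# illustrates that idea with numpy alone; the function name swap_colors,
# the channel ordering and the toy image are illustrative assumptions, not
# the colorchanger package's actual API (note that cv2.imread returns
# BGR-ordered data).
import numpy as np


def swap_colors(image, old_color, new_color):
    """Return a copy of image with every pixel equal to old_color replaced by new_color."""
    out = image.copy()
    mask = np.all(out == np.asarray(old_color, dtype=out.dtype), axis=-1)
    out[mask] = new_color
    return out


if __name__ == '__main__':
    img = np.zeros((4, 4, 3), dtype=np.uint8)   # all-black test image
    img[0, 0] = (255, 0, 0)                     # one red pixel
    print(swap_colors(img, (255, 0, 0), (0, 255, 0))[0, 0])   # -> [  0 255   0]
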
from setuptools import setup, find_packages

setup(
    name='TracSoftDueDate',
    version='1.0',
    packages=find_packages(exclude=['*.tests*']),
    entry_points = {
        'trac.plugins': [
            'softduedate = softduedate',
        ],
    },
)

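# The entry_points mapping above is what makes the plugin discoverable:
# installing the package registers the module softduedate under the
# trac.plugins group, and the host application enumerates that group at
# startup. A minimal sketch of the discovery mechanism, assuming the
# Python 3.10+ importlib.metadata API and shown independently of Trac's
# own component loader:
from importlib.metadata import entry_points

for ep in entry_points(group='trac.plugins'):
    print(ep.name, '->', ep.value)   # e.g. softduedate -> softduedate
    plugin_module = ep.load()        # imports the registered module
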
import os
from urllib.parse import urlparse

import requests
from PyPDF2 import PdfFileReader


def download_pdf(url):
    parse = urlparse(url)
    base_url = parse.scheme + '://' + parse.netloc
    try:
        redirect = requests.get(url, allow_redirects=False)
    except requests.exceptions.ConnectionError as e:
        print(e, 2)
        raise
    if redirect.status_code == 302:
        url = base_url + redirect.headers['location']
    else:
        pass
    filename = url.split('/')[-1]
    if not is_pdf(filename):
        return None
    if os.path.isfile(filename):
        return filename.strip()
    else:
        print(filename, 'downloading')
        request = requests.get(url)
        # https://stackoverflow.com/questions/34503412/download-and-save-pdf-file-with-python-requests-module
        with open(filename, 'wb') as f:
            f.write(request.content)
        return filename.strip()


def is_pdf(filename):
    if filename[-4:] != '.pdf':
        return False
    else:
        return True


def get_pdf_title(filename):
    # http://www.blog.pythonlibrary.org/2018/04/10/extracting-pdf-metadata-and-text-with-python/
    with open(filename, 'rb') as f:
        pdf = PdfFileReader(f)
        info = pdf.getDocumentInfo()
        pdf.getNumPages()
        title = info.title if info.title else filename
        return title.strip()

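# A short usage sketch for the two helpers above, assuming they live in this
# same script. The URL is only a placeholder, and the legacy PyPDF2
# PdfFileReader API used above (pre-3.0) is assumed to be installed.
if __name__ == '__main__':
    saved = download_pdf('https://example.com/papers/sample.pdf')
    if saved is None:
        print('link did not point at a .pdf file, nothing downloaded')
    else:
        print('saved :', saved)
        print('title :', get_pdf_title(saved))
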
'''
Compare two datasets of unpaired observations to determine whether
there is a statistically significant difference between them for a
specific confidence level, using the t-test methodology for unpaired
observations. Please note that this is not, strictly, a t-test
because it switches over to the standard normal distribution (SND)
when the number of effective degrees of freedom (DOF) is larger
than 32.

It is really useful for determining whether runtime or memory use has
changed between two different versions of software. The datasets are
completely independent of the program (i.e. are created by tools like
/usr/bin/time) so they can be used in a black box testing environment.

Each dataset contains a series of numbers to be compared. The numbers
must be greater than 0. That is a reasonable constraint given that
they typically represent something like elapsed time or memory used.
The size of the datasets can be different because we are treating
them as unpaired observations, but the smallest one must have more
than 2 entries. Typically you would like to have at least 50 entries
in each dataset.

You must specify the confidence level that you want to use to
determine significance. Typical confidence levels are 0.90 (90%),
0.95 (95%) and 0.99 (99%). The z value is computed numerically from
the confidence level and the number of effective degrees of freedom.
No table look ups are necessary. The methodology used to calculate
the z-value is described in detail here: https://github.com/jlinoff/ztables.

EXAMPLE 1 - two datasets in one file

Here is an example to make sense of it all. We want to compare two
versions of the foobar program to see if the second version is faster
than the first for the same inputs. The versions are 1.1 and 1.2. The
program takes about 2 minutes to run (120 seconds) and we want to
determine whether v1.2 is faster. The table below shows sample data
for 10 runs of each version.

   # Run time data collected for v1.1 and v1.2.
   #
   # Num    v1.1     v1.2
   # ===   =======  =======
      1    119.041  117.038
      2    119.670  119.733
      3    120.675  118.346
      4    118.628  117.261
      5    120.363  118.863
      6    118.076  117.545
      7    120.539  119.751
      8    118.880  119.042
      9    120.164  116.203
     10    119.134  118.049

For this example we assume that the data is stored in a single file,
so we have to specify which column to collect: the first dataset is
in column 2 and the second dataset is in column 3 of the same file.
Blank lines and lines where the token is not a floating point number
are ignored. Here is what the run looks like:

   $ ./cmpds.py -c 0.95 -k 2 3 data.txt
   With 95.0% confidence, dataset-2 is smaller than dataset-1 by about 1.1%.

As you can see, dataset-2 (v1.2) is slightly faster. Note that we use
-k to specify the columns because -c is already reserved for
specifying the confidence level. If you reverse the columns, you will
get the opposite result:

   $ ./cmpds.py -c 0.95 -k 3 2 data.txt
   With 95.0% confidence, dataset-2 is larger than dataset-1 by about 1.1%.

Normally it is easier to have each dataset exist in its own file
because, by default, the tool looks at the first token on each line
and collects it if the token is a floating point number.

EXAMPLE 2 - datasets in separate files

A more realistic example would be running a program called
blackbox-v1 50 times, collecting the timing output to a file, and
then running blackbox-v2 50 times and collecting its output:

   $ rm -f /tmp/blackbox-v1.out /tmp/blackbox-v2.out
   $ for((i=1;i<=50;i++)) ; do printf '\nExp %03d\n' $i ; /usr/bin/time -p blackbox-v1 >> /tmp/v1.out ; done
   $ for((i=1;i<=50;i++)) ; do printf '\nExp %03d\n' $i ; /usr/bin/time -p blackbox-v2 >> /tmp/v2.out ; done

We can now capture the real run time data by simply grepping out the
data like this:

   $ grep -w ^real /tmp/v1.out > /tmp/v1.ds
   $ grep -w ^real /tmp/v2.out > /tmp/v2.ds

The above command takes advantage of the fact that the posix time
format (-p) outputs the time data on 3 separate lines as shown in
this simple example:

   $ /usr/bin/time -p sleep 0.3
   real 0.30
   user 0.00
   sys 0.00

We can now use cmpds.py to figure out whether v2 is faster than v1
with 95% confidence:

   $ ./cmpds.py -c 0.95 /tmp/v1.ds /tmp/v2.ds
   With 95.0% confidence, dataset-2 is smaller than dataset-1 by about 1.3%.

That tells us that v2 is indeed slightly faster.
'''
# License: Open Source
# Copyright (c) 2016 by <NAME>
#
# REFERENCES:
#   <NAME> (1991). "The Art of Computer Systems Performance Analysis",
#   <NAME>iley and Sons, New York.
import argparse
import datetime
import inspect
import math
import os
import sys

#VERSION='0.1'  # Initial load.
VERSION = '0.2'  # Made the std dev calculation simpler.


# ================================================================
# Message utility functions.
# ================================================================
def _msg(prefix, frame, msg, ofp=sys.stdout):
    '''
    Base for printing messages.
    '''
    lineno = inspect.stack()[frame][2]
    now = datetime.datetime.now()
    ofp.write('{!s:<26} {} {:>5} - {}\n'.format(now, prefix, lineno, msg))


def info(msg, f=1):
    '''
    Write an info message to stdout.
    '''
    _msg('INFO', f+1, msg)


def infov(opts, msg, f=1):
    '''
    Write an info message to stdout in verbose mode only.
    '''
    if opts.verbose > 0:
        _msg('INFO', f+1, msg)


def warn(msg, f=1):
    '''
    Write a warning message to stdout.
    '''
    _msg('WARNING', f+1, msg)


def err(msg, f=1):
    '''
    Write an error message to stderr and exit.
    '''
    _msg('ERROR', f+1, msg, sys.stderr)
    sys.exit(1)


# ================================================================
# Statistical utility functions.
# See https://github.com/jlinoff/ztables for background.
# ================================================================
def gamma(x):
    '''
    Gamma function. Uses the Lanczos approximation (page 214 of
    Numerical Recipes in C) and natural logarithms; for integer
    values of x the exact factorial (x-1)! is used instead.

       gamma(7/2) = 3.32335097045
       gamma(4)   = 6.0
    '''
    ...


def pdf_t(x, dof):
    '''
    Calculate the probability density function (PDF) at x for a
    student-t distribution with dof degrees of freedom. This is
    basically the height of the curve at x.
    '''
    ...


def pdf_nd(x, s=1.0, u=0.0):
    '''
    Calculate the probability density function (PDF) at x for a
    normal distribution with standard deviation s and mean u.
    '''
    ...


def pdf_snd(x):
    '''
    Calculate the probability density function (PDF) at x for a
    standard normal distribution. It is exactly the same as
    pdf_nd(x, 1, 0) but is somewhat more efficient.
    '''
    ...


def area_under_curve(x1, x2, intervals, fct, *args, **kwargs):
    '''
    Calculate the approximate area under a curve between x1 and x2
    using trapezoidal approximation. The greater the number of
    intervals, the better the estimate at the cost of performance.
    '''
    ...


def binary_search_for_z(probability, tolerance, maxtop, minval, iterations, v, fct, *args):
    '''
    Get the z value that matches the specified probability by binary
    search over the area under the distribution curve fct.
    '''
    ...


# ================================================================
# Read file data.
# ================================================================
def read_file(opts, fn, col):
    '''
    Read column data from the file. The token in the requested column
    of each line is collected if it is a floating point number; blank
    lines, short lines and non-numeric tokens are skipped, and at
    least 3 data points are required.
    '''
    ...


# ================================================================
# t-test implementation
# ================================================================
def ttest(a, b, opts):
    '''
    Analyze unpaired observations to determine whether they are
    significantly different.

    Computes the means, variances and standard deviations of both
    datasets, the mean difference and its standard deviation, the
    effective number of degrees of freedom and the confidence
    interval for the mean difference. The difference is significant
    when that interval does not cross zero, and it is then reported
    as a percentage of the first dataset's mean.
    '''
    ...


# ================================================================
# Options
# ================================================================
def getopts():
    '''
    Get the command line options using argparse: -c/--conf for the
    confidence level (0 < c < 1), -k/--cols for the two data columns,
    --internal for the numerical integration parameters (tolerance,
    lower and upper bounds, intervals), the SND threshold for the
    effective degrees of freedom, -v/--verbose, -V/--version and the
    positional FILES.
    '''
    ...


# ================================================================
# Main
# ================================================================
def main():
    opts = getopts()
    af = opts.FILES[0]
    bf = opts.FILES[1] if len(opts.FILES) == 2 else af
    ac = opts.cols[0]
    bc = opts.cols[1]
    infov(opts, 'dataset-1 file: {}'.format(af))
    infov(opts, 'dataset-2 file: {}'.format(bf))
    infov(opts, 'dataset-1 col: {}'.format(ac))
    infov(opts, 'dataset-2 col: {}'.format(bc))
    a = read_file(opts, af, ac)
    b = read_file(opts, bf, bc)
    ttest(a, b, opts)


if __name__ == '__main__':
    main()

Here is how you might do it: $ rm -f", "(PDF) for a standard normal distribution. s = standard deviation (1 for a", "= sum([(xb - mb) ** 2 for xb in b]) / float(nb -", "_msg('WARNING', f+1, msg) def err(msg, f=1): ''' Write an error message to stderr", "= tolerance * 2 # start the loop while diff > tolerance: mid", "0.3 real 0.30 user 0.00 sys 0.00 At this point we have the", "parameters t = opts.internal[0] lb = opts.internal[1] ub = opts.internal[2] intervals = int(opts.internal[3])", "samples. $ {0} ds-50-110-112.txt ds-50-108-112.txt With 99.0% confidence, dataset-2 is smaller than dataset-1", "rm -f /tmp/blackbox-v1.out /tmp/blackbox-v2.out $ for((i=1;i<=50;i++)) ; do printf '\\nExp %03d\\n' $i ;", "data is not in a single column in a file, you must explicitly", "used instead of a t-distribution. The default is %(default)s. ''') parser.add_argument('-v', '--verbose', action='count',", "(PDF) at x for a student-t distribution with dof degrees of freedom. This", "on 3 separate lines as shown in this simple example: $ /usr/bin/time -p", "same file. Blank lines and lines where the token is not a floating", "50 108 112 > ds-50-108-112.txt $ {0} ds-50-110-112.txt ds-50-108-112.txt With 95.0% confidence, dataset-2", "observations (t-test) but the smallest one must have more than 2 entries. Typically", "token on each line and collects it if the token is a floating", "be greater than 0. That is a reasonable constraint given that they typically", "run time data by simply grepping out the data like this: $ grep", "dataset-2 is larger than dataset-1 by about {:,.1f}%.'.format(clp, per)) else: print('With {:.1f}% confidence,", "112 > ds-50-108-112.txt $ {0} ds-50-110-112.txt ds-50-108-112.txt With 95.0% confidence, dataset-2 is smaller", "make up the computation. Specify -v -v to internal details about the z", "{}'.format(intervals)) infov(opts, 'internal minval: {}'.format(minv)) infov(opts, 'internal maxval: {}'.format(maxv)) v = True if", "You can see the ignored data in verbose mode. If only one file", "is a floating point number. When the data is not in a single", "/ xden ) y = exp / den return y def pdf_snd(x): '''", "'COL2'), help='''The columns that define each dataset. The first column is for the", "data values are created by tools like /usr/bin/time) so they can be used", "x7 = -x3 + x6 # ln(gamma(x)) g = math.exp(x7) return g def", "width is fixed (proportional to how the interval is sliced). The height of", "ds-50-110-112.txt ds-50-108-112.txt With 95.0% confidence, dataset-2 is smaller than dataset-1 by about 0.8%.", "threshold, the SND is used instead of a t-distribution. The default is %(default)s.", "the std dev calculation simpler. # ================================================================ # # Message utility functions. #", "for determining whether runtime or memory use has changed between two different versions", "tool will automatically determine the associated z-value based on the confidence level and", "the specified percentage. ''' # Binary search to find the closest value. z", "are necessary. The methodology used to calculate the z-value is described in detail", "is smaller than dataset-1 by about {:,.1f}%.'.format(clp, per)) else: print('With {:.1f}% confidence, there", "its output. Here is how you might do it: $ rm -f /tmp/blackbox-v1.out", "is at the cost of performance. ''' assert x2 > x1 # just", "is used instead of a t-distribution. The default is %(default)s. 
''') parser.add_argument('-v', '--verbose',", "mean (0 for a standard normal distribution) This is the height of the", "i in range(1, int(x))]) # Lanczos approximation, page 214 of Numerical Recipes in", "datasets. ''') opts = parser.parse_args() if opts.cols[0] < 1: parser.error('column 1 must be", "of software. The datasets are completely independent of the program (i.e. the data", "and collecting its output. Here is how you might do it: $ rm", "-p blackbox-v1 >> /tmp/v1.out ; done $ for((i=1;i<=50;i++)) ; do printf '\\nExp %03d\\n'", "of effective degrees of freedom (DOF) is larger than 32. It is really", "parser.error('column 1 must be greater then 0') if opts.cols[1] < 1: parser.error('column 1", "= '\\n {0} [OPTIONS] <DATASET-1> [<DATASET-2>]'.format(base) desc = 'DESCRIPTION:{0}'.format('\\n '.join(__doc__.split('\\n'))) epilog = r'''", "= bot + ((top - bot) / 2.0) z = mid - adjustment", "continue ds.append(f) except ValueError: if opts.verbose > 1: info('skipping line {} in {}:", "height of the curve at x. It is exactly the same as pdf_nd(x,", "the curve at x. ''' assert dof > 2 x1 = gamma((float(dof) +", "than dataset-1 by about 0.8%. # Example 4: Dataset-2 is slightly smaller (has", "bc = opts.cols[1] infov(opts, 'dataset-1 file: {}'.format(af)) infov(opts, 'dataset-2 file: {}'.format(bf)) infov(opts, 'dataset-1", "{}'.format(bc)) a = read_file(opts, af, ac) b = read_file(opts, bf, bc) ttest(a, b,", "basically the height of the curve at x. ''' assert dof > 2", "lineno = inspect.stack()[frame][2] now = datetime.datetime.now() ofp.write('{!s:<26} {} {:>5} - {}\\n'.format(now, prefix, lineno,", "this help message and exit.\\n ', } return lookup.get(s, s) argparse._ = gettext", "cat data.txt # v1.1 v1.2 # ======= ======= 1 119.041 117.038 2 119.670", "this threshold, the SND is used instead of a t-distribution. The default is", "# ================================================================ # # Read file data. # # ================================================================ def read_file(opts, fn,", "+= rectangle_area + triangle_area # trapezoid area x += width # advance to", "pdf_snd) else: infov(opts, 'use t-{} distribution'.format(dofr)) z = binary_search_for_z(cl, t, maxv, minv, intervals,", "2 * (s ** 2) den = s * math.sqrt(2 * math.pi) exp", "# It is to the left. bot = mid else: break # Sanity", "/ 2.0 x5 = x3 ** -x4 y = (x1 * x5) /", "ds-10-100-120.txt With 95.0% confidence, there is no significant difference between the datasets. #", "(dx2 / 2) y = exp / den return y def area_under_curve(x1, x2,", "cp = 1.0 - (2.0 * (1.0 - q)) diff = abs(cp -", "user to play with the parameters t = opts.internal[0] lb = opts.internal[1] ub", "freedom. This is basically the height of the curve at x. ''' assert", "v1.2 # ======= ======= 1 119.041 117.038 2 119.670 119.733 3 120.675 118.346", "data.txt # v1.1 v1.2 # ======= ======= 1 119.041 117.038 2 119.670 119.733", "f=1): ''' Write a warning message to stdout. ''' _msg('WARNING', f+1, msg) def", "to be compared. The numbers must be greater than 0. That is a", "parser.add_argument('-c', '--conf', type=float, default=0.95, action=get_conf_level(), metavar=('FLOAT'), help='''The confidence level such that 0 <", "119.134 118.049 $ {0} --cols 2 3 data.txt With 95.0% confidence, dataset-2 is", "top={}, bot={}, mid={}, z={}, q={}'.format( probability, cp, tolerance, maxtop, minval, iterations, top, bot,", "already reserved for specifying the confidence level. 
If you reverse the columns, you", "sanity check assert intervals > 1 # another sanity check total_area = 0.0", "faster than the first for the same inputs. The versions are 1.1 and", "# ln(gamma(x)) g = math.exp(x7) return g def pdf_t(x, dof): ''' Calculate the", "0.1208650973866179e-2, -0.5395239384953e-5, ] c0 = 1.000000000190015 c1 = 2.5066282746310005 x1 = float(x) +", "smaller than dataset-1 by about 1.1%. As you can see, dataset-2 (v1.2) is", "data from the file. ''' ds = [] try: with open(fn, 'r') as", "to a file and then running blackbox-v2 and collecting its output. Here is", "Num v1.1 v1.2 # === ======= ======= 1 119.041 117.038 2 119.670 119.733", "to use to determine whether the datasets differ. Typical confidence levels 0.90 (90%),", "> x1 # just a sanity check assert intervals > 1 # another", "= True if opts.verbose > 1 else False if dofr > opts.snd_threshold: #", "2 119.670 119.733 3 120.675 118.346 4 118.628 117.261 5 120.363 118.863 6", "to play with the parameters t = opts.internal[0] lb = opts.internal[1] ub =", "normal distribution (SND) threshold. When the number of effective degrees of freedom (DOF)", "values are created by tools like /usr/bin/time) so they can be used in", "-c 0.95 -k 3 2 data.txt With 95.0% confidence, dataset-2 is larger than", "are treating the samples as unpaired observations (t-test) but the smallest one must", "files so we can use cmpds.py to figure out whether v2 is faster", "tolerance: mid = bot + ((top - bot) / 2.0) z = mid", "is slightly smaller (has faster runtime) with 99% confidence. # Both runs have", "= mid - adjustment q = area_under_curve(minval, z, iterations, fct, *args) cp =", "= 2.5066282746310005 x1 = float(x) + 5.5 x2 = (float(x) + 0.5) *", "No significant difference with 95% confidence. # The dataset is used. $ ./gends.py", "are in the same file. $ cat data.txt # v1.1 v1.2 # =======", "level such that 0 < c < 1. The default is %(default)s. ''')", "two separate files because, by default, the tool looks at the first token", "1, 0) but is somewhat more efficient. ''' dx2 = float(x) ** 2", "err('too few data points at column {}, found {}, need at least 3", "different files so we can use cmpds.py to figure out whether v2 is", "of the interval. The accumulation of the areas provides an estimate of the", "1: help $ {0} -h # Example 2: No significant difference with 95%", "+ 0.5, 0) minv = -maxv infov(opts, 'internal threshold: {:.1f}'.format(t)) infov(opts, 'internal lower", "2.0 top = maxtop bot = 0.0 diff = tolerance * 2 #", "second dataset is in column 3 of the same file. Blank lines and", "(float((x ** 2)) / float(dof)) x4 = float((dof + 1)) / 2.0 x5", "''' dx2 = float(x) ** 2 den = math.sqrt(2 * math.pi) exp =", "- (2.0 * (1.0 - q)) diff = abs(cp - probability) if v:", "Write an info message to stdout. ''' _msg('INFO', f+1, msg) def infov(opts, msg,", "effective degrees of freedom (DOF) exceeds this threshold, the SND is used instead", "0') if opts.cols[1] < 1: parser.error('column 1 must be greater then 0') if", "the null hypothesis: {}'.format(significant)) # Report the result. clp = cl * 100.", "changed between two different versions of software. The datasets are completely independent of", "confidence, dataset-2 is larger than dataset-1 by about {:,.1f}%.'.format(clp, per)) else: print('With {:.1f}%", "117.038 2 119.670 119.733 3 120.675 118.346 4 118.628 117.261 5 120.363 118.863", "''') parser.add_argument('-v', '--verbose', action='count', default=0, help='''Increase the level of verbosity. 
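The 1.1% conclusion can be sanity-checked independently of cmpds.py. The short script below is only a cross-check, not how the tool works: it assumes SciPy is installed and runs an off-the-shelf Welch t-test on the two columns from the table above.

    from scipy import stats

    # The v1.1 and v1.2 columns from the table in EXAMPLE 1.
    v11 = [119.041, 119.670, 120.675, 118.628, 120.363,
           118.076, 120.539, 118.880, 120.164, 119.134]
    v12 = [117.038, 119.733, 118.346, 117.261, 118.863,
           117.545, 119.751, 119.042, 116.203, 118.049]

    # Welch's t-test for unpaired samples with unequal variances.
    result = stats.ttest_ind(v11, v12, equal_var=False)
    print(result.pvalue)   # well below 0.05, so the difference is significant at 95%

    # Percent difference of the means, the figure cmpds.py reports.
    m1 = sum(v11) / len(v11)
    m2 = sum(v12) / len(v12)
    print(round(100.0 * (m1 - m2) / m1, 1))   # about 1.1

Agreement here is expected but not guaranteed in general, since cmpds.py makes its decision from the confidence interval of the mean difference rather than from a p-value.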
EXAMPLE 2 - datasets in separate files

A more realistic example is running a program called blackbox-v1 50 times and collecting the timing output to a file, then doing the same for blackbox-v2. Here is how you might do it:

    $ rm -f /tmp/v1.out /tmp/v2.out
    $ for((i=1;i<=50;i++)) ; do printf '\nExp %03d\n' $i ; /usr/bin/time -p blackbox-v1 >> /tmp/v1.out ; done
    $ for((i=1;i<=50;i++)) ; do printf '\nExp %03d\n' $i ; /usr/bin/time -p blackbox-v2 >> /tmp/v2.out ; done

We can now capture the real run time data by simply grepping it out:

    $ grep -w ^real /tmp/v1.out > /tmp/v1.ds
    $ grep -w ^real /tmp/v2.out > /tmp/v2.ds

This takes advantage of the fact that the POSIX time format (-p) writes its output on three separate lines, as in this simple example:

    $ /usr/bin/time -p sleep 0.3
    real 0.30
    user 0.00
    sys 0.00

At this point we have the unpaired observations from both runs in two different files, so we can use cmpds.py to figure out whether v2 is faster than v1 at a 95% confidence level:

    $ ./cmpds.py -c 0.95 /tmp/v1.ds /tmp/v2.ds
    With 95.0% confidence, dataset-2 is smaller than dataset-1 by about 1.3%.

That tells us that v2 is indeed slightly faster.

The script itself is MIT licensed (copyright (c) 2016 by <NAME>) and follows <NAME> (1991), "The Art of Computer Systems Performance Analysis", <NAME>iley and Sons, New York. It uses only the Python standard library (argparse, datetime, inspect, math, os, sys); VERSION is '0.2', the release that simplified the standard deviation calculation. The code is organized into a few banner-commented sections.

Message utilities. _msg(prefix, frame, msg, ofp=sys.stdout) is the base for printing messages; it uses inspect.stack() to report the caller's line number next to a timestamp and the message prefix. info() and warn() write INFO and WARNING messages to stdout, infov() writes INFO messages only when --verbose is set, and err() writes an ERROR message to stderr and exits.

Statistical utilities (see https://github.com/jlinoff/ztables for background). gamma(x) implements the gamma function with the Lanczos approximation (page 214 of Numerical Recipes in C) using natural logarithms; for integer x it reduces to (x-1)!, and for example gamma(3/2) = 0.886226925453, gamma(5/2) = 1.32934038818, gamma(7/2) = 3.32335097045 and gamma(4) = 6.0. pdf_nd(x, s, u) is the probability density function of a normal distribution with standard deviation s and mean u; pdf_snd(x) is the slightly more efficient standard normal special case, and pdf_t(x, dof) is the PDF of a Student's t distribution with dof degrees of freedom, built from gamma(). area_under_curve(x1, x2, intervals, fct, ...) integrates any of these PDFs with a trapezoidal approximation (fixed-width slices, a rectangle plus a triangular adjustment for the height change); more intervals give a better estimate at the cost of performance. binary_search_for_z(probability, tolerance, maxtop, minval, iterations, ...) then bisects for the z value whose central area matches the requested confidence level, stopping when the computed probability is within the tolerance. A sketch of this machinery follows.
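The numerical z lookup is what lets the script avoid printed z and t tables. Below is a minimal, self-contained sketch of that idea for the standard normal case only; the name z_for_confidence, the [-7, 7] search window and the bisection on z (rather than on the probability tolerance the script uses) are simplifications for illustration, and the real code applies the same search to the Student's t PDF as well.

    import math

    def pdf_snd(x):
        # Probability density function of the standard normal distribution.
        return math.exp(-x * x / 2.0) / math.sqrt(2.0 * math.pi)

    def area_under_curve(x1, x2, intervals, fct):
        # Trapezoidal approximation of the area under fct between x1 and x2.
        width = (x2 - x1) / float(intervals)
        x, prev, total = x1, fct(x1), 0.0
        for _ in range(intervals):
            x += width
            cur = fct(x)
            total += (prev + cur) * width / 2.0
            prev = cur
        return total

    def z_for_confidence(cl, tolerance=1e-5, minval=-7.0, intervals=10000):
        # Bisect for the z whose central area P(-z <= X <= z) matches cl.
        lo, hi = 0.0, 7.0
        while hi - lo > tolerance:
            z = (lo + hi) / 2.0
            q = area_under_curve(minval, z, intervals, pdf_snd)  # ~ P(X <= z)
            central = 2.0 * q - 1.0                              # P(-z <= X <= z)
            if central > cl:
                hi = z
            else:
                lo = z
        return (lo + hi) / 2.0

    print(round(z_for_confidence(0.95), 2))   # 1.96
    print(round(z_for_confidence(0.99), 2))   # 2.58

In the script, ttest() feeds either pdf_snd or pdf_t into the equivalent search, with the tolerance, bounds and interval count taken from the --internal option.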
In this case, the first dataset is in column 2 and the", "is column 1 for both datasets. ''') parser.add_argument('-s', '--snd-threshold', type=int, default=32, metavar=('UINT'), help='''The", "Calculate the probability density function (PDF) for a standard normal distribution. s =", "least 50 entries in each dataset. You must specify the confidence level that", "that they typically represent something like elapsed time or memory used. The size", "Calculate the approximate area under a curve using trapezoidal approximation. It breaks the", "== 0: # Optimization for integer values: (x-1)!. return reduce(lambda a, b: a", "3 2 data.txt With 95.0% confidence, dataset-2 is larger than dataset-1 by about", "b: {:.3f}'.format(mb)) # variances vara = sum([(xa - ma) ** 2 for xa", "'UPPER', 'INTERVALS'), help='''Factors used for internal computations. You should never need to change", "like: $ ./cmpds.py -c 0.95 -k 2 3 data.txt With 95.0% confidence, dataset-2", "a warning message to stdout. ''' _msg('WARNING', f+1, msg) def err(msg, f=1): '''", "# Initial load. VERSION='0.2' # Made the std dev calculation simpler. # ================================================================", "(95%) and 0.99 (99%). The tool will automatically determine the associated z-value based", "================================================================ # # t-test implementation # # ================================================================ def ttest(a, b, opts): '''", "2) y = exp / den return y def area_under_curve(x1, x2, intervals, fct,", "DOF: {:.2f}'.format(dof)) dofr = int('{:.0f}'.format(dof)) infov(opts, 'effective DOF (rounded): {}'.format(dofr)) # confidence interval", "= binary_search_for_z(cl, t, maxv, minv, intervals, v, pdf_snd) else: infov(opts, 'use t-{} distribution'.format(dofr))", "it exist in two separate files because, by default, the tool looks at", "get rid of the \":\" reliably. def gettext(s): lookup = { 'usage: ':", "# Copyright (c) 2016 by <NAME> #REFERENCES: # <NAME> (1991). \"The Art Computer", "not a floating point number it is ignored. The default is column 1", "v1.2 is faster. The table below shows sample data 10 runs for each", "the confidence level and the number of effective degrees of freedom. No table", "use to determine whether the datasets differ. Typical confidence levels 0.90 (90%), 0.95", "is slightly smaller (has faster runtime) with 95% confidence. # Both runs have", "done $ for((i=1;i<=50;i++)) ; do printf '\\nExp %03d\\n' $i ; /usr/bin/time -p blackbox-v2", "dataset-1 by about 0.8%. # Example 5: Dataset-1 and dataset-2 are in the", "interval is sliced). The height of each rectangle is the pdf function value", "''') parser.add_argument('-k', '--cols', nargs=2, type=int, default=[1,1], metavar=('COL1', 'COL2'), help='''The columns that define each", "that the data is stored in a single file but normally it is", "to collect. In this case, the first dataset is in column 2 and", "================================================================ def ttest(a, b, opts): ''' Analyze unpaired observations to determine whether they", "in two different files so we can use cmpds.py to figure out whether", "datasets. 
# Example 3: Dataset-2 is slightly smaller (has faster runtime) with 95%", "whether there is a significant difference between them for a specific confidence level", "tools like /usr/bin/time) so they can be used in a black box testing", "'positional arguments': 'POSITIONAL ARGUMENTS', 'optional arguments': 'OPTIONAL ARGUMENTS', 'show this help message and", "* width) / 2.0 # adjustment based on height change total_area += rectangle_area", "confidence. # Both runs have 50 samples. $ {0} ds-50-110-112.txt ds-50-108-112.txt With 99.0%", "x2, intervals, fct, *args, **kwargs): ''' Calculate the approximate area under a curve", "# advance to the next edge py = y # remember the previous", "<DATASET-1> [<DATASET-2>]'.format(base) desc = 'DESCRIPTION:{0}'.format('\\n '.join(__doc__.split('\\n'))) epilog = r''' EXAMPLES: # Example 1:", "default is %(default)s. ''') parser.add_argument('--internal', type=float, nargs=4, default=[0.00001, -3.4, 3.4, 10000], metavar=('TOLERANCE', 'LOWER',", "under the curve. The greater the number of intervals the better the estimate", "of freedom dof_num = (sa2qna + sb2qnb)**2 dof_dena = (1. / (na +", "used to calculate the z-value is described in detail here: https://github.com/jlinoff/ztables. To determine", "like to have at least 50 entries in each dataset. You must specify", "* gamma((float(dof) / 2.0)) x3 = 1.0 + (float((x ** 2)) / float(dof))", "{:.1f}%'.format(100.*cl)) na = float(len(a)) nb = float(len(b)) infov(opts, 'na: {}'.format(na)) infov(opts, 'nb: {}'.format(nb))", "two different versions of software. The datasets are completely independent of the program", "c0 x5 = float(x) for i in range(6): x5 += 1.0 x4 +=", "/ ma infov(opts, 'percentage: {}'.format(per)) if club < 0: print('With {:.1f}% confidence, dataset-2", "top = mid elif probability > cp: # It is to the left.", "The first column is for the first dataset. The second column is for", "[76.18009172947146, -86.50532032941677, 24.01409824083091, -1.231739572450155, 0.1208650973866179e-2, -0.5395239384953e-5, ] c0 = 1.000000000190015 c1 = 2.5066282746310005", "data is ignored which allows you to add comments and blank spaces. You", "option_string=None): if 0. < values < 1.0: setattr(args, self.dest, values) else: msg =", "for both datasets. ''') opts = parser.parse_args() if opts.cols[0] < 1: parser.error('column 1", "data collected for v1.1 and v1.2. # # Num v1.1 v1.2 # ===", "= math.sqrt(dof * math.pi) * gamma((float(dof) / 2.0)) x3 = 1.0 + (float((x", "ignored which allows you to add comments and blank spaces. You can see", "# variances vara = sum([(xa - ma) ** 2 for xa in a])", "1.0 x4 += c[i] / x5 x6 = math.log((c1 * x4) / float(x))", "except ValueError: if opts.verbose > 1: info('skipping line {} in {}: not a", "Note that we use -k to specify the columns because -c is already", "Main # # ================================================================ def main(): opts = getopts() af = opts.FILES[0] bf", "= (1. / (na + 1.)) * sa2qna**2 dof_denb = (1. / (nb", "advance to the next edge py = y # remember the previous height", "the ignored data in verbose mode. If only one file is specified, is", "1 must be greater then 0') if opts.cols[1] < 1: parser.error('column 1 must", "the same inputs. The versions are 1.1 and 1.2. The program takes about", "for difference: [{:3f} .. {:3f}]'.format(100.*cl, cllb, club)) crosses_zero = cllb < 0 <", "of freedom. No table look ups are necessary. EXAMPLE 1 - two datasets", "file Here is an example to make sense of it all. 
We want", "the second dataset is in column 3 of the same file. Blank lines", "organized in columns with one entry per line. Non-numeric data is ignored which", "1: parser.error('column 1 must be greater then 0') if len(opts.FILES) > 2: parser.error('only", "Dataset-2 is slightly smaller (has faster runtime) with 99% confidence. # Both runs", "to use to determine significance. Typical confidence levels 0.90 (90%), 0.95 (95%) and", "x5) / x2 return y def pdf_nd(x, s=1.0, u=0.0): ''' Calculate the probability", "0. That is a reasonable constraint given that they typically represent something like", "================================================================ # # Options # # ================================================================ def getopts(): ''' Get the command", "about 1.1%. EXAMPLE 2 - datasets in separate files A more realistic example", "/tmp/v1.out > /tmp/v1.ds $ grep -w ^real /tmp/v2.out > /tmp/v2.ds The above command", "independent of the program (i.e. the data values are created by tools like", "std dev calculation simpler. # ================================================================ # # Message utility functions. # #", "(1991). \"The Art Computer Systems Performance Analysis\", <NAME>iley and Sons, New York. import", "9 120.164 116.203 10 119.134 118.049 $ {0} --cols 2 3 data.txt With", "which allows you to add comments and blank spaces. You can see the", "program to see if the second version is faster than the first for", "dataset-1 by about 1.1%. As you can see, dataset-2 (v1.2) is slightly faster.", "difference md = ma - mb infov(opts, 'mean diff: {:.3f}'.format(md)) # standard deviation", "a file, you must explicitly specify the which column to collect. In this", "than dataset-1 by about {:,.1f}%.'.format(clp, per)) else: print('With {:.1f}% confidence, there is no", "minval, iterations, top, bot, mid, z, q)) if probability < cp: # It", "xa in a]) / float(na - 1.) varb = sum([(xb - mb) **", "t-{} distribution'.format(dofr)) z = binary_search_for_z(cl, t, maxv, minv, intervals, v, pdf_t, dof) x", "======= ======= 1 119.041 117.038 2 119.670 119.733 3 120.675 118.346 4 118.628", "24.01409824083091, -1.231739572450155, 0.1208650973866179e-2, -0.5395239384953e-5, ] c0 = 1.000000000190015 c1 = 2.5066282746310005 x1 =", "dx2 = dx * dx xden = 2 * (s ** 2) den", "= sum(a) / na mb = sum(b) / nb infov(opts, 'mean a: {:.3f}'.format(ma))", "distribution (SND) infov(opts, 'use standard normal distribution (SND)') z = binary_search_for_z(cl, t, maxv,", "at x for a student-t distribution with dof degrees of freedom. This is", "2 files may be specified') if opts.snd_threshold < 30: parser.error('it does not make", "= opts.cols[0] bc = opts.cols[1] infov(opts, 'dataset-1 file: {}'.format(af)) infov(opts, 'dataset-2 file: {}'.format(bf))", "smaller than dataset-1 by about {:,.1f}%.'.format(clp, per)) else: print('With {:.1f}% confidence, there is", "capitalize the built-in headers. # Unfortunately I can't get rid of the \":\"", "to make sense of it all. We want to compare two versions of", "argparse import datetime import inspect import math import os import sys #VERSION='0.1' #", "than dataset-1 by about 1.1%. EXAMPLE 2 - datasets in separate files A", "opts.verbose > 0: _msg('INFO', f+1, msg) def warn(msg, f=1): ''' Write a warning", "'\\nExp %03d\\n' $i ; /usr/bin/time -p blackbox-v2 >> /tmp/v2.out ; done We can", "0.00 At this point we have the unpaired observations from both runs in", "= math.e ** ( -dx2 / xden ) y = exp / den", "to add comments and blank spaces. 
You can see the ignored data in", "avoid divide by 0 errors if opts.verbose > 1: info('skipping line {} in", "like elapsed time or memory used. The size of the datasets can be", "--cols 2 3 data.txt With 95.0% confidence, dataset-2 is smaller than dataset-1 by", "float(x1)) / float(intervals) x = float(x1) py = float(fct(x, *args, **kwargs)) for i", "# Positional arguments at the end. parser.add_argument('FILES', nargs='+', help='''The files with the run", "2 3 data.txt With 95.0% confidence, dataset-2 is smaller than dataset-1 by about", "sys.stderr) sys.exit(1) # ================================================================ # # Statistical utility functions. # See https://github.com/jlinoff/ztables for", "Dataset-2 is slightly smaller (has faster runtime) with 95% confidence. # Both runs", "gettext # to capitalize help headers base = os.path.basename(sys.argv[0]) name = os.path.splitext(base)[0] usage", "nargs=4, default=[0.00001, -3.4, 3.4, 10000], metavar=('TOLERANCE', 'LOWER', 'UPPER', 'INTERVALS'), help='''Factors used for internal", "opts.internal[0] lb = opts.internal[1] ub = opts.internal[2] intervals = int(opts.internal[3]) maxv = 2", "blackbox-v2 >> /tmp/v2.out ; done We can now capture the real run time", "the column is not a floating point number it is ignored. The default", "= 0.0 # allow the user to play with the parameters t =", "i in range(intervals): y = float(fct(x, *args, **kwargs)) rectangle_area = width * y", "<NAME> #REFERENCES: # <NAME> (1991). \"The Art Computer Systems Performance Analysis\", <NAME>iley and", "over to the standard normal distribution (SND) when the number of effective degrees", "a significant difference between them for a specific confidence level using the t-test", "Here is an example to make sense of it all. We want to", "is really useful for determining whether runtime or memory use has changed between", "1: parser.error('column 1 must be greater then 0') if opts.cols[1] < 1: parser.error('column", "with the run time data. The data must be organized in columns with", "smaller (has faster runtime) with 95% confidence. # Both runs have 50 samples.", "95.0% confidence, dataset-2 is larger than dataset-1 by about 1.1%. EXAMPLE 2 -", "def info(msg, f=1): ''' Write an info message to stdout. ''' _msg('INFO', f+1,", "time data collected for v1.1 and v1.2. # # Num v1.1 v1.2 #", "samples. # The data is specifically generated to show the difference. $ ./gends.py", "help headers base = os.path.basename(sys.argv[0]) name = os.path.splitext(base)[0] usage = '\\n {0} [OPTIONS]", "be running a program called blackbox-v1 50 times and collecting the timing output", "2 x1 = gamma((float(dof) + 1.0) / 2.0) x2 = math.sqrt(dof * math.pi)", "utility functions. # # ================================================================ def _msg(prefix, frame, msg, ofp=sys.stdout): ''' Base for", "# # Read file data. # # ================================================================ def read_file(opts, fn, col): '''", "exit.\\n ', } return lookup.get(s, s) argparse._ = gettext # to capitalize help", "cllb < 0 < club significant = not crosses_zero infov(opts, 'crosses zero: {}'.format(crosses_zero))", "= line.split() if len(tokens) < col: continue token = tokens[col-1] try: f =", "the datasets can be different because we are treating the samples as unpaired", "================================================================ # # Message utility functions. 
# # ================================================================ def _msg(prefix, frame, msg,", "# Trick to capitalize the built-in headers. # Unfortunately I can't get rid", "/tmp/v2.out ; done We can now capture the real run time data by", "intervals, fct, *args, **kwargs): ''' Calculate the approximate area under a curve using", "show the difference. $ ./gends.py 50 110 112 > ds-50-110-112.txt $ ./gends.py 50", "sum(b) / nb infov(opts, 'mean a: {:.3f}'.format(ma)) infov(opts, 'mean b: {:.3f}'.format(mb)) # variances", "standard normal distribution (SND) when the number of effective degrees of freedom (DOF)", "the number of effective degrees of freedom. No table look ups are necessary.", "will get the opposite result: $ ./cmpds.py -c 0.95 -k 3 2 data.txt", "For this example we assume that the data is stored in a single", "determine whether there is a significant difference between them for a specific confidence", "2.0 # adjustment based on height change total_area += rectangle_area + triangle_area #", "setattr(args, self.dest, values) else: msg = 'argument \"{}\" out of range (0..1)'.format(self.dest) parser.error(msg)", "af, ac) b = read_file(opts, bf, bc) ttest(a, b, opts) if __name__ ==", "used. $ ./gends.py 10 100 120 > ds-10-100-120.txt $ {0} ds-10-100-120.txt ds-10-100-120.txt With", "the probability density function (PDF) for a normal distribution. s = standard deviation", "2 # start the loop while diff > tolerance: mid = bot +", "the result. clp = cl * 100. if significant: per = 100. *", "you would like to have at least 50 entries in each dataset. You", "You must specify the confidence level that you want to use to determine", "do printf '\\nExp %03d\\n' $i ; /usr/bin/time -p blackbox-v2 >> /tmp/v2.out ; done", "line. Non-numeric data is ignored which allows you to add comments and blank", "x5 x6 = math.log((c1 * x4) / float(x)) x7 = -x3 + x6", "want to use to determine whether the datasets differ. Typical confidence levels 0.90", "fct, *args): ''' Get the z value that matches the specified percentage. '''", "is already reserved for specifying the confidence level. If you reverse the columns,", "sleep 0.3 real 0.30 user 0.00 sys 0.00 At this point we have", "both runs in two different files so we can use cmpds.py to figure", "threshold. When the number of effective degrees of freedom (DOF) exceeds this threshold,", "greater than 0. That is a reasonable constraint given that they typically represent", "ttest(a, b, opts): ''' Analyze unpaired observations to determine whether they are significantly", "info(msg, f=1): ''' Write an info message to stdout. ''' _msg('INFO', f+1, msg)", "for both datasets. ''') parser.add_argument('-s', '--snd-threshold', type=int, default=32, metavar=('UINT'), help='''The standard normal distribution", "(nb + 1.)) * sb2qnb**2 dof = (dof_num / (dof_dena + dof_denb)) -", "= float(x) for i in range(6): x5 += 1.0 x4 += c[i] /", "for unpaired observations. Please note that this is not, strictly, a t-test because", "dof_denb)) - 2.0 infov(opts, 'effective DOF: {:.2f}'.format(dof)) dofr = int('{:.0f}'.format(dof)) infov(opts, 'effective DOF", "parser.add_argument('FILES', nargs='+', help='''The files with the run time data. The data must be", "def _msg(prefix, frame, msg, ofp=sys.stdout): ''' Base for printing messages. ''' lineno =", "you can see, dataset-2 (v1.2) is slightly faster. 
Note that we use -k", "(2.0 * (1.0 - q)) diff = abs(cp - probability) if v: info('p={},", "# ================================================================ # # Main # # ================================================================ def main(): opts = getopts()", "infov(opts, 'stddev b: {:.3f}'.format(stddevb)) # mean difference md = ma - mb infov(opts,", "* abs(md) / ma infov(opts, 'percentage: {}'.format(per)) if club < 0: print('With {:.1f}%", "dataset-2 is smaller than dataset-1 by about 1.1%. As you can see, dataset-2", "not a number: {}'.format(ln, fn, token)) continue except IOError: err('could not read file:", "than dataset-1 by about 0.8%. # Example 5: Dataset-1 and dataset-2 are in", "that this is not, strictly, a t-test because it switches over to the", "then 0') if opts.cols[1] < 1: parser.error('column 1 must be greater then 0')", "(float(x2) - float(x1)) / float(intervals) x = float(x1) py = float(fct(x, *args, **kwargs))", "- adjustment q = area_under_curve(minval, z, iterations, fct, *args) cp = 1.0 -", "10 runs for each version. # Run time data collected for v1.1 and", "v{0}'.format(VERSION), help=\"\"\"Show program's version number and exit. \"\"\") # Positional arguments at the", "above command takes advantage of the fact that posix time format (-p) outputs", "Positional arguments at the end. parser.add_argument('FILES', nargs='+', help='''The files with the run time", "columns because -c is already reserved for specifying the confidence level. If you", "just a sanity check assert intervals > 1 # another sanity check total_area", "make sense to use SND for {} elements'.format(opts.snd_threshold)) return opts # ================================================================ #", "argparse.RawTextHelpFormatter parser = argparse.ArgumentParser(formatter_class=afc, description=desc[:-2], usage=usage, epilog=epilog) parser.add_argument('-c', '--conf', type=float, default=0.95, action=get_conf_level(), metavar=('FLOAT'),", "> 1: info('skipping line {} in {}: number is too small {}'.format(ln, fn,", "math.pi) exp = math.e ** ( -dx2 / xden ) y = exp", "7 120.539 119.751 8 118.880 119.042 9 120.164 116.203 10 119.134 118.049 For", "files because, by default, the tool looks at the first token on each", "help='''The columns that define each dataset. The first column is for the first", "''' Calculate the probability density function (PDF) for a standard normal distribution. s", "is specified, is used for both datasets. ''') opts = parser.parse_args() if opts.cols[0]", "top <= maxtop assert bot >= 0 return z # ================================================================ # #", "ds-50-108-112.txt $ {0} ds-50-110-112.txt ds-50-108-112.txt With 95.0% confidence, dataset-2 is smaller than dataset-1", "fact that posix time format (-p) outputs the time data on 3 separate", "# ================================================================ # # Message utility functions. # # ================================================================ def _msg(prefix, frame,", "= opts.FILES[0] bf = opts.FILES[1] if len(opts.FILES) == 2 else af ac =", "width = (float(x2) - float(x1)) / float(intervals) x = float(x1) py = float(fct(x,", "if opts.cols[1] < 1: parser.error('column 1 must be greater then 0') if len(opts.FILES)", "The height of each rectangle is the pdf function value for x at", "t, maxv, minv, intervals, v, pdf_t, dof) x = (1. - cl) /", "2 for xb in b]) / float(nb - 1.) 
infov(opts, 'variance a: {:.3f}'.format(vara))", "exist in two separate files because, by default, the tool looks at the", "r''' Compare two datasets to determine whether there is a significant difference between", "50 110 112 > ds-50-110-112.txt $ ./gends.py 50 108 112 > ds-50-108-112.txt $", "Performance Analysis\", <NAME>iley and Sons, New York. import argparse import datetime import inspect", "import math import os import sys #VERSION='0.1' # Initial load. VERSION='0.2' # Made", "= math.sqrt(sa2qna + sb2qnb) infov(opts, 'stddev of the mean diff: {:.3f}'.format(sdmd)) # effective", "dofr > opts.snd_threshold: # use standard normal distribution (SND) infov(opts, 'use standard normal", "is faster than v1 at a 95% confidence level. $ ./cmpds.py -c 0.95", "of the program (i.e. the data values are created by tools like /usr/bin/time)", "data by simply grepping out the data like this: $ grep -w ^real", "page 214 of Numerical Recipes in C. c = [76.18009172947146, -86.50532032941677, 24.01409824083091, -1.231739572450155,", "ofp=sys.stdout): ''' Base for printing messages. ''' lineno = inspect.stack()[frame][2] now = datetime.datetime.now()", "= ((y - py) * width) / 2.0 # adjustment based on height", "infov(opts, 'stddev of the mean diff: {:.3f}'.format(sdmd)) # effective degrees of freedom dof_num", "dataset is in column 2 and the second dataset is in column 3", "b: {:.3f}'.format(varb)) # standard deviations stddeva = math.sqrt(vara) stddevb = math.sqrt(varb) infov(opts, 'stddev", "s * math.sqrt(2 * math.pi) exp = math.e ** ( -dx2 / xden", "z = 0.0 # allow the user to play with the parameters t", "getopts() af = opts.FILES[0] bf = opts.FILES[1] if len(opts.FILES) == 2 else af", "mt={}, mv={}, i={}, top={}, bot={}, mid={}, z={}, q={}'.format( probability, cp, tolerance, maxtop, minval,", "= -x3 + x6 # ln(gamma(x)) g = math.exp(x7) return g def pdf_t(x,", "define each dataset. The first column is for the first dataset. The second", "the curve. The greater the number of intervals the better the estimate is", "else af ac = opts.cols[0] bc = opts.cols[1] infov(opts, 'dataset-1 file: {}'.format(af)) infov(opts,", "no significant difference between the datasets.'.format(clp)) # ================================================================ # # Options # #", "# Example 4: Dataset-2 is slightly smaller (has faster runtime) with 99% confidence.", "opts.verbose > 1: info('skipping line {} in {}: not a number: {}'.format(ln, fn,", "Get the z value that matches the specified percentage. ''' # Binary search", "3: err('too few data points at column {}, found {}, need at least", "floating point number are ignored. Here is what the run looks like: $", "the data values are created by tools like /usr/bin/time) so they can be", "= parser.parse_args() if opts.cols[0] < 1: parser.error('column 1 must be greater then 0')", "of the mean diff: {:.3f}'.format(sdmd)) # effective degrees of freedom dof_num = (sa2qna", "the height of the curve at x. It is exactly the same as", "confidence level using the t-test methodology for unpaired observations. Please note that this", "# standard deviations stddeva = math.sqrt(vara) stddevb = math.sqrt(varb) infov(opts, 'stddev a: {:.3f}'.format(stddeva))", "{0} ds-50-110-112.txt ds-50-108-112.txt With 95.0% confidence, dataset-2 is smaller than dataset-1 by about", "0.0 diff = tolerance * 2 # start the loop while diff >", "is indeed slightly faster. ''' # License: MIT Open Source # Copyright (c)", "Base for printing messages. 
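# Note: vara and varb above are sample variances: the sums of squared
# deviations from each mean are divided by (n - 1), i.e. Bessel's correction,
# so each dataset gets an unbiased variance estimate computed independently.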
''' lineno = inspect.stack()[frame][2] now = datetime.datetime.now() ofp.write('{!s:<26} {}", "2 else af ac = opts.cols[0] bc = opts.cols[1] infov(opts, 'dataset-1 file: {}'.format(af))", "file: {}'.format(fn)) if len(ds) < 3: err('too few data points at column {},", "is too small {}'.format(ln, fn, token)) continue ds.append(f) except ValueError: if opts.verbose >", "can use the exact value of (x-1)!. gamma(1/2) = 1.77245385091 gamma(3/2) = 0.886226925453", "point number. When the data is not in a single column in a", "looks at the first token on each line and collects it if the", "nb = float(len(b)) infov(opts, 'na: {}'.format(na)) infov(opts, 'nb: {}'.format(nb)) # means ma =", "True if opts.verbose > 1 else False if dofr > opts.snd_threshold: # use", "119.134 118.049 For this example we assume that the data is stored in", "use SND for {} elements'.format(opts.snd_threshold)) return opts # ================================================================ # # Read file", "# # Main # # ================================================================ def main(): opts = getopts() af =", "bound: {}'.format(lb)) infov(opts, 'internal upper bound: {}'.format(ub)) infov(opts, 'internal intervals: {}'.format(intervals)) infov(opts, 'internal", "(1. - cl) / 2. q = cl + x infov(opts, '{:.3f}-quantile of", "if len(ds) < 3: err('too few data points at column {}, found {},", "the foobar program to see if the second version is faster than the", "3 in file: {}'.format(col, len(ds), fn)) return ds # ================================================================ # # Main", "b, opts): ''' Analyze unpaired observations to determine whether they are significantly different.", "token)) continue ds.append(f) except ValueError: if opts.verbose > 1: info('skipping line {} in", "-k 2 3 data.txt With 95.0% confidence, dataset-2 is smaller than dataset-1 by", "is ignored. The default is column 1 for both datasets. ''') parser.add_argument('-s', '--snd-threshold',", "runs have 50 samples. $ {0} ds-50-110-112.txt ds-50-108-112.txt With 99.0% confidence, dataset-2 is", "the second dataset. If the value in the column is not a floating", "{}'.format(ln, fn, token)) continue ds.append(f) except ValueError: if opts.verbose > 1: info('skipping line", "dof): ''' Calculate the probability density function (PDF) at x for a student-t", "per = 100. * abs(md) / ma infov(opts, 'percentage: {}'.format(per)) if club <", "$ /usr/bin/time -p sleep 0.3 real 0.30 user 0.00 sys 0.00 At this", "a]) / float(na - 1.) varb = sum([(xb - mb) ** 2 for", "observations. Please note that this is not, strictly, a t-test because it switches", "cllb, club)) crosses_zero = cllb < 0 < club significant = not crosses_zero", "= 'DESCRIPTION:{0}'.format('\\n '.join(__doc__.split('\\n'))) epilog = r''' EXAMPLES: # Example 1: help $ {0}", "info('skipping line {} in {}: not a number: {}'.format(ln, fn, token)) continue except", "make sense of it all. We want to compare two versions of the", "in range(intervals): y = float(fct(x, *args, **kwargs)) rectangle_area = width * y #", "sum([(xa - ma) ** 2 for xa in a]) / float(na - 1.)", "os.path.basename(sys.argv[0]) name = os.path.splitext(base)[0] usage = '\\n {0} [OPTIONS] <DATASET-1> [<DATASET-2>]'.format(base) desc =", "lookup and values that were discarded during file reads. ''') parser.add_argument('-V', '--version', action='version',", "we assume that the data is stored in a single file but normally", "100. 
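# The log line written here is: timestamp, severity prefix, the caller's
# source line number (taken from inspect.stack()[frame][2]), and the message.
# The 'frame' argument lets the info/warn/err wrappers report their caller's
# line rather than their own.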
* abs(md) / ma infov(opts, 'percentage: {}'.format(per)) if club < 0: print('With", "default is column 1 for both datasets. ''') parser.add_argument('-s', '--snd-threshold', type=int, default=32, metavar=('UINT'),", "of the \":\" reliably. def gettext(s): lookup = { 'usage: ': 'USAGE:', 'positional", "infov(opts, 'dataset-2 col: {}'.format(bc)) a = read_file(opts, af, ac) b = read_file(opts, bf,", "the user to play with the parameters t = opts.internal[0] lb = opts.internal[1]", "# ================================================================ # # Options # # ================================================================ def getopts(): ''' Get the", "infov(opts, 'reject the null hypothesis: {}'.format(significant)) # Report the result. clp = cl", "number. When the data is not in a single column in a file,", "ma) ** 2 for xa in a]) / float(na - 1.) varb =", "os import sys #VERSION='0.1' # Initial load. VERSION='0.2' # Made the std dev", "- 1.) infov(opts, 'variance a: {:.3f}'.format(vara)) infov(opts, 'variance b: {:.3f}'.format(varb)) # standard deviations", "parser.error(msg) return GetConfLevel # Trick to capitalize the built-in headers. # Unfortunately I", "to how the interval is sliced). The height of each rectangle is the", "sliced). The height of each rectangle is the pdf function value for x", "user 0.00 sys 0.00 At this point we have the unpaired observations from", "python r''' Compare two datasets to determine whether there is a significant difference", "range (0..1)'.format(self.dest) parser.error(msg) return GetConfLevel # Trick to capitalize the built-in headers. #", "the same file. $ cat data.txt # v1.1 v1.2 # ======= ======= 1", "that you want to use to determine significance. Typical confidence levels 0.90 (90%),", "they can be used in a black box testing environment. Each dataset contains", "# adjustment based on height change total_area += rectangle_area + triangle_area # trapezoid", "b: a * b, [float(i) for i in range(1, int(x))]) # Lanczos approximation,", "+ ub + 0.5, 0) minv = -maxv infov(opts, 'internal threshold: {:.1f}'.format(t)) infov(opts,", "the unpaired observations from both runs in two different files so we can", "* dx xden = 2 * (s ** 2) den = s *", "in one file Here is an example to make sense of it all.", ") y = exp / den return y def pdf_snd(x): ''' Calculate the", "sa2qna**2 dof_denb = (1. / (nb + 1.)) * sb2qnb**2 dof = (dof_num", "specified') if opts.snd_threshold < 30: parser.error('it does not make sense to use SND", "and lines where the token is not a floating point number are ignored.", "That tells us that v2 is indeed slightly faster. ''' # License: MIT", "den = math.sqrt(2 * math.pi) exp = math.e ** - (dx2 / 2)", "$ {0} -h # Example 2: No significant difference with 95% confidence. #", "of effective degrees of freedom. No table look ups are necessary. The methodology", "in C. c = [76.18009172947146, -86.50532032941677, 24.01409824083091, -1.231739572450155, 0.1208650973866179e-2, -0.5395239384953e-5, ] c0 =", "# ================================================================ def read_file(opts, fn, col): ''' Read column data from the file.", "a floating point number. When the data is not in a single column", "automatically determine the associated z-value based on the confidence level and the number", "= mid elif probability > cp: # It is to the left. 
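# The percentage reported here is the absolute mean difference expressed
# relative to the mean of dataset-1. Which message is printed depends on the
# confidence interval for (mean of dataset-1 - mean of dataset-2): an interval
# entirely below zero means dataset-2 is larger, one entirely above zero means
# dataset-2 is smaller, and one that crosses zero is reported as no
# significant difference.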
bot", "+ sb2qnb) infov(opts, 'stddev of the mean diff: {:.3f}'.format(sdmd)) # effective degrees of", "by about {:,.1f}%.'.format(clp, per)) else: print('With {:.1f}% confidence, there is no significant difference", "integer values: (x-1)!. return reduce(lambda a, b: a * b, [float(i) for i", "associated z-value based on the confidence level and the number of effective degrees", "# === ======= ======= 1 119.041 117.038 2 119.670 119.733 3 120.675 118.346", "''' # Make sure that the confidence level is in the proper range.", "2: No significant difference with 95% confidence. # The dataset is used. $", "-p blackbox-v2 >> /tmp/v2.out ; done We can now capture the real run", "is for the second dataset. If the value in the column is not", "specified, is used for both datasets. ''') opts = parser.parse_args() if opts.cols[0] <", "cp={}, t={:f}, mt={}, mv={}, i={}, top={}, bot={}, mid={}, z={}, q={}'.format( probability, cp, tolerance,", "10 119.134 118.049 For this example we assume that the data is stored", "then 0') if len(opts.FILES) > 2: parser.error('only 1 or 2 files may be", "diff = abs(cp - probability) if v: info('p={}, cp={}, t={:f}, mt={}, mv={}, i={},", "Write an info message to stdout. ''' if opts.verbose > 0: _msg('INFO', f+1,", "= exp / den return y def area_under_curve(x1, x2, intervals, fct, *args, **kwargs):", "club significant = not crosses_zero infov(opts, 'crosses zero: {}'.format(crosses_zero)) infov(opts, 'reject the null", "= argparse.RawTextHelpFormatter parser = argparse.ArgumentParser(formatter_class=afc, description=desc[:-2], usage=usage, epilog=epilog) parser.add_argument('-c', '--conf', type=float, default=0.95, action=get_conf_level(),", "would be running a program called blackbox-v1 50 times and collecting the timing", "the values that make up the computation. Specify -v -v to internal details", "in columns with one entry per line. Non-numeric data is ignored which allows", "dataset. The first column is for the first dataset. The second column is", "= 1.77245385091 gamma(3/2) = 0.886226925453 gamma(5/2) = 1.32934038818 gamma(7/2) = 3.32335097045 gamma(4) =", "we can use the exact value of (x-1)!. gamma(1/2) = 1.77245385091 gamma(3/2) =", "time data by simply grepping out the data like this: $ grep -w", "dof_dena = (1. / (na + 1.)) * sa2qna**2 dof_denb = (1. /", "the level of verbosity. Specify -v to see the values that make up", "if the second version is faster than the first for the same inputs.", "os.path.splitext(base)[0] usage = '\\n {0} [OPTIONS] <DATASET-1> [<DATASET-2>]'.format(base) desc = 'DESCRIPTION:{0}'.format('\\n '.join(__doc__.split('\\n'))) epilog", "(i.e. the data values are created by tools like /usr/bin/time) so they can", "are 1.1 and 1.2. The program takes about 2 minutes to run (120", "case, the first dataset is in column 2 and the second dataset is", "float(len(a)) nb = float(len(b)) infov(opts, 'na: {}'.format(na)) infov(opts, 'nb: {}'.format(nb)) # means ma", "x2 x4 = c0 x5 = float(x) for i in range(6): x5 +=", "$ for((i=1;i<=50;i++)) ; do printf '\\nExp %03d\\n' $i ; /usr/bin/time -p blackbox-v2 >>", "message and exit.\\n ', } return lookup.get(s, s) argparse._ = gettext # to", "(0 for a standard normal distribution) This is the height of the curve", "msg, ofp=sys.stdout): ''' Base for printing messages. ''' lineno = inspect.stack()[frame][2] now =", "verbosity. Specify -v to see the values that make up the computation. 
Specify", "./cmpds.py -c 0.95 /tmp/v1.ds /tmp/v2.ds With 95.0% confidence, dataset-2 is smaller than dataset-1", "pdf_t, dof) x = (1. - cl) / 2. q = cl +", "self.dest, values) else: msg = 'argument \"{}\" out of range (0..1)'.format(self.dest) parser.error(msg) return", "= float(maxtop) / 2.0 top = maxtop bot = 0.0 diff = tolerance", "{:.1f}% confidence, dataset-2 is larger than dataset-1 by about {:,.1f}%.'.format(clp, per)) else: print('With", "by about 0.8%. # Example 4: Dataset-2 is slightly smaller (has faster runtime)", "x2 > x1 # just a sanity check assert intervals > 1 #", "better the estimate is at the cost of performance. ''' assert x2 >", "afc = argparse.RawTextHelpFormatter parser = argparse.ArgumentParser(formatter_class=afc, description=desc[:-2], usage=usage, epilog=epilog) parser.add_argument('-c', '--conf', type=float, default=0.95,", "are necessary. EXAMPLE 1 - two datasets in one file Here is an", "more efficient. ''' dx2 = float(x) ** 2 den = math.sqrt(2 * math.pi)", "maxtop, minval, iterations, top, bot, mid, z, q)) if probability < cp: #", "the data is stored in a single file but normally it is easier", "single file but normally it is easier to have it exist in two", "math.pi) exp = math.e ** - (dx2 / 2) y = exp /", "pdf_nd(x, 1, 0) but is somewhat more efficient. ''' dx2 = float(x) **", "number of effective degrees of freedom (DOF) is larger than 32. It is", "values, option_string=None): if 0. < values < 1.0: setattr(args, self.dest, values) else: msg", "'na: {}'.format(na)) infov(opts, 'nb: {}'.format(nb)) # means ma = sum(a) / na mb", "data.txt With 95.0% confidence, dataset-2 is larger than dataset-1 by about 1.1%. EXAMPLE", "** 2)) / float(dof)) x4 = float((dof + 1)) / 2.0 x5 =", "*args, **kwargs)) rectangle_area = width * y # area of rectangle at x", "tolerance * 2 # start the loop while diff > tolerance: mid =", "this point we have the unpaired observations from both runs in two different", "t, maxv, minv, intervals, v, pdf_snd) else: infov(opts, 'use t-{} distribution'.format(dofr)) z =", "blackbox-v1 >> /tmp/v1.out ; done $ for((i=1;i<=50;i++)) ; do printf '\\nExp %03d\\n' $i", "function. Uses the Lanczos approximation and natural logarithms. For integer values of x", "opts.cols[0] bc = opts.cols[1] infov(opts, 'dataset-1 file: {}'.format(af)) infov(opts, 'dataset-2 file: {}'.format(bf)) infov(opts,", "ignored. The default is column 1 for both datasets. ''') parser.add_argument('-s', '--snd-threshold', type=int,", "s = standard deviation (1 for a standard normal distribution) u = mean", "result. clp = cl * 100. if significant: per = 100. * abs(md)", "is described in detail here: https://github.com/jlinoff/ztables. To determine significance, you specify the confidence", "len(ds) < 3: err('too few data points at column {}, found {}, need", "lb = opts.internal[1] ub = opts.internal[2] intervals = int(opts.internal[3]) maxv = 2 *", "example to make sense of it all. We want to compare two versions", "like /usr/bin/time) so they can be used in a black box testing environment.", "level is in the proper range. def get_conf_level(): class GetConfLevel(argparse.Action): def __call__(self, parser,", "necessary. The methodology used to calculate the z-value is described in detail here:", "= width * y # area of rectangle at x with height y", "start the loop while diff > tolerance: mid = bot + ((top -", "2 minutes to run (120 seconds) and we want to determine whether v1.2", "col): ''' Read column data from the file. 
''' ds = [] try:", "in a file, you must explicitly specify the which column to collect. In", "(1 for a standard normal distribution) u = mean (0 for a standard", "# # Message utility functions. # # ================================================================ def _msg(prefix, frame, msg, ofp=sys.stdout):", "msg) def err(msg, f=1): ''' Write an error message to stderr and exit.", "numbers to be compared. The numbers must be greater than 0. That is", "/usr/bin/time -p sleep 0.3 real 0.30 user 0.00 sys 0.00 At this point", "message to stdout. ''' if opts.verbose > 0: _msg('INFO', f+1, msg) def warn(msg,", "= 100. * abs(md) / ma infov(opts, 'percentage: {}'.format(per)) if club < 0:", "a black box testing environment. Each dataset contains a series of numbers to", "0.0 width = (float(x2) - float(x1)) / float(intervals) x = float(x1) py =", "ds.append(f) except ValueError: if opts.verbose > 1: info('skipping line {} in {}: not", "def binary_search_for_z(probability, tolerance, maxtop, minval, iterations, v, fct, *args): ''' Get the z", "the left. bot = mid else: break # Sanity checks. assert top <=", "value in the column is not a floating point number it is ignored.", "does not make sense to use SND for {} elements'.format(opts.snd_threshold)) return opts #", "ValueError: if opts.verbose > 1: info('skipping line {} in {}: not a number:", "If only one file is specified, is used for both datasets. ''') opts", "'LOWER', 'UPPER', 'INTERVALS'), help='''Factors used for internal computations. You should never need to", "'effective DOF: {:.2f}'.format(dof)) dofr = int('{:.0f}'.format(dof)) infov(opts, 'effective DOF (rounded): {}'.format(dofr)) # confidence", "-c 0.95 /tmp/v1.ds /tmp/v2.ds With 95.0% confidence, dataset-2 is smaller than dataset-1 by", "value of (x-1)!. gamma(1/2) = 1.77245385091 gamma(3/2) = 0.886226925453 gamma(5/2) = 1.32934038818 gamma(7/2)", "'a: {:>3} {}'.format(len(a), a)) infov(opts, 'b: {:>3} {}'.format(len(b), b)) infov(opts, 'confidence level: {:.1f}%'.format(100.*cl))", "{:.2f}'.format(dof)) dofr = int('{:.0f}'.format(dof)) infov(opts, 'effective DOF (rounded): {}'.format(dofr)) # confidence interval for", "degrees of freedom: {:.2f}'.format(q, dofr, z)) cllb = md - z * sdmd", "are ignored. Here is what the run looks like: $ ./cmpds.py -c 0.95", "can see the ignored data in verbose mode. If only one file is", "infov(opts, 'dataset-1 col: {}'.format(ac)) infov(opts, 'dataset-2 col: {}'.format(bc)) a = read_file(opts, af, ac)", "dataset-2 is smaller than dataset-1 by about 0.8%. # Example 4: Dataset-2 is", "With 95.0% confidence, dataset-2 is smaller than dataset-1 by about 0.8%. # Example", "50 entries in each dataset. You must specify the confidence level that you", "is how you might do it: $ rm -f /tmp/blackbox-v1.out /tmp/blackbox-v2.out $ for((i=1;i<=50;i++))", "be used in a black box testing environment. Each dataset contains a series", "at x with height y triangle_area = ((y - py) * width) /", "but is somewhat more efficient. 
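# Filtering rules applied while reading: values smaller than 0.0001 are
# skipped to avoid later divide-by-zero problems, non-numeric tokens are
# skipped (and reported when -v -v is given), and fewer than 3 usable values
# in the requested column is treated as a fatal error.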
''' dx2 = float(x) ** 2 den =", "unpaired observations from both runs in two different files so we can use", "this help message and exit': 'Show this help message and exit.\\n ', }", "distribution'.format(dofr)) z = binary_search_for_z(cl, t, maxv, minv, intervals, v, pdf_t, dof) x =", "'''.format(base) afc = argparse.RawTextHelpFormatter parser = argparse.ArgumentParser(formatter_class=afc, description=desc[:-2], usage=usage, epilog=epilog) parser.add_argument('-c', '--conf', type=float,", "frame, msg, ofp=sys.stdout): ''' Base for printing messages. ''' lineno = inspect.stack()[frame][2] now", "main(): opts = getopts() af = opts.FILES[0] bf = opts.FILES[1] if len(opts.FILES) ==", "it if the token is a floating point number. When the data is", "datasets differ. Typical confidence levels 0.90 (90%), 0.95 (95%) and 0.99 (99%). The", "versions are 1.1 and 1.2. The program takes about 2 minutes to run", "confidence level. If you reverse the columns, you will get the opposite result:", "the same as pdf_nd(x, 1, 0) but is somewhat more efficient. ''' dx2", "1.1%. EXAMPLE 2 - datasets in separate files A more realistic example would", "/ float(na - 1.) varb = sum([(xb - mb) ** 2 for xb", "how you might do it: $ rm -f /tmp/blackbox-v1.out /tmp/blackbox-v2.out $ for((i=1;i<=50;i++)) ;", "Compare two datasets to determine whether there is a significant difference between them", "# Unfortunately I can't get rid of the \":\" reliably. def gettext(s): lookup", "1.77245385091 gamma(3/2) = 0.886226925453 gamma(5/2) = 1.32934038818 gamma(7/2) = 3.32335097045 gamma(4) = 6.0", "standard normal distribution) This is the height of the curve at x. It", "we are treating the samples as unpaired observations (t-test) but the smallest one", "The default is %(default)s. ''') parser.add_argument('--internal', type=float, nargs=4, default=[0.00001, -3.4, 3.4, 10000], metavar=('TOLERANCE',", "is sliced). The height of each rectangle is the pdf function value for", "type=float, nargs=4, default=[0.00001, -3.4, 3.4, 10000], metavar=('TOLERANCE', 'LOWER', 'UPPER', 'INTERVALS'), help='''Factors used for", "gamma(1/2) = 1.77245385091 gamma(3/2) = 0.886226925453 gamma(5/2) = 1.32934038818 gamma(7/2) = 3.32335097045 gamma(4)", "This is the height of the curve at x. It is exactly the", "[float(i) for i in range(1, int(x))]) # Lanczos approximation, page 214 of Numerical", "0.5, 0) minv = -maxv infov(opts, 'internal threshold: {:.1f}'.format(t)) infov(opts, 'internal lower bound:", "'POSITIONAL ARGUMENTS', 'optional arguments': 'OPTIONAL ARGUMENTS', 'show this help message and exit': 'Show", "the token is a floating point number. When the data is not in", "'.join(__doc__.split('\\n'))) epilog = r''' EXAMPLES: # Example 1: help $ {0} -h #", "grepping out the data like this: $ grep -w ^real /tmp/v1.out > /tmp/v1.ds", "minutes to run (120 seconds) and we want to determine whether v1.2 is", "you will get the opposite result: $ ./cmpds.py -c 0.95 -k 3 2", "file: {}'.format(af)) infov(opts, 'dataset-2 file: {}'.format(bf)) infov(opts, 'dataset-1 col: {}'.format(ac)) infov(opts, 'dataset-2 col:", "the data like this: $ grep -w ^real /tmp/v1.out > /tmp/v1.ds $ grep", "b]) / float(nb - 1.) infov(opts, 'variance a: {:.3f}'.format(vara)) infov(opts, 'variance b: {:.3f}'.format(varb))", "Both runs have 50 samples. $ {0} ds-50-110-112.txt ds-50-108-112.txt With 99.0% confidence, dataset-2", "numbers must be greater than 0. 
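# This evaluates the standard normal density
#   phi(x) = exp(-x**2 / 2) / sqrt(2 * pi)
# i.e. the general normal PDF specialized to mean 0 and standard deviation 1.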
That is a reasonable constraint given that", "= line.strip() tokens = line.split() if len(tokens) < col: continue token = tokens[col-1]", "freedom dof_num = (sa2qna + sb2qnb)**2 dof_dena = (1. / (na + 1.))", "is smaller than dataset-1 by about 0.8%. # Example 4: Dataset-2 is slightly", "Sons, New York. import argparse import datetime import inspect import math import os", "v: info('p={}, cp={}, t={:f}, mt={}, mv={}, i={}, top={}, bot={}, mid={}, z={}, q={}'.format( probability,", "''') opts = parser.parse_args() if opts.cols[0] < 1: parser.error('column 1 must be greater", "at the first token on each line and collects it if the token", "a specific confidence level using the t-test methodology for unpaired observations. Please note", "curve at x. ''' assert dof > 2 x1 = gamma((float(dof) + 1.0)", "'reject the null hypothesis: {}'.format(significant)) # Report the result. clp = cl *", "Recipes in C. c = [76.18009172947146, -86.50532032941677, 24.01409824083091, -1.231739572450155, 0.1208650973866179e-2, -0.5395239384953e-5, ] c0", "value that matches the specified percentage. ''' # Binary search to find the", "50 samples. # The data is specifically generated to show the difference. $", "> ds-50-108-112.txt $ {0} ds-50-110-112.txt ds-50-108-112.txt With 95.0% confidence, dataset-2 is smaller than", "indeed slightly faster. ''' # License: MIT Open Source # Copyright (c) 2016", "2.0 infov(opts, 'effective DOF: {:.2f}'.format(dof)) dofr = int('{:.0f}'.format(dof)) infov(opts, 'effective DOF (rounded): {}'.format(dofr))", "an estimate of the area under the curve. The greater the number of", "c = [76.18009172947146, -86.50532032941677, 24.01409824083091, -1.231739572450155, 0.1208650973866179e-2, -0.5395239384953e-5, ] c0 = 1.000000000190015 c1", "of the areas provides an estimate of the area under the curve. The", "99% confidence. # Both runs have 50 samples. $ {0} ds-50-110-112.txt ds-50-108-112.txt With", "errors if opts.verbose > 1: info('skipping line {} in {}: number is too", "for line in ifp.readlines(): ln += 1 line = line.strip() tokens = line.split()", "2. q = cl + x infov(opts, '{:.3f}-quantile of t-variate with {} degrees", "example we assume that the data is stored in a single file but", "* 100. if significant: per = 100. * abs(md) / ma infov(opts, 'percentage:", "use -k to specify the columns because -c is already reserved for specifying", "and exit. ''' _msg('ERROR', f+1, msg, sys.stderr) sys.exit(1) # ================================================================ # # Statistical", "opts.conf infov(opts, 'a: {:>3} {}'.format(len(a), a)) infov(opts, 'b: {:>3} {}'.format(len(b), b)) infov(opts, 'confidence", "headers. # Unfortunately I can't get rid of the \":\" reliably. def gettext(s):", "estimate of the area under the curve. The greater the number of intervals", "that make up the computation. Specify -v -v to internal details about the", "If the value in the column is not a floating point number it", "per)) else: print('With {:.1f}% confidence, there is no significant difference between the datasets.'.format(clp))", "sys 0.00 At this point we have the unpaired observations from both runs", "2016 by <NAME> #REFERENCES: # <NAME> (1991). \"The Art Computer Systems Performance Analysis\",", "Calculate the probability density function (PDF) for a normal distribution. s = standard", "lookup.get(s, s) argparse._ = gettext # to capitalize help headers base = os.path.basename(sys.argv[0])", "closest value. 
z = 0.0 adjustment = float(maxtop) / 2.0 top = maxtop", "runs in two different files so we can use cmpds.py to figure out", "upper bound: {}'.format(ub)) infov(opts, 'internal intervals: {}'.format(intervals)) infov(opts, 'internal minval: {}'.format(minv)) infov(opts, 'internal", "reduce(lambda a, b: a * b, [float(i) for i in range(1, int(x))]) #", "is ignored which allows you to add comments and blank spaces. You can", "+= 1 line = line.strip() tokens = line.split() if len(tokens) < col: continue", "calculate the z-value is described in detail here: https://github.com/jlinoff/ztables. To determine significance, you", "'\\n {0} [OPTIONS] <DATASET-1> [<DATASET-2>]'.format(base) desc = 'DESCRIPTION:{0}'.format('\\n '.join(__doc__.split('\\n'))) epilog = r''' EXAMPLES:", "number is too small {}'.format(ln, fn, token)) continue ds.append(f) except ValueError: if opts.verbose", "'DESCRIPTION:{0}'.format('\\n '.join(__doc__.split('\\n'))) epilog = r''' EXAMPLES: # Example 1: help $ {0} -h", "you might do it: $ rm -f /tmp/blackbox-v1.out /tmp/blackbox-v2.out $ for((i=1;i<=50;i++)) ; do", "# area of rectangle at x with height y triangle_area = ((y -", "cost of performance. ''' assert x2 > x1 # just a sanity check", "time format (-p) outputs the time data on 3 separate lines as shown", "/ float(nb - 1.) infov(opts, 'variance a: {:.3f}'.format(vara)) infov(opts, 'variance b: {:.3f}'.format(varb)) #", "dataset-2 are in the same file. $ cat data.txt # v1.1 v1.2 #", "you to add comments and blank spaces. You can see the ignored data", "0) but is somewhat more efficient. ''' dx2 = float(x) ** 2 den", "/ den return y def area_under_curve(x1, x2, intervals, fct, *args, **kwargs): ''' Calculate", "'r') as ifp: ln = 0 for line in ifp.readlines(): ln += 1", "to find the closest value. z = 0.0 adjustment = float(maxtop) / 2.0", "try: with open(fn, 'r') as ifp: ln = 0 for line in ifp.readlines():", "/tmp/v2.out > /tmp/v2.ds The above command takes advantage of the fact that posix", "ub = opts.internal[2] intervals = int(opts.internal[3]) maxv = 2 * round(abs(lb) + ub", "sense to use SND for {} elements'.format(opts.snd_threshold)) return opts # ================================================================ # #", "format (-p) outputs the time data on 3 separate lines as shown in", "top, bot, mid, z, q)) if probability < cp: # It is to", "slightly faster. ''' # License: MIT Open Source # Copyright (c) 2016 by", "(0..1)'.format(self.dest) parser.error(msg) return GetConfLevel # Trick to capitalize the built-in headers. # Unfortunately", "than 0. That is a reasonable constraint given that they typically represent something", "/ float(intervals) x = float(x1) py = float(fct(x, *args, **kwargs)) for i in", "x infov(opts, '{:.3f}-quantile of t-variate with {} degrees of freedom: {:.2f}'.format(q, dofr, z))", "computations. You should never need to change these. Defaults: %(default)s. ''') parser.add_argument('-k', '--cols',", "there is a significant difference between them for a specific confidence level using", "used for both datasets. ''') opts = parser.parse_args() if opts.cols[0] < 1: parser.error('column", "1.2. The program takes about 2 minutes to run (120 seconds) and we", "exceeds this threshold, the SND is used instead of a t-distribution. The default", "sb2qnb)**2 dof_dena = (1. 
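# Bisection: halve the search interval [bot, top], take the midpoint as the
# candidate z, integrate the PDF from minval up to that z, convert the area
# to a two-sided probability, and compare it with the requested probability;
# the loop stops once the difference is within the tolerance.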
/ (na + 1.)) * sa2qna**2 dof_denb = (1.", "/tmp/v1.ds $ grep -w ^real /tmp/v2.out > /tmp/v2.ds The above command takes advantage", "file: {}'.format(bf)) infov(opts, 'dataset-1 col: {}'.format(ac)) infov(opts, 'dataset-2 col: {}'.format(bc)) a = read_file(opts,", "x. ''' assert dof > 2 x1 = gamma((float(dof) + 1.0) / 2.0)", "'\\nExp %03d\\n' $i ; /usr/bin/time -p blackbox-v1 >> /tmp/v1.out ; done $ for((i=1;i<=50;i++))", "between two different versions of software. The datasets are completely independent of the", "< 0.0001: # avoid divide by 0 errors if opts.verbose > 1: info('skipping", "2.5066282746310005 x1 = float(x) + 5.5 x2 = (float(x) + 0.5) * math.log(x1)", "float(intervals) x = float(x1) py = float(fct(x, *args, **kwargs)) for i in range(intervals):", "math.log((c1 * x4) / float(x)) x7 = -x3 + x6 # ln(gamma(x)) g", "= s * math.sqrt(2 * math.pi) exp = math.e ** ( -dx2 /", "{}'.format(len(a), a)) infov(opts, 'b: {:>3} {}'.format(len(b), b)) infov(opts, 'confidence level: {:.1f}%'.format(100.*cl)) na =", "** 2 for xb in b]) / float(nb - 1.) infov(opts, 'variance a:", "degrees of freedom dof_num = (sa2qna + sb2qnb)**2 dof_dena = (1. / (na", "standard normal distribution) This is the height of the curve at x. '''", "elif probability > cp: # It is to the left. bot = mid", "2 * round(abs(lb) + ub + 0.5, 0) minv = -maxv infov(opts, 'internal", "smaller than dataset-1 by about 1.1%. '''.format(base) afc = argparse.RawTextHelpFormatter parser = argparse.ArgumentParser(formatter_class=afc,", "distribution (SND) when the number of effective degrees of freedom (DOF) is larger", "math.e ** - (dx2 / 2) y = exp / den return y", "1 119.041 117.038 2 119.670 119.733 3 120.675 118.346 4 118.628 117.261 5", "(v1.2) is slightly faster. Note that we use -k to specify the columns", "how the interval is sliced). The height of each rectangle is the pdf", "f=1): ''' Write an info message to stdout. ''' if opts.verbose > 0:", "else: msg = 'argument \"{}\" out of range (0..1)'.format(self.dest) parser.error(msg) return GetConfLevel #", "can use cmpds.py to figure out whether v2 is faster than v1 at", "of performance. ''' assert x2 > x1 # just a sanity check assert", "the interval is sliced). The height of each rectangle is the pdf function", "see the values that make up the computation. Specify -v -v to internal", "# # Statistical utility functions. # See https://github.com/jlinoff/ztables for background. # # ================================================================", "slightly smaller (has faster runtime) with 99% confidence. # Both runs have 50", "that 0 < c < 1. The default is %(default)s. ''') parser.add_argument('--internal', type=float,", "When the number of effective degrees of freedom (DOF) exceeds this threshold, the", "the first token on each line and collects it if the token is", "may be specified') if opts.snd_threshold < 30: parser.error('it does not make sense to", "triangle_area = ((y - py) * width) / 2.0 # adjustment based on", "y # area of rectangle at x with height y triangle_area = ((y", "infov(opts, 'internal upper bound: {}'.format(ub)) infov(opts, 'internal intervals: {}'.format(intervals)) infov(opts, 'internal minval: {}'.format(minv))", "typically represent something like elapsed time or memory used. The size of the", "- (dx2 / 2) y = exp / den return y def area_under_curve(x1,", "math.sqrt(dof * math.pi) * gamma((float(dof) / 2.0)) x3 = 1.0 + (float((x **", "It is to the right. 
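# Effective degrees of freedom for unpaired observations with unequal
# variances, as used in the methodology cited at the top of this script:
#   dof = (sa^2/na + sb^2/nb)^2
#         / ( (sa^2/na)^2/(na+1) + (sb^2/nb)^2/(nb+1) ) - 2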
top = mid elif probability > cp: #", "= not crosses_zero infov(opts, 'crosses zero: {}'.format(crosses_zero)) infov(opts, 'reject the null hypothesis: {}'.format(significant))", "0.8%. # Example 5: Dataset-1 and dataset-2 are in the same file. $", "* sdmd club = md + z * sdmd infov(opts, '{:.1f}% confidence interval", "dataset is used. $ ./gends.py 10 100 120 > ds-10-100-120.txt $ {0} ds-10-100-120.txt", "= sum([(xa - ma) ** 2 for xa in a]) / float(na -", "is larger than dataset-1 by about {:,.1f}%.'.format(clp, per)) else: print('With {:.1f}% confidence, dataset-2", "0: print('With {:.1f}% confidence, dataset-2 is larger than dataset-1 by about {:,.1f}%.'.format(clp, per))", "is smaller than dataset-1 by about 1.3%. That tells us that v2 is", "$ ./gends.py 10 100 120 > ds-10-100-120.txt $ {0} ds-10-100-120.txt ds-10-100-120.txt With 95.0%", "infov(opts, 'mean diff: {:.3f}'.format(md)) # standard deviation of the mean difference sa2qna =", "assert top <= maxtop assert bot >= 0 return z # ================================================================ #", "value. z = 0.0 adjustment = float(maxtop) / 2.0 top = maxtop bot", "parser.error('only 1 or 2 files may be specified') if opts.snd_threshold < 30: parser.error('it", "try: f = float(token) if f < 0.0001: # avoid divide by 0", "Specify -v -v to internal details about the z value lookup and values", "sense of it all. We want to compare two versions of the foobar", "y def pdf_snd(x): ''' Calculate the probability density function (PDF) for a standard", "switches over to the standard normal distribution (SND) when the number of effective", "represent something like elapsed time or memory used. The size of the datasets", "dataset-1 by about 1.1%. EXAMPLE 2 - datasets in separate files A more", "(SND) when the number of effective degrees of freedom (DOF) is larger than", "The tool will automatically determine the associated z-value based on the confidence level", "do printf '\\nExp %03d\\n' $i ; /usr/bin/time -p blackbox-v1 >> /tmp/v1.out ; done", "of the foobar program to see if the second version is faster than", "''' # License: MIT Open Source # Copyright (c) 2016 by <NAME> #REFERENCES:", "95.0% confidence, there is no significant difference between the datasets. # Example 3:", "0.0 adjustment = float(maxtop) / 2.0 top = maxtop bot = 0.0 diff", "3 of the same file. Blank lines and lines where the token is", "119.042 9 120.164 116.203 10 119.134 118.049 For this example we assume that", "during file reads. ''') parser.add_argument('-V', '--version', action='version', version='%(prog)s v{0}'.format(VERSION), help=\"\"\"Show program's version number", "want to determine whether v1.2 is faster. The table below shows sample data", "token is a floating point number. When the data is not in a", "curve. The greater the number of intervals the better the estimate is at", "to figure out whether v2 is faster than v1 at a 95% confidence", "area_under_curve(minval, z, iterations, fct, *args) cp = 1.0 - (2.0 * (1.0 -", "base = os.path.basename(sys.argv[0]) name = os.path.splitext(base)[0] usage = '\\n {0} [OPTIONS] <DATASET-1> [<DATASET-2>]'.format(base)", "= int(opts.internal[3]) maxv = 2 * round(abs(lb) + ub + 0.5, 0) minv", "* round(abs(lb) + ub + 0.5, 0) minv = -maxv infov(opts, 'internal threshold:", "* math.pi) exp = math.e ** ( -dx2 / xden ) y =", "of rectangle at x with height y triangle_area = ((y - py) *", "int(x)) == 0: # Optimization for integer values: (x-1)!. 
return reduce(lambda a, b:", "usage = '\\n {0} [OPTIONS] <DATASET-1> [<DATASET-2>]'.format(base) desc = 'DESCRIPTION:{0}'.format('\\n '.join(__doc__.split('\\n'))) epilog =", "= (dof_num / (dof_dena + dof_denb)) - 2.0 infov(opts, 'effective DOF: {:.2f}'.format(dof)) dofr", "in the column is not a floating point number it is ignored. The", "run (120 seconds) and we want to determine whether v1.2 is faster. The", "= math.log((c1 * x4) / float(x)) x7 = -x3 + x6 # ln(gamma(x))", "<= maxtop assert bot >= 0 return z # ================================================================ # # t-test", "with 95% confidence. # Both runs have 50 samples. # The data is", "and we want to determine whether v1.2 is faster. The table below shows", "file is specified, is used for both datasets. ''') opts = parser.parse_args() if", "{}: number is too small {}'.format(ln, fn, token)) continue ds.append(f) except ValueError: if", "'{:.1f}% confidence interval for difference: [{:3f} .. {:3f}]'.format(100.*cl, cllb, club)) crosses_zero = cllb", "= [76.18009172947146, -86.50532032941677, 24.01409824083091, -1.231739572450155, 0.1208650973866179e-2, -0.5395239384953e-5, ] c0 = 1.000000000190015 c1 =", "one file Here is an example to make sense of it all. We", "of x we can use the exact value of (x-1)!. gamma(1/2) = 1.77245385091", "> cp: # It is to the left. bot = mid else: break", "in {}: number is too small {}'.format(ln, fn, token)) continue ds.append(f) except ValueError:", "to stdout. ''' _msg('INFO', f+1, msg) def infov(opts, msg, f=1): ''' Write an", "not in a single column in a file, you must explicitly specify the", "} return lookup.get(s, s) argparse._ = gettext # to capitalize help headers base", "< 1: parser.error('column 1 must be greater then 0') if opts.cols[1] < 1:", "= getopts() af = opts.FILES[0] bf = opts.FILES[1] if len(opts.FILES) == 2 else", "column 1 for both datasets. ''') parser.add_argument('-s', '--snd-threshold', type=int, default=32, metavar=('UINT'), help='''The standard", "ignored data in verbose mode. If only one file is specified, is used", "difference z = 0.0 # allow the user to play with the parameters", "functions. # See https://github.com/jlinoff/ztables for background. # # ================================================================ def gamma(x): ''' Gamma", "x4 = float((dof + 1)) / 2.0 x5 = x3 ** -x4 y", "q)) diff = abs(cp - probability) if v: info('p={}, cp={}, t={:f}, mt={}, mv={},", "y triangle_area = ((y - py) * width) / 2.0 # adjustment based", "= tokens[col-1] try: f = float(token) if f < 0.0001: # avoid divide", "running a program called blackbox-v1 50 times and collecting the timing output to", "point we have the unpaired observations from both runs in two different files", "opts): ''' Analyze unpaired observations to determine whether they are significantly different. '''", "that the confidence level is in the proper range. def get_conf_level(): class GetConfLevel(argparse.Action):", "( -dx2 / xden ) y = exp / den return y def", "add comments and blank spaces. You can see the ignored data in verbose", "MIT Open Source # Copyright (c) 2016 by <NAME> #REFERENCES: # <NAME> (1991).", "deviation (1 for a standard normal distribution) u = mean (0 for a", "for x at the start of the interval. 
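# Note: reduce() is a builtin in Python 2 only; if this script is run under
# Python 3 it needs 'from functools import reduce' alongside the other
# imports. For positive integer x, math.factorial(int(x) - 1) would compute
# the same (x-1)! product.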
The accumulation of the areas", "with open(fn, 'r') as ifp: ln = 0 for line in ifp.readlines(): ln", "not make sense to use SND for {} elements'.format(opts.snd_threshold)) return opts # ================================================================", "interval for the mean difference z = 0.0 # allow the user to", "get_conf_level(): class GetConfLevel(argparse.Action): def __call__(self, parser, args, values, option_string=None): if 0. < values", "argparse._ = gettext # to capitalize help headers base = os.path.basename(sys.argv[0]) name =", "< values < 1.0: setattr(args, self.dest, values) else: msg = 'argument \"{}\" out", "be greater then 0') if len(opts.FILES) > 2: parser.error('only 1 or 2 files", "/ nb infov(opts, 'mean a: {:.3f}'.format(ma)) infov(opts, 'mean b: {:.3f}'.format(mb)) # variances vara", "= (1. - cl) / 2. q = cl + x infov(opts, '{:.3f}-quantile", "command line options using argparse. ''' # Make sure that the confidence level", "bot >= 0 return z # ================================================================ # # t-test implementation # #", "def get_conf_level(): class GetConfLevel(argparse.Action): def __call__(self, parser, args, values, option_string=None): if 0. <", "inspect.stack()[frame][2] now = datetime.datetime.now() ofp.write('{!s:<26} {} {:>5} - {}\\n'.format(now, prefix, lineno, msg)) def", "the tool looks at the first token on each line and collects it", "type=int, default=32, metavar=('UINT'), help='''The standard normal distribution (SND) threshold. When the number of", "logarithms. For integer values of x we can use the exact value of", "the which column to collect. In this case, the first dataset is in", "an error message to stderr and exit. ''' _msg('ERROR', f+1, msg, sys.stderr) sys.exit(1)", "(float(x) + 0.5) * math.log(x1) x3 = x1 - x2 x4 = c0", "= x3 ** -x4 y = (x1 * x5) / x2 return y", "floating point number. When the data is not in a single column in", "================================================================ def getopts(): ''' Get the command line options using argparse. ''' #", "second dataset. If the value in the column is not a floating point", "info message to stdout. ''' if opts.verbose > 0: _msg('INFO', f+1, msg) def", "for background. # # ================================================================ def gamma(x): ''' Gamma function. Uses the Lanczos", "0.95 -k 3 2 data.txt With 95.0% confidence, dataset-2 is larger than dataset-1", "i in range(6): x5 += 1.0 x4 += c[i] / x5 x6 =", "to determine significance. Typical confidence levels 0.90 (90%), 0.95 (95%) and 0.99 (99%).", "Message utility functions. # # ================================================================ def _msg(prefix, frame, msg, ofp=sys.stdout): ''' Base", "If you reverse the columns, you will get the opposite result: $ ./cmpds.py", "the start of the interval. The accumulation of the areas provides an estimate", "different because we are treating the samples as unpaired observations (t-test) but the", "assert intervals > 1 # another sanity check total_area = 0.0 width =", "5: Dataset-1 and dataset-2 are in the same file. $ cat data.txt #", "inputs. The versions are 1.1 and 1.2. The program takes about 2 minutes", "about 1.3%. That tells us that v2 is indeed slightly faster. ''' #", "infov(opts, msg, f=1): ''' Write an info message to stdout. 
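As an illustration only (a minimal standalone sketch, not this script's own
implementation; the names trapezoid_area and snd are made up for the example),
the same trapezoidal idea can be written as:

    import math

    def trapezoid_area(x1, x2, intervals, fct):
        # split [x1, x2] into equal-width slices and sum the trapezoids
        width = (x2 - x1) / float(intervals)
        total = 0.0
        x = x1
        prev = fct(x)
        for _ in range(intervals):
            x += width
            cur = fct(x)
            total += (prev + cur) * width / 2.0  # one trapezoid
            prev = cur
        return total

    def snd(t):
        # standard normal density
        return math.exp(-t * t / 2.0) / math.sqrt(2.0 * math.pi)

    # area under the standard normal curve between -1.96 and +1.96;
    # with enough intervals this comes out close to 0.95
    print(trapezoid_area(-1.96, 1.96, 10000, snd))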
''' if opts.verbose", "= argparse.ArgumentParser(formatter_class=afc, description=desc[:-2], usage=usage, epilog=epilog) parser.add_argument('-c', '--conf', type=float, default=0.95, action=get_conf_level(), metavar=('FLOAT'), help='''The confidence", "range(6): x5 += 1.0 x4 += c[i] / x5 x6 = math.log((c1 *", "z-value is described in detail here: https://github.com/jlinoff/ztables. To determine significance, you specify the", "than the first for the same inputs. The versions are 1.1 and 1.2.", "computation. Specify -v -v to internal details about the z value lookup and", "the difference. $ ./gends.py 50 110 112 > ds-50-110-112.txt $ ./gends.py 50 108", "datasets to determine whether there is a significant difference between them for a", "verbose mode. If only one file is specified, is used for both datasets.", "binary_search_for_z(probability, tolerance, maxtop, minval, iterations, v, fct, *args): ''' Get the z value", "exp = math.e ** ( -dx2 / xden ) y = exp /", "use cmpds.py to figure out whether v2 is faster than v1 at a", "def pdf_t(x, dof): ''' Calculate the probability density function (PDF) at x for", "a floating point number it is ignored. The default is column 1 for", "ac) b = read_file(opts, bf, bc) ttest(a, b, opts) if __name__ == '__main__':", "confidence level is in the proper range. def get_conf_level(): class GetConfLevel(argparse.Action): def __call__(self,", "determine whether the datasets differ. Typical confidence levels 0.90 (90%), 0.95 (95%) and", "# use standard normal distribution (SND) infov(opts, 'use standard normal distribution (SND)') z", "whose width is fixed (proportional to how the interval is sliced). The height", "< 0 < club significant = not crosses_zero infov(opts, 'crosses zero: {}'.format(crosses_zero)) infov(opts,", "diff: {:.3f}'.format(md)) # standard deviation of the mean difference sa2qna = stddeva**2 /", "- mb infov(opts, 'mean diff: {:.3f}'.format(md)) # standard deviation of the mean difference", "math.exp(x7) return g def pdf_t(x, dof): ''' Calculate the probability density function (PDF)", "''' Write an info message to stdout. ''' if opts.verbose > 0: _msg('INFO',", "arguments': 'OPTIONAL ARGUMENTS', 'show this help message and exit': 'Show this help message", "-p sleep 0.3 real 0.30 user 0.00 sys 0.00 At this point we", "height y triangle_area = ((y - py) * width) / 2.0 # adjustment", "'--cols', nargs=2, type=int, default=[1,1], metavar=('COL1', 'COL2'), help='''The columns that define each dataset. The", "math.sqrt(vara) stddevb = math.sqrt(varb) infov(opts, 'stddev a: {:.3f}'.format(stddeva)) infov(opts, 'stddev b: {:.3f}'.format(stddevb)) #", "it is easier to have it exist in two separate files because, by", "you reverse the columns, you will get the opposite result: $ ./cmpds.py -c", "point number are ignored. Here is what the run looks like: $ ./cmpds.py", "distribution) This is the height of the curve at x. ''' dx =", "a series of numbers to be compared. The numbers must be greater than", "+ dof_denb)) - 2.0 infov(opts, 'effective DOF: {:.2f}'.format(dof)) dofr = int('{:.0f}'.format(dof)) infov(opts, 'effective", "118.628 117.261 5 120.363 118.863 6 118.076 117.545 7 120.539 119.751 8 118.880", "5 120.363 118.863 6 118.076 117.545 7 120.539 119.751 8 118.880 119.042 9", "to see if the second version is faster than the first for the", "else: infov(opts, 'use t-{} distribution'.format(dofr)) z = binary_search_for_z(cl, t, maxv, minv, intervals, v,", "''' Write an error message to stderr and exit. 
''' _msg('ERROR', f+1, msg,", "out of range (0..1)'.format(self.dest) parser.error(msg) return GetConfLevel # Trick to capitalize the built-in", "if dofr > opts.snd_threshold: # use standard normal distribution (SND) infov(opts, 'use standard", "ln += 1 line = line.strip() tokens = line.split() if len(tokens) < col:", "GetConfLevel(argparse.Action): def __call__(self, parser, args, values, option_string=None): if 0. < values < 1.0:", "=== ======= ======= 1 119.041 117.038 2 119.670 119.733 3 120.675 118.346 4", "faster runtime) with 95% confidence. # Both runs have 50 samples. # The", "A more realistic example would be running a program called blackbox-v1 50 times", "probability > cp: # It is to the left. bot = mid else:", "degrees of freedom. No table look ups are necessary. EXAMPLE 1 - two", "effective degrees of freedom (DOF) is larger than 32. It is really useful", "distribution) This is the height of the curve at x. It is exactly", "and the number of effective degrees of freedom. No table look ups are", "on height change total_area += rectangle_area + triangle_area # trapezoid area x +=", "intervals > 1 # another sanity check total_area = 0.0 width = (float(x2)", "confidence, there is no significant difference between the datasets. # Example 3: Dataset-2", "a curve using trapezoidal approximation. It breaks the interval between x1 and x2", "opposite result: $ ./cmpds.py -c 0.95 -k 3 2 data.txt With 95.0% confidence,", "per)) else: print('With {:.1f}% confidence, dataset-2 is smaller than dataset-1 by about {:,.1f}%.'.format(clp,", "- float(x1)) / float(intervals) x = float(x1) py = float(fct(x, *args, **kwargs)) for", "= float(x) - float(u) dx2 = dx * dx xden = 2 *", "1.1 and 1.2. The program takes about 2 minutes to run (120 seconds)", "where the token is not a floating point number are ignored. Here is", "110 112 > ds-50-110-112.txt $ ./gends.py 50 108 112 > ds-50-108-112.txt $ {0}", "is not a floating point number it is ignored. The default is column", "fixed (proportional to how the interval is sliced). The height of each rectangle", "Computer Systems Performance Analysis\", <NAME>iley and Sons, New York. import argparse import datetime", "the first dataset is in column 2 and the second dataset is in", "= dx * dx xden = 2 * (s ** 2) den =", "infov(opts, 'mean a: {:.3f}'.format(ma)) infov(opts, 'mean b: {:.3f}'.format(mb)) # variances vara = sum([(xa", "Example 2: No significant difference with 95% confidence. # The dataset is used.", "loop while diff > tolerance: mid = bot + ((top - bot) /", "stdout. ''' _msg('INFO', f+1, msg) def infov(opts, msg, f=1): ''' Write an info", "between the datasets. # Example 3: Dataset-2 is slightly smaller (has faster runtime)", "q = cl + x infov(opts, '{:.3f}-quantile of t-variate with {} degrees of", "help $ {0} -h # Example 2: No significant difference with 95% confidence.", "grep -w ^real /tmp/v2.out > /tmp/v2.ds The above command takes advantage of the", "den return y def area_under_curve(x1, x2, intervals, fct, *args, **kwargs): ''' Calculate the", "all. We want to compare two versions of the foobar program to see", "- x2 x4 = c0 x5 = float(x) for i in range(6): x5", "it: $ rm -f /tmp/blackbox-v1.out /tmp/blackbox-v2.out $ for((i=1;i<=50;i++)) ; do printf '\\nExp %03d\\n'", "one entry per line. Non-numeric data is ignored which allows you to add", "grep -w ^real /tmp/v1.out > /tmp/v1.ds $ grep -w ^real /tmp/v2.out > /tmp/v2.ds", "token is not a floating point number are ignored. 
#!/usr/bin/env python
r'''
Compare two datasets to determine whether there is a significant difference
between them for a specific confidence level using the t-test methodology for
unpaired observations.

Please note that this is not, strictly, a t-test because it switches over to
the standard normal distribution (SND) when the number of effective degrees
of freedom (DOF) is larger than 32.

It is really useful for determining whether runtime or memory use has changed
between two different versions of software. The datasets are completely
independent of the program (i.e. the data values are created by tools like
/usr/bin/time) so they can be used in a black box testing environment.

Each dataset contains a series of numbers to be compared. The numbers must be
greater than 0. That is a reasonable constraint given that they typically
represent something like elapsed time or memory used.

The size of the datasets can be different because we are treating the samples
as unpaired observations (t-test), but the smallest one must have more than 2
entries. Typically you would like to have at least 50 entries in each dataset.

You must specify the confidence level that you want to use to determine
whether the datasets differ. Typical confidence levels are 0.90 (90%), 0.95
(95%) and 0.99 (99%). The tool automatically determines the associated
z-value from the confidence level and the number of effective degrees of
freedom, so no table look ups are necessary. The methodology used to
calculate the z-value is described in detail here:
https://github.com/jlinoff/ztables.
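If you happen to have SciPy installed you can cross-check the quantiles that
cmpds.py reports in verbose mode. SciPy is not a dependency of this tool and
is never imported by it; the snippet below is only an illustrative sanity
check of the two values you will see most often.

   >>> from scipy import stats
   >>> round(stats.norm.ppf(0.975), 2)    # two-sided 95% z for the SND
   1.96
   >>> round(stats.t.ppf(0.975, 20), 3)   # same quantile, t-distribution, 20 DOF
   2.086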
EXAMPLE 1 - two datasets in one file

Here is an example to make sense of it all. We want to compare two versions
of the foobar program to see if the second version is faster than the first
for the same inputs. The versions are 1.1 and 1.2. The program takes about 2
minutes to run (120 seconds) and we want to determine whether v1.2 is faster.
The table below shows sample data for 10 runs of each version.

   # Run time data collected for v1.1 and v1.2.
   #
   # Num   v1.1     v1.2
   # ===  =======  =======
      1   119.041  117.038
      2   119.670  119.733
      3   120.675  118.346
      4   118.628  117.261
      5   120.363  118.863
      6   118.076  117.545
      7   120.539  119.751
      8   118.880  119.042
      9   120.164  116.203
     10   119.134  118.049

For this example we assume that the data is stored in a single file but
normally it is easier to have it exist in two separate files because, by
default, the tool looks at the first token on each line and collects it if
the token is a floating point number. When the data is not in a single column
in a file, you must explicitly specify which column to collect. In this case,
the first dataset is in column 2 and the second dataset is in column 3 of the
same file. Blank lines and lines where the token is not a floating point
number are ignored, which allows you to add comments and blank spaces.

Here is what the run looks like:

   $ ./cmpds.py -c 0.95 -k 2 3 data.txt
   With 95.0% confidence, dataset-2 is smaller than dataset-1 by about 1.1%.

As you can see, dataset-2 (v1.2) is slightly faster. Note that we use -k to
specify the columns because -c is already reserved for specifying the
confidence level. If you reverse the columns, you will get the opposite
result:

   $ ./cmpds.py -c 0.95 -k 3 2 data.txt
   With 95.0% confidence, dataset-2 is larger than dataset-1 by about 1.1%.
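The reported percentage is simply the difference of the sample means relative
to dataset-1, so it is easy to sanity check by hand. This quick check uses
plain Python and nothing from the tool; it says nothing about significance,
which is what the t-test machinery adds.

   >>> v11 = [119.041, 119.670, 120.675, 118.628, 120.363,
   ...        118.076, 120.539, 118.880, 120.164, 119.134]
   >>> v12 = [117.038, 119.733, 118.346, 117.261, 118.863,
   ...        117.545, 119.751, 119.042, 116.203, 118.049]
   >>> m1 = sum(v11) / len(v11)
   >>> m2 = sum(v12) / len(v12)
   >>> round(100. * (m1 - m2) / m1, 1)
   1.1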
EXAMPLE 2 - datasets in separate files

A more realistic example would be running a program called blackbox-v1 50
times and collecting the timing output to a file and then running blackbox-v2
and collecting its output. Here is how you might do it:

   $ rm -f /tmp/blackbox-v1.out /tmp/blackbox-v2.out
   $ for((i=1;i<=50;i++)) ; do printf '\nExp %03d\n' $i ; /usr/bin/time -p blackbox-v1 >> /tmp/v1.out ; done
   $ for((i=1;i<=50;i++)) ; do printf '\nExp %03d\n' $i ; /usr/bin/time -p blackbox-v2 >> /tmp/v2.out ; done

We can now capture the real run time data by simply grepping out the data
like this:

   $ grep -w ^real /tmp/v1.out > /tmp/v1.ds
   $ grep -w ^real /tmp/v2.out > /tmp/v2.ds

The above command takes advantage of the fact that posix time format (-p)
outputs the time data on 3 separate lines as shown in this simple example:

   $ /usr/bin/time -p sleep 0.3
   real 0.30
   user 0.00
   sys 0.00
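If you prefer to avoid grep, the same extraction can be done in a few lines
of Python. This is only an illustrative alternative, not something the tool
does for you; the file names match the example above.

   >>> for name in ('v1', 'v2'):
   ...     with open('/tmp/{0}.out'.format(name)) as ifp, \
   ...          open('/tmp/{0}.ds'.format(name), 'w') as ofp:
   ...         for line in ifp:
   ...             if line.startswith('real '):
   ...                 ofp.write(line)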
At this point we have the unpaired observations from both runs in two
different files so we can use cmpds.py to figure out whether v2 is faster
than v1 at a 95% confidence level.

   $ ./cmpds.py -c 0.95 /tmp/v1.ds /tmp/v2.ds
   With 95.0% confidence, dataset-2 is smaller than dataset-1 by about 1.3%.

That tells us that v2 is indeed slightly faster.
'''
# License: MIT Open Source
# Copyright (c) 2016 by <NAME>
#
# REFERENCES:
#   <NAME> (1991). "The Art of Computer Systems Performance Analysis",
#   <NAME>iley and Sons, New York.
import argparse
import datetime
import inspect
import math
import os
import sys
from functools import reduce  # needed for the gamma() integer shortcut on Python 3

#VERSION='0.1'  # Initial load.
VERSION='0.2'   # Made the std dev calculation simpler.


# ================================================================
#
# Message utility functions.
#
# ================================================================
def _msg(prefix, frame, msg, ofp=sys.stdout):
    '''
    Base for printing messages.
    '''
    lineno = inspect.stack()[frame][2]
    now = datetime.datetime.now()
    ofp.write('{!s:<26} {} {:>5} - {}\n'.format(now, prefix, lineno, msg))


def info(msg, f=1):
    '''
    Write an info message to stdout.
    '''
    _msg('INFO', f+1, msg)


def infov(opts, msg, f=1):
    '''
    Write an info message to stdout when verbose mode is enabled.
    '''
    if opts.verbose > 0:
        _msg('INFO', f+1, msg)


def warn(msg, f=1):
    '''
    Write a warning message to stdout.
    '''
    _msg('WARNING', f+1, msg)


def err(msg, f=1):
    '''
    Write an error message to stderr and exit.
    '''
    _msg('ERROR', f+1, msg, sys.stderr)
    sys.exit(1)
# ================================================================
#
# Statistical utility functions.
# See https://github.com/jlinoff/ztables for background.
#
# ================================================================
def gamma(x):
    '''
    Gamma function.

    Uses the Lanczos approximation and natural logarithms.
    For integer values of x we can use the exact value of (x-1)!.

       gamma(1/2) = 1.77245385091
       gamma(3/2) = 0.886226925453
       gamma(5/2) = 1.32934038818
       gamma(7/2) = 3.32335097045
       gamma(4)   = 6.0
    '''
    if (x - int(x)) == 0:
        # Optimization for integer values: (x-1)!.
        return reduce(lambda a, b: a * b, [float(i) for i in range(1, int(x))])

    # Lanczos approximation, page 214 of Numerical Recipes in C.
    c = [76.18009172947146,
         -86.50532032941677,
         24.01409824083091,
         -1.231739572450155,
         0.1208650973866179e-2,
         -0.5395239384953e-5,
         ]
    c0 = 1.000000000190015
    c1 = 2.5066282746310005
    x1 = float(x) + 5.5
    x2 = (float(x) + 0.5) * math.log(x1)
    x3 = x1 - x2
    x4 = c0
    x5 = float(x)
    for i in range(6):
        x5 += 1.0
        x4 += c[i] / x5
    x6 = math.log((c1 * x4) / float(x))
    x7 = -x3 + x6  # ln(gamma(x))
    g = math.exp(x7)
    return g


def pdf_t(x, dof):
    '''
    Calculate the probability density function (PDF) at x for a student-t
    distribution with dof degrees of freedom.

    This is basically the height of the curve at x.
    '''
    assert dof > 2
    x1 = gamma((float(dof) + 1.0) / 2.0)
    x2 = math.sqrt(dof * math.pi) * gamma((float(dof) / 2.0))
    x3 = 1.0 + (float((x ** 2)) / float(dof))
    x4 = float((dof + 1)) / 2.0
    x5 = x3 ** -x4
    y = (x1 * x5) / x2
    return y
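# ----------------------------------------------------------------
# Illustrative spot checks. This helper is an addition for this write-up,
# not part of the original tool, and is never called by main(). It compares
# gamma() and pdf_t() against well known values: gamma(1/2) is sqrt(pi),
# gamma(4) is 3!, and the t(10) density at 0 is about 0.389.
# ----------------------------------------------------------------
def _demo_gamma_and_pdf_t():
    assert abs(gamma(0.5) - 1.77245385091) < 1e-6
    assert abs(gamma(3.5) - 3.32335097045) < 1e-6
    assert abs(gamma(4) - 6.0) < 1e-12
    assert abs(pdf_t(0, 10) - 0.389) < 1e-3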
def pdf_nd(x, s=1.0, u=0.0):
    '''
    Calculate the probability density function (PDF) for a normal
    distribution.

       s = standard deviation (1 for a standard normal distribution)
       u = mean (0 for a standard normal distribution)

    This is the height of the curve at x.
    '''
    dx = float(x) - float(u)
    dx2 = dx * dx
    xden = 2 * (s ** 2)
    den = s * math.sqrt(2 * math.pi)
    exp = math.e ** (-dx2 / xden)
    y = exp / den
    return y


def pdf_snd(x):
    '''
    Calculate the probability density function (PDF) for a standard normal
    distribution.

    It is exactly the same as pdf_nd(x, 1, 0) but is somewhat more efficient.
    '''
    dx2 = float(x) ** 2
    den = math.sqrt(2 * math.pi)
    exp = math.e ** -(dx2 / 2)
    y = exp / den
    return y


def area_under_curve(x1, x2, intervals, fct, *args, **kwargs):
    '''
    Calculate the approximate area under a curve using trapezoidal
    approximation.

    It breaks the interval between x1 and x2 into trapezoids whose width is
    fixed (proportional to how the interval is sliced). The height of each
    rectangle is the pdf function value for x at the start of the interval.
    The accumulation of the areas provides an estimate of the area under the
    curve. The greater the number of intervals the better the estimate is,
    at the cost of performance.
    '''
    assert x2 > x1        # just a sanity check
    assert intervals > 1  # another sanity check
    total_area = 0.0
    width = (float(x2) - float(x1)) / float(intervals)
    x = float(x1)
    py = float(fct(x, *args, **kwargs))
    for i in range(intervals):
        y = float(fct(x, *args, **kwargs))
        rectangle_area = width * y                     # area of rectangle at x with height y
        triangle_area = ((y - py) * width) / 2.0       # adjustment based on height change
        total_area += rectangle_area + triangle_area   # trapezoid area
        x += width                                     # advance to the next edge
        py = y                                         # remember the previous height
    return total_area


def binary_search_for_z(probability, tolerance, maxtop, minval, iterations, v, fct, *args):
    '''
    Get the z value that matches the specified percentage.
    '''
    # Binary search to find the closest value.
    z = 0.0
    adjustment = float(maxtop) / 2.0
    top = maxtop
    bot = 0.0
    diff = tolerance * 2  # start the loop
    while diff > tolerance:
        mid = bot + ((top - bot) / 2.0)
        z = mid - adjustment
        q = area_under_curve(minval, z, iterations, fct, *args)
        cp = 1.0 - (2.0 * (1.0 - q))
        diff = abs(cp - probability)
        if v:
            info('p={}, cp={}, t={:f}, mt={}, mv={}, i={}, top={}, bot={}, mid={}, z={}, q={}'.format(
                probability, cp, tolerance, maxtop, minval, iterations, top, bot, mid, z, q))
        if probability < cp:
            # It is to the right.
            top = mid
        elif probability > cp:
            # It is to the left.
            bot = mid
        else:
            break
    # Sanity checks.
    assert top <= maxtop
    assert bot >= 0
    return z
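# ----------------------------------------------------------------
# Illustrative spot check. This helper is an addition for this write-up and
# is never called by main(). With the bounds that ttest() derives from the
# default --internal settings (maxtop=14, minval=-14, 10000 intervals), the
# two-sided 95% z value for the standard normal distribution should come out
# near the familiar 1.96.
# ----------------------------------------------------------------
def _demo_z_lookup():
    z = binary_search_for_z(0.95, 0.00001, 14.0, -14.0, 10000, False, pdf_snd)
    assert abs(z - 1.96) < 0.01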
# ================================================================
#
# t-test implementation
#
# ================================================================
def ttest(a, b, opts):
    '''
    Analyze unpaired observations to determine whether they are significantly
    different.
    '''
    cl = opts.conf
    infov(opts, 'a: {:>3} {}'.format(len(a), a))
    infov(opts, 'b: {:>3} {}'.format(len(b), b))
    infov(opts, 'confidence level: {:.1f}%'.format(100.*cl))

    na = float(len(a))
    nb = float(len(b))
    infov(opts, 'na: {}'.format(na))
    infov(opts, 'nb: {}'.format(nb))

    # means
    ma = sum(a) / na
    mb = sum(b) / nb
    infov(opts, 'mean a: {:.3f}'.format(ma))
    infov(opts, 'mean b: {:.3f}'.format(mb))

    # variances
    vara = sum([(xa - ma) ** 2 for xa in a]) / float(na - 1.)
    varb = sum([(xb - mb) ** 2 for xb in b]) / float(nb - 1.)
    infov(opts, 'variance a: {:.3f}'.format(vara))
    infov(opts, 'variance b: {:.3f}'.format(varb))

    # standard deviations
    stddeva = math.sqrt(vara)
    stddevb = math.sqrt(varb)
    infov(opts, 'stddev a: {:.3f}'.format(stddeva))
    infov(opts, 'stddev b: {:.3f}'.format(stddevb))

    # mean difference
    md = ma - mb
    infov(opts, 'mean diff: {:.3f}'.format(md))

    # standard deviation of the mean difference
    sa2qna = stddeva**2 / na
    sb2qnb = stddevb**2 / nb
    sdmd = math.sqrt(sa2qna + sb2qnb)
    infov(opts, 'stddev of the mean diff: {:.3f}'.format(sdmd))

    # effective degrees of freedom
    dof_num = (sa2qna + sb2qnb)**2
    dof_dena = (1. / (na + 1.)) * sa2qna**2
    dof_denb = (1. / (nb + 1.)) * sb2qnb**2
    dof = (dof_num / (dof_dena + dof_denb)) - 2.0
    infov(opts, 'effective DOF: {:.2f}'.format(dof))
    dofr = int('{:.0f}'.format(dof))
    infov(opts, 'effective DOF (rounded): {}'.format(dofr))

    # confidence interval for the mean difference
    z = 0.0
    # allow the user to play with the parameters
    t = opts.internal[0]
    lb = opts.internal[1]
    ub = opts.internal[2]
    intervals = int(opts.internal[3])
    maxv = 2 * round(abs(lb) + ub + 0.5, 0)
    minv = -maxv
    infov(opts, 'internal threshold: {:.1f}'.format(t))
    infov(opts, 'internal lower bound: {}'.format(lb))
    infov(opts, 'internal upper bound: {}'.format(ub))
    infov(opts, 'internal intervals: {}'.format(intervals))
    infov(opts, 'internal minval: {}'.format(minv))
    infov(opts, 'internal maxval: {}'.format(maxv))

    v = True if opts.verbose > 1 else False
    if dofr > opts.snd_threshold:
        # use standard normal distribution (SND)
        infov(opts, 'use standard normal distribution (SND)')
        z = binary_search_for_z(cl, t, maxv, minv, intervals, v, pdf_snd)
    else:
        infov(opts, 'use t-{} distribution'.format(dofr))
        z = binary_search_for_z(cl, t, maxv, minv, intervals, v, pdf_t, dof)

    x = (1. - cl) / 2.
    q = cl + x
    infov(opts, '{:.3f}-quantile of t-variate with {} degrees of freedom: {:.2f}'.format(q, dofr, z))

    cllb = md - z * sdmd
    club = md + z * sdmd
    infov(opts, '{:.1f}% confidence interval for difference: [{:3f} .. {:3f}]'.format(100.*cl, cllb, club))

    crosses_zero = cllb < 0 < club
    significant = not crosses_zero
    infov(opts, 'crosses zero: {}'.format(crosses_zero))
    infov(opts, 'reject the null hypothesis: {}'.format(significant))

    # Report the result.
    clp = cl * 100.
    if significant:
        per = 100. * abs(md) / ma
        infov(opts, 'percentage: {}'.format(per))
        if club < 0:
            print('With {:.1f}% confidence, dataset-2 is larger than dataset-1 by about {:,.1f}%.'.format(clp, per))
        else:
            print('With {:.1f}% confidence, dataset-2 is smaller than dataset-1 by about {:,.1f}%.'.format(clp, per))
    else:
        print('With {:.1f}% confidence, there is no significant difference between the datasets.'.format(clp))
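# ----------------------------------------------------------------
# Illustrative example. This helper is an addition for this write-up and is
# never called by main(). ttest() can be driven directly from Python by
# building an options object with the attributes it actually reads. The data
# is the v1.1/v1.2 sample from EXAMPLE 1 in the module docstring, so this
# prints the same "smaller than dataset-1 by about 1.1%" verdict.
# ----------------------------------------------------------------
def _demo_ttest():
    v11 = [119.041, 119.670, 120.675, 118.628, 120.363,
           118.076, 120.539, 118.880, 120.164, 119.134]
    v12 = [117.038, 119.733, 118.346, 117.261, 118.863,
           117.545, 119.751, 119.042, 116.203, 118.049]
    opts = argparse.Namespace(conf=0.95,
                              verbose=0,
                              internal=[0.00001, -3.4, 3.4, 10000],
                              snd_threshold=32)
    ttest(v11, v12, opts)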
The default is column 1 for both datasets.", "- q)) diff = abs(cp - probability) if v: info('p={}, cp={}, t={:f}, mt={},", "120.675 118.346 4 118.628 117.261 5 120.363 118.863 6 118.076 117.545 7 120.539", "./cmpds.py -c 0.95 -k 2 3 data.txt With 95.0% confidence, dataset-2 is smaller", "= cl + x infov(opts, '{:.3f}-quantile of t-variate with {} degrees of freedom:", "collects it if the token is a floating point number. When the data", "if len(opts.FILES) > 2: parser.error('only 1 or 2 files may be specified') if", "= 0.886226925453 gamma(5/2) = 1.32934038818 gamma(7/2) = 3.32335097045 gamma(4) = 6.0 ''' if", "120.363 118.863 6 118.076 117.545 7 120.539 119.751 8 118.880 119.042 9 120.164", "float(maxtop) / 2.0 top = maxtop bot = 0.0 diff = tolerance *", "adjustment = float(maxtop) / 2.0 top = maxtop bot = 0.0 diff =", "two different files so we can use cmpds.py to figure out whether v2", "c1 = 2.5066282746310005 x1 = float(x) + 5.5 x2 = (float(x) + 0.5)", "-k to specify the columns because -c is already reserved for specifying the", "infov(opts, 'a: {:>3} {}'.format(len(a), a)) infov(opts, 'b: {:>3} {}'.format(len(b), b)) infov(opts, 'confidence level:", "datetime.datetime.now() ofp.write('{!s:<26} {} {:>5} - {}\\n'.format(now, prefix, lineno, msg)) def info(msg, f=1): '''", "about {:,.1f}%.'.format(clp, per)) else: print('With {:.1f}% confidence, there is no significant difference between", "can now capture the real run time data by simply grepping out the", "maxtop bot = 0.0 diff = tolerance * 2 # start the loop", "parser.parse_args() if opts.cols[0] < 1: parser.error('column 1 must be greater then 0') if", "float(na - 1.) varb = sum([(xb - mb) ** 2 for xb in", "for i in range(6): x5 += 1.0 x4 += c[i] / x5 x6", ">> /tmp/v2.out ; done We can now capture the real run time data", "''' Write an info message to stdout. ''' _msg('INFO', f+1, msg) def infov(opts,", "214 of Numerical Recipes in C. c = [76.18009172947146, -86.50532032941677, 24.01409824083091, -1.231739572450155, 0.1208650973866179e-2,", "of the curve at x. ''' assert dof > 2 x1 = gamma((float(dof)", "0.90 (90%), 0.95 (95%) and 0.99 (99%). The tool will automatically determine the", "performance. ''' assert x2 > x1 # just a sanity check assert intervals", "# # ================================================================ def ttest(a, b, opts): ''' Analyze unpaired observations to determine", "As you can see, dataset-2 (v1.2) is slightly faster. Note that we use", "'--conf', type=float, default=0.95, action=get_conf_level(), metavar=('FLOAT'), help='''The confidence level such that 0 < c", "determine whether they are significantly different. ''' cl = opts.conf infov(opts, 'a: {:>3}", "''' lineno = inspect.stack()[frame][2] now = datetime.datetime.now() ofp.write('{!s:<26} {} {:>5} - {}\\n'.format(now, prefix,", "b)) infov(opts, 'confidence level: {:.1f}%'.format(100.*cl)) na = float(len(a)) nb = float(len(b)) infov(opts, 'na:", "'internal maxval: {}'.format(maxv)) v = True if opts.verbose > 1 else False if", "in column 2 and the second dataset is in column 3 of the", "y = exp / den return y def pdf_snd(x): ''' Calculate the probability", "95% confidence. # The dataset is used. 
$ ./gends.py 10 100 120 >", "t-test because it switches over to the standard normal distribution (SND) when the", "for a standard normal distribution) This is the height of the curve at", "probability, cp, tolerance, maxtop, minval, iterations, top, bot, mid, z, q)) if probability", "a: {:.3f}'.format(vara)) infov(opts, 'variance b: {:.3f}'.format(varb)) # standard deviations stddeva = math.sqrt(vara) stddevb", "for printing messages. ''' lineno = inspect.stack()[frame][2] now = datetime.datetime.now() ofp.write('{!s:<26} {} {:>5}", "{}\\n'.format(now, prefix, lineno, msg)) def info(msg, f=1): ''' Write an info message to", "normal distribution (SND) when the number of effective degrees of freedom (DOF) is", "# allow the user to play with the parameters t = opts.internal[0] lb", "at x. It is exactly the same as pdf_nd(x, 1, 0) but is", "level that you want to use to determine whether the datasets differ. Typical", "*args) cp = 1.0 - (2.0 * (1.0 - q)) diff = abs(cp", "confidence, dataset-2 is smaller than dataset-1 by about 1.1%. As you can see,", "$ grep -w ^real /tmp/v1.out > /tmp/v1.ds $ grep -w ^real /tmp/v2.out >", "to show the difference. $ ./gends.py 50 110 112 > ds-50-110-112.txt $ ./gends.py", "'INTERVALS'), help='''Factors used for internal computations. You should never need to change these.", "level. If you reverse the columns, you will get the opposite result: $", "x2 = (float(x) + 0.5) * math.log(x1) x3 = x1 - x2 x4", "infov(opts, 'b: {:>3} {}'.format(len(b), b)) infov(opts, 'confidence level: {:.1f}%'.format(100.*cl)) na = float(len(a)) nb", "internal computations. You should never need to change these. Defaults: %(default)s. ''') parser.add_argument('-k',", "/tmp/v1.out ; done $ for((i=1;i<=50;i++)) ; do printf '\\nExp %03d\\n' $i ; /usr/bin/time", "not crosses_zero infov(opts, 'crosses zero: {}'.format(crosses_zero)) infov(opts, 'reject the null hypothesis: {}'.format(significant)) #", "tool looks at the first token on each line and collects it if", "first column is for the first dataset. The second column is for the", "118.076 117.545 7 120.539 119.751 8 118.880 119.042 9 120.164 116.203 10 119.134", "specific confidence level using the t-test methodology for unpaired observations. Please note that", "x += width # advance to the next edge py = y #", "] c0 = 1.000000000190015 c1 = 2.5066282746310005 x1 = float(x) + 5.5 x2", "function (PDF) for a standard normal distribution. s = standard deviation (1 for", "1 for both datasets. ''') parser.add_argument('-s', '--snd-threshold', type=int, default=32, metavar=('UINT'), help='''The standard normal", "opts = parser.parse_args() if opts.cols[0] < 1: parser.error('column 1 must be greater then", "''') parser.add_argument('-V', '--version', action='version', version='%(prog)s v{0}'.format(VERSION), help=\"\"\"Show program's version number and exit. \"\"\")", "the time data on 3 separate lines as shown in this simple example:", "# See https://github.com/jlinoff/ztables for background. # # ================================================================ def gamma(x): ''' Gamma function.", "normally it is easier to have it exist in two separate files because,", "t-variate with {} degrees of freedom: {:.2f}'.format(q, dofr, z)) cllb = md -", "environment. Each dataset contains a series of numbers to be compared. The numbers", "dof) x = (1. - cl) / 2. q = cl + x", "+ z * sdmd infov(opts, '{:.1f}% confidence interval for difference: [{:3f} .. 
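The same decision can be cross-checked against an off-the-shelf Welch t-test.
The sketch below is not part of cmpds.py (which sticks to the standard
library); it assumes SciPy is installed, and the sample values are simply the
first five rows of the EXAMPLE 1 table further down.

# Minimal cross-check sketch using SciPy's Welch t-test (unequal variances).
from scipy import stats

def quick_check(a, b, conf=0.95):
    t_stat, p_value = stats.ttest_ind(a, b, equal_var=False)  # Welch's t-test
    return p_value < (1.0 - conf)  # True when the difference is significant

v1 = [119.041, 119.670, 120.675, 118.628, 120.363]
v2 = [117.038, 119.733, 118.346, 117.261, 118.863]
print(quick_check(v1, v2))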
EXAMPLE 1 - two datasets in one file

    Here is an example to make sense of it all. We want to compare two
    versions of the foobar program to see if the second version is faster
    than the first for the same inputs. The versions are 1.1 and 1.2. The
    program takes about 2 minutes to run (120 seconds) and we want to
    determine whether v1.2 is faster. The table below shows sample data from
    10 runs of each version.

    $ cat data.txt
    # Run time data collected for v1.1 and v1.2.
    #
    # Num    v1.1     v1.2
    # ===  =======  =======
        1  119.041  117.038
        2  119.670  119.733
        3  120.675  118.346
        4  118.628  117.261
        5  120.363  118.863
        6  118.076  117.545
        7  120.539  119.751
        8  118.880  119.042
        9  120.164  116.203
       10  119.134  118.049

    For this example we assume that the data is stored in a single file,
    where the first dataset is in column 2 and the second dataset is in
    column 3 of the same file. Blank lines and lines where the token is not
    a floating point number are ignored. Here is what the run looks like:

    $ ./cmpds.py -c 0.95 -k 2 3 data.txt
    With 95.0% confidence, dataset-2 is smaller than dataset-1 by about 1.1%.

    As you can see, dataset-2 (v1.2) is slightly faster. Note that we use -k
    to specify the columns because -c is already reserved for specifying the
    confidence level. If you reverse the columns, you will get the opposite
    result:

    $ ./cmpds.py -c 0.95 -k 3 2 data.txt
    With 95.0% confidence, dataset-2 is larger than dataset-1 by about 1.1%.
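Column selection (-k 2 3 above) boils down to splitting each line on
whitespace and keeping the k-th token when it parses as a float. The sketch
below illustrates the idea; the function name is made up for illustration and
is not the tool's own reader.

# Minimal sketch: pull column `col` (1-based) from a whitespace-separated
# file, skipping blank lines and non-numeric tokens.
def read_column(path, col):
    values = []
    with open(path) as handle:
        for line in handle:
            tokens = line.split()
            if len(tokens) < col:
                continue
            try:
                values.append(float(tokens[col - 1]))
            except ValueError:
                continue  # comments and headers are simply ignored
    return values

# Usage: v1 = read_column('data.txt', 2); v2 = read_column('data.txt', 3)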
EXAMPLE 2 - datasets in separate files

    A more realistic example would be running a program called blackbox-v1
    50 times and collecting the timing output to a file, and then running
    blackbox-v2 and collecting its output. Here is how you might do it:

    $ rm -f /tmp/blackbox-v1.out /tmp/blackbox-v2.out
    $ for((i=1;i<=50;i++)) ; do printf '\nExp %03d\n' $i ; /usr/bin/time -p blackbox-v1 >> /tmp/v1.out ; done
    $ for((i=1;i<=50;i++)) ; do printf '\nExp %03d\n' $i ; /usr/bin/time -p blackbox-v2 >> /tmp/v2.out ; done

    The posix time format (-p) outputs the time data on 3 separate lines, as
    shown in this simple example:

    $ /usr/bin/time -p sleep 0.3
    real 0.30
    user 0.00
    sys 0.00

    We can now capture the real run time data by simply grepping it out:

    $ grep -w ^real /tmp/v1.out > /tmp/v1.ds
    $ grep -w ^real /tmp/v2.out > /tmp/v2.ds

    At this point we have the unpaired observations from both runs in two
    different files, so we can use cmpds.py to figure out whether v2 is
    faster than v1 at a 95% confidence level:

    $ ./cmpds.py -c 0.95 /tmp/v1.ds /tmp/v2.ds
    With 95.0% confidence, dataset-2 is smaller than dataset-1 by about 1.3%.

    That tells us that v2 is indeed slightly faster.
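If you would rather not depend on grep, the same extraction can be done in a
few lines of Python. This is only a sketch of the idea; the file paths match
the ones used in the example above.

# Sketch: extract the `real` values from `/usr/bin/time -p` output.
def read_real_times(path):
    times = []
    with open(path) as handle:
        for line in handle:
            parts = line.split()
            if len(parts) == 2 and parts[0] == 'real':
                times.append(float(parts[1]))
    return times

# Usage: v1 = read_real_times('/tmp/v1.out'); v2 = read_real_times('/tmp/v2.out')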
# License: MIT Open Source
# Copyright (c) 2016 by <NAME>
#
# REFERENCES:
#   <NAME> (1991). "The Art of Computer Systems Performance Analysis",
#   Wiley and Sons, New York.
import argparse
import datetime
import inspect
import math
import os
import sys

#VERSION='0.1'  # Initial load.
VERSION = '0.2'  # Made the std dev calculation simpler.

# ================================================================
# Message utility functions.
# ================================================================
def _msg(prefix, frame, msg, ofp=sys.stdout):
    ''' Base for printing messages. '''
    lineno = inspect.stack()[frame][2]
    now = datetime.datetime.now()
    ofp.write('{!s:<26} {} {:>5} - {}\n'.format(now, prefix, lineno, msg))

def info(msg, f=1):
    ''' Write an info message to stdout. '''
    _msg('INFO', f+1, msg)

def infov(opts, msg, f=1):
    ''' Write an info message to stdout when verbose is enabled. '''
    if opts.verbose > 0:
        _msg('INFO', f+1, msg)

def warn(msg, f=1):
    ''' Write a warning message to stdout. '''
    _msg('WARNING', f+1, msg)

def err(msg, f=1):
    ''' Write an error message to stderr and exit. '''
    _msg('ERROR', f+1, msg, sys.stderr)
    sys.exit(1)
# ================================================================
# See https://github.com/jlinoff/ztables for background.
# ================================================================
def gamma(x):
    ''' Gamma function using the Lanczos approximation and natural
    logarithms. For integer values of x we can use the exact value of (x-1)!.
        gamma(1/2) = 1.77245385091
        gamma(3/2) = 0.886226925453
        gamma(5/2) = 1.32934038818
        gamma(7/2) = 3.32335097045
        gamma(4)   = 6.0
    '''
    if (x - int(x)) == 0:
        # Optimization for integer values: (x-1)!.
        # Note: on Python 3, reduce() must be imported from functools.
        return reduce(lambda a, b: a * b, [float(i) for i in range(1, int(x))])
    # Lanczos approximation, page 214 of Numerical Recipes in C.
    c = [76.18009172947146, -86.50532032941677, 24.01409824083091,
         -1.231739572450155, 0.1208650973866179e-2, -0.5395239384953e-5]
    c0 = 1.000000000190015
    c1 = 2.5066282746310005
    x1 = float(x) + 5.5
    x2 = (float(x) + 0.5) * math.log(x1)
    x3 = x1 - x2
    x4 = c0
    x5 = float(x)
    for i in range(6):
        x5 += 1.0
        x4 += c[i] / x5
    x6 = math.log((c1 * x4) / float(x))
    x7 = -x3 + x6  # ln(gamma(x))
    return math.exp(x7)

def pdf_t(x, dof):
    ''' Calculate the probability density function (PDF) at x for a
    t-distribution with dof degrees of freedom. This is basically the
    height of the curve at x. '''
    assert dof > 2
    x1 = gamma((float(dof) + 1.0) / 2.0)
    x2 = math.sqrt(dof * math.pi) * gamma(float(dof) / 2.0)
    x3 = 1.0 + (float(x ** 2) / float(dof))
    x4 = float(dof + 1) / 2.0
    x5 = x3 ** -x4
    return (x1 * x5) / x2

def pdf_nd(x, s=1.0, u=0.0):
    ''' Calculate the PDF for a normal distribution.
    s = standard deviation (1 for a standard normal distribution)
    u = mean (0 for a standard normal distribution)
    This is the height of the curve at x. '''
    dx = float(x) - float(u)
    dx2 = dx * dx
    xden = 2 * (s ** 2)
    den = s * math.sqrt(2 * math.pi)
    return (math.e ** (-dx2 / xden)) / den

def pdf_snd(x):
    ''' Calculate the PDF for a standard normal distribution. It is exactly
    the same as pdf_nd(x, 1, 0) but is somewhat more efficient. '''
    dx2 = float(x) ** 2
    den = math.sqrt(2 * math.pi)
    return (math.e ** -(dx2 / 2)) / den
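A quick way to gain confidence in these helpers is to compare them against
the standard library. The checks below are illustrative only and assume the
functions above are in scope.

# Sanity checks against the standard library (illustrative only).
import math

assert abs(gamma(0.5) - math.gamma(0.5)) < 1e-6   # ~1.77245385091
assert abs(gamma(3.5) - math.gamma(3.5)) < 1e-6   # ~3.32335097045
assert abs(pdf_snd(0.0) - 1.0 / math.sqrt(2 * math.pi)) < 1e-12  # ~0.39894
assert abs(pdf_nd(0.0) - pdf_snd(0.0)) < 1e-12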
def area_under_curve(x1, x2, intervals, fct, *args, **kwargs):
    ''' Calculate the approximate area under a curve using trapezoidal
    approximation. It breaks the interval between x1 and x2 into slices.
    The width of each slice is fixed (proportional to how the interval is
    sliced) and the height of each rectangle is the pdf function value for x
    at the start of the interval. The accumulation of the areas provides an
    estimate of the area under the curve. The greater the number of
    intervals, the better the estimate is, at the cost of performance. '''
    assert x2 > x1        # just a sanity check
    assert intervals > 1  # another sanity check
    total_area = 0.0
    width = (float(x2) - float(x1)) / float(intervals)
    x = float(x1)
    py = float(fct(x, *args, **kwargs))
    for i in range(intervals):
        y = float(fct(x, *args, **kwargs))
        rectangle_area = width * y                    # rectangle at x with height y
        triangle_area = ((y - py) * width) / 2.0      # adjustment for the height change
        total_area += rectangle_area + triangle_area  # trapezoid area
        x += width                                    # advance to the next edge
        py = y                                        # remember the previous height
    return total_area

def binary_search_for_z(probability, tolerance, maxtop, minval, iterations, v, fct, *args):
    ''' Get the z value that matches the specified percentage. '''
    # Binary search to find the closest value.
    z = 0.0
    adjustment = float(maxtop) / 2.0
    top = maxtop
    bot = 0.0
    diff = tolerance * 2
    # start the loop
    while diff > tolerance:
        mid = bot + ((top - bot) / 2.0)
        z = mid - adjustment
        q = area_under_curve(minval, z, iterations, fct, *args)
        cp = 1.0 - (2.0 * (1.0 - q))
        diff = abs(cp - probability)
        if v:
            info('p={}, cp={}, t={:f}, mt={}, mv={}, i={}, top={}, bot={}, mid={}, z={}, q={}'.format(
                probability, cp, tolerance, maxtop, minval, iterations, top, bot, mid, z, q))
        if probability < cp:
            top = mid  # it is to the right
        elif probability > cp:
            bot = mid  # it is to the left
        else:
            break
    # Sanity checks.
    assert top <= maxtop
    assert bot >= 0
    return z
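Putting the two helpers together reproduces the familiar normal quantiles:
for a 95% two-sided confidence level the search should land near z = 1.96.
The parameter values below simply mirror the tool's --internal defaults
(tolerance 0.00001, bounds -3.4/3.4, 10000 intervals) and its scaling of the
search bounds; this is an illustrative usage sketch, not part of the tool.

# Illustrative use of the helpers above: find z for a 95% confidence level
# using the standard normal PDF. Expect a value close to 1.96.
tolerance, lower, upper, intervals = 0.00001, -3.4, 3.4, 10000
maxv = 2 * round(abs(lower) + upper + 0.5, 0)  # same scaling the tool uses
minv = -maxv
z95 = binary_search_for_z(0.95, tolerance, maxv, minv, intervals, False, pdf_snd)
print('z for 95% confidence: {:.2f}'.format(z95))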
# ================================================================
# t-test implementation
# ================================================================
def ttest(a, b, opts):
    ''' Analyze unpaired observations to determine whether they are
    significantly different. '''
    cl = opts.conf
    na = float(len(a))
    nb = float(len(b))

    # means
    ma = sum(a) / na
    mb = sum(b) / nb

    # variances and standard deviations
    vara = sum([(xa - ma) ** 2 for xa in a]) / float(na - 1.)
    varb = sum([(xb - mb) ** 2 for xb in b]) / float(nb - 1.)
    stddeva = math.sqrt(vara)
    stddevb = math.sqrt(varb)

    # mean difference and standard deviation of the mean difference
    md = ma - mb
    sa2qna = stddeva**2 / na
    sb2qnb = stddevb**2 / nb
    sdmd = math.sqrt(sa2qna + sb2qnb)

    # effective degrees of freedom
    dof_num = (sa2qna + sb2qnb)**2
    dof_dena = (1. / (na + 1.)) * sa2qna**2
    dof_denb = (1. / (nb + 1.)) * sb2qnb**2
    dof = (dof_num / (dof_dena + dof_denb)) - 2.0
    dofr = int('{:.0f}'.format(dof))

    # confidence interval for the mean difference
    t = opts.internal[0]
    lb = opts.internal[1]
    ub = opts.internal[2]
    intervals = int(opts.internal[3])
    maxv = 2 * round(abs(lb) + ub + 0.5, 0)
    minv = -maxv
    v = True if opts.verbose > 1 else False
    if dofr > opts.snd_threshold:
        # use the standard normal distribution (SND)
        z = binary_search_for_z(cl, t, maxv, minv, intervals, v, pdf_snd)
    else:
        # use the t distribution with dof effective degrees of freedom
        z = binary_search_for_z(cl, t, maxv, minv, intervals, v, pdf_t, dof)
    cllb = md - z * sdmd
    club = md + z * sdmd
    infov(opts, '{:.1f}% confidence interval for difference: [{:3f} .. {:3f}]'.format(100.*cl, cllb, club))

    # the difference is significant only if the interval does not cross zero
    crosses_zero = cllb < 0 < club
    significant = not crosses_zero

    # Report the result.
    clp = cl * 100.
    if significant:
        per = 100. * abs(md) / ma
        if club < 0:
            print('With {:.1f}% confidence, dataset-2 is larger than dataset-1 by about {:,.1f}%.'.format(clp, per))
        else:
            print('With {:.1f}% confidence, dataset-2 is smaller than dataset-1 by about {:,.1f}%.'.format(clp, per))
    else:
        print('With {:.1f}% confidence, there is no significant difference between the datasets.'.format(clp))
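For a quick back-of-the-envelope check of the same arithmetic you can use the
standard library's statistics module with a fixed z of 1.96, which is a
large-sample approximation rather than the DOF-aware lookup the tool performs.
The values plugged in below are the v1.1/v1.2 samples from EXAMPLE 1.

# Back-of-the-envelope confidence interval using only the stdlib (illustrative).
import math
import statistics

a = [119.041, 119.670, 120.675, 118.628, 120.363,
     118.076, 120.539, 118.880, 120.164, 119.134]
b = [117.038, 119.733, 118.346, 117.261, 118.863,
     117.545, 119.751, 119.042, 116.203, 118.049]
md = statistics.mean(a) - statistics.mean(b)
sdmd = math.sqrt(statistics.variance(a) / len(a) + statistics.variance(b) / len(b))
z = 1.96  # large-sample approximation
lo, hi = md - z * sdmd, md + z * sdmd
print('mean difference {:.3f}, 95% CI [{:.3f}, {:.3f}]'.format(md, lo, hi))
# The difference is significant only if the interval does not cross zero.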
# ================================================================
# Options
# ================================================================
def getopts():
    ''' Get the command line options using argparse. '''
    # Make sure that the confidence level is in the proper range.
    def get_conf_level():
        class GetConfLevel(argparse.Action):
            def __call__(self, parser, args, values, option_string=None):
                if 0. < values < 1.0:
                    setattr(args, self.dest, values)
                else:
                    parser.error('argument "{}" out of range (0..1)'.format(self.dest))
        return GetConfLevel

    # Trick to capitalize the built-in argparse headers (USAGE:, POSITIONAL
    # ARGUMENTS, OPTIONAL ARGUMENTS, ...): argparse._ is replaced with a
    # small gettext() lookup table.
    base = os.path.basename(sys.argv[0])
    usage = '\n  {0} [OPTIONS] <DATASET-1> [<DATASET-2>]'.format(base)
    desc = 'DESCRIPTION:{0}'.format('\n  '.join(__doc__.split('\n')))
    # The epilog walks through five worked EXAMPLES: help, no significant
    # difference, 95% and 99% significant differences, and both datasets in
    # the same file.
    parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter,
                                     description=desc[:-2], usage=usage)
    parser.add_argument('-c', '--conf', type=float, default=0.95,
                        action=get_conf_level(), metavar=('FLOAT'),
                        help='The confidence level such that 0 < c < 1. Default: %(default)s.')
    parser.add_argument('--internal', type=float, nargs=4,
                        default=[0.00001, -3.4, 3.4, 10000],
                        metavar=('TOLERANCE', 'LOWER', 'UPPER', 'INTERVALS'),
                        help='Factors used for internal computations. Defaults: %(default)s.')
    parser.add_argument('-k', '--cols', nargs=2, type=int, default=[1, 1],
                        metavar=('COL1', 'COL2'),
                        help='The columns that define each dataset. Default: column 1 for both.')
    parser.add_argument('-s', '--snd-threshold', type=int, default=32, metavar=('UINT'),
                        help='DOF threshold above which the SND replaces the t-distribution.')
    parser.add_argument('-v', '--verbose', action='count', default=0,
                        help='Increase the level of verbosity.')
    parser.add_argument('-V', '--version', action='version',
                        version='%(prog)s v{0}'.format(VERSION))
    parser.add_argument('FILES', nargs='+', help='The files with the run time data.')
    opts = parser.parse_args()
    if opts.cols[0] < 1 or opts.cols[1] < 1:
        parser.error('columns must be greater than 0')
    if len(opts.FILES) > 2:
        parser.error('only 1 or 2 files may be specified')
    if opts.snd_threshold < 30:
        parser.error('it does not make sense to use SND for {} elements'.format(opts.snd_threshold))
    return opts

# ================================================================
# Read file data.
# ================================================================
def read_file(opts, fn, col):
    ''' Read column data from the file. '''
    ds = []
    try:
        with open(fn, 'r') as ifp:
            ln = 0
            for line in ifp.readlines():
                ln += 1
                tokens = line.strip().split()
                if len(tokens) < col:
                    continue
                token = tokens[col-1]
                try:
                    f = float(token)
                    if f < 0.0001:
                        # avoid divide by 0 errors
                        if opts.verbose > 1:
                            info('skipping line {} in {}: number is too small {}'.format(ln, fn, token))
                        continue
                    ds.append(f)
                except ValueError:
                    if opts.verbose > 1:
                        info('skipping line {} in {}: not a number: {}'.format(ln, fn, token))
    except IOError:
        err('could not read file: {}'.format(fn))
    if len(ds) < 3:
        err('not enough data points at column {}, found {}, need at least 3 in file: {}'.format(col, len(ds), fn))
    return ds

# ================================================================
# Main
# ================================================================
def main():
    opts = getopts()
    af = opts.FILES[0]
    bf = opts.FILES[1] if len(opts.FILES) == 2 else af
    ac = opts.cols[0]
    bc = opts.cols[1]
    a = read_file(opts, af, ac)
    b = read_file(opts, bf, bc)
    ttest(a, b, opts)

if __name__ == '__main__':
    main()
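The worked examples also reference a helper called gends.py (for instance
"./gends.py 10 100 120 > ds-10-100-120.txt") that is not part of this file.
Assuming it simply emits N pseudo-random values between the two bounds, one
per line, a stand-in could look like the sketch below; the uniform
distribution and the script layout are assumptions.

# Hypothetical stand-in for the gends.py helper referenced in the examples
# (assumption: it prints N pseudo-random values between LOW and HIGH).
import random
import sys

def gends(n, low, high):
    for _ in range(n):
        print('{:.3f}'.format(random.uniform(low, high)))

if __name__ == '__main__':
    gends(int(sys.argv[1]), float(sys.argv[2]), float(sys.argv[3]))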
[ "'//#linkpatcher_%s' % quote(\"あ\".encode('utf-8')) }] } }, { \"param\": \"::::: あ\", \"expectation\": { \"element\":", "\"text3\", \"link\": \"/test3.html\" }]) def test_run(self): elemstr = \"\"\"<p> <p>text1 text2text2 text2 text3.</p>", "'//#linkpatcher_%s' % quote(\"あ\".encode('utf-8')) }] } }, { \"param\": \":::::: あ\", \"expectation\": { \"element\":", "{ \"current_url\": \"/index.html\", \"link_to\": '/index.html#linkpatcher_test', \"text\": \"あいうえお\" } }] for pattern in test_patterns:", "def test_make_anchor(self): test_patterns = [{ \"expectation\": \"\"\"<a class=\"linkpatcher_link\" href=\"../../index.html#linkpatcher_test\">test</a>\"\"\", \"params\": { \"current_url\": \"/nest/nest/index.html\",", "{ \"expectation\": \"\"\"<a class=\"linkpatcher_link\" href=\"./index.html#linkpatcher_test\">あいうえお</a>\"\"\", \"params\": { \"current_url\": \"/index.html\", \"link_to\": '/index.html#linkpatcher_test', \"text\": \"あいうえお\"", "\"param\": \"\", \"expectation\": [\"\", []] }, { \"param\": \"abcabc acd\", \"expectation\": [\"abcabc acd\",", "{ \"element\": \"\"\" <h5 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h5> \"\"\", \"db\": [{ 'text': 'あ', 'link': '//#linkpatcher_%s'", "% quote(\"あ\".encode('utf-8')) }] } }, { \"param\": \":::::: あ\", \"expectation\": { \"element\": \"\"\"", "4567 text3 8901 <a id=\"2\">text2</a> text3 9999 </p> text1 abcd text2 efgh text3", "\"/index.html\", \"link_to\": '/index.html#linkpatcher_test', \"text\": \"あいうえお\" } }] for pattern in test_patterns: test_result =", "} self.tree_processor.db_keys_re = re.compile(\"|\".join( self.tree_processor.db_value_map.keys())) def dotest(self, current_url, link_to, text): self.site_navigation.url_context.set_current_url(current_url) self.tree_processor =", "\"db\": [{ 'text': 'あ', 'link': '//#linkpatcher_%s' % quote(\"あ\".encode('utf-8')) }, { 'text': 'い', 'link':", "class=\"linkpatcher_link\" href=\"./test3.html\">text3</a>\"\"\" ] ] }] for pattern in text_patterns: text, elems = self.dotest(pattern[\"param\"])", "expectation = pattern[\"expectation\"] self.assertEqual(text, expectation[0]) self.assertEqual( list(map(lambda e: etree.tostring(e).decode(), elems)), expectation[1]) class TestInsertPatchedLink(LinkPatcherExtensionTestBase):", "</p>\"\"\" elem = etree.fromstring(elemstr) self.tree_processor.insert_patchedlink(elem) self.assertEqual(etree.tostring(elem).decode(), expectation) class TestRun(LinkPatcherExtensionTestBase): def setUp(self): super(TestRun, self).setUp()", "unittest import markdown from markdown.util import etree from mkdocs import config, nav import", "= link_to plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals( page={}, site_navigation=self.site_navigation) return self.tree_processor.make_anchor(text) def test_make_anchor(self): test_patterns =", "utf-8 from __future__ import unicode_literals import re import unittest import markdown from markdown.util", "def setUp(self): super(TestMakeAnchor, self).setUp() self.tree_processor.db_value_map = { \"text1\": \"/test1.html\", \"text2\": \"/test2.html\", \"text3\": \"/test3.html\"", "<a class=\"linkpatcher_link\" href=\"./test2.html\">text2</a> 4567 <a class=\"linkpatcher_link\" href=\"./test3.html\">text3</a> 8901 <a id=\"2\">text2</a> <a class=\"linkpatcher_link\" href=\"./test3.html\">text3</a>", "\"expectation\": { \"element\": \"\"\" <h2 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h2> \"\"\", \"db\": [{ 'text': 'あ', 'link':", "{ \"param\": \":::::: あ\", \"expectation\": { \"element\": \"\"\" <h6 
class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h6> \"\"\", \"db\":", "'link': '//#linkpatcher_%s' % quote(\"あ\".encode('utf-8')) }, { 'text': 'い', 'link': '//#linkpatcher_%s' % quote('あ'.encode('utf-8')) }]", "expectation) class LinkPatcherExtensionTest(markdown.Extension): def extendMarkdown(self, md, md_globals): md.inlinePatterns['linkpatcher'] = LinkPathcerInlineProcessor( LinkPathcerInlineProcessor.pattern) class TestHandleMatch(LinkPatcherExtensionTestBase):", "def setUp(self): super(TestHandleMatch, self).setUp() self.inline_processor = LinkPathcerInlineProcessor( LinkPathcerInlineProcessor.pattern) self.md = markdown.Markdown(extensions=[LinkPatcherExtensionTest()]) page =", "}] }, }, { \"param\": \":: !あ, い\", \"expectation\": { \"element\": \"\"\" <h2", "\"params\": { \"current_url\": \"/nest/nest/index.html\", \"link_to\": '/index.html#linkpatcher_test', \"text\": \"test\" } }, { \"expectation\": \"\"\"<a", "re.compile(\"|\".join( self.tree_processor.db_value_map.keys())) def dotest(self, text): plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals( page={}, site_navigation=self.site_navigation) return self.tree_processor.newelem_from_text(text) def", "}] } }, { \"param\": \":::: あ\", \"expectation\": { \"element\": \"\"\" <h4 class=\"linkpatcher\"", "\"expectation\": { \"element\": \"\"\" <h5 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h5> \"\"\", \"db\": [{ 'text': 'あ', 'link':", "super(TestRun, self).setUp() self.site_navigation.url_context.set_current_url(\"/index.html\") plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals( page={}, site_navigation=self.site_navigation) plugin.TABLE.insert_multiple([{ \"text\": \"text1\", \"link\": \"/test1.html\"", "い\", \"expectation\": { \"element\": \"\"\" <h2 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h2> \"\"\", \"db\": [{ 'text': 'あ',", "link_to, text): self.site_navigation.url_context.set_current_url(current_url) self.tree_processor = LinkPatcherTreeProcessor() self.tree_processor.db_value_map = {} self.tree_processor.db_value_map[text] = link_to plugin.linkpatcher_plugin_globals", "{ \"param\": \":: !あ\", \"expectation\": { \"element\": \"\"\" <h2 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h2> \"\"\", \"db\":", "href=\"./test2.html\">text2</a> 4567 <a class=\"linkpatcher_link\" href=\"./test3.html\">text3</a> 8901 <a id=\"2\">text2</a> <a class=\"linkpatcher_link\" href=\"./test3.html\">text3</a> 9999 </p>", "= \"\"\"<p> <p><a class=\"linkpatcher_link\" href=\"./test1.html\">text1</a> text2text2 <a class=\"linkpatcher_link\" href=\"./test2.html\">text2</a> <a class=\"linkpatcher_link\" href=\"./test3.html\">text3</a>.</p> </p>\"\"\"", "[{ 'text': 'あ', 'link': '//#linkpatcher_%s' % quote(\"あ\".encode('utf-8')) }] } }, { \"param\": \":::", "text1 0123 text2 4567 text3 8901 <a id=\"2\">text2</a> text3 9999 </p> text1 abcd", "\"/test2.html\" }, { \"text\": \"text3\", \"link\": \"/test3.html\" }]) def test_run(self): elemstr = \"\"\"<p>", "[ \"abcabc \", [ \"\"\"<a class=\"linkpatcher_link\" href=\"./test1.html\">text1</a> acd \"\"\", \"\"\"<a class=\"linkpatcher_link\" href=\"./test2.html\">text2</a> \"\"\",", "href=\"./test1.html\">text1</a> 0123 <a class=\"linkpatcher_link\" href=\"./test2.html\">text2</a> 4567 <a class=\"linkpatcher_link\" href=\"./test3.html\">text3</a> 8901 <a id=\"2\">text2</a> <a", "\"current_url\": \"/index.html\", \"link_to\": '/index.html#linkpatcher_test', \"text\": \"あいうえお\" } }] for pattern in test_patterns: test_result", "class=\"linkpatcher_link\" 
href=\"./test2.html\">text2</a> <a class=\"linkpatcher_link\" href=\"./test3.html\">text3</a>.</p> </p>\"\"\" elem = etree.fromstring(elemstr) self.tree_processor.run(elem) self.assertEqual( etree.tostring(elem, encoding='utf-8').decode(),", "plugin.TABLE.purge() result = self.md.convert(test_pattern[\"param\"]) self.assertEqual( result, \"\"\"<p>%s</p>\"\"\" % test_pattern[\"expectation\"]['element']) self.assertEqual(plugin.TABLE.all(), test_pattern[\"expectation\"]['db']) if __name__", "<h2 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h2> \"\"\", \"db\": [{ 'text': 'あ', 'link': '//#linkpatcher_%s' % quote(\"あ\".encode('utf-8')) }]", "site_navigation=self.site_navigation) return self.tree_processor.make_anchor(text) def test_make_anchor(self): test_patterns = [{ \"expectation\": \"\"\"<a class=\"linkpatcher_link\" href=\"../../index.html#linkpatcher_test\">test</a>\"\"\", \"params\":", "def dotest(self, text): plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals( page={}, site_navigation=self.site_navigation) return self.tree_processor.newelem_from_text(text) def test_newelem_from_text(self): text_patterns", "\"/test3.html\" } self.tree_processor.db_keys_re = re.compile(\"|\".join( self.tree_processor.db_value_map.keys())) self.site_navigation.url_context.set_current_url(\"/index.html\") plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals( page={}, site_navigation=self.site_navigation) def", "dotest(self, current_url, link_to, text): self.site_navigation.url_context.set_current_url(current_url) self.tree_processor = LinkPatcherTreeProcessor() self.tree_processor.db_value_map = {} self.tree_processor.db_value_map[text] =", "class TestInsertPatchedLink(LinkPatcherExtensionTestBase): def setUp(self): super(TestInsertPatchedLink, self).setUp() self.tree_processor.db_value_map = { \"text1\": \"/test1.html\", \"text2\": \"/test2.html\",", "\"param\": \":: !あ, い\", \"expectation\": { \"element\": \"\"\" <h2 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h2> \"\"\", \"db\":", "config, nav import linkpatcher.plugin as plugin from linkpatcher.extension import (LinkPatcherTreeProcessor, LinkPathcerInlineProcessor) try: from", "= pattern[\"expectation\"] self.assertEqual(text, expectation[0]) self.assertEqual( list(map(lambda e: etree.tostring(e).decode(), elems)), expectation[1]) class TestInsertPatchedLink(LinkPatcherExtensionTestBase): def", "self.tree_processor.db_keys_re = re.compile(\"|\".join( self.tree_processor.db_value_map.keys())) def dotest(self, current_url, link_to, text): self.site_navigation.url_context.set_current_url(current_url) self.tree_processor = LinkPatcherTreeProcessor()", "text2 efgh text3 hijk </p>\"\"\" expectation = \"\"\"<p id=\"0\"> <p id=\"1\"> <a class=\"linkpatcher_link\"", "= plugin.LinkPatcherGlobals( page=page, site_navigation=self.site_navigation) def test_handleMatch(self): test_patterns = [{ \"param\": \": あ\", \"expectation\":", "from mkdocs import config, nav import linkpatcher.plugin as plugin from linkpatcher.extension import (LinkPatcherTreeProcessor,", "text1 acd text2 text3\", \"expectation\": [ \"abcabc \", [ \"\"\"<a class=\"linkpatcher_link\" href=\"./test1.html\">text1</a> acd", "import etree from mkdocs import config, nav import linkpatcher.plugin as plugin from linkpatcher.extension", "as plugin from linkpatcher.extension import (LinkPatcherTreeProcessor, LinkPathcerInlineProcessor) try: from urllib.parse import quote except", "'Home': 'index.md' }, { 'testpage': 'nest/nest.md' }] }) self.site_navigation = 
nav.SiteNavigation(self.conf) self.site_navigation.file_context.set_current_path('nest/nest.md') class", "'/index.html#linkpatcher_test', \"text\": \"test\" } }, { \"expectation\": \"\"\"<a class=\"linkpatcher_link\" href=\"./index.html#linkpatcher_test\">あいうえお</a>\"\"\", \"params\": { \"current_url\":", "etree.fromstring(elemstr) self.tree_processor.run(elem) self.assertEqual( etree.tostring(elem, encoding='utf-8').decode(), expectation) class LinkPatcherExtensionTest(markdown.Extension): def extendMarkdown(self, md, md_globals): md.inlinePatterns['linkpatcher']", "quote except ImportError: from urllib import quote class LinkPatcherExtensionTestBase(unittest.TestCase): def setUp(self): self.maxDiff =", "% quote(\"あ\".encode('utf-8')) }] } }, { \"param\": \"::: あ\", \"expectation\": { \"element\": \"\"\"", "{ \"param\": \"::: あ\", \"expectation\": { \"element\": \"\"\" <h3 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h3> \"\"\", \"db\":", "'text': 'あ', 'link': '//#linkpatcher_%s' % quote(\"あ\".encode('utf-8')) }] } }, { \"param\": \":::::: あ\",", "import unittest import markdown from markdown.util import etree from mkdocs import config, nav", "'い', 'link': '//#linkpatcher_%s' % quote('あ'.encode('utf-8')) }] }, }, { \"param\": \":: !あ, い\",", "nav.SiteNavigation(self.conf) self.site_navigation.file_context.set_current_path('nest/nest.md') class TestMakeAnchor(LinkPatcherExtensionTestBase): def setUp(self): super(TestMakeAnchor, self).setUp() self.tree_processor.db_value_map = { \"text1\": \"/test1.html\",", "\"text2\": \"/test2.html\", \"text3\": \"/test3.html\" } self.tree_processor.db_keys_re = re.compile(\"|\".join( self.tree_processor.db_value_map.keys())) def dotest(self, current_url, link_to,", "def dotest(self, current_url, link_to, text): self.site_navigation.url_context.set_current_url(current_url) self.tree_processor = LinkPatcherTreeProcessor() self.tree_processor.db_value_map = {} self.tree_processor.db_value_map[text]", "self.assertEqual( etree.tostring(test_result, encoding='unicode'), pattern['expectation']) except LookupError: self.assertEqual( etree.tostring(test_result, encoding='utf-8'), pattern['expectation'].encode('utf-8')) class TestNewElemFromText(LinkPatcherExtensionTestBase): def", "in text_patterns: text, elems = self.dotest(pattern[\"param\"]) expectation = pattern[\"expectation\"] self.assertEqual(text, expectation[0]) self.assertEqual( list(map(lambda", "= re.compile(\"|\".join( self.tree_processor.db_value_map.keys())) def dotest(self, text): plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals( page={}, site_navigation=self.site_navigation) return self.tree_processor.newelem_from_text(text)", "class=\"linkpatcher_link\" href=\"./test1.html\">text1</a> abcd <a class=\"linkpatcher_link\" href=\"./test2.html\">text2</a> efgh <a class=\"linkpatcher_link\" href=\"./test3.html\">text3</a> hijk </p>\"\"\" elem", "<a class=\"linkpatcher_link\" href=\"./test3.html\">text3</a> 9999 </p> <a class=\"linkpatcher_link\" href=\"./test1.html\">text1</a> abcd <a class=\"linkpatcher_link\" href=\"./test2.html\">text2</a> efgh", "い\", \"expectation\": { \"element\": \"\"\" <h2 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h2> \"\"\", \"db\": [{ 'text': 'い',", "<h6 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h6> \"\"\", \"db\": [{ 'text': 'あ', 'link': '//#linkpatcher_%s' % quote(\"あ\".encode('utf-8')) }]", "self.assertEqual( list(map(lambda e: etree.tostring(e).decode(), elems)), expectation[1]) class 
TestInsertPatchedLink(LinkPatcherExtensionTestBase): def setUp(self): super(TestInsertPatchedLink, self).setUp() self.tree_processor.db_value_map", "'//#linkpatcher_%s' % quote(\"あ\".encode('utf-8')) }] } }, { \"param\": \"::: あ\", \"expectation\": { \"element\":", "class=\"linkpatcher_link\" href=\"./index.html#linkpatcher_test\">あいうえお</a>\"\"\", \"params\": { \"current_url\": \"/index.html\", \"link_to\": '/index.html#linkpatcher_test', \"text\": \"あいうえお\" } }] for", "<p><a class=\"linkpatcher_link\" href=\"./test1.html\">text1</a> text2text2 <a class=\"linkpatcher_link\" href=\"./test2.html\">text2</a> <a class=\"linkpatcher_link\" href=\"./test3.html\">text3</a>.</p> </p>\"\"\" elem =", "class=\"linkpatcher_link\" href=\"./test2.html\">text2</a> efgh <a class=\"linkpatcher_link\" href=\"./test3.html\">text3</a> hijk </p>\"\"\" elem = etree.fromstring(elemstr) self.tree_processor.insert_patchedlink(elem) self.assertEqual(etree.tostring(elem).decode(),", "\"text\": \"text1\", \"link\": \"/test1.html\" }, { \"text\": \"text2\", \"link\": \"/test2.html\" }, { \"text\":", "expectation = \"\"\"<p> <p><a class=\"linkpatcher_link\" href=\"./test1.html\">text1</a> text2text2 <a class=\"linkpatcher_link\" href=\"./test2.html\">text2</a> <a class=\"linkpatcher_link\" href=\"./test3.html\">text3</a>.</p>", "self).setUp() self.site_navigation.url_context.set_current_url(\"/index.html\") plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals( page={}, site_navigation=self.site_navigation) plugin.TABLE.insert_multiple([{ \"text\": \"text1\", \"link\": \"/test1.html\" },", "{ \"current_url\": \"/nest/nest/index.html\", \"link_to\": '/index.html#linkpatcher_test', \"text\": \"test\" } }, { \"expectation\": \"\"\"<a class=\"linkpatcher_link\"", "abcd text2 efgh text3 hijk </p>\"\"\" expectation = \"\"\"<p id=\"0\"> <p id=\"1\"> <a", "{ \"element\": \"\"\" <h2 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h2> \"\"\", \"db\": [{ 'text': 'あ', 'link': '//#linkpatcher_%s'", "\"\"\", \"\"\"<a class=\"linkpatcher_link\" href=\"./test3.html\">text3</a>\"\"\" ] ] }] for pattern in text_patterns: text, elems", "\":: あ, い\", \"expectation\": { \"element\": \"\"\" <h2 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h2> \"\"\", \"db\": [{", "self.tree_processor.db_value_map = { \"text1\": \"/test1.html\", \"text2\": \"/test2.html\", \"text3\": \"/test3.html\" } self.tree_processor.db_keys_re = re.compile(\"|\".join(", "expectation = \"\"\"<p id=\"0\"> <p id=\"1\"> <a class=\"linkpatcher_link\" href=\"./test1.html\">text1</a> 0123 <a class=\"linkpatcher_link\" href=\"./test2.html\">text2</a>", "\"\"\"<a class=\"linkpatcher_link\" href=\"./index.html#linkpatcher_test\">あいうえお</a>\"\"\", \"params\": { \"current_url\": \"/index.html\", \"link_to\": '/index.html#linkpatcher_test', \"text\": \"あいうえお\" } }]", "href=\"./test3.html\">text3</a>\"\"\" ] ] }] for pattern in text_patterns: text, elems = self.dotest(pattern[\"param\"]) expectation", "{ \"element\": \"\"\" <h3 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h3> \"\"\", \"db\": [{ 'text': 'あ', 'link': '//#linkpatcher_%s'", "self.tree_processor.db_value_map = {} self.tree_processor.db_value_map[text] = link_to plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals( page={}, site_navigation=self.site_navigation) return self.tree_processor.make_anchor(text)", "self.tree_processor = LinkPatcherTreeProcessor() self.conf = config.Config(schema=config.DEFAULT_SCHEMA) self.conf.load_dict({ \"pages\": [{ 'Home': 'index.md' }, {", "\"あいうえお\" 
} }] for pattern in test_patterns: test_result = self.dotest(**pattern[\"params\"]) try: self.assertEqual( etree.tostring(test_result,", "title='', path='', url_context=nav.URLContext(), config=self.conf) plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals( page=page, site_navigation=self.site_navigation) def test_handleMatch(self): test_patterns =", "import markdown from markdown.util import etree from mkdocs import config, nav import linkpatcher.plugin", "}, { \"param\": \":::::: あ\", \"expectation\": { \"element\": \"\"\" <h6 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h6> \"\"\",", "= \"\"\"<p id=\"0\"> <p id=\"1\"> <a class=\"linkpatcher_link\" href=\"./test1.html\">text1</a> 0123 <a class=\"linkpatcher_link\" href=\"./test2.html\">text2</a> 4567", "}] } }, { \"param\": \":::::: あ\", \"expectation\": { \"element\": \"\"\" <h6 class=\"linkpatcher\"", "[{ \"param\": \": あ\", \"expectation\": { \"element\": \": あ\", \"db\": [] } },", "{ \"element\": \": あ\", \"db\": [] } }, { \"param\": \":: あ\", \"expectation\":", "[{ 'text': 'あ', 'link': '//#linkpatcher_%s' % quote(\"あ\".encode('utf-8')) }, { 'text': 'い', 'link': '//#linkpatcher_%s'", "} }, { \"param\": \":::: あ\", \"expectation\": { \"element\": \"\"\" <h4 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h4>", "text3 hijk </p>\"\"\" expectation = \"\"\"<p id=\"0\"> <p id=\"1\"> <a class=\"linkpatcher_link\" href=\"./test1.html\">text1</a> 0123", "def test_insert_patchedlink(self): elemstr = \"\"\"<p id=\"0\"> <p id=\"1\"> text1 0123 text2 4567 text3", "\"link\": \"/test1.html\" }, { \"text\": \"text2\", \"link\": \"/test2.html\" }, { \"text\": \"text3\", \"link\":", "test_patterns = [{ \"param\": \": あ\", \"expectation\": { \"element\": \": あ\", \"db\": []", "}] } }, { \"param\": \":: !あ\", \"expectation\": { \"element\": \"\"\" <h2 class=\"linkpatcher\"", "\"\"\" <h2 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h2> \"\"\", \"db\": [] } }, { \"param\": \":: あ,", "text): plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals( page={}, site_navigation=self.site_navigation) return self.tree_processor.newelem_from_text(text) def test_newelem_from_text(self): text_patterns = [{", "\"\"\"<p> <p>text1 text2text2 text2 text3.</p> </p>\"\"\" expectation = \"\"\"<p> <p><a class=\"linkpatcher_link\" href=\"./test1.html\">text1</a> text2text2", "import quote class LinkPatcherExtensionTestBase(unittest.TestCase): def setUp(self): self.maxDiff = None self.tree_processor = LinkPatcherTreeProcessor() self.conf", "= self.dotest(**pattern[\"params\"]) try: self.assertEqual( etree.tostring(test_result, encoding='unicode'), pattern['expectation']) except LookupError: self.assertEqual( etree.tostring(test_result, encoding='utf-8'), pattern['expectation'].encode('utf-8'))", "dotest(self, text): plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals( page={}, site_navigation=self.site_navigation) return self.tree_processor.newelem_from_text(text) def test_newelem_from_text(self): text_patterns =", "}, }] for test_pattern in test_patterns: plugin.TABLE.purge() result = self.md.convert(test_pattern[\"param\"]) self.assertEqual( result, \"\"\"<p>%s</p>\"\"\"", "\"text\": \"test\" } }, { \"expectation\": \"\"\"<a class=\"linkpatcher_link\" href=\"./index.html#linkpatcher_test\">あいうえお</a>\"\"\", \"params\": { \"current_url\": \"/index.html\",", "md_globals): md.inlinePatterns['linkpatcher'] = LinkPathcerInlineProcessor( LinkPathcerInlineProcessor.pattern) class TestHandleMatch(LinkPatcherExtensionTestBase): def 
setUp(self): super(TestHandleMatch, self).setUp() self.inline_processor =", "</p> text1 abcd text2 efgh text3 hijk </p>\"\"\" expectation = \"\"\"<p id=\"0\"> <p", "self.inline_processor = LinkPathcerInlineProcessor( LinkPathcerInlineProcessor.pattern) self.md = markdown.Markdown(extensions=[LinkPatcherExtensionTest()]) page = nav.Page( title='', path='', url_context=nav.URLContext(),", "'nest/nest.md' }] }) self.site_navigation = nav.SiteNavigation(self.conf) self.site_navigation.file_context.set_current_path('nest/nest.md') class TestMakeAnchor(LinkPatcherExtensionTestBase): def setUp(self): super(TestMakeAnchor, self).setUp()", "<a class=\"linkpatcher_link\" href=\"./test3.html\">text3</a>.</p> </p>\"\"\" elem = etree.fromstring(elemstr) self.tree_processor.run(elem) self.assertEqual( etree.tostring(elem, encoding='utf-8').decode(), expectation) class", "LinkPathcerInlineProcessor.pattern) self.md = markdown.Markdown(extensions=[LinkPatcherExtensionTest()]) page = nav.Page( title='', path='', url_context=nav.URLContext(), config=self.conf) plugin.linkpatcher_plugin_globals =", "\"abcabc acd\", \"expectation\": [\"abcabc acd\", []] }, { \"param\": \"abcabc text1 acd text2", "% quote('あ'.encode('utf-8')) }] }, }, { \"param\": \":: !あ, い\", \"expectation\": { \"element\":", "</p> <a class=\"linkpatcher_link\" href=\"./test1.html\">text1</a> abcd <a class=\"linkpatcher_link\" href=\"./test2.html\">text2</a> efgh <a class=\"linkpatcher_link\" href=\"./test3.html\">text3</a> hijk", "etree from mkdocs import config, nav import linkpatcher.plugin as plugin from linkpatcher.extension import", "id=\"linkpatcher_%E3%81%82\">あ</h3> \"\"\", \"db\": [{ 'text': 'あ', 'link': '//#linkpatcher_%s' % quote(\"あ\".encode('utf-8')) }] } },", "class=\"linkpatcher_link\" href=\"./test3.html\">text3</a> hijk </p>\"\"\" elem = etree.fromstring(elemstr) self.tree_processor.insert_patchedlink(elem) self.assertEqual(etree.tostring(elem).decode(), expectation) class TestRun(LinkPatcherExtensionTestBase): def", "'link': '//#linkpatcher_%s' % quote(\"あ\".encode('utf-8')) }] } }, { \"param\": \":::: あ\", \"expectation\": {", "\"text2\", \"link\": \"/test2.html\" }, { \"text\": \"text3\", \"link\": \"/test3.html\" }]) def test_run(self): elemstr", "\"expectation\": { \"element\": \": あ\", \"db\": [] } }, { \"param\": \":: あ\",", "\"element\": \"\"\" <h4 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h4> \"\"\", \"db\": [{ 'text': 'あ', 'link': '//#linkpatcher_%s' %", "plugin.LinkPatcherGlobals( page=page, site_navigation=self.site_navigation) def test_handleMatch(self): test_patterns = [{ \"param\": \": あ\", \"expectation\": {", "re.compile(\"|\".join( self.tree_processor.db_value_map.keys())) def dotest(self, current_url, link_to, text): self.site_navigation.url_context.set_current_url(current_url) self.tree_processor = LinkPatcherTreeProcessor() self.tree_processor.db_value_map =", "plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals( page={}, site_navigation=self.site_navigation) plugin.TABLE.insert_multiple([{ \"text\": \"text1\", \"link\": \"/test1.html\" }, { \"text\":", "quote(\"あ\".encode('utf-8')) }] } }, { \"param\": \"::::: あ\", \"expectation\": { \"element\": \"\"\" <h5", "{ 'text': 'い', 'link': '//#linkpatcher_%s' % quote('あ'.encode('utf-8')) }] }, }, { \"param\": \"::", "<h2 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h2> \"\"\", \"db\": [{ 'text': 'あ', 'link': '//#linkpatcher_%s' % quote(\"あ\".encode('utf-8')) },", "\"param\": \":: あ\", \"expectation\": { \"element\": \"\"\" <h2 
class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h2> \"\"\", \"db\": [{", "\"param\": \":::::: あ\", \"expectation\": { \"element\": \"\"\" <h6 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h6> \"\"\", \"db\": [{", "expectation[1]) class TestInsertPatchedLink(LinkPatcherExtensionTestBase): def setUp(self): super(TestInsertPatchedLink, self).setUp() self.tree_processor.db_value_map = { \"text1\": \"/test1.html\", \"text2\":", "site_navigation=self.site_navigation) def test_handleMatch(self): test_patterns = [{ \"param\": \": あ\", \"expectation\": { \"element\": \":", "super(TestMakeAnchor, self).setUp() self.tree_processor.db_value_map = { \"text1\": \"/test1.html\", \"text2\": \"/test2.html\", \"text3\": \"/test3.html\" } self.tree_processor.db_keys_re", "linkpatcher.extension import (LinkPatcherTreeProcessor, LinkPathcerInlineProcessor) try: from urllib.parse import quote except ImportError: from urllib", "self.assertEqual(text, expectation[0]) self.assertEqual( list(map(lambda e: etree.tostring(e).decode(), elems)), expectation[1]) class TestInsertPatchedLink(LinkPatcherExtensionTestBase): def setUp(self): super(TestInsertPatchedLink,", "\"link_to\": '/index.html#linkpatcher_test', \"text\": \"test\" } }, { \"expectation\": \"\"\"<a class=\"linkpatcher_link\" href=\"./index.html#linkpatcher_test\">あいうえお</a>\"\"\", \"params\": {", "setUp(self): super(TestNewElemFromText, self).setUp() self.tree_processor.db_value_map = { \"text1\": \"/test1.html\", \"text2\": \"/test2.html\", \"text3\": \"/test3.html\" }", "text, elems = self.dotest(pattern[\"param\"]) expectation = pattern[\"expectation\"] self.assertEqual(text, expectation[0]) self.assertEqual( list(map(lambda e: etree.tostring(e).decode(),", "self.tree_processor.db_value_map.keys())) def dotest(self, current_url, link_to, text): self.site_navigation.url_context.set_current_url(current_url) self.tree_processor = LinkPatcherTreeProcessor() self.tree_processor.db_value_map = {}", "[\"abcabc acd\", []] }, { \"param\": \"abcabc text1 acd text2 text3\", \"expectation\": [", "= \"\"\"<p id=\"0\"> <p id=\"1\"> text1 0123 text2 4567 text3 8901 <a id=\"2\">text2</a>", "self.conf.load_dict({ \"pages\": [{ 'Home': 'index.md' }, { 'testpage': 'nest/nest.md' }] }) self.site_navigation =", "\"text2\": \"/test2.html\", \"text3\": \"/test3.html\" } self.tree_processor.db_keys_re = re.compile(\"|\".join( self.tree_processor.db_value_map.keys())) def dotest(self, text): plugin.linkpatcher_plugin_globals", "] }] for pattern in text_patterns: text, elems = self.dotest(pattern[\"param\"]) expectation = pattern[\"expectation\"]", "text_patterns: text, elems = self.dotest(pattern[\"param\"]) expectation = pattern[\"expectation\"] self.assertEqual(text, expectation[0]) self.assertEqual( list(map(lambda e:", "page={}, site_navigation=self.site_navigation) return self.tree_processor.newelem_from_text(text) def test_newelem_from_text(self): text_patterns = [{ \"param\": \"\", \"expectation\": [\"\",", "% quote(\"あ\".encode('utf-8')) }] } }, { \"param\": \"::::: あ\", \"expectation\": { \"element\": \"\"\"", "!あ, い\", \"expectation\": { \"element\": \"\"\" <h2 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h2> \"\"\", \"db\": [{ 'text':", "= plugin.LinkPatcherGlobals( page={}, site_navigation=self.site_navigation) return self.tree_processor.newelem_from_text(text) def test_newelem_from_text(self): text_patterns = [{ \"param\": \"\",", "\"/test3.html\" } self.tree_processor.db_keys_re = re.compile(\"|\".join( 
self.tree_processor.db_value_map.keys())) def dotest(self, text): plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals( page={},", "link_to plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals( page={}, site_navigation=self.site_navigation) return self.tree_processor.make_anchor(text) def test_make_anchor(self): test_patterns = [{", "text3\", \"expectation\": [ \"abcabc \", [ \"\"\"<a class=\"linkpatcher_link\" href=\"./test1.html\">text1</a> acd \"\"\", \"\"\"<a class=\"linkpatcher_link\"", "}, { \"param\": \"abcabc text1 acd text2 text3\", \"expectation\": [ \"abcabc \", [", "\": あ\", \"db\": [] } }, { \"param\": \":: あ\", \"expectation\": { \"element\":", "\"\"\" <h5 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h5> \"\"\", \"db\": [{ 'text': 'あ', 'link': '//#linkpatcher_%s' % quote(\"あ\".encode('utf-8'))", "{ \"param\": \"abcabc text1 acd text2 text3\", \"expectation\": [ \"abcabc \", [ \"\"\"<a", "}, { \"param\": \"::: あ\", \"expectation\": { \"element\": \"\"\" <h3 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h3> \"\"\",", "\"text1\", \"link\": \"/test1.html\" }, { \"text\": \"text2\", \"link\": \"/test2.html\" }, { \"text\": \"text3\",", "'link': '//#linkpatcher_%s' % quote(\"あ\".encode('utf-8')) }] } }, { \"param\": \":: !あ\", \"expectation\": {", "elems)), expectation[1]) class TestInsertPatchedLink(LinkPatcherExtensionTestBase): def setUp(self): super(TestInsertPatchedLink, self).setUp() self.tree_processor.db_value_map = { \"text1\": \"/test1.html\",", "\"::: あ\", \"expectation\": { \"element\": \"\"\" <h3 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h3> \"\"\", \"db\": [{ 'text':", "test_pattern in test_patterns: plugin.TABLE.purge() result = self.md.convert(test_pattern[\"param\"]) self.assertEqual( result, \"\"\"<p>%s</p>\"\"\" % test_pattern[\"expectation\"]['element']) self.assertEqual(plugin.TABLE.all(),", "\"expectation\": [ \"abcabc \", [ \"\"\"<a class=\"linkpatcher_link\" href=\"./test1.html\">text1</a> acd \"\"\", \"\"\"<a class=\"linkpatcher_link\" href=\"./test2.html\">text2</a>", "expectation) class TestRun(LinkPatcherExtensionTestBase): def setUp(self): super(TestRun, self).setUp() self.site_navigation.url_context.set_current_url(\"/index.html\") plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals( page={}, site_navigation=self.site_navigation)", "9999 </p> text1 abcd text2 efgh text3 hijk </p>\"\"\" expectation = \"\"\"<p id=\"0\">", "}] } }, { \"param\": \"::: あ\", \"expectation\": { \"element\": \"\"\" <h3 class=\"linkpatcher\"", "{} self.tree_processor.db_value_map[text] = link_to plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals( page={}, site_navigation=self.site_navigation) return self.tree_processor.make_anchor(text) def test_make_anchor(self):", "href=\"./test2.html\">text2</a> efgh <a class=\"linkpatcher_link\" href=\"./test3.html\">text3</a> hijk </p>\"\"\" elem = etree.fromstring(elemstr) self.tree_processor.insert_patchedlink(elem) self.assertEqual(etree.tostring(elem).decode(), expectation)", "0123 text2 4567 text3 8901 <a id=\"2\">text2</a> text3 9999 </p> text1 abcd text2", "setUp(self): super(TestInsertPatchedLink, self).setUp() self.tree_processor.db_value_map = { \"text1\": \"/test1.html\", \"text2\": \"/test2.html\", \"text3\": \"/test3.html\" }", "quote(\"あ\".encode('utf-8')) }] } }, { \"param\": \":: !あ\", \"expectation\": { \"element\": \"\"\" <h2", "plugin.LinkPatcherGlobals( page={}, site_navigation=self.site_navigation) return self.tree_processor.newelem_from_text(text) def 
test_newelem_from_text(self): text_patterns = [{ \"param\": \"\", \"expectation\":", "href=\"../../index.html#linkpatcher_test\">test</a>\"\"\", \"params\": { \"current_url\": \"/nest/nest/index.html\", \"link_to\": '/index.html#linkpatcher_test', \"text\": \"test\" } }, { \"expectation\":", "\"\"\", \"db\": [{ 'text': 'い', 'link': '//#linkpatcher_%s' % quote('あ'.encode('utf-8')) }] }, }] for", "{ \"text\": \"text3\", \"link\": \"/test3.html\" }]) def test_run(self): elemstr = \"\"\"<p> <p>text1 text2text2", "markdown from markdown.util import etree from mkdocs import config, nav import linkpatcher.plugin as", "self.tree_processor.db_keys_re = re.compile(\"|\".join( self.tree_processor.db_value_map.keys())) def dotest(self, text): plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals( page={}, site_navigation=self.site_navigation) return", "page={}, site_navigation=self.site_navigation) plugin.TABLE.insert_multiple([{ \"text\": \"text1\", \"link\": \"/test1.html\" }, { \"text\": \"text2\", \"link\": \"/test2.html\"", "{ 'testpage': 'nest/nest.md' }] }) self.site_navigation = nav.SiteNavigation(self.conf) self.site_navigation.file_context.set_current_path('nest/nest.md') class TestMakeAnchor(LinkPatcherExtensionTestBase): def setUp(self):", "class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h2> \"\"\", \"db\": [] } }, { \"param\": \":: あ, い\", \"expectation\":", "encoding='utf-8').decode(), expectation) class LinkPatcherExtensionTest(markdown.Extension): def extendMarkdown(self, md, md_globals): md.inlinePatterns['linkpatcher'] = LinkPathcerInlineProcessor( LinkPathcerInlineProcessor.pattern) class", "elemstr = \"\"\"<p id=\"0\"> <p id=\"1\"> text1 0123 text2 4567 text3 8901 <a", "= [{ \"expectation\": \"\"\"<a class=\"linkpatcher_link\" href=\"../../index.html#linkpatcher_test\">test</a>\"\"\", \"params\": { \"current_url\": \"/nest/nest/index.html\", \"link_to\": '/index.html#linkpatcher_test', \"text\":", "}, { \"expectation\": \"\"\"<a class=\"linkpatcher_link\" href=\"./index.html#linkpatcher_test\">あいうえお</a>\"\"\", \"params\": { \"current_url\": \"/index.html\", \"link_to\": '/index.html#linkpatcher_test', \"text\":", "[]] }, { \"param\": \"abcabc text1 acd text2 text3\", \"expectation\": [ \"abcabc \",", "def setUp(self): super(TestRun, self).setUp() self.site_navigation.url_context.set_current_url(\"/index.html\") plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals( page={}, site_navigation=self.site_navigation) plugin.TABLE.insert_multiple([{ \"text\": \"text1\",", "\"/test1.html\", \"text2\": \"/test2.html\", \"text3\": \"/test3.html\" } self.tree_processor.db_keys_re = re.compile(\"|\".join( self.tree_processor.db_value_map.keys())) def dotest(self, current_url,", "elem = etree.fromstring(elemstr) self.tree_processor.run(elem) self.assertEqual( etree.tostring(elem, encoding='utf-8').decode(), expectation) class LinkPatcherExtensionTest(markdown.Extension): def extendMarkdown(self, md,", "= re.compile(\"|\".join( self.tree_processor.db_value_map.keys())) self.site_navigation.url_context.set_current_url(\"/index.html\") plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals( page={}, site_navigation=self.site_navigation) def test_insert_patchedlink(self): elemstr =", "LinkPatcherExtensionTestBase(unittest.TestCase): def setUp(self): self.maxDiff = None self.tree_processor = LinkPatcherTreeProcessor() self.conf = config.Config(schema=config.DEFAULT_SCHEMA) self.conf.load_dict({", "= {} self.tree_processor.db_value_map[text] = link_to 
plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals( page={}, site_navigation=self.site_navigation) return self.tree_processor.make_anchor(text) def", "\"param\": \"::::: あ\", \"expectation\": { \"element\": \"\"\" <h5 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h5> \"\"\", \"db\": [{", "<a class=\"linkpatcher_link\" href=\"./test1.html\">text1</a> 0123 <a class=\"linkpatcher_link\" href=\"./test2.html\">text2</a> 4567 <a class=\"linkpatcher_link\" href=\"./test3.html\">text3</a> 8901 <a", "plugin.LinkPatcherGlobals( page={}, site_navigation=self.site_navigation) plugin.TABLE.insert_multiple([{ \"text\": \"text1\", \"link\": \"/test1.html\" }, { \"text\": \"text2\", \"link\":", "= LinkPatcherTreeProcessor() self.tree_processor.db_value_map = {} self.tree_processor.db_value_map[text] = link_to plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals( page={}, site_navigation=self.site_navigation)", "import linkpatcher.plugin as plugin from linkpatcher.extension import (LinkPatcherTreeProcessor, LinkPathcerInlineProcessor) try: from urllib.parse import", "'//#linkpatcher_%s' % quote('あ'.encode('utf-8')) }] }, }, { \"param\": \":: !あ, い\", \"expectation\": {", "[\"\", []] }, { \"param\": \"abcabc acd\", \"expectation\": [\"abcabc acd\", []] }, {", "expectation[0]) self.assertEqual( list(map(lambda e: etree.tostring(e).decode(), elems)), expectation[1]) class TestInsertPatchedLink(LinkPatcherExtensionTestBase): def setUp(self): super(TestInsertPatchedLink, self).setUp()", "<h2 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h2> \"\"\", \"db\": [] } }, { \"param\": \":: あ, い\",", "\"\"\"<a class=\"linkpatcher_link\" href=\"./test2.html\">text2</a> \"\"\", \"\"\"<a class=\"linkpatcher_link\" href=\"./test3.html\">text3</a>\"\"\" ] ] }] for pattern in", "super(TestInsertPatchedLink, self).setUp() self.tree_processor.db_value_map = { \"text1\": \"/test1.html\", \"text2\": \"/test2.html\", \"text3\": \"/test3.html\" } self.tree_processor.db_keys_re", "'text': 'あ', 'link': '//#linkpatcher_%s' % quote(\"あ\".encode('utf-8')) }, { 'text': 'い', 'link': '//#linkpatcher_%s' %", "\"\"\", \"db\": [{ 'text': 'あ', 'link': '//#linkpatcher_%s' % quote(\"あ\".encode('utf-8')) }] } }, {", "<a class=\"linkpatcher_link\" href=\"./test2.html\">text2</a> efgh <a class=\"linkpatcher_link\" href=\"./test3.html\">text3</a> hijk </p>\"\"\" elem = etree.fromstring(elemstr) self.tree_processor.insert_patchedlink(elem)", "LinkPatcherExtensionTest(markdown.Extension): def extendMarkdown(self, md, md_globals): md.inlinePatterns['linkpatcher'] = LinkPathcerInlineProcessor( LinkPathcerInlineProcessor.pattern) class TestHandleMatch(LinkPatcherExtensionTestBase): def setUp(self):", "class LinkPatcherExtensionTestBase(unittest.TestCase): def setUp(self): self.maxDiff = None self.tree_processor = LinkPatcherTreeProcessor() self.conf = config.Config(schema=config.DEFAULT_SCHEMA)", "from markdown.util import etree from mkdocs import config, nav import linkpatcher.plugin as plugin", "in test_patterns: plugin.TABLE.purge() result = self.md.convert(test_pattern[\"param\"]) self.assertEqual( result, \"\"\"<p>%s</p>\"\"\" % test_pattern[\"expectation\"]['element']) self.assertEqual(plugin.TABLE.all(), test_pattern[\"expectation\"]['db'])", "return self.tree_processor.newelem_from_text(text) def test_newelem_from_text(self): text_patterns = [{ \"param\": \"\", \"expectation\": [\"\", []] },", "'あ', 'link': '//#linkpatcher_%s' % quote(\"あ\".encode('utf-8')) }] } }, { \"param\": \":: !あ\", \"expectation\":", 
"class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h6> \"\"\", \"db\": [{ 'text': 'あ', 'link': '//#linkpatcher_%s' % quote(\"あ\".encode('utf-8')) }] }", "id=\"2\">text2</a> text3 9999 </p> text1 abcd text2 efgh text3 hijk </p>\"\"\" expectation =", "'index.md' }, { 'testpage': 'nest/nest.md' }] }) self.site_navigation = nav.SiteNavigation(self.conf) self.site_navigation.file_context.set_current_path('nest/nest.md') class TestMakeAnchor(LinkPatcherExtensionTestBase):", "</p>\"\"\" expectation = \"\"\"<p id=\"0\"> <p id=\"1\"> <a class=\"linkpatcher_link\" href=\"./test1.html\">text1</a> 0123 <a class=\"linkpatcher_link\"", "md, md_globals): md.inlinePatterns['linkpatcher'] = LinkPathcerInlineProcessor( LinkPathcerInlineProcessor.pattern) class TestHandleMatch(LinkPatcherExtensionTestBase): def setUp(self): super(TestHandleMatch, self).setUp() self.inline_processor", "self.tree_processor.db_value_map.keys())) def dotest(self, text): plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals( page={}, site_navigation=self.site_navigation) return self.tree_processor.newelem_from_text(text) def test_newelem_from_text(self):", "{ \"param\": \":::: あ\", \"expectation\": { \"element\": \"\"\" <h4 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h4> \"\"\", \"db\":", "self.md.convert(test_pattern[\"param\"]) self.assertEqual( result, \"\"\"<p>%s</p>\"\"\" % test_pattern[\"expectation\"]['element']) self.assertEqual(plugin.TABLE.all(), test_pattern[\"expectation\"]['db']) if __name__ == '__main__': unittest.main()", "page={}, site_navigation=self.site_navigation) return self.tree_processor.make_anchor(text) def test_make_anchor(self): test_patterns = [{ \"expectation\": \"\"\"<a class=\"linkpatcher_link\" href=\"../../index.html#linkpatcher_test\">test</a>\"\"\",", "pattern['expectation']) except LookupError: self.assertEqual( etree.tostring(test_result, encoding='utf-8'), pattern['expectation'].encode('utf-8')) class TestNewElemFromText(LinkPatcherExtensionTestBase): def setUp(self): super(TestNewElemFromText, self).setUp()", "\"/test2.html\", \"text3\": \"/test3.html\" } self.tree_processor.db_keys_re = re.compile(\"|\".join( self.tree_processor.db_value_map.keys())) def dotest(self, text): plugin.linkpatcher_plugin_globals =", "setUp(self): self.maxDiff = None self.tree_processor = LinkPatcherTreeProcessor() self.conf = config.Config(schema=config.DEFAULT_SCHEMA) self.conf.load_dict({ \"pages\": [{", "test_insert_patchedlink(self): elemstr = \"\"\"<p id=\"0\"> <p id=\"1\"> text1 0123 text2 4567 text3 8901", "TestNewElemFromText(LinkPatcherExtensionTestBase): def setUp(self): super(TestNewElemFromText, self).setUp() self.tree_processor.db_value_map = { \"text1\": \"/test1.html\", \"text2\": \"/test2.html\", \"text3\":", "\"param\": \"abcabc acd\", \"expectation\": [\"abcabc acd\", []] }, { \"param\": \"abcabc text1 acd", "\"db\": [{ 'text': 'あ', 'link': '//#linkpatcher_%s' % quote(\"あ\".encode('utf-8')) }] } }, { \"param\":", "id=\"linkpatcher_%E3%81%82\">あ</h2> \"\"\", \"db\": [] } }, { \"param\": \":: あ, い\", \"expectation\": {", "except ImportError: from urllib import quote class LinkPatcherExtensionTestBase(unittest.TestCase): def setUp(self): self.maxDiff = None", "\"/test3.html\" } self.tree_processor.db_keys_re = re.compile(\"|\".join( self.tree_processor.db_value_map.keys())) def dotest(self, current_url, link_to, text): self.site_navigation.url_context.set_current_url(current_url) self.tree_processor", "abcd <a class=\"linkpatcher_link\" href=\"./test2.html\">text2</a> efgh <a 
class=\"linkpatcher_link\" href=\"./test3.html\">text3</a> hijk </p>\"\"\" elem = etree.fromstring(elemstr)", "in test_patterns: test_result = self.dotest(**pattern[\"params\"]) try: self.assertEqual( etree.tostring(test_result, encoding='unicode'), pattern['expectation']) except LookupError: self.assertEqual(", "encoding='utf-8'), pattern['expectation'].encode('utf-8')) class TestNewElemFromText(LinkPatcherExtensionTestBase): def setUp(self): super(TestNewElemFromText, self).setUp() self.tree_processor.db_value_map = { \"text1\": \"/test1.html\",", "= None self.tree_processor = LinkPatcherTreeProcessor() self.conf = config.Config(schema=config.DEFAULT_SCHEMA) self.conf.load_dict({ \"pages\": [{ 'Home': 'index.md'", "plugin from linkpatcher.extension import (LinkPatcherTreeProcessor, LinkPathcerInlineProcessor) try: from urllib.parse import quote except ImportError:", "LinkPatcherTreeProcessor() self.conf = config.Config(schema=config.DEFAULT_SCHEMA) self.conf.load_dict({ \"pages\": [{ 'Home': 'index.md' }, { 'testpage': 'nest/nest.md'", "} }, { \"param\": \":: !あ\", \"expectation\": { \"element\": \"\"\" <h2 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h2>", "} }, { \"param\": \":: あ, い\", \"expectation\": { \"element\": \"\"\" <h2 class=\"linkpatcher\"", "text2 text3\", \"expectation\": [ \"abcabc \", [ \"\"\"<a class=\"linkpatcher_link\" href=\"./test1.html\">text1</a> acd \"\"\", \"\"\"<a", "id=\"1\"> text1 0123 text2 4567 text3 8901 <a id=\"2\">text2</a> text3 9999 </p> text1", "\"expectation\": { \"element\": \"\"\" <h2 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h2> \"\"\", \"db\": [] } }, {", "nav import linkpatcher.plugin as plugin from linkpatcher.extension import (LinkPatcherTreeProcessor, LinkPathcerInlineProcessor) try: from urllib.parse", "} }, { \"param\": \"::::: あ\", \"expectation\": { \"element\": \"\"\" <h5 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h5>", "\"::::: あ\", \"expectation\": { \"element\": \"\"\" <h5 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h5> \"\"\", \"db\": [{ 'text':", "elem = etree.fromstring(elemstr) self.tree_processor.insert_patchedlink(elem) self.assertEqual(etree.tostring(elem).decode(), expectation) class TestRun(LinkPatcherExtensionTestBase): def setUp(self): super(TestRun, self).setUp() self.site_navigation.url_context.set_current_url(\"/index.html\")", "LinkPathcerInlineProcessor) try: from urllib.parse import quote except ImportError: from urllib import quote class", "TestRun(LinkPatcherExtensionTestBase): def setUp(self): super(TestRun, self).setUp() self.site_navigation.url_context.set_current_url(\"/index.html\") plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals( page={}, site_navigation=self.site_navigation) plugin.TABLE.insert_multiple([{ \"text\":", "href=\"./test1.html\">text1</a> abcd <a class=\"linkpatcher_link\" href=\"./test2.html\">text2</a> efgh <a class=\"linkpatcher_link\" href=\"./test3.html\">text3</a> hijk </p>\"\"\" elem =", "\"expectation\": { \"element\": \"\"\" <h3 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h3> \"\"\", \"db\": [{ 'text': 'あ', 'link':", "<a id=\"2\">text2</a> text3 9999 </p> text1 abcd text2 efgh text3 hijk </p>\"\"\" expectation", "\"text3\": \"/test3.html\" } self.tree_processor.db_keys_re = re.compile(\"|\".join( self.tree_processor.db_value_map.keys())) def dotest(self, text): plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals(", "self.tree_processor.db_value_map.keys())) 
self.site_navigation.url_context.set_current_url(\"/index.html\") plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals( page={}, site_navigation=self.site_navigation) def test_insert_patchedlink(self): elemstr = \"\"\"<p id=\"0\">", "}] for test_pattern in test_patterns: plugin.TABLE.purge() result = self.md.convert(test_pattern[\"param\"]) self.assertEqual( result, \"\"\"<p>%s</p>\"\"\" %", "<p id=\"1\"> text1 0123 text2 4567 text3 8901 <a id=\"2\">text2</a> text3 9999 </p>", "hijk </p>\"\"\" expectation = \"\"\"<p id=\"0\"> <p id=\"1\"> <a class=\"linkpatcher_link\" href=\"./test1.html\">text1</a> 0123 <a", "{ \"element\": \"\"\" <h2 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h2> \"\"\", \"db\": [{ 'text': 'い', 'link': '//#linkpatcher_%s'", "= plugin.LinkPatcherGlobals( page={}, site_navigation=self.site_navigation) def test_insert_patchedlink(self): elemstr = \"\"\"<p id=\"0\"> <p id=\"1\"> text1", "{ \"param\": \"abcabc acd\", \"expectation\": [\"abcabc acd\", []] }, { \"param\": \"abcabc text1", "text2text2 <a class=\"linkpatcher_link\" href=\"./test2.html\">text2</a> <a class=\"linkpatcher_link\" href=\"./test3.html\">text3</a>.</p> </p>\"\"\" elem = etree.fromstring(elemstr) self.tree_processor.run(elem) self.assertEqual(", "<a class=\"linkpatcher_link\" href=\"./test3.html\">text3</a> hijk </p>\"\"\" elem = etree.fromstring(elemstr) self.tree_processor.insert_patchedlink(elem) self.assertEqual(etree.tostring(elem).decode(), expectation) class TestRun(LinkPatcherExtensionTestBase):", "'text': 'あ', 'link': '//#linkpatcher_%s' % quote(\"あ\".encode('utf-8')) }] } }, { \"param\": \"::::: あ\",", "import (LinkPatcherTreeProcessor, LinkPathcerInlineProcessor) try: from urllib.parse import quote except ImportError: from urllib import", "self.tree_processor.db_keys_re = re.compile(\"|\".join( self.tree_processor.db_value_map.keys())) self.site_navigation.url_context.set_current_url(\"/index.html\") plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals( page={}, site_navigation=self.site_navigation) def test_insert_patchedlink(self): elemstr", "id=\"0\"> <p id=\"1\"> text1 0123 text2 4567 text3 8901 <a id=\"2\">text2</a> text3 9999", "href=\"./index.html#linkpatcher_test\">あいうえお</a>\"\"\", \"params\": { \"current_url\": \"/index.html\", \"link_to\": '/index.html#linkpatcher_test', \"text\": \"あいうえお\" } }] for pattern", "あ\", \"expectation\": { \"element\": \"\"\" <h6 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h6> \"\"\", \"db\": [{ 'text': 'あ',", "from urllib import quote class LinkPatcherExtensionTestBase(unittest.TestCase): def setUp(self): self.maxDiff = None self.tree_processor =", "self).setUp() self.inline_processor = LinkPathcerInlineProcessor( LinkPathcerInlineProcessor.pattern) self.md = markdown.Markdown(extensions=[LinkPatcherExtensionTest()]) page = nav.Page( title='', path='',", "[{ 'text': 'あ', 'link': '//#linkpatcher_%s' % quote(\"あ\".encode('utf-8')) }] } }, { \"param\": \":::::", "setUp(self): super(TestMakeAnchor, self).setUp() self.tree_processor.db_value_map = { \"text1\": \"/test1.html\", \"text2\": \"/test2.html\", \"text3\": \"/test3.html\" }", "= LinkPatcherTreeProcessor() self.conf = config.Config(schema=config.DEFAULT_SCHEMA) self.conf.load_dict({ \"pages\": [{ 'Home': 'index.md' }, { 'testpage':", "4567 <a class=\"linkpatcher_link\" href=\"./test3.html\">text3</a> 8901 <a id=\"2\">text2</a> <a class=\"linkpatcher_link\" href=\"./test3.html\">text3</a> 9999 </p> <a", "\"text2\": \"/test2.html\", \"text3\": \"/test3.html\" } 
self.tree_processor.db_keys_re = re.compile(\"|\".join( self.tree_processor.db_value_map.keys())) self.site_navigation.url_context.set_current_url(\"/index.html\") plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals(", "efgh <a class=\"linkpatcher_link\" href=\"./test3.html\">text3</a> hijk </p>\"\"\" elem = etree.fromstring(elemstr) self.tree_processor.insert_patchedlink(elem) self.assertEqual(etree.tostring(elem).decode(), expectation) class", "[{ \"expectation\": \"\"\"<a class=\"linkpatcher_link\" href=\"../../index.html#linkpatcher_test\">test</a>\"\"\", \"params\": { \"current_url\": \"/nest/nest/index.html\", \"link_to\": '/index.html#linkpatcher_test', \"text\": \"test\"", "\"text1\": \"/test1.html\", \"text2\": \"/test2.html\", \"text3\": \"/test3.html\" } self.tree_processor.db_keys_re = re.compile(\"|\".join( self.tree_processor.db_value_map.keys())) self.site_navigation.url_context.set_current_url(\"/index.html\") plugin.linkpatcher_plugin_globals", "{ \"param\": \":: !あ, い\", \"expectation\": { \"element\": \"\"\" <h2 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h2> \"\"\",", "plugin.LinkPatcherGlobals( page={}, site_navigation=self.site_navigation) def test_insert_patchedlink(self): elemstr = \"\"\"<p id=\"0\"> <p id=\"1\"> text1 0123", "\"\"\", \"db\": [] } }, { \"param\": \":: あ, い\", \"expectation\": { \"element\":", "nav.Page( title='', path='', url_context=nav.URLContext(), config=self.conf) plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals( page=page, site_navigation=self.site_navigation) def test_handleMatch(self): test_patterns", "coding: utf-8 from __future__ import unicode_literals import re import unittest import markdown from", "\"\"\" <h4 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h4> \"\"\", \"db\": [{ 'text': 'あ', 'link': '//#linkpatcher_%s' % quote(\"あ\".encode('utf-8'))", "\"link\": \"/test2.html\" }, { \"text\": \"text3\", \"link\": \"/test3.html\" }]) def test_run(self): elemstr =", "page = nav.Page( title='', path='', url_context=nav.URLContext(), config=self.conf) plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals( page=page, site_navigation=self.site_navigation) def", "\"element\": \"\"\" <h5 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h5> \"\"\", \"db\": [{ 'text': 'あ', 'link': '//#linkpatcher_%s' %", "'//#linkpatcher_%s' % quote(\"あ\".encode('utf-8')) }] } }, { \"param\": \":::: あ\", \"expectation\": { \"element\":", "'//#linkpatcher_%s' % quote(\"あ\".encode('utf-8')) }] } }, { \"param\": \":: !あ\", \"expectation\": { \"element\":", "{ \"element\": \"\"\" <h2 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h2> \"\"\", \"db\": [] } }, { \"param\":", "= etree.fromstring(elemstr) self.tree_processor.run(elem) self.assertEqual( etree.tostring(elem, encoding='utf-8').decode(), expectation) class LinkPatcherExtensionTest(markdown.Extension): def extendMarkdown(self, md, md_globals):", "'link': '//#linkpatcher_%s' % quote('あ'.encode('utf-8')) }] }, }, { \"param\": \":: !あ, い\", \"expectation\":", "[] } }, { \"param\": \":: あ\", \"expectation\": { \"element\": \"\"\" <h2 class=\"linkpatcher\"", "}, { \"param\": \"::::: あ\", \"expectation\": { \"element\": \"\"\" <h5 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h5> \"\"\",", "'text': 'い', 'link': '//#linkpatcher_%s' % quote('あ'.encode('utf-8')) }] }, }, { \"param\": \":: !あ,", "id=\"linkpatcher_%E3%81%82\">あ</h2> \"\"\", \"db\": [{ 'text': 'い', 'link': '//#linkpatcher_%s' % quote('あ'.encode('utf-8')) }] }, }]", 
"plugin.TABLE.insert_multiple([{ \"text\": \"text1\", \"link\": \"/test1.html\" }, { \"text\": \"text2\", \"link\": \"/test2.html\" }, {", "LinkPatcherTreeProcessor() self.tree_processor.db_value_map = {} self.tree_processor.db_value_map[text] = link_to plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals( page={}, site_navigation=self.site_navigation) return", "linkpatcher.plugin as plugin from linkpatcher.extension import (LinkPatcherTreeProcessor, LinkPathcerInlineProcessor) try: from urllib.parse import quote", "md.inlinePatterns['linkpatcher'] = LinkPathcerInlineProcessor( LinkPathcerInlineProcessor.pattern) class TestHandleMatch(LinkPatcherExtensionTestBase): def setUp(self): super(TestHandleMatch, self).setUp() self.inline_processor = LinkPathcerInlineProcessor(", "\"element\": \"\"\" <h2 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h2> \"\"\", \"db\": [{ 'text': 'い', 'link': '//#linkpatcher_%s' %", "\"/test2.html\", \"text3\": \"/test3.html\" } self.tree_processor.db_keys_re = re.compile(\"|\".join( self.tree_processor.db_value_map.keys())) self.site_navigation.url_context.set_current_url(\"/index.html\") plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals( page={},", "{ \"text\": \"text2\", \"link\": \"/test2.html\" }, { \"text\": \"text3\", \"link\": \"/test3.html\" }]) def", "'text': 'い', 'link': '//#linkpatcher_%s' % quote('あ'.encode('utf-8')) }] }, }] for test_pattern in test_patterns:", "self.md = markdown.Markdown(extensions=[LinkPatcherExtensionTest()]) page = nav.Page( title='', path='', url_context=nav.URLContext(), config=self.conf) plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals(", "self.assertEqual( etree.tostring(test_result, encoding='utf-8'), pattern['expectation'].encode('utf-8')) class TestNewElemFromText(LinkPatcherExtensionTestBase): def setUp(self): super(TestNewElemFromText, self).setUp() self.tree_processor.db_value_map = {", "'い', 'link': '//#linkpatcher_%s' % quote('あ'.encode('utf-8')) }] }, }] for test_pattern in test_patterns: plugin.TABLE.purge()", "= markdown.Markdown(extensions=[LinkPatcherExtensionTest()]) page = nav.Page( title='', path='', url_context=nav.URLContext(), config=self.conf) plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals( page=page,", "self.tree_processor.make_anchor(text) def test_make_anchor(self): test_patterns = [{ \"expectation\": \"\"\"<a class=\"linkpatcher_link\" href=\"../../index.html#linkpatcher_test\">test</a>\"\"\", \"params\": { \"current_url\":", "<h4 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h4> \"\"\", \"db\": [{ 'text': 'あ', 'link': '//#linkpatcher_%s' % quote(\"あ\".encode('utf-8')) }]", "= self.md.convert(test_pattern[\"param\"]) self.assertEqual( result, \"\"\"<p>%s</p>\"\"\" % test_pattern[\"expectation\"]['element']) self.assertEqual(plugin.TABLE.all(), test_pattern[\"expectation\"]['db']) if __name__ == '__main__':", "\"expectation\": { \"element\": \"\"\" <h6 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h6> \"\"\", \"db\": [{ 'text': 'あ', 'link':", "'text': 'あ', 'link': '//#linkpatcher_%s' % quote(\"あ\".encode('utf-8')) }] } }, { \"param\": \"::: あ\",", "= nav.SiteNavigation(self.conf) self.site_navigation.file_context.set_current_path('nest/nest.md') class TestMakeAnchor(LinkPatcherExtensionTestBase): def setUp(self): super(TestMakeAnchor, self).setUp() self.tree_processor.db_value_map = { \"text1\":", "site_navigation=self.site_navigation) plugin.TABLE.insert_multiple([{ \"text\": \"text1\", \"link\": \"/test1.html\" }, { \"text\": 
\"text2\", \"link\": \"/test2.html\" },", "\"\"\"<p> <p><a class=\"linkpatcher_link\" href=\"./test1.html\">text1</a> text2text2 <a class=\"linkpatcher_link\" href=\"./test2.html\">text2</a> <a class=\"linkpatcher_link\" href=\"./test3.html\">text3</a>.</p> </p>\"\"\" elem", "あ\", \"expectation\": { \"element\": \"\"\" <h3 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h3> \"\"\", \"db\": [{ 'text': 'あ',", "}] } }, { \"param\": \"::::: あ\", \"expectation\": { \"element\": \"\"\" <h5 class=\"linkpatcher\"", "[{ 'text': 'あ', 'link': '//#linkpatcher_%s' % quote(\"あ\".encode('utf-8')) }] } }, { \"param\": \"::", "pattern in text_patterns: text, elems = self.dotest(pattern[\"param\"]) expectation = pattern[\"expectation\"] self.assertEqual(text, expectation[0]) self.assertEqual(", "id=\"linkpatcher_%E3%81%82\">あ</h2> \"\"\", \"db\": [{ 'text': 'あ', 'link': '//#linkpatcher_%s' % quote(\"あ\".encode('utf-8')) }, { 'text':", "\"link\": \"/test3.html\" }]) def test_run(self): elemstr = \"\"\"<p> <p>text1 text2text2 text2 text3.</p> </p>\"\"\"", "super(TestNewElemFromText, self).setUp() self.tree_processor.db_value_map = { \"text1\": \"/test1.html\", \"text2\": \"/test2.html\", \"text3\": \"/test3.html\" } self.tree_processor.db_keys_re", "path='', url_context=nav.URLContext(), config=self.conf) plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals( page=page, site_navigation=self.site_navigation) def test_handleMatch(self): test_patterns = [{", "\"text\": \"text3\", \"link\": \"/test3.html\" }]) def test_run(self): elemstr = \"\"\"<p> <p>text1 text2text2 text2", "\"expectation\": [\"\", []] }, { \"param\": \"abcabc acd\", \"expectation\": [\"abcabc acd\", []] },", "\"text3\": \"/test3.html\" } self.tree_processor.db_keys_re = re.compile(\"|\".join( self.tree_processor.db_value_map.keys())) self.site_navigation.url_context.set_current_url(\"/index.html\") plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals( page={}, site_navigation=self.site_navigation)", "<a id=\"2\">text2</a> <a class=\"linkpatcher_link\" href=\"./test3.html\">text3</a> 9999 </p> <a class=\"linkpatcher_link\" href=\"./test1.html\">text1</a> abcd <a class=\"linkpatcher_link\"", "\"element\": \"\"\" <h2 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h2> \"\"\", \"db\": [{ 'text': 'あ', 'link': '//#linkpatcher_%s' %", "test_patterns = [{ \"expectation\": \"\"\"<a class=\"linkpatcher_link\" href=\"../../index.html#linkpatcher_test\">test</a>\"\"\", \"params\": { \"current_url\": \"/nest/nest/index.html\", \"link_to\": '/index.html#linkpatcher_test',", "\"\"\" <h2 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h2> \"\"\", \"db\": [{ 'text': 'あ', 'link': '//#linkpatcher_%s' % quote(\"あ\".encode('utf-8'))", "[{ 'text': 'い', 'link': '//#linkpatcher_%s' % quote('あ'.encode('utf-8')) }] }, }] for test_pattern in", "href=\"./test3.html\">text3</a> 8901 <a id=\"2\">text2</a> <a class=\"linkpatcher_link\" href=\"./test3.html\">text3</a> 9999 </p> <a class=\"linkpatcher_link\" href=\"./test1.html\">text1</a> abcd", "class TestHandleMatch(LinkPatcherExtensionTestBase): def setUp(self): super(TestHandleMatch, self).setUp() self.inline_processor = LinkPathcerInlineProcessor( LinkPathcerInlineProcessor.pattern) self.md = markdown.Markdown(extensions=[LinkPatcherExtensionTest()])", "\"abcabc text1 acd text2 text3\", \"expectation\": [ \"abcabc \", [ \"\"\"<a class=\"linkpatcher_link\" href=\"./test1.html\">text1</a>", "\"text\": \"あいうえお\" } }] for pattern in test_patterns: test_result = 
self.dotest(**pattern[\"params\"]) try: self.assertEqual(", "{ \"param\": \":: あ\", \"expectation\": { \"element\": \"\"\" <h2 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h2> \"\"\", \"db\":", "class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h2> \"\"\", \"db\": [{ 'text': 'あ', 'link': '//#linkpatcher_%s' % quote(\"あ\".encode('utf-8')) }, {", "}] for pattern in test_patterns: test_result = self.dotest(**pattern[\"params\"]) try: self.assertEqual( etree.tostring(test_result, encoding='unicode'), pattern['expectation'])", "hijk </p>\"\"\" elem = etree.fromstring(elemstr) self.tree_processor.insert_patchedlink(elem) self.assertEqual(etree.tostring(elem).decode(), expectation) class TestRun(LinkPatcherExtensionTestBase): def setUp(self): super(TestRun,", "} }, { \"param\": \":::::: あ\", \"expectation\": { \"element\": \"\"\" <h6 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h6>", "\"params\": { \"current_url\": \"/index.html\", \"link_to\": '/index.html#linkpatcher_test', \"text\": \"あいうえお\" } }] for pattern in", "plugin.linkpatcher_plugin_globals = plugin.LinkPatcherGlobals( page=page, site_navigation=self.site_navigation) def test_handleMatch(self): test_patterns = [{ \"param\": \": あ\",", "text): self.site_navigation.url_context.set_current_url(current_url) self.tree_processor = LinkPatcherTreeProcessor() self.tree_processor.db_value_map = {} self.tree_processor.db_value_map[text] = link_to plugin.linkpatcher_plugin_globals =", "LookupError: self.assertEqual( etree.tostring(test_result, encoding='utf-8'), pattern['expectation'].encode('utf-8')) class TestNewElemFromText(LinkPatcherExtensionTestBase): def setUp(self): super(TestNewElemFromText, self).setUp() self.tree_processor.db_value_map =", "LinkPathcerInlineProcessor( LinkPathcerInlineProcessor.pattern) class TestHandleMatch(LinkPatcherExtensionTestBase): def setUp(self): super(TestHandleMatch, self).setUp() self.inline_processor = LinkPathcerInlineProcessor( LinkPathcerInlineProcessor.pattern) self.md", "urllib import quote class LinkPatcherExtensionTestBase(unittest.TestCase): def setUp(self): self.maxDiff = None self.tree_processor = LinkPatcherTreeProcessor()", "test_patterns: plugin.TABLE.purge() result = self.md.convert(test_pattern[\"param\"]) self.assertEqual( result, \"\"\"<p>%s</p>\"\"\" % test_pattern[\"expectation\"]['element']) self.assertEqual(plugin.TABLE.all(), test_pattern[\"expectation\"]['db']) if", "self.dotest(pattern[\"param\"]) expectation = pattern[\"expectation\"] self.assertEqual(text, expectation[0]) self.assertEqual( list(map(lambda e: etree.tostring(e).decode(), elems)), expectation[1]) class", "{ \"param\": \"::::: あ\", \"expectation\": { \"element\": \"\"\" <h5 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h5> \"\"\", \"db\":", "}, { \"text\": \"text2\", \"link\": \"/test2.html\" }, { \"text\": \"text3\", \"link\": \"/test3.html\" }])", "super(TestHandleMatch, self).setUp() self.inline_processor = LinkPathcerInlineProcessor( LinkPathcerInlineProcessor.pattern) self.md = markdown.Markdown(extensions=[LinkPatcherExtensionTest()]) page = nav.Page( title='',", "{ \"element\": \"\"\" <h6 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h6> \"\"\", \"db\": [{ 'text': 'あ', 'link': '//#linkpatcher_%s'", "\"expectation\": { \"element\": \"\"\" <h2 class=\"linkpatcher\" id=\"linkpatcher_%E3%81%82\">あ</h2> \"\"\", \"db\": [{ 'text': 'い', 'link':", "TestInsertPatchedLink(LinkPatcherExtensionTestBase): def setUp(self): super(TestInsertPatchedLink, self).setUp() 
[ngram row, continued: overlapping 13-token shingles of test/test_extension.py — the unittest suite for the linkpatcher mkdocs Markdown extension (LinkPatcherTreeProcessor, LinkPathcerInlineProcessor), covering LinkPatcherExtensionTestBase, TestMakeAnchor, TestNewElemFromText, TestInsertPatchedLink, TestRun, TestHandleMatch and a LinkPatcherExtensionTest markdown.Extension wrapper]
[ "your name? ') # ask for their name myName = input() print(f'It is", "name print('Hello, world!') print('What is your name? ') # ask for their name", "print(f'It is good to meet you, {myName}!') print(f'The length of your name is:", "This program says hello and asks for my name print('Hello, world!') print('What is", "age?') #ask for their age myAge = input() print('You will be ' +", "name is: {len(myName)}') print('What is your age?') #ask for their age myAge =", "and asks for my name print('Hello, world!') print('What is your name? ') #", "their name myName = input() print(f'It is good to meet you, {myName}!') print(f'The", "for their name myName = input() print(f'It is good to meet you, {myName}!')", "you, {myName}!') print(f'The length of your name is: {len(myName)}') print('What is your age?')", "ask for their name myName = input() print(f'It is good to meet you,", "world!') print('What is your name? ') # ask for their name myName =", "for their age myAge = input() print('You will be ' + str(int(myAge) +", "my name print('Hello, world!') print('What is your name? ') # ask for their", "asks for my name print('Hello, world!') print('What is your name? ') # ask", "good to meet you, {myName}!') print(f'The length of your name is: {len(myName)}') print('What", "= input() print(f'It is good to meet you, {myName}!') print(f'The length of your", "myName = input() print(f'It is good to meet you, {myName}!') print(f'The length of", "{len(myName)}') print('What is your age?') #ask for their age myAge = input() print('You", "= input() print('You will be ' + str(int(myAge) + 1) + ' in", "# ask for their name myName = input() print(f'It is good to meet", "print('You will be ' + str(int(myAge) + 1) + ' in a year.')", "age myAge = input() print('You will be ' + str(int(myAge) + 1) +", "name? ') # ask for their name myName = input() print(f'It is good", "input() print(f'It is good to meet you, {myName}!') print(f'The length of your name", "says hello and asks for my name print('Hello, world!') print('What is your name?", "your name is: {len(myName)}') print('What is your age?') #ask for their age myAge", "{myName}!') print(f'The length of your name is: {len(myName)}') print('What is your age?') #ask", "is: {len(myName)}') print('What is your age?') #ask for their age myAge = input()", "myAge = input() print('You will be ' + str(int(myAge) + 1) + '", "is good to meet you, {myName}!') print(f'The length of your name is: {len(myName)}')", "is your age?') #ask for their age myAge = input() print('You will be", "input() print('You will be ' + str(int(myAge) + 1) + ' in a", "# This program says hello and asks for my name print('Hello, world!') print('What", "name myName = input() print(f'It is good to meet you, {myName}!') print(f'The length", "program says hello and asks for my name print('Hello, world!') print('What is your", "hello and asks for my name print('Hello, world!') print('What is your name? ')", "for my name print('Hello, world!') print('What is your name? ') # ask for", "print(f'The length of your name is: {len(myName)}') print('What is your age?') #ask for", "their age myAge = input() print('You will be ' + str(int(myAge) + 1)", "print('Hello, world!') print('What is your name? 
') # ask for their name myName", "length of your name is: {len(myName)}') print('What is your age?') #ask for their", "meet you, {myName}!') print(f'The length of your name is: {len(myName)}') print('What is your", "of your name is: {len(myName)}') print('What is your age?') #ask for their age", "to meet you, {myName}!') print(f'The length of your name is: {len(myName)}') print('What is", "is your name? ') # ask for their name myName = input() print(f'It", "') # ask for their name myName = input() print(f'It is good to", "print('What is your age?') #ask for their age myAge = input() print('You will", "your age?') #ask for their age myAge = input() print('You will be '", "print('What is your name? ') # ask for their name myName = input()", "#ask for their age myAge = input() print('You will be ' + str(int(myAge)" ]
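Because the shingles in the row above overlap by all but one token, the underlying text can be stitched back together. Read in order, they reproduce the following short script in full; only the line breaks are inferred, since whitespace is lost in the tokenization.

    # This program says hello and asks for my name
    print('Hello, world!')
    print('What is your name? ')    # ask for their name
    myName = input()
    print(f'It is good to meet you, {myName}!')
    print(f'The length of your name is: {len(myName)}')
    print('What is your age?')    #ask for their age
    myAge = input()
    print('You will be ' + str(int(myAge) + 1) + ' in a year.')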
[ "'Zaloguj się | Administracja stroną Django' in self.driver.title def take_screenshot(driver, name): time.sleep(1) os.makedirs(os.path.join('screenshot',", "self.driver.title def take_screenshot(driver, name): time.sleep(1) os.makedirs(os.path.join('screenshot', os.path.dirname(name)), exist_ok=True) driver.save_screenshot(os.path.join('screenshot', name)) @pytest.mark.usefixtures('driver_init') class Screenshot:", "take_screenshot(driver, name): time.sleep(1) os.makedirs(os.path.join('screenshot', os.path.dirname(name)), exist_ok=True) driver.save_screenshot(os.path.join('screenshot', name)) @pytest.mark.usefixtures('driver_init') class Screenshot: def screenshot_admin(self,", "os import time import pytest @pytest.mark.usefixtures('driver_init') class TestUrlChrome: def test_open_url(self, live_server): self.driver.get(f'{live_server.url}/admin/') assert", "test_open_url(self, live_server): self.driver.get(f'{live_server.url}/admin/') assert 'Zaloguj się | Administracja stroną Django' in self.driver.title def", "stroną Django' in self.driver.title def take_screenshot(driver, name): time.sleep(1) os.makedirs(os.path.join('screenshot', os.path.dirname(name)), exist_ok=True) driver.save_screenshot(os.path.join('screenshot', name))", "TestUrlChrome: def test_open_url(self, live_server): self.driver.get(f'{live_server.url}/admin/') assert 'Zaloguj się | Administracja stroną Django' in", "name): time.sleep(1) os.makedirs(os.path.join('screenshot', os.path.dirname(name)), exist_ok=True) driver.save_screenshot(os.path.join('screenshot', name)) @pytest.mark.usefixtures('driver_init') class Screenshot: def screenshot_admin(self, live_server):", "się | Administracja stroną Django' in self.driver.title def take_screenshot(driver, name): time.sleep(1) os.makedirs(os.path.join('screenshot', os.path.dirname(name)),", "pytest @pytest.mark.usefixtures('driver_init') class TestUrlChrome: def test_open_url(self, live_server): self.driver.get(f'{live_server.url}/admin/') assert 'Zaloguj się | Administracja", "| Administracja stroną Django' in self.driver.title def take_screenshot(driver, name): time.sleep(1) os.makedirs(os.path.join('screenshot', os.path.dirname(name)), exist_ok=True)", "Administracja stroną Django' in self.driver.title def take_screenshot(driver, name): time.sleep(1) os.makedirs(os.path.join('screenshot', os.path.dirname(name)), exist_ok=True) driver.save_screenshot(os.path.join('screenshot',", "import os import time import pytest @pytest.mark.usefixtures('driver_init') class TestUrlChrome: def test_open_url(self, live_server): self.driver.get(f'{live_server.url}/admin/')", "def take_screenshot(driver, name): time.sleep(1) os.makedirs(os.path.join('screenshot', os.path.dirname(name)), exist_ok=True) driver.save_screenshot(os.path.join('screenshot', name)) @pytest.mark.usefixtures('driver_init') class Screenshot: def", "live_server): self.driver.get(f'{live_server.url}/admin/') assert 'Zaloguj się | Administracja stroną Django' in self.driver.title def take_screenshot(driver,", "<reponame>LukaszHoszowski/Django_ProEstate<filename>User/tests/test_selenium.py import os import time import pytest @pytest.mark.usefixtures('driver_init') class TestUrlChrome: def test_open_url(self, live_server):", "assert 'Zaloguj się | Administracja stroną Django' in self.driver.title def take_screenshot(driver, name): time.sleep(1)", "Django' in self.driver.title def take_screenshot(driver, name): time.sleep(1) os.makedirs(os.path.join('screenshot', os.path.dirname(name)), exist_ok=True) 
driver.save_screenshot(os.path.join('screenshot', name)) @pytest.mark.usefixtures('driver_init')", "def screenshot_admin(self, live_server): self.driver.get(f'{live_server.url}/admin/') take_screenshot(self.driver, f'admin/admin_{self.browser}.png') assert 'Zaloguj się | Administracja stroną Django'", "class TestUrlChrome: def test_open_url(self, live_server): self.driver.get(f'{live_server.url}/admin/') assert 'Zaloguj się | Administracja stroną Django'", "in self.driver.title def take_screenshot(driver, name): time.sleep(1) os.makedirs(os.path.join('screenshot', os.path.dirname(name)), exist_ok=True) driver.save_screenshot(os.path.join('screenshot', name)) @pytest.mark.usefixtures('driver_init') class", "driver.save_screenshot(os.path.join('screenshot', name)) @pytest.mark.usefixtures('driver_init') class Screenshot: def screenshot_admin(self, live_server): self.driver.get(f'{live_server.url}/admin/') take_screenshot(self.driver, f'admin/admin_{self.browser}.png') assert 'Zaloguj", "exist_ok=True) driver.save_screenshot(os.path.join('screenshot', name)) @pytest.mark.usefixtures('driver_init') class Screenshot: def screenshot_admin(self, live_server): self.driver.get(f'{live_server.url}/admin/') take_screenshot(self.driver, f'admin/admin_{self.browser}.png') assert", "name)) @pytest.mark.usefixtures('driver_init') class Screenshot: def screenshot_admin(self, live_server): self.driver.get(f'{live_server.url}/admin/') take_screenshot(self.driver, f'admin/admin_{self.browser}.png') assert 'Zaloguj się", "Screenshot: def screenshot_admin(self, live_server): self.driver.get(f'{live_server.url}/admin/') take_screenshot(self.driver, f'admin/admin_{self.browser}.png') assert 'Zaloguj się | Administracja stroną", "live_server): self.driver.get(f'{live_server.url}/admin/') take_screenshot(self.driver, f'admin/admin_{self.browser}.png') assert 'Zaloguj się | Administracja stroną Django' in self.driver.title", "screenshot_admin(self, live_server): self.driver.get(f'{live_server.url}/admin/') take_screenshot(self.driver, f'admin/admin_{self.browser}.png') assert 'Zaloguj się | Administracja stroną Django' in", "@pytest.mark.usefixtures('driver_init') class Screenshot: def screenshot_admin(self, live_server): self.driver.get(f'{live_server.url}/admin/') take_screenshot(self.driver, f'admin/admin_{self.browser}.png') assert 'Zaloguj się |", "time import pytest @pytest.mark.usefixtures('driver_init') class TestUrlChrome: def test_open_url(self, live_server): self.driver.get(f'{live_server.url}/admin/') assert 'Zaloguj się", "class Screenshot: def screenshot_admin(self, live_server): self.driver.get(f'{live_server.url}/admin/') take_screenshot(self.driver, f'admin/admin_{self.browser}.png') assert 'Zaloguj się | Administracja", "def test_open_url(self, live_server): self.driver.get(f'{live_server.url}/admin/') assert 'Zaloguj się | Administracja stroną Django' in self.driver.title", "self.driver.get(f'{live_server.url}/admin/') assert 'Zaloguj się | Administracja stroną Django' in self.driver.title def take_screenshot(driver, name):", "import time import pytest @pytest.mark.usefixtures('driver_init') class TestUrlChrome: def test_open_url(self, live_server): self.driver.get(f'{live_server.url}/admin/') assert 'Zaloguj", "@pytest.mark.usefixtures('driver_init') class TestUrlChrome: def test_open_url(self, live_server): self.driver.get(f'{live_server.url}/admin/') assert 'Zaloguj się | Administracja stroną", "time.sleep(1) os.makedirs(os.path.join('screenshot', os.path.dirname(name)), exist_ok=True) 
driver.save_screenshot(os.path.join('screenshot', name)) @pytest.mark.usefixtures('driver_init') class Screenshot: def screenshot_admin(self, live_server): self.driver.get(f'{live_server.url}/admin/')", "os.makedirs(os.path.join('screenshot', os.path.dirname(name)), exist_ok=True) driver.save_screenshot(os.path.join('screenshot', name)) @pytest.mark.usefixtures('driver_init') class Screenshot: def screenshot_admin(self, live_server): self.driver.get(f'{live_server.url}/admin/') take_screenshot(self.driver,", "import pytest @pytest.mark.usefixtures('driver_init') class TestUrlChrome: def test_open_url(self, live_server): self.driver.get(f'{live_server.url}/admin/') assert 'Zaloguj się |", "os.path.dirname(name)), exist_ok=True) driver.save_screenshot(os.path.join('screenshot', name)) @pytest.mark.usefixtures('driver_init') class Screenshot: def screenshot_admin(self, live_server): self.driver.get(f'{live_server.url}/admin/') take_screenshot(self.driver, f'admin/admin_{self.browser}.png')" ]
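The same stitching applied to the row above recovers User/tests/test_selenium.py from LukaszHoszowski/Django_ProEstate. Line breaks and blank lines are inferred, and the driver_init and live_server fixtures (and the self.driver / self.browser attributes they provide) are assumed to come from that project's test configuration; they are not shown in the row itself.

    import os
    import time

    import pytest


    @pytest.mark.usefixtures('driver_init')
    class TestUrlChrome:
        def test_open_url(self, live_server):
            self.driver.get(f'{live_server.url}/admin/')
            assert 'Zaloguj się | Administracja stroną Django' in self.driver.title


    def take_screenshot(driver, name):
        time.sleep(1)
        os.makedirs(os.path.join('screenshot', os.path.dirname(name)), exist_ok=True)
        driver.save_screenshot(os.path.join('screenshot', name))


    @pytest.mark.usefixtures('driver_init')
    class Screenshot:
        def screenshot_admin(self, live_server):
            self.driver.get(f'{live_server.url}/admin/')
            take_screenshot(self.driver, f'admin/admin_{self.browser}.png')
            assert 'Zaloguj się | Administracja stroną Django' in self.driver.title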
[ "\"# Trace component used in the implementations but not defined in fttrace.h.\" cmpnt", "in SRC_FILE_DIRS: for (p, dlst, flst) in os.walk(d): for f in flst: if", "KNOWN_COMPONENT = {} SRC_FILE_DIRS = [\"src\"] TRACE_DEF_FILES = [\"include/freetype/internal/fttrace.h\"] # -------------------------------------------------------------- # Parse", "src_line in open(src_pathname, 'r'): line_num = line_num + 1 src_line = src_line.strip() if", "is defined twice, see %s and fttrace.h:%d\" % \\ (component_name, KNOWN_COMPONENT[component_name], line_num) else:", "--src-dirs=dir1:dir2:...\" print \" Specify the directories of C source files to be checked\"", "c in cmpnt: if c not in USED_COMPONENT: if c != \"any\": print", "[option]\" % sys.argv[0] print \"Search used-but-defined and defined-but-not-used trace_XXX macros\" print \"\" print", "the directories of C source files to be checked\" print \" Default is", "= \"%s:%d\" % \\ (os.path.basename(f), line_num) # -------------------------------------------------------------- # Compare the used and", "1 src_line = src_line.strip() if trace_use_pat.match(src_line) != None: component_name = trace_use_pat.sub('', src_line) if", "line_num = line_num + 1 hdr_line = hdr_line.strip() if trace_def_pat_opn.match(hdr_line) != None: component_name", "header file(s) defining trace macros. # trace_def_pat_opn = re.compile('^.*FT_TRACE_DEF[ \\t]*\\([ \\t]*') trace_def_pat_cls =", "\" --def-files=file1:file2:...\" print \" Specify the header files including FT_TRACE_DEF()\" print \" Default", "the public domain. import sys import os import re SRC_FILE_LIST = [] USED_COMPONENT", "using trace macros. # c_pathname_pat = re.compile('^.*\\.[ch]$', re.IGNORECASE) trace_use_pat = re.compile('^[ \\t]*#define[ \\t]+FT_COMPONENT[", "print \" Specify the directories of C source files to be checked\" print", "(component_name, KNOWN_COMPONENT[component_name], line_num) else: KNOWN_COMPONENT[component_name] = \"%s:%d\" % \\ (os.path.basename(f), line_num) # --------------------------------------------------------------", "python # # Check trace components in FreeType 2 source. # Author: <NAME>,", "sys.argv[i].startswith(\"--def-files=\"): TRACE_DEF_FILES = sys.argv[i].replace(\"--def-files=\", \"\", 1).split(\":\") # -------------------------------------------------------------- # Scan C source and", "see %s and fttrace.h:%d\" % \\ (component_name, KNOWN_COMPONENT[component_name], line_num) else: KNOWN_COMPONENT[component_name] = \"%s:%d\"", "%s [option]\" % sys.argv[0] print \"Search used-but-defined and defined-but-not-used trace_XXX macros\" print \"\"", "trace components in FreeType 2 source. 
# Author: <NAME>, 2009, 2013 # #", "\"trace component %s is defined twice, see %s and fttrace.h:%d\" % \\ (component_name,", "line_num = 0 for hdr_line in open(f, 'r'): line_num = line_num + 1", "in the implementations but not defined in fttrace.h.\" cmpnt = USED_COMPONENT.keys() cmpnt.sort() for", "component_name = trace_use_pat.sub('', src_line) if component_name in USED_COMPONENT: USED_COMPONENT[component_name].append(\"%s:%d\" % (src_pathname, line_num)) else:", "'r'): line_num = line_num + 1 src_line = src_line.strip() if trace_use_pat.match(src_line) != None:", "\"Usage: %s [option]\" % sys.argv[0] print \"Search used-but-defined and defined-but-not-used trace_XXX macros\" print", "be checked\" print \" Default is %s\" % \":\".join(SRC_FILE_DIRS) print \"\" print \"", "\\t]*\\([ \\t]*') trace_def_pat_cls = re.compile('[ \\t\\)].*$') for f in TRACE_DEF_FILES: line_num = 0", "for c in cmpnt: if c not in USED_COMPONENT: if c != \"any\":", "{} SRC_FILE_DIRS = [\"src\"] TRACE_DEF_FILES = [\"include/freetype/internal/fttrace.h\"] # -------------------------------------------------------------- # Parse command line", "# trace_def_pat_opn = re.compile('^.*FT_TRACE_DEF[ \\t]*\\([ \\t]*') trace_def_pat_cls = re.compile('[ \\t\\)].*$') for f in", "line options # for i in range(1, len(sys.argv)): if sys.argv[i].startswith(\"--help\"): print \"Usage: %s", "USED_COMPONENT.keys() cmpnt.sort() for c in cmpnt: if c not in KNOWN_COMPONENT: print \"Trace", "!= \"any\": print \"Trace component %s (defined in %s) is not used.\" %", "file(s) defining trace macros. # trace_def_pat_opn = re.compile('^.*FT_TRACE_DEF[ \\t]*\\([ \\t]*') trace_def_pat_cls = re.compile('[", "f in TRACE_DEF_FILES: line_num = 0 for hdr_line in open(f, 'r'): line_num =", "elif sys.argv[i].startswith(\"--def-files=\"): TRACE_DEF_FILES = sys.argv[i].replace(\"--def-files=\", \"\", 1).split(\":\") # -------------------------------------------------------------- # Scan C source", "= os.path.join(p, f) line_num = 0 for src_line in open(src_pathname, 'r'): line_num =", "\\ (component_name, KNOWN_COMPONENT[component_name], line_num) else: KNOWN_COMPONENT[component_name] = \"%s:%d\" % \\ (os.path.basename(f), line_num) #", "exit(0) if sys.argv[i].startswith(\"--src-dirs=\"): SRC_FILE_DIRS = sys.argv[i].replace(\"--src-dirs=\", \"\", 1).split(\":\") elif sys.argv[i].startswith(\"--def-files=\"): TRACE_DEF_FILES = sys.argv[i].replace(\"--def-files=\",", "trace_def_pat_opn.match(hdr_line) != None: component_name = trace_def_pat_opn.sub('', hdr_line) component_name = trace_def_pat_cls.sub('', component_name) if component_name", "Trace component used in the implementations but not defined in fttrace.h.\" cmpnt =", "{} KNOWN_COMPONENT = {} SRC_FILE_DIRS = [\"src\"] TRACE_DEF_FILES = [\"include/freetype/internal/fttrace.h\"] # -------------------------------------------------------------- #", "\" --src-dirs=dir1:dir2:...\" print \" Specify the directories of C source files to be", "# Scan header file(s) defining trace macros. # trace_def_pat_opn = re.compile('^.*FT_TRACE_DEF[ \\t]*\\([ \\t]*')", "c in cmpnt: if c not in KNOWN_COMPONENT: print \"Trace component %s (used", "in open(src_pathname, 'r'): line_num = line_num + 1 src_line = src_line.strip() if trace_use_pat.match(src_line)", "\"Search used-but-defined and defined-but-not-used trace_XXX macros\" print \"\" print \" --help:\" print \"", "Scan C source and header files using trace macros. 
# c_pathname_pat = re.compile('^.*\\.[ch]$',", "\" Default is %s\" % \":\".join(TRACE_DEF_FILES) print \"\" exit(0) if sys.argv[i].startswith(\"--src-dirs=\"): SRC_FILE_DIRS =", "<NAME>, 2009, 2013 # # This code is explicitly into the public domain.", "is not defined.\" % (c, \", \".join(USED_COMPONENT[c])) print \"# Trace component is defined", "and defined trace macros. # print \"# Trace component used in the implementations", "checked\" print \" Default is %s\" % \":\".join(SRC_FILE_DIRS) print \"\" print \" --def-files=file1:file2:...\"", "\\ (os.path.basename(f), line_num) # -------------------------------------------------------------- # Compare the used and defined trace macros.", "line_num = line_num + 1 src_line = src_line.strip() if trace_use_pat.match(src_line) != None: component_name", "help\" print \"\" print \" --src-dirs=dir1:dir2:...\" print \" Specify the directories of C", "source files to be checked\" print \" Default is %s\" % \":\".join(SRC_FILE_DIRS) print", "FreeType 2 source. # Author: <NAME>, 2009, 2013 # # This code is", "-------------------------------------------------------------- # Scan header file(s) defining trace macros. # trace_def_pat_opn = re.compile('^.*FT_TRACE_DEF[ \\t]*\\([", "[] USED_COMPONENT = {} KNOWN_COMPONENT = {} SRC_FILE_DIRS = [\"src\"] TRACE_DEF_FILES = [\"include/freetype/internal/fttrace.h\"]", "trace macros. # trace_def_pat_opn = re.compile('^.*FT_TRACE_DEF[ \\t]*\\([ \\t]*') trace_def_pat_cls = re.compile('[ \\t\\)].*$') for", "defined trace macros. # print \"# Trace component used in the implementations but", "line_num + 1 src_line = src_line.strip() if trace_use_pat.match(src_line) != None: component_name = trace_use_pat.sub('',", "len(sys.argv)): if sys.argv[i].startswith(\"--help\"): print \"Usage: %s [option]\" % sys.argv[0] print \"Search used-but-defined and", "% \\ (os.path.basename(f), line_num) # -------------------------------------------------------------- # Compare the used and defined trace", "None: component_name = trace_def_pat_opn.sub('', hdr_line) component_name = trace_def_pat_cls.sub('', component_name) if component_name in KNOWN_COMPONENT:", "%s\" % \":\".join(SRC_FILE_DIRS) print \"\" print \" --def-files=file1:file2:...\" print \" Specify the header", "trace_def_pat_opn = re.compile('^.*FT_TRACE_DEF[ \\t]*\\([ \\t]*') trace_def_pat_cls = re.compile('[ \\t\\)].*$') for f in TRACE_DEF_FILES:", "for hdr_line in open(f, 'r'): line_num = line_num + 1 hdr_line = hdr_line.strip()", "= hdr_line.strip() if trace_def_pat_opn.match(hdr_line) != None: component_name = trace_def_pat_opn.sub('', hdr_line) component_name = trace_def_pat_cls.sub('',", "re.compile('^.*\\.[ch]$', re.IGNORECASE) trace_use_pat = re.compile('^[ \\t]*#define[ \\t]+FT_COMPONENT[ \\t]+trace_') for d in SRC_FILE_DIRS: for", "= [] USED_COMPONENT = {} KNOWN_COMPONENT = {} SRC_FILE_DIRS = [\"src\"] TRACE_DEF_FILES =", "KNOWN_COMPONENT: print \"trace component %s is defined twice, see %s and fttrace.h:%d\" %", "and fttrace.h:%d\" % \\ (component_name, KNOWN_COMPONENT[component_name], line_num) else: KNOWN_COMPONENT[component_name] = \"%s:%d\" % \\", "range(1, len(sys.argv)): if sys.argv[i].startswith(\"--help\"): print \"Usage: %s [option]\" % sys.argv[0] print \"Search used-but-defined", "cmpnt.sort() for c in cmpnt: if c not in USED_COMPONENT: if c !=", "component_name in USED_COMPONENT: USED_COMPONENT[component_name].append(\"%s:%d\" % (src_pathname, line_num)) else: USED_COMPONENT[component_name] = [\"%s:%d\" % (src_pathname,", "%s and fttrace.h:%d\" % \\ 
(component_name, KNOWN_COMPONENT[component_name], line_num) else: KNOWN_COMPONENT[component_name] = \"%s:%d\" %", "Author: <NAME>, 2009, 2013 # # This code is explicitly into the public", "Scan header file(s) defining trace macros. # trace_def_pat_opn = re.compile('^.*FT_TRACE_DEF[ \\t]*\\([ \\t]*') trace_def_pat_cls", "is explicitly into the public domain. import sys import os import re SRC_FILE_LIST", "defined-but-not-used trace_XXX macros\" print \"\" print \" --help:\" print \" Show this help\"", "print \"Usage: %s [option]\" % sys.argv[0] print \"Search used-but-defined and defined-but-not-used trace_XXX macros\"", "(src_pathname, line_num)] # -------------------------------------------------------------- # Scan header file(s) defining trace macros. # trace_def_pat_opn", "C source files to be checked\" print \" Default is %s\" % \":\".join(SRC_FILE_DIRS)", "in TRACE_DEF_FILES: line_num = 0 for hdr_line in open(f, 'r'): line_num = line_num", "import sys import os import re SRC_FILE_LIST = [] USED_COMPONENT = {} KNOWN_COMPONENT", "in os.walk(d): for f in flst: if c_pathname_pat.match(f) != None: src_pathname = os.path.join(p,", "open(src_pathname, 'r'): line_num = line_num + 1 src_line = src_line.strip() if trace_use_pat.match(src_line) !=", "= line_num + 1 hdr_line = hdr_line.strip() if trace_def_pat_opn.match(hdr_line) != None: component_name =", "2013 # # This code is explicitly into the public domain. import sys", "line_num)) else: USED_COMPONENT[component_name] = [\"%s:%d\" % (src_pathname, line_num)] # -------------------------------------------------------------- # Scan header", "line_num) # -------------------------------------------------------------- # Compare the used and defined trace macros. # print", "if sys.argv[i].startswith(\"--help\"): print \"Usage: %s [option]\" % sys.argv[0] print \"Search used-but-defined and defined-but-not-used", "and header files using trace macros. # c_pathname_pat = re.compile('^.*\\.[ch]$', re.IGNORECASE) trace_use_pat =", "1 hdr_line = hdr_line.strip() if trace_def_pat_opn.match(hdr_line) != None: component_name = trace_def_pat_opn.sub('', hdr_line) component_name", "# print \"# Trace component used in the implementations but not defined in", "to be checked\" print \" Default is %s\" % \":\".join(SRC_FILE_DIRS) print \"\" print", "1).split(\":\") # -------------------------------------------------------------- # Scan C source and header files using trace macros.", "\".join(USED_COMPONENT[c])) print \"# Trace component is defined but not used in the implementations.\"", "for f in TRACE_DEF_FILES: line_num = 0 for hdr_line in open(f, 'r'): line_num", "# for i in range(1, len(sys.argv)): if sys.argv[i].startswith(\"--help\"): print \"Usage: %s [option]\" %", "files using trace macros. 
# c_pathname_pat = re.compile('^.*\\.[ch]$', re.IGNORECASE) trace_use_pat = re.compile('^[ \\t]*#define[", "in cmpnt: if c not in USED_COMPONENT: if c != \"any\": print \"Trace", "of C source files to be checked\" print \" Default is %s\" %", "for i in range(1, len(sys.argv)): if sys.argv[i].startswith(\"--help\"): print \"Usage: %s [option]\" % sys.argv[0]", "the implementations but not defined in fttrace.h.\" cmpnt = USED_COMPONENT.keys() cmpnt.sort() for c", "FT_TRACE_DEF()\" print \" Default is %s\" % \":\".join(TRACE_DEF_FILES) print \"\" exit(0) if sys.argv[i].startswith(\"--src-dirs=\"):", "\", \".join(USED_COMPONENT[c])) print \"# Trace component is defined but not used in the", "line_num)] # -------------------------------------------------------------- # Scan header file(s) defining trace macros. # trace_def_pat_opn =", "flst) in os.walk(d): for f in flst: if c_pathname_pat.match(f) != None: src_pathname =", "= 0 for hdr_line in open(f, 'r'): line_num = line_num + 1 hdr_line", "\\t]+FT_COMPONENT[ \\t]+trace_') for d in SRC_FILE_DIRS: for (p, dlst, flst) in os.walk(d): for", "cmpnt: if c not in USED_COMPONENT: if c != \"any\": print \"Trace component", "This code is explicitly into the public domain. import sys import os import", "component is defined but not used in the implementations.\" cmpnt = KNOWN_COMPONENT.keys() cmpnt.sort()", "in the implementations.\" cmpnt = KNOWN_COMPONENT.keys() cmpnt.sort() for c in cmpnt: if c", "USED_COMPONENT: if c != \"any\": print \"Trace component %s (defined in %s) is", "SRC_FILE_DIRS = sys.argv[i].replace(\"--src-dirs=\", \"\", 1).split(\":\") elif sys.argv[i].startswith(\"--def-files=\"): TRACE_DEF_FILES = sys.argv[i].replace(\"--def-files=\", \"\", 1).split(\":\") #", "in open(f, 'r'): line_num = line_num + 1 hdr_line = hdr_line.strip() if trace_def_pat_opn.match(hdr_line)", "is %s\" % \":\".join(TRACE_DEF_FILES) print \"\" exit(0) if sys.argv[i].startswith(\"--src-dirs=\"): SRC_FILE_DIRS = sys.argv[i].replace(\"--src-dirs=\", \"\",", "source and header files using trace macros. # c_pathname_pat = re.compile('^.*\\.[ch]$', re.IGNORECASE) trace_use_pat", "cmpnt = KNOWN_COMPONENT.keys() cmpnt.sort() for c in cmpnt: if c not in USED_COMPONENT:", "os.walk(d): for f in flst: if c_pathname_pat.match(f) != None: src_pathname = os.path.join(p, f)", "KNOWN_COMPONENT: print \"Trace component %s (used in %s) is not defined.\" % (c,", "in fttrace.h.\" cmpnt = USED_COMPONENT.keys() cmpnt.sort() for c in cmpnt: if c not", "\"\", 1).split(\":\") # -------------------------------------------------------------- # Scan C source and header files using trace", "the implementations.\" cmpnt = KNOWN_COMPONENT.keys() cmpnt.sort() for c in cmpnt: if c not", "\"\" print \" --help:\" print \" Show this help\" print \"\" print \"", "# # Check trace components in FreeType 2 source. 
# Author: <NAME>, 2009,", "sys import os import re SRC_FILE_LIST = [] USED_COMPONENT = {} KNOWN_COMPONENT =", "this help\" print \"\" print \" --src-dirs=dir1:dir2:...\" print \" Specify the directories of", "# Parse command line options # for i in range(1, len(sys.argv)): if sys.argv[i].startswith(\"--help\"):", "= re.compile('^.*FT_TRACE_DEF[ \\t]*\\([ \\t]*') trace_def_pat_cls = re.compile('[ \\t\\)].*$') for f in TRACE_DEF_FILES: line_num", "in KNOWN_COMPONENT: print \"trace component %s is defined twice, see %s and fttrace.h:%d\"", "component %s (used in %s) is not defined.\" % (c, \", \".join(USED_COMPONENT[c])) print", "src_line.strip() if trace_use_pat.match(src_line) != None: component_name = trace_use_pat.sub('', src_line) if component_name in USED_COMPONENT:", "defined twice, see %s and fttrace.h:%d\" % \\ (component_name, KNOWN_COMPONENT[component_name], line_num) else: KNOWN_COMPONENT[component_name]", "!= None: component_name = trace_use_pat.sub('', src_line) if component_name in USED_COMPONENT: USED_COMPONENT[component_name].append(\"%s:%d\" % (src_pathname,", "for c in cmpnt: if c not in KNOWN_COMPONENT: print \"Trace component %s", "if trace_use_pat.match(src_line) != None: component_name = trace_use_pat.sub('', src_line) if component_name in USED_COMPONENT: USED_COMPONENT[component_name].append(\"%s:%d\"", "print \" Show this help\" print \"\" print \" --src-dirs=dir1:dir2:...\" print \" Specify", "in KNOWN_COMPONENT: print \"Trace component %s (used in %s) is not defined.\" %", "\\t]*#define[ \\t]+FT_COMPONENT[ \\t]+trace_') for d in SRC_FILE_DIRS: for (p, dlst, flst) in os.walk(d):", "USED_COMPONENT: USED_COMPONENT[component_name].append(\"%s:%d\" % (src_pathname, line_num)) else: USED_COMPONENT[component_name] = [\"%s:%d\" % (src_pathname, line_num)] #", "Compare the used and defined trace macros. # print \"# Trace component used", "files to be checked\" print \" Default is %s\" % \":\".join(SRC_FILE_DIRS) print \"\"", "not defined.\" % (c, \", \".join(USED_COMPONENT[c])) print \"# Trace component is defined but", "# Scan C source and header files using trace macros. # c_pathname_pat =", "# -------------------------------------------------------------- # Parse command line options # for i in range(1, len(sys.argv)):", "% (src_pathname, line_num)) else: USED_COMPONENT[component_name] = [\"%s:%d\" % (src_pathname, line_num)] # -------------------------------------------------------------- #", "c not in KNOWN_COMPONENT: print \"Trace component %s (used in %s) is not", "component_name) if component_name in KNOWN_COMPONENT: print \"trace component %s is defined twice, see", "for d in SRC_FILE_DIRS: for (p, dlst, flst) in os.walk(d): for f in", "re.compile('^[ \\t]*#define[ \\t]+FT_COMPONENT[ \\t]+trace_') for d in SRC_FILE_DIRS: for (p, dlst, flst) in", "\\t]+trace_') for d in SRC_FILE_DIRS: for (p, dlst, flst) in os.walk(d): for f", "# This code is explicitly into the public domain. 
import sys import os", "%s\" % \":\".join(TRACE_DEF_FILES) print \"\" exit(0) if sys.argv[i].startswith(\"--src-dirs=\"): SRC_FILE_DIRS = sys.argv[i].replace(\"--src-dirs=\", \"\", 1).split(\":\")", "\" Show this help\" print \"\" print \" --src-dirs=dir1:dir2:...\" print \" Specify the", "1).split(\":\") elif sys.argv[i].startswith(\"--def-files=\"): TRACE_DEF_FILES = sys.argv[i].replace(\"--def-files=\", \"\", 1).split(\":\") # -------------------------------------------------------------- # Scan C", "%s is defined twice, see %s and fttrace.h:%d\" % \\ (component_name, KNOWN_COMPONENT[component_name], line_num)", "print \"\" print \" --help:\" print \" Show this help\" print \"\" print", "including FT_TRACE_DEF()\" print \" Default is %s\" % \":\".join(TRACE_DEF_FILES) print \"\" exit(0) if", "(src_pathname, line_num)) else: USED_COMPONENT[component_name] = [\"%s:%d\" % (src_pathname, line_num)] # -------------------------------------------------------------- # Scan", "\"Trace component %s (used in %s) is not defined.\" % (c, \", \".join(USED_COMPONENT[c]))", "src_pathname = os.path.join(p, f) line_num = 0 for src_line in open(src_pathname, 'r'): line_num", "[\"src\"] TRACE_DEF_FILES = [\"include/freetype/internal/fttrace.h\"] # -------------------------------------------------------------- # Parse command line options # for", "open(f, 'r'): line_num = line_num + 1 hdr_line = hdr_line.strip() if trace_def_pat_opn.match(hdr_line) !=", "for src_line in open(src_pathname, 'r'): line_num = line_num + 1 src_line = src_line.strip()", "print \"\" print \" --src-dirs=dir1:dir2:...\" print \" Specify the directories of C source", "0 for src_line in open(src_pathname, 'r'): line_num = line_num + 1 src_line =", "Trace component is defined but not used in the implementations.\" cmpnt = KNOWN_COMPONENT.keys()", "(p, dlst, flst) in os.walk(d): for f in flst: if c_pathname_pat.match(f) != None:", "hdr_line = hdr_line.strip() if trace_def_pat_opn.match(hdr_line) != None: component_name = trace_def_pat_opn.sub('', hdr_line) component_name =", "\":\".join(SRC_FILE_DIRS) print \"\" print \" --def-files=file1:file2:...\" print \" Specify the header files including", "domain. 
import sys import os import re SRC_FILE_LIST = [] USED_COMPONENT = {}", "trace_def_pat_cls = re.compile('[ \\t\\)].*$') for f in TRACE_DEF_FILES: line_num = 0 for hdr_line", "in USED_COMPONENT: if c != \"any\": print \"Trace component %s (defined in %s)", "KNOWN_COMPONENT.keys() cmpnt.sort() for c in cmpnt: if c not in USED_COMPONENT: if c", "= re.compile('^.*\\.[ch]$', re.IGNORECASE) trace_use_pat = re.compile('^[ \\t]*#define[ \\t]+FT_COMPONENT[ \\t]+trace_') for d in SRC_FILE_DIRS:", "= sys.argv[i].replace(\"--src-dirs=\", \"\", 1).split(\":\") elif sys.argv[i].startswith(\"--def-files=\"): TRACE_DEF_FILES = sys.argv[i].replace(\"--def-files=\", \"\", 1).split(\":\") # --------------------------------------------------------------", "print \"# Trace component is defined but not used in the implementations.\" cmpnt", "USED_COMPONENT = {} KNOWN_COMPONENT = {} SRC_FILE_DIRS = [\"src\"] TRACE_DEF_FILES = [\"include/freetype/internal/fttrace.h\"] #", "hdr_line.strip() if trace_def_pat_opn.match(hdr_line) != None: component_name = trace_def_pat_opn.sub('', hdr_line) component_name = trace_def_pat_cls.sub('', component_name)", "\"%s:%d\" % \\ (os.path.basename(f), line_num) # -------------------------------------------------------------- # Compare the used and defined", "fttrace.h:%d\" % \\ (component_name, KNOWN_COMPONENT[component_name], line_num) else: KNOWN_COMPONENT[component_name] = \"%s:%d\" % \\ (os.path.basename(f),", "d in SRC_FILE_DIRS: for (p, dlst, flst) in os.walk(d): for f in flst:", "if component_name in KNOWN_COMPONENT: print \"trace component %s is defined twice, see %s", "KNOWN_COMPONENT[component_name] = \"%s:%d\" % \\ (os.path.basename(f), line_num) # -------------------------------------------------------------- # Compare the used", "is defined but not used in the implementations.\" cmpnt = KNOWN_COMPONENT.keys() cmpnt.sort() for", "None: src_pathname = os.path.join(p, f) line_num = 0 for src_line in open(src_pathname, 'r'):", "macros. # print \"# Trace component used in the implementations but not defined", "= sys.argv[i].replace(\"--def-files=\", \"\", 1).split(\":\") # -------------------------------------------------------------- # Scan C source and header files", "# Author: <NAME>, 2009, 2013 # # This code is explicitly into the", "if component_name in USED_COMPONENT: USED_COMPONENT[component_name].append(\"%s:%d\" % (src_pathname, line_num)) else: USED_COMPONENT[component_name] = [\"%s:%d\" %", "TRACE_DEF_FILES = [\"include/freetype/internal/fttrace.h\"] # -------------------------------------------------------------- # Parse command line options # for i", "SRC_FILE_DIRS: for (p, dlst, flst) in os.walk(d): for f in flst: if c_pathname_pat.match(f)", "in flst: if c_pathname_pat.match(f) != None: src_pathname = os.path.join(p, f) line_num = 0", "= {} KNOWN_COMPONENT = {} SRC_FILE_DIRS = [\"src\"] TRACE_DEF_FILES = [\"include/freetype/internal/fttrace.h\"] # --------------------------------------------------------------", "= re.compile('^[ \\t]*#define[ \\t]+FT_COMPONENT[ \\t]+trace_') for d in SRC_FILE_DIRS: for (p, dlst, flst)", "# -------------------------------------------------------------- # Compare the used and defined trace macros. 
# print \"#", "Default is %s\" % \":\".join(SRC_FILE_DIRS) print \"\" print \" --def-files=file1:file2:...\" print \" Specify", "re.compile('[ \\t\\)].*$') for f in TRACE_DEF_FILES: line_num = 0 for hdr_line in open(f,", "!= None: component_name = trace_def_pat_opn.sub('', hdr_line) component_name = trace_def_pat_cls.sub('', component_name) if component_name in", "\" Specify the header files including FT_TRACE_DEF()\" print \" Default is %s\" %", "else: KNOWN_COMPONENT[component_name] = \"%s:%d\" % \\ (os.path.basename(f), line_num) # -------------------------------------------------------------- # Compare the", "trace_def_pat_opn.sub('', hdr_line) component_name = trace_def_pat_cls.sub('', component_name) if component_name in KNOWN_COMPONENT: print \"trace component", "# -------------------------------------------------------------- # Scan C source and header files using trace macros. #", "command line options # for i in range(1, len(sys.argv)): if sys.argv[i].startswith(\"--help\"): print \"Usage:", "src_line) if component_name in USED_COMPONENT: USED_COMPONENT[component_name].append(\"%s:%d\" % (src_pathname, line_num)) else: USED_COMPONENT[component_name] = [\"%s:%d\"", "import os import re SRC_FILE_LIST = [] USED_COMPONENT = {} KNOWN_COMPONENT = {}", "USED_COMPONENT[component_name].append(\"%s:%d\" % (src_pathname, line_num)) else: USED_COMPONENT[component_name] = [\"%s:%d\" % (src_pathname, line_num)] # --------------------------------------------------------------", "= [\"include/freetype/internal/fttrace.h\"] # -------------------------------------------------------------- # Parse command line options # for i in", "src_line = src_line.strip() if trace_use_pat.match(src_line) != None: component_name = trace_use_pat.sub('', src_line) if component_name", "used and defined trace macros. # print \"# Trace component used in the", "print \"\" print \" --def-files=file1:file2:...\" print \" Specify the header files including FT_TRACE_DEF()\"", "macros. # trace_def_pat_opn = re.compile('^.*FT_TRACE_DEF[ \\t]*\\([ \\t]*') trace_def_pat_cls = re.compile('[ \\t\\)].*$') for f", "defined but not used in the implementations.\" cmpnt = KNOWN_COMPONENT.keys() cmpnt.sort() for c", "\"\", 1).split(\":\") elif sys.argv[i].startswith(\"--def-files=\"): TRACE_DEF_FILES = sys.argv[i].replace(\"--def-files=\", \"\", 1).split(\":\") # -------------------------------------------------------------- # Scan", "print \"trace component %s is defined twice, see %s and fttrace.h:%d\" % \\", "USED_COMPONENT[component_name] = [\"%s:%d\" % (src_pathname, line_num)] # -------------------------------------------------------------- # Scan header file(s) defining", "[\"%s:%d\" % (src_pathname, line_num)] # -------------------------------------------------------------- # Scan header file(s) defining trace macros.", "if sys.argv[i].startswith(\"--src-dirs=\"): SRC_FILE_DIRS = sys.argv[i].replace(\"--src-dirs=\", \"\", 1).split(\":\") elif sys.argv[i].startswith(\"--def-files=\"): TRACE_DEF_FILES = sys.argv[i].replace(\"--def-files=\", \"\",", "if c != \"any\": print \"Trace component %s (defined in %s) is not", "# # This code is explicitly into the public domain. import sys import", "0 for hdr_line in open(f, 'r'): line_num = line_num + 1 hdr_line =", "trace macros. 
# c_pathname_pat = re.compile('^.*\\.[ch]$', re.IGNORECASE) trace_use_pat = re.compile('^[ \\t]*#define[ \\t]+FT_COMPONENT[ \\t]+trace_')", "sys.argv[i].startswith(\"--src-dirs=\"): SRC_FILE_DIRS = sys.argv[i].replace(\"--src-dirs=\", \"\", 1).split(\":\") elif sys.argv[i].startswith(\"--def-files=\"): TRACE_DEF_FILES = sys.argv[i].replace(\"--def-files=\", \"\", 1).split(\":\")", "macros. # c_pathname_pat = re.compile('^.*\\.[ch]$', re.IGNORECASE) trace_use_pat = re.compile('^[ \\t]*#define[ \\t]+FT_COMPONENT[ \\t]+trace_') for", "not in USED_COMPONENT: if c != \"any\": print \"Trace component %s (defined in", "= trace_use_pat.sub('', src_line) if component_name in USED_COMPONENT: USED_COMPONENT[component_name].append(\"%s:%d\" % (src_pathname, line_num)) else: USED_COMPONENT[component_name]", "but not defined in fttrace.h.\" cmpnt = USED_COMPONENT.keys() cmpnt.sort() for c in cmpnt:", "not in KNOWN_COMPONENT: print \"Trace component %s (used in %s) is not defined.\"", "= re.compile('[ \\t\\)].*$') for f in TRACE_DEF_FILES: line_num = 0 for hdr_line in", "Specify the header files including FT_TRACE_DEF()\" print \" Default is %s\" % \":\".join(TRACE_DEF_FILES)", "Default is %s\" % \":\".join(TRACE_DEF_FILES) print \"\" exit(0) if sys.argv[i].startswith(\"--src-dirs=\"): SRC_FILE_DIRS = sys.argv[i].replace(\"--src-dirs=\",", "= KNOWN_COMPONENT.keys() cmpnt.sort() for c in cmpnt: if c not in USED_COMPONENT: if", "= [\"%s:%d\" % (src_pathname, line_num)] # -------------------------------------------------------------- # Scan header file(s) defining trace", "\\t\\)].*$') for f in TRACE_DEF_FILES: line_num = 0 for hdr_line in open(f, 'r'):", "twice, see %s and fttrace.h:%d\" % \\ (component_name, KNOWN_COMPONENT[component_name], line_num) else: KNOWN_COMPONENT[component_name] =", "\"# Trace component is defined but not used in the implementations.\" cmpnt =", "-------------------------------------------------------------- # Compare the used and defined trace macros. 
# print \"# Trace", "fttrace.h.\" cmpnt = USED_COMPONENT.keys() cmpnt.sort() for c in cmpnt: if c not in", "SRC_FILE_DIRS = [\"src\"] TRACE_DEF_FILES = [\"include/freetype/internal/fttrace.h\"] # -------------------------------------------------------------- # Parse command line options", "'r'): line_num = line_num + 1 hdr_line = hdr_line.strip() if trace_def_pat_opn.match(hdr_line) != None:", "!= None: src_pathname = os.path.join(p, f) line_num = 0 for src_line in open(src_pathname,", "= trace_def_pat_opn.sub('', hdr_line) component_name = trace_def_pat_cls.sub('', component_name) if component_name in KNOWN_COMPONENT: print \"trace", "line_num + 1 hdr_line = hdr_line.strip() if trace_def_pat_opn.match(hdr_line) != None: component_name = trace_def_pat_opn.sub('',", "[\"include/freetype/internal/fttrace.h\"] # -------------------------------------------------------------- # Parse command line options # for i in range(1,", "dlst, flst) in os.walk(d): for f in flst: if c_pathname_pat.match(f) != None: src_pathname", "= src_line.strip() if trace_use_pat.match(src_line) != None: component_name = trace_use_pat.sub('', src_line) if component_name in", "= trace_def_pat_cls.sub('', component_name) if component_name in KNOWN_COMPONENT: print \"trace component %s is defined", "component %s is defined twice, see %s and fttrace.h:%d\" % \\ (component_name, KNOWN_COMPONENT[component_name],", "%s (used in %s) is not defined.\" % (c, \", \".join(USED_COMPONENT[c])) print \"#", "hdr_line in open(f, 'r'): line_num = line_num + 1 hdr_line = hdr_line.strip() if", "Show this help\" print \"\" print \" --src-dirs=dir1:dir2:...\" print \" Specify the directories", "files including FT_TRACE_DEF()\" print \" Default is %s\" % \":\".join(TRACE_DEF_FILES) print \"\" exit(0)", "print \" --help:\" print \" Show this help\" print \"\" print \" --src-dirs=dir1:dir2:...\"", "c_pathname_pat.match(f) != None: src_pathname = os.path.join(p, f) line_num = 0 for src_line in", "import re SRC_FILE_LIST = [] USED_COMPONENT = {} KNOWN_COMPONENT = {} SRC_FILE_DIRS =", "sys.argv[i].startswith(\"--help\"): print \"Usage: %s [option]\" % sys.argv[0] print \"Search used-but-defined and defined-but-not-used trace_XXX", "defined in fttrace.h.\" cmpnt = USED_COMPONENT.keys() cmpnt.sort() for c in cmpnt: if c", "sys.argv[i].replace(\"--def-files=\", \"\", 1).split(\":\") # -------------------------------------------------------------- # Scan C source and header files using", "c != \"any\": print \"Trace component %s (defined in %s) is not used.\"", "(used in %s) is not defined.\" % (c, \", \".join(USED_COMPONENT[c])) print \"# Trace", "defined.\" % (c, \", \".join(USED_COMPONENT[c])) print \"# Trace component is defined but not", "print \"Trace component %s (used in %s) is not defined.\" % (c, \",", "print \" Specify the header files including FT_TRACE_DEF()\" print \" Default is %s\"", "not defined in fttrace.h.\" cmpnt = USED_COMPONENT.keys() cmpnt.sort() for c in cmpnt: if", "component_name = trace_def_pat_opn.sub('', hdr_line) component_name = trace_def_pat_cls.sub('', component_name) if component_name in KNOWN_COMPONENT: print", "C source and header files using trace macros. # c_pathname_pat = re.compile('^.*\\.[ch]$', re.IGNORECASE)", "in FreeType 2 source. 
# Author: <NAME>, 2009, 2013 # # This code", "trace_use_pat = re.compile('^[ \\t]*#define[ \\t]+FT_COMPONENT[ \\t]+trace_') for d in SRC_FILE_DIRS: for (p, dlst,", "sys.argv[0] print \"Search used-but-defined and defined-but-not-used trace_XXX macros\" print \"\" print \" --help:\"", "explicitly into the public domain. import sys import os import re SRC_FILE_LIST =", "KNOWN_COMPONENT[component_name], line_num) else: KNOWN_COMPONENT[component_name] = \"%s:%d\" % \\ (os.path.basename(f), line_num) # -------------------------------------------------------------- #", "os import re SRC_FILE_LIST = [] USED_COMPONENT = {} KNOWN_COMPONENT = {} SRC_FILE_DIRS", "cmpnt.sort() for c in cmpnt: if c not in KNOWN_COMPONENT: print \"Trace component", "Specify the directories of C source files to be checked\" print \" Default", "line_num) else: KNOWN_COMPONENT[component_name] = \"%s:%d\" % \\ (os.path.basename(f), line_num) # -------------------------------------------------------------- # Compare", "but not used in the implementations.\" cmpnt = KNOWN_COMPONENT.keys() cmpnt.sort() for c in", "not used in the implementations.\" cmpnt = KNOWN_COMPONENT.keys() cmpnt.sort() for c in cmpnt:", "re SRC_FILE_LIST = [] USED_COMPONENT = {} KNOWN_COMPONENT = {} SRC_FILE_DIRS = [\"src\"]", "components in FreeType 2 source. # Author: <NAME>, 2009, 2013 # # This", "used-but-defined and defined-but-not-used trace_XXX macros\" print \"\" print \" --help:\" print \" Show", "code is explicitly into the public domain. import sys import os import re", "\":\".join(TRACE_DEF_FILES) print \"\" exit(0) if sys.argv[i].startswith(\"--src-dirs=\"): SRC_FILE_DIRS = sys.argv[i].replace(\"--src-dirs=\", \"\", 1).split(\":\") elif sys.argv[i].startswith(\"--def-files=\"):", "source. # Author: <NAME>, 2009, 2013 # # This code is explicitly into", "trace_use_pat.sub('', src_line) if component_name in USED_COMPONENT: USED_COMPONENT[component_name].append(\"%s:%d\" % (src_pathname, line_num)) else: USED_COMPONENT[component_name] =", "TRACE_DEF_FILES = sys.argv[i].replace(\"--def-files=\", \"\", 1).split(\":\") # -------------------------------------------------------------- # Scan C source and header", "hdr_line) component_name = trace_def_pat_cls.sub('', component_name) if component_name in KNOWN_COMPONENT: print \"trace component %s", "% (c, \", \".join(USED_COMPONENT[c])) print \"# Trace component is defined but not used", "= 0 for src_line in open(src_pathname, 'r'): line_num = line_num + 1 src_line", "trace_use_pat.match(src_line) != None: component_name = trace_use_pat.sub('', src_line) if component_name in USED_COMPONENT: USED_COMPONENT[component_name].append(\"%s:%d\" %", "defining trace macros. # trace_def_pat_opn = re.compile('^.*FT_TRACE_DEF[ \\t]*\\([ \\t]*') trace_def_pat_cls = re.compile('[ \\t\\)].*$')", "component used in the implementations but not defined in fttrace.h.\" cmpnt = USED_COMPONENT.keys()", "= [\"src\"] TRACE_DEF_FILES = [\"include/freetype/internal/fttrace.h\"] # -------------------------------------------------------------- # Parse command line options #", "into the public domain. import sys import os import re SRC_FILE_LIST = []", "header files using trace macros. 
# c_pathname_pat = re.compile('^.*\\.[ch]$', re.IGNORECASE) trace_use_pat = re.compile('^[", "implementations.\" cmpnt = KNOWN_COMPONENT.keys() cmpnt.sort() for c in cmpnt: if c not in", "-------------------------------------------------------------- # Parse command line options # for i in range(1, len(sys.argv)): if", "print \"Search used-but-defined and defined-but-not-used trace_XXX macros\" print \"\" print \" --help:\" print", "%s) is not defined.\" % (c, \", \".join(USED_COMPONENT[c])) print \"# Trace component is", "re.compile('^.*FT_TRACE_DEF[ \\t]*\\([ \\t]*') trace_def_pat_cls = re.compile('[ \\t\\)].*$') for f in TRACE_DEF_FILES: line_num =", "used in the implementations.\" cmpnt = KNOWN_COMPONENT.keys() cmpnt.sort() for c in cmpnt: if", "f in flst: if c_pathname_pat.match(f) != None: src_pathname = os.path.join(p, f) line_num =", "print \" --def-files=file1:file2:...\" print \" Specify the header files including FT_TRACE_DEF()\" print \"", "\" Default is %s\" % \":\".join(SRC_FILE_DIRS) print \"\" print \" --def-files=file1:file2:...\" print \"", "for f in flst: if c_pathname_pat.match(f) != None: src_pathname = os.path.join(p, f) line_num", "Parse command line options # for i in range(1, len(sys.argv)): if sys.argv[i].startswith(\"--help\"): print", "# -------------------------------------------------------------- # Scan header file(s) defining trace macros. # trace_def_pat_opn = re.compile('^.*FT_TRACE_DEF[", "component_name = trace_def_pat_cls.sub('', component_name) if component_name in KNOWN_COMPONENT: print \"trace component %s is", "= line_num + 1 src_line = src_line.strip() if trace_use_pat.match(src_line) != None: component_name =", "macros\" print \"\" print \" --help:\" print \" Show this help\" print \"\"", "-------------------------------------------------------------- # Scan C source and header files using trace macros. # c_pathname_pat", "\" Specify the directories of C source files to be checked\" print \"", "if c not in USED_COMPONENT: if c != \"any\": print \"Trace component %s", "in %s) is not defined.\" % (c, \", \".join(USED_COMPONENT[c])) print \"# Trace component", "f) line_num = 0 for src_line in open(src_pathname, 'r'): line_num = line_num +", "else: USED_COMPONENT[component_name] = [\"%s:%d\" % (src_pathname, line_num)] # -------------------------------------------------------------- # Scan header file(s)", "implementations but not defined in fttrace.h.\" cmpnt = USED_COMPONENT.keys() cmpnt.sort() for c in", "if c_pathname_pat.match(f) != None: src_pathname = os.path.join(p, f) line_num = 0 for src_line", "in cmpnt: if c not in KNOWN_COMPONENT: print \"Trace component %s (used in", "--help:\" print \" Show this help\" print \"\" print \" --src-dirs=dir1:dir2:...\" print \"", "for (p, dlst, flst) in os.walk(d): for f in flst: if c_pathname_pat.match(f) !=", "c_pathname_pat = re.compile('^.*\\.[ch]$', re.IGNORECASE) trace_use_pat = re.compile('^[ \\t]*#define[ \\t]+FT_COMPONENT[ \\t]+trace_') for d in", "(os.path.basename(f), line_num) # -------------------------------------------------------------- # Compare the used and defined trace macros. #", "trace macros. 
# print \"# Trace component used in the implementations but not", "% \\ (component_name, KNOWN_COMPONENT[component_name], line_num) else: KNOWN_COMPONENT[component_name] = \"%s:%d\" % \\ (os.path.basename(f), line_num)", "cmpnt: if c not in KNOWN_COMPONENT: print \"Trace component %s (used in %s)", "= {} SRC_FILE_DIRS = [\"src\"] TRACE_DEF_FILES = [\"include/freetype/internal/fttrace.h\"] # -------------------------------------------------------------- # Parse command", "#!/usr/bin/env python # # Check trace components in FreeType 2 source. # Author:", "in range(1, len(sys.argv)): if sys.argv[i].startswith(\"--help\"): print \"Usage: %s [option]\" % sys.argv[0] print \"Search", "print \" Default is %s\" % \":\".join(TRACE_DEF_FILES) print \"\" exit(0) if sys.argv[i].startswith(\"--src-dirs=\"): SRC_FILE_DIRS", "print \"\" exit(0) if sys.argv[i].startswith(\"--src-dirs=\"): SRC_FILE_DIRS = sys.argv[i].replace(\"--src-dirs=\", \"\", 1).split(\":\") elif sys.argv[i].startswith(\"--def-files=\"): TRACE_DEF_FILES", "% (src_pathname, line_num)] # -------------------------------------------------------------- # Scan header file(s) defining trace macros. #", "2 source. # Author: <NAME>, 2009, 2013 # # This code is explicitly", "print \" Default is %s\" % \":\".join(SRC_FILE_DIRS) print \"\" print \" --def-files=file1:file2:...\" print", "component_name in KNOWN_COMPONENT: print \"trace component %s is defined twice, see %s and", "print \"# Trace component used in the implementations but not defined in fttrace.h.\"", "\"any\": print \"Trace component %s (defined in %s) is not used.\" % (c,", "print \"Trace component %s (defined in %s) is not used.\" % (c, KNOWN_COMPONENT[c])", "directories of C source files to be checked\" print \" Default is %s\"", "options # for i in range(1, len(sys.argv)): if sys.argv[i].startswith(\"--help\"): print \"Usage: %s [option]\"", "is %s\" % \":\".join(SRC_FILE_DIRS) print \"\" print \" --def-files=file1:file2:...\" print \" Specify the", "used in the implementations but not defined in fttrace.h.\" cmpnt = USED_COMPONENT.keys() cmpnt.sort()", "+ 1 hdr_line = hdr_line.strip() if trace_def_pat_opn.match(hdr_line) != None: component_name = trace_def_pat_opn.sub('', hdr_line)", "in USED_COMPONENT: USED_COMPONENT[component_name].append(\"%s:%d\" % (src_pathname, line_num)) else: USED_COMPONENT[component_name] = [\"%s:%d\" % (src_pathname, line_num)]", "TRACE_DEF_FILES: line_num = 0 for hdr_line in open(f, 'r'): line_num = line_num +", "os.path.join(p, f) line_num = 0 for src_line in open(src_pathname, 'r'): line_num = line_num", "line_num = 0 for src_line in open(src_pathname, 'r'): line_num = line_num + 1", "i in range(1, len(sys.argv)): if sys.argv[i].startswith(\"--help\"): print \"Usage: %s [option]\" % sys.argv[0] print", "re.IGNORECASE) trace_use_pat = re.compile('^[ \\t]*#define[ \\t]+FT_COMPONENT[ \\t]+trace_') for d in SRC_FILE_DIRS: for (p,", "the header files including FT_TRACE_DEF()\" print \" Default is %s\" % \":\".join(TRACE_DEF_FILES) print", "Check trace components in FreeType 2 source. # Author: <NAME>, 2009, 2013 #", "and defined-but-not-used trace_XXX macros\" print \"\" print \" --help:\" print \" Show this", "--def-files=file1:file2:...\" print \" Specify the header files including FT_TRACE_DEF()\" print \" Default is", "the used and defined trace macros. # print \"# Trace component used in", "if c not in KNOWN_COMPONENT: print \"Trace component %s (used in %s) is", "public domain. 
import sys import os import re SRC_FILE_LIST = [] USED_COMPONENT =", "header files including FT_TRACE_DEF()\" print \" Default is %s\" % \":\".join(TRACE_DEF_FILES) print \"\"", "cmpnt = USED_COMPONENT.keys() cmpnt.sort() for c in cmpnt: if c not in KNOWN_COMPONENT:", "= USED_COMPONENT.keys() cmpnt.sort() for c in cmpnt: if c not in KNOWN_COMPONENT: print", "if trace_def_pat_opn.match(hdr_line) != None: component_name = trace_def_pat_opn.sub('', hdr_line) component_name = trace_def_pat_cls.sub('', component_name) if", "\" --help:\" print \" Show this help\" print \"\" print \" --src-dirs=dir1:dir2:...\" print", "\"\" print \" --src-dirs=dir1:dir2:...\" print \" Specify the directories of C source files", "print \" --src-dirs=dir1:dir2:...\" print \" Specify the directories of C source files to", "% \":\".join(TRACE_DEF_FILES) print \"\" exit(0) if sys.argv[i].startswith(\"--src-dirs=\"): SRC_FILE_DIRS = sys.argv[i].replace(\"--src-dirs=\", \"\", 1).split(\":\") elif", "None: component_name = trace_use_pat.sub('', src_line) if component_name in USED_COMPONENT: USED_COMPONENT[component_name].append(\"%s:%d\" % (src_pathname, line_num))", "trace_XXX macros\" print \"\" print \" --help:\" print \" Show this help\" print", "2009, 2013 # # This code is explicitly into the public domain. import", "\"\" exit(0) if sys.argv[i].startswith(\"--src-dirs=\"): SRC_FILE_DIRS = sys.argv[i].replace(\"--src-dirs=\", \"\", 1).split(\":\") elif sys.argv[i].startswith(\"--def-files=\"): TRACE_DEF_FILES =", "\\t]*') trace_def_pat_cls = re.compile('[ \\t\\)].*$') for f in TRACE_DEF_FILES: line_num = 0 for", "+ 1 src_line = src_line.strip() if trace_use_pat.match(src_line) != None: component_name = trace_use_pat.sub('', src_line)", "SRC_FILE_LIST = [] USED_COMPONENT = {} KNOWN_COMPONENT = {} SRC_FILE_DIRS = [\"src\"] TRACE_DEF_FILES", "# Check trace components in FreeType 2 source. # Author: <NAME>, 2009, 2013", "flst: if c_pathname_pat.match(f) != None: src_pathname = os.path.join(p, f) line_num = 0 for", "# Compare the used and defined trace macros. # print \"# Trace component", "# c_pathname_pat = re.compile('^.*\\.[ch]$', re.IGNORECASE) trace_use_pat = re.compile('^[ \\t]*#define[ \\t]+FT_COMPONENT[ \\t]+trace_') for d", "% sys.argv[0] print \"Search used-but-defined and defined-but-not-used trace_XXX macros\" print \"\" print \"", "sys.argv[i].replace(\"--src-dirs=\", \"\", 1).split(\":\") elif sys.argv[i].startswith(\"--def-files=\"): TRACE_DEF_FILES = sys.argv[i].replace(\"--def-files=\", \"\", 1).split(\":\") # -------------------------------------------------------------- #", "trace_def_pat_cls.sub('', component_name) if component_name in KNOWN_COMPONENT: print \"trace component %s is defined twice,", "% \":\".join(SRC_FILE_DIRS) print \"\" print \" --def-files=file1:file2:...\" print \" Specify the header files", "c not in USED_COMPONENT: if c != \"any\": print \"Trace component %s (defined", "\"\" print \" --def-files=file1:file2:...\" print \" Specify the header files including FT_TRACE_DEF()\" print", "(c, \", \".join(USED_COMPONENT[c])) print \"# Trace component is defined but not used in" ]
[ "dest_label = get_numbered_link_label( \"DATA_NODE\", numbered_links[link.id]) if is_blurring_label else dest_node.label triple = (link.source_id, link.label,", "example from this candidate model for node in pred_sm.iter_class_nodes(): outgoing_links = list(node.iter_outgoing_links()) numbered_links", "Graph, pred_sm: Graph, is_blurring_label: bool, gold_triples: Set[Tuple[int, bytes, Union[bytes, int]]]): alignment = align_graph(gold_sm,", "Union[bytes, int]]]): alignment = align_graph(gold_sm, pred_sm, DataNodeMode.IGNORE_LABEL_DATA_NODE if is_blurring_label else DataNodeMode.NO_TOUCH) if len(alignment['_bijections'])", "align_graph # from semantic_modeling.assembling.undirected_graphical_model.model_core import numbering_link_labels # from semantic_modeling.assembling.undirected_graphical_model.model_extra import get_numbered_link_label def get_gold_triples(gold_sm:", "= align_graph(gold_sm, pred_sm, DataNodeMode.IGNORE_LABEL_DATA_NODE if is_blurring_label else DataNodeMode.NO_TOUCH) bijection = alignment['_bijections'][0] link2label =", "pred_sm, DataNodeMode.IGNORE_LABEL_DATA_NODE if is_blurring_label else DataNodeMode.NO_TOUCH) if len(alignment['_bijections']) != 1: return None, None,", "List[GraphLink]) -> Dict[int, int]: accum_numbered_links = {} numbered_links = {} for l in", "1 else: accum_numbered_links[l.label] += 1 for l in links: numbered_links[l.id] = accum_numbered_links[l.label] accum_numbered_links[l.label]", "bytes, Union[bytes, int]]]: gold_triples = set() for node in gold_sm.iter_class_nodes(): outgoing_links: List[GraphLink] =", "pred_sm.iter_class_nodes(): outgoing_links = list(node.iter_outgoing_links()) numbered_links = numbering_link_labels(outgoing_links) for link in outgoing_links: dest_node =", "coding: utf-8 -*- from typing import Dict, Tuple, List, Union, Optional, Set from", "List, Union, Optional, Set from data_structure import Graph, GraphLink from experiments.evaluation_metrics import DataNodeMode", "in pred_sm.iter_class_nodes(): outgoing_links = list(node.iter_outgoing_links()) numbered_links = numbering_link_labels(outgoing_links) for link in outgoing_links: dest_node", "is_blurring_label: bool, gold_triples: Set[Tuple[int, bytes, Union[bytes, int]]]): alignment = align_graph(gold_sm, pred_sm, DataNodeMode.IGNORE_LABEL_DATA_NODE if", "# -*- coding: utf-8 -*- from typing import Dict, Tuple, List, Union, Optional,", "else DataNodeMode.NO_TOUCH) if len(alignment['_bijections']) != 1: return None, None, None bijection = alignment['_bijections'][0]", "for link in outgoing_links: dest_node = link.get_target_node() if dest_node.is_class_node(): dest_label = bijection.prime2x[link.target_id] else:", "return gold_triples def max_f1(gold_sm: Graph, pred_sm: Graph, is_blurring_label: bool, gold_triples: Set[Tuple[int, bytes, Union[bytes,", "len(alignment['_bijections']) != 1: return None, None, None bijection = alignment['_bijections'][0] link2label = {}", "in links: numbered_links[l.id] = accum_numbered_links[l.label] accum_numbered_links[l.label] -= 1 return numbered_links def get_numbered_link_label(link_label: str,", "DataNodeMode from semantic_modeling.assembling.autolabel.align_graph import align_graph # from semantic_modeling.assembling.undirected_graphical_model.model_core import numbering_link_labels # from semantic_modeling.assembling.undirected_graphical_model.model_extra", "Graph, is_blurring_label: bool, gold_triples: Set[Tuple[int, bytes, Union[bytes, int]]]): alignment = align_graph(gold_sm, pred_sm, DataNodeMode.IGNORE_LABEL_DATA_NODE", 
"link.get_target_node() if dest_node.is_class_node(): dest_label = link.target_id else: dest_label = get_numbered_link_label( \"DATA_NODE\", numbered_links[link.id]) if", "bool, gold_triples: Set[Tuple[int, bytes, Union[bytes, int]]]): alignment = align_graph(gold_sm, pred_sm, DataNodeMode.IGNORE_LABEL_DATA_NODE if is_blurring_label", "link.label, dest_label) link2label[link.id] = triple in gold_triples return link2label, bijection.prime2x, alignment['f1'] # Copied", "numbered_links = numbering_link_labels(outgoing_links) for link in outgoing_links: dest_node = link.get_target_node() if dest_node.is_class_node(): dest_label", "link.get_target_node() if dest_node.is_class_node(): dest_label = bijection.prime2x[link.target_id] else: dest_label = get_numbered_link_label( \"DATA_NODE\", numbered_links[link.id]) if", "!= 1: return None, None, None bijection = alignment['_bijections'][0] link2label = {} #", "link2label = {} # build example from this candidate model for node in", "in links: if l.label not in accum_numbered_links: accum_numbered_links[l.label] = 1 else: accum_numbered_links[l.label] +=", "accum_numbered_links[l.label] = 1 else: accum_numbered_links[l.label] += 1 for l in links: numbered_links[l.id] =", "= get_numbered_link_label( \"DATA_NODE\", numbered_links[link.id]) if is_blurring_label else dest_node.label triple = (link.source_id, link.label, dest_label)", "is_blurring_label else dest_node.label triple = (link.source_id, link.label, dest_label) gold_triples.add(triple) return gold_triples def max_f1(gold_sm:", "Graph, GraphLink from experiments.evaluation_metrics import DataNodeMode from semantic_modeling.assembling.autolabel.align_graph import align_graph # from semantic_modeling.assembling.undirected_graphical_model.model_core", "= triple in gold_triples return link2label, bijection.prime2x, alignment['f1'] def max_f1_no_ambiguous(gold_sm: Graph, pred_sm: Graph,", "= link.get_target_node() if dest_node.is_class_node(): dest_label = link.target_id else: dest_label = get_numbered_link_label( \"DATA_NODE\", numbered_links[link.id])", "Set[Tuple[int, bytes, Union[bytes, int]]]: gold_triples = set() for node in gold_sm.iter_class_nodes(): outgoing_links: List[GraphLink]", "else: dest_label = get_numbered_link_label( \"DATA_NODE\", numbered_links[link.id]) if is_blurring_label else dest_node.label triple = (link.source_id,", "node in pred_sm.iter_class_nodes(): outgoing_links = list(node.iter_outgoing_links()) numbered_links = numbering_link_labels(outgoing_links) for link in outgoing_links:", "link.label, dest_label) gold_triples.add(triple) return gold_triples def max_f1(gold_sm: Graph, pred_sm: Graph, is_blurring_label: bool, gold_triples:", "triple in gold_triples return link2label, bijection.prime2x, alignment['f1'] def max_f1_no_ambiguous(gold_sm: Graph, pred_sm: Graph, is_blurring_label:", "int]]]): alignment = align_graph(gold_sm, pred_sm, DataNodeMode.IGNORE_LABEL_DATA_NODE if is_blurring_label else DataNodeMode.NO_TOUCH) bijection = alignment['_bijections'][0]", "return link2label, bijection.prime2x, alignment['f1'] def max_f1_no_ambiguous(gold_sm: Graph, pred_sm: Graph, is_blurring_label: bool, gold_triples: Set[Tuple[int,", "not in accum_numbered_links: accum_numbered_links[l.label] = 1 else: accum_numbered_links[l.label] += 1 for l in", "-*- from typing import Dict, Tuple, List, Union, Optional, Set from data_structure import", "= get_numbered_link_label( \"DATA_NODE\", numbered_links[link.id]) if is_blurring_label else dest_node.label triple = 
(bijection.prime2x[link.source_id], link.label, dest_label)", "\"DATA_NODE\", numbered_links[link.id]) if is_blurring_label else dest_node.label triple = (link.source_id, link.label, dest_label) gold_triples.add(triple) return", "alignment['f1'] def max_f1_no_ambiguous(gold_sm: Graph, pred_sm: Graph, is_blurring_label: bool, gold_triples: Set[Tuple[int, bytes, Union[bytes, int]]]):", "numbered_links def get_numbered_link_label(link_label: str, number: int) -> str: \"\"\"Number a link\"\"\" return \"%s:_%d\"", "outgoing_links: dest_node = link.get_target_node() if dest_node.is_class_node(): dest_label = link.target_id else: dest_label = get_numbered_link_label(", "bytes, Union[bytes, int]]]): alignment = align_graph(gold_sm, pred_sm, DataNodeMode.IGNORE_LABEL_DATA_NODE if is_blurring_label else DataNodeMode.NO_TOUCH) bijection", "if l.label not in accum_numbered_links: accum_numbered_links[l.label] = 1 else: accum_numbered_links[l.label] += 1 for", "= accum_numbered_links[l.label] accum_numbered_links[l.label] -= 1 return numbered_links def get_numbered_link_label(link_label: str, number: int) ->", "link2label[link.id] = triple in gold_triples return link2label, bijection.prime2x, alignment['f1'] def max_f1_no_ambiguous(gold_sm: Graph, pred_sm:", "if dest_node.is_class_node(): dest_label = link.target_id else: dest_label = get_numbered_link_label( \"DATA_NODE\", numbered_links[link.id]) if is_blurring_label", "if is_blurring_label else DataNodeMode.NO_TOUCH) bijection = alignment['_bijections'][0] link2label = {} # build example", "# Copied from model_core and model_extra def numbering_link_labels(links: List[GraphLink]) -> Dict[int, int]: accum_numbered_links", "dest_label) gold_triples.add(triple) return gold_triples def max_f1(gold_sm: Graph, pred_sm: Graph, is_blurring_label: bool, gold_triples: Set[Tuple[int,", "def get_numbered_link_label(link_label: str, number: int) -> str: \"\"\"Number a link\"\"\" return \"%s:_%d\" %", "align_graph(gold_sm, pred_sm, DataNodeMode.IGNORE_LABEL_DATA_NODE if is_blurring_label else DataNodeMode.NO_TOUCH) if len(alignment['_bijections']) != 1: return None,", "in gold_triples return link2label, bijection.prime2x, alignment['f1'] # Copied from model_core and model_extra def", "import Dict, Tuple, List, Union, Optional, Set from data_structure import Graph, GraphLink from", "gold_triples.add(triple) return gold_triples def max_f1(gold_sm: Graph, pred_sm: Graph, is_blurring_label: bool, gold_triples: Set[Tuple[int, bytes,", "semantic_modeling.assembling.autolabel.align_graph import align_graph # from semantic_modeling.assembling.undirected_graphical_model.model_core import numbering_link_labels # from semantic_modeling.assembling.undirected_graphical_model.model_extra import get_numbered_link_label", "model_extra def numbering_link_labels(links: List[GraphLink]) -> Dict[int, int]: accum_numbered_links = {} numbered_links = {}", "import numbering_link_labels # from semantic_modeling.assembling.undirected_graphical_model.model_extra import get_numbered_link_label def get_gold_triples(gold_sm: Graph, is_blurring_label: bool) ->", "set() for node in gold_sm.iter_class_nodes(): outgoing_links: List[GraphLink] = list(node.iter_outgoing_links()) numbered_links = numbering_link_labels(outgoing_links) for", "alignment = align_graph(gold_sm, pred_sm, DataNodeMode.IGNORE_LABEL_DATA_NODE if is_blurring_label else DataNodeMode.NO_TOUCH) bijection = alignment['_bijections'][0] link2label", "import Graph, GraphLink from experiments.evaluation_metrics import DataNodeMode 
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (source repository: binh-vu/semantic-modeling)
from typing import Dict, Tuple, List, Union, Optional, Set

from data_structure import Graph, GraphLink
from experiments.evaluation_metrics import DataNodeMode
from semantic_modeling.assembling.autolabel.align_graph import align_graph
# from semantic_modeling.assembling.undirected_graphical_model.model_core import numbering_link_labels
# from semantic_modeling.assembling.undirected_graphical_model.model_extra import get_numbered_link_label


def get_gold_triples(gold_sm: Graph, is_blurring_label: bool) -> Set[Tuple[int, bytes, Union[bytes, int]]]:
    gold_triples = set()
    for node in gold_sm.iter_class_nodes():
        outgoing_links: List[GraphLink] = list(node.iter_outgoing_links())
        numbered_links = numbering_link_labels(outgoing_links)
        for link in outgoing_links:
            dest_node = link.get_target_node()
            if dest_node.is_class_node():
                dest_label = link.target_id
            else:
                dest_label = get_numbered_link_label(
                    "DATA_NODE", numbered_links[link.id]) if is_blurring_label else dest_node.label
            triple = (link.source_id, link.label, dest_label)
            gold_triples.add(triple)
    return gold_triples


def max_f1(gold_sm: Graph, pred_sm: Graph, is_blurring_label: bool,
           gold_triples: Set[Tuple[int, bytes, Union[bytes, int]]]):
    alignment = align_graph(gold_sm, pred_sm,
                            DataNodeMode.IGNORE_LABEL_DATA_NODE if is_blurring_label else DataNodeMode.NO_TOUCH)
    bijection = alignment['_bijections'][0]
    link2label = {}

    # build example from this candidate model
    for node in pred_sm.iter_class_nodes():
        outgoing_links = list(node.iter_outgoing_links())
        numbered_links = numbering_link_labels(outgoing_links)
        for link in outgoing_links:
            dest_node = link.get_target_node()
            if dest_node.is_class_node():
                dest_label = bijection.prime2x[link.target_id]
            else:
                dest_label = get_numbered_link_label(
                    "DATA_NODE", numbered_links[link.id]) if is_blurring_label else dest_node.label
            triple = (bijection.prime2x[link.source_id], link.label, dest_label)
            link2label[link.id] = triple in gold_triples

    return link2label, bijection.prime2x, alignment['f1']


def max_f1_no_ambiguous(gold_sm: Graph, pred_sm: Graph, is_blurring_label: bool,
                        gold_triples: Set[Tuple[int, bytes, Union[bytes, int]]]):
    alignment = align_graph(gold_sm, pred_sm,
                            DataNodeMode.IGNORE_LABEL_DATA_NODE if is_blurring_label else DataNodeMode.NO_TOUCH)
    if len(alignment['_bijections']) != 1:
        return None, None, None
    bijection = alignment['_bijections'][0]
    link2label = {}

    # build example from this candidate model
    for node in pred_sm.iter_class_nodes():
        outgoing_links = list(node.iter_outgoing_links())
        numbered_links = numbering_link_labels(outgoing_links)
        for link in outgoing_links:
            dest_node = link.get_target_node()
            if dest_node.is_class_node():
                dest_label = bijection.prime2x[link.target_id]
            else:
                dest_label = get_numbered_link_label(
                    "DATA_NODE", numbered_links[link.id]) if is_blurring_label else dest_node.label
            triple = (bijection.prime2x[link.source_id], link.label, dest_label)
            link2label[link.id] = triple in gold_triples

    return link2label, bijection.prime2x, alignment['f1']


# Copied from model_core and model_extra
def numbering_link_labels(links: List[GraphLink]) -> Dict[int, int]:
    accum_numbered_links = {}
    numbered_links = {}
    for l in links:
        if l.label not in accum_numbered_links:
            accum_numbered_links[l.label] = 1
        else:
            accum_numbered_links[l.label] += 1
    for l in links:
        numbered_links[l.id] = accum_numbered_links[l.label]
        accum_numbered_links[l.label] -= 1
    return numbered_links


def get_numbered_link_label(link_label: str, number: int) -> str:
    """Number a link"""
    return "%s:_%d" % (link_label, number)
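For illustration only, the numbering scheme used by numbering_link_labels can be exercised with a stand-in link type. FakeLink below is an assumption introduced for this sketch (the real code receives data_structure.GraphLink objects); only the id and label attributes matter here. Repeated labels are counted first and then numbered downwards, so the first duplicate receives the highest suffix.

from collections import namedtuple

# FakeLink is a hypothetical stand-in for data_structure.GraphLink; it only
# carries the two attributes that numbering_link_labels actually reads.
FakeLink = namedtuple('FakeLink', ['id', 'label'])

links = [FakeLink(1, 'name'), FakeLink(2, 'name'), FakeLink(3, 'birthDate')]
numbers = numbering_link_labels(links)
# numbers == {1: 2, 2: 1, 3: 1}: the two 'name' links are numbered 2, then 1.
print(get_numbered_link_label('DATA_NODE', numbers[1]))  # -> DATA_NODE:_2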
[ "img def __len__(self): return len(self.imgs) #定义自己的数据集合 class DataSetWithSalieny(data.Dataset): def __init__(self,root,saliency_root,transform,transform_saliency): # 所有图片的绝对路径 self.imgs=os.listdir(root)", "torchvision.transforms as T from torch.utils import data from PIL import Image #定义自己的数据集合 class", "return dataloader #返回的是一个dataloader的迭代器 # if __name__ == '__main__': # pil_saliency = Image.open(saliency_path) #", "1. Load the image pil_img = Image.open(img_path) # 2. Resize and normalize the", "= self.imgs[index] # 1. Load the image pil_img = Image.open(img_path) # 2. Resize", "self.saliency_root = saliency_root self.transform=transform self.transform_saliency = transform_saliency def __getitem__(self, index): img_path = self.imgs[index]", "= MyDataSet(image_dir,transform) dataloader = iter(torch.utils.data.DataLoader(dataset, batch_size, num_workers = 1)) return dataloader #返回的是一个dataloader的迭代器 #", "__len__(self): return len(self.imgs) def get_saliency_dataloader(image_dir,saliency_dir, img_size, batch_size): compose = [ T.Resize((img_size[0], img_size[1])), T.ToTensor(),", "saliency_1channel = self.transform_saliency(pil_saliency) saliency = utils.get_saleincy_2channel(saliency_1channel) return img, saliency def __len__(self): return len(self.imgs)", "Image.open(saliency_path) # 2. Resize and normalize the images using torchvision. img = self.transform(pil_img)", "len(self.imgs) #定义自己的数据集合 class DataSetWithSalieny(data.Dataset): def __init__(self,root,saliency_root,transform,transform_saliency): # 所有图片的绝对路径 self.imgs=os.listdir(root) self.imgs=[os.path.join(root,k) for k in", "Load the image pil_img = Image.open(img_path) pil_saliency = Image.open(saliency_path) # 2. Resize and", "self.transform(pil_img) saliency_1channel = self.transform_saliency(pil_saliency) saliency = utils.get_saleincy_2channel(saliency_1channel) return img, saliency def __len__(self): return", "__init__(self,root,transform): # 所有图片的绝对路径 imgs=os.listdir(root) self.imgs=[os.path.join(root,k) for k in imgs] self.transform=transform def __getitem__(self, index):", "def __getitem__(self, index): img_path = self.imgs[index] # 1. Load the image pil_img =", "the images using torchvision. # img = self.transform(pil_img) # saliency_1channel = self.transform_saliency(pil_saliency) #", "= 1)) return dataloader #返回的是一个dataloader的迭代器 def get_gray_dataloader(image_dir, img_size, batch_size): compose = [ T.Resize((img_size[0],", "normalize the images using torchvision. img = self.transform(pil_img) return img def __len__(self): return", "#从[0,1]转到[-1,1] ] transform = T.Compose(compose) compose_saliency = [ T.Resize((img_size[0], img_size[1])), T.Grayscale(num_output_channels=1), T.ToTensor(), #转到[0,1]", "T.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) #从[0,1]转到[-1,1] ] transform = T.Compose(compose) dataset = MyDataSet(image_dir,transform) dataloader = iter(torch.utils.data.DataLoader(dataset,", "in self.imgs] self.root = root self.saliency_root = saliency_root self.transform=transform self.transform_saliency = transform_saliency def", "Image.open(img_path) pil_saliency = Image.open(saliency_path) # 2. 
Resize and normalize the images using torchvision.", "= 1)) return dataloader #返回的是一个dataloader的迭代器 # if __name__ == '__main__': # pil_saliency =", "# 所有图片的绝对路径 self.imgs=os.listdir(root) self.imgs=[os.path.join(root,k) for k in self.imgs] self.root = root self.saliency_root =", "data from PIL import Image #定义自己的数据集合 class MyDataSet(data.Dataset): def __init__(self,root,transform): # 所有图片的绝对路径 imgs=os.listdir(root)", "utils.get_saleincy_2channel(saliency_1channel) return img, saliency def __len__(self): return len(self.imgs) def get_saliency_dataloader(image_dir,saliency_dir, img_size, batch_size): compose", "return img, saliency def __len__(self): return len(self.imgs) def get_saliency_dataloader(image_dir,saliency_dir, img_size, batch_size): compose =", "and normalize the images using torchvision. img = self.transform(pil_img) saliency_1channel = self.transform_saliency(pil_saliency) saliency", "import data from PIL import Image #定义自己的数据集合 class MyDataSet(data.Dataset): def __init__(self,root,transform): # 所有图片的绝对路径", "batch_size, num_workers = 1)) return dataloader #返回的是一个dataloader的迭代器 def get_gray_dataloader(image_dir, img_size, batch_size): compose =", "= Image.open(saliency_path) # # 2. Resize and normalize the images using torchvision. #", "= DataSetWithSalieny(image_dir,saliency_dir,transform,transform_saliency) dataloader = iter(torch.utils.data.DataLoader(dataset, batch_size, num_workers = 1)) return dataloader #返回的是一个dataloader的迭代器 def", "using torchvision. img = self.transform(pil_img) return img def __len__(self): return len(self.imgs) #定义自己的数据集合 class", "def __init__(self,root,saliency_root,transform,transform_saliency): # 所有图片的绝对路径 self.imgs=os.listdir(root) self.imgs=[os.path.join(root,k) for k in self.imgs] self.root = root", "#定义自己的数据集合 class MyDataSet(data.Dataset): def __init__(self,root,transform): # 所有图片的绝对路径 imgs=os.listdir(root) self.imgs=[os.path.join(root,k) for k in imgs]", "img_path = self.imgs[index] # 1. Load the image pil_img = Image.open(img_path) # 2.", "torch import utils import torchvision.transforms as T from torch.utils import data from PIL", "= iter(torch.utils.data.DataLoader(dataset, batch_size, num_workers = 1)) return dataloader #返回的是一个dataloader的迭代器 def get_gray_dataloader(image_dir, img_size, batch_size):", "= [ T.Resize((img_size[0], img_size[1])), T.Grayscale(num_output_channels=3), T.ToTensor(), T.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) #从[0,1]转到[-1,1] ] transform = T.Compose(compose)", "img_size[1])), T.Grayscale(num_output_channels=1), T.ToTensor(), #转到[0,1] ] transform_saliency = T.Compose(compose_saliency) dataset = DataSetWithSalieny(image_dir,saliency_dir,transform,transform_saliency) dataloader =", "return dataloader #返回的是一个dataloader的迭代器 def get_gray_dataloader(image_dir, img_size, batch_size): compose = [ T.Resize((img_size[0], img_size[1])), T.Grayscale(num_output_channels=3),", "# if __name__ == '__main__': # pil_saliency = Image.open(saliency_path) # # 2. Resize", "compose_saliency = [ T.Resize((img_size[0], img_size[1])), T.Grayscale(num_output_channels=1), T.ToTensor(), #转到[0,1] ] transform_saliency = T.Compose(compose_saliency) dataset", "T.Compose(compose) compose_saliency = [ T.Resize((img_size[0], img_size[1])), T.Grayscale(num_output_channels=1), T.ToTensor(), #转到[0,1] ] transform_saliency = T.Compose(compose_saliency)", "#返回的是一个dataloader的迭代器 # if __name__ == '__main__': # pil_saliency = Image.open(saliency_path) # # 2.", "imgs] self.transform=transform def __getitem__(self, index): img_path = self.imgs[index] # 1. 
Load the image", "batch_size): compose = [ T.Resize((img_size[0], img_size[1])), T.ToTensor(), #转到[0,1] T.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) #从[0,1]转到[-1,1] ] transform", "= self.transform_saliency(pil_saliency) saliency = utils.get_saleincy_2channel(saliency_1channel) return img, saliency def __len__(self): return len(self.imgs) def", "= utils.get_saleincy_2channel(saliency_1channel) return img, saliency def __len__(self): return len(self.imgs) def get_saliency_dataloader(image_dir,saliency_dir, img_size, batch_size):", "Resize and normalize the images using torchvision. # img = self.transform(pil_img) # saliency_1channel", "dataset = MyDataSet(image_dir,transform) dataloader = iter(torch.utils.data.DataLoader(dataset, batch_size, num_workers = 1)) return dataloader #返回的是一个dataloader的迭代器", "[ T.Resize((img_size[0], img_size[1])), T.Grayscale(num_output_channels=3), T.ToTensor(), T.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) #从[0,1]转到[-1,1] ] transform = T.Compose(compose) dataset", "compose = [ T.Resize((img_size[0], img_size[1])), T.Grayscale(num_output_channels=3), T.ToTensor(), T.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) #从[0,1]转到[-1,1] ] transform =", "image pil_img = Image.open(img_path) pil_saliency = Image.open(saliency_path) # 2. Resize and normalize the", "for k in self.imgs] self.root = root self.saliency_root = saliency_root self.transform=transform self.transform_saliency =", "2. Resize and normalize the images using torchvision. img = self.transform(pil_img) saliency_1channel =", "Resize and normalize the images using torchvision. img = self.transform(pil_img) saliency_1channel = self.transform_saliency(pil_saliency)", "return len(self.imgs) #定义自己的数据集合 class DataSetWithSalieny(data.Dataset): def __init__(self,root,saliency_root,transform,transform_saliency): # 所有图片的绝对路径 self.imgs=os.listdir(root) self.imgs=[os.path.join(root,k) for k", "iter(torch.utils.data.DataLoader(dataset, batch_size, num_workers = 1)) return dataloader #返回的是一个dataloader的迭代器 def get_gray_dataloader(image_dir, img_size, batch_size): compose", "img_size[1])), T.Grayscale(num_output_channels=3), T.ToTensor(), T.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) #从[0,1]转到[-1,1] ] transform = T.Compose(compose) dataset = MyDataSet(image_dir,transform)", "Image.open(saliency_path) # # 2. Resize and normalize the images using torchvision. # img", "img, saliency def __len__(self): return len(self.imgs) def get_saliency_dataloader(image_dir,saliency_dir, img_size, batch_size): compose = [", "os import torch import utils import torchvision.transforms as T from torch.utils import data", "T from torch.utils import data from PIL import Image #定义自己的数据集合 class MyDataSet(data.Dataset): def", "# 所有图片的绝对路径 imgs=os.listdir(root) self.imgs=[os.path.join(root,k) for k in imgs] self.transform=transform def __getitem__(self, index): img_path", "if __name__ == '__main__': # pil_saliency = Image.open(saliency_path) # # 2. 
Resize and", "k in self.imgs] self.root = root self.saliency_root = saliency_root self.transform=transform self.transform_saliency = transform_saliency", "T.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) #从[0,1]转到[-1,1] ] transform = T.Compose(compose) compose_saliency = [ T.Resize((img_size[0], img_size[1])), T.Grayscale(num_output_channels=1),", "import torch import utils import torchvision.transforms as T from torch.utils import data from", "num_workers = 1)) return dataloader #返回的是一个dataloader的迭代器 # if __name__ == '__main__': # pil_saliency", "T.Grayscale(num_output_channels=3), T.ToTensor(), T.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) #从[0,1]转到[-1,1] ] transform = T.Compose(compose) dataset = MyDataSet(image_dir,transform) dataloader", "get_saliency_dataloader(image_dir,saliency_dir, img_size, batch_size): compose = [ T.Resize((img_size[0], img_size[1])), T.ToTensor(), #转到[0,1] T.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) #从[0,1]转到[-1,1]", "self.imgs=os.listdir(root) self.imgs=[os.path.join(root,k) for k in self.imgs] self.root = root self.saliency_root = saliency_root self.transform=transform", "img_path = self.imgs[index] img_name = self.imgs[index].split('/')[-1] saliency_name = img_name.split('.')[0]+\".png\" saliency_path = os.path.join(self.saliency_root, saliency_name)", "= Image.open(img_path) pil_saliency = Image.open(saliency_path) # 2. Resize and normalize the images using", "1)) return dataloader #返回的是一个dataloader的迭代器 # if __name__ == '__main__': # pil_saliency = Image.open(saliency_path)", "the image pil_img = Image.open(img_path) # 2. Resize and normalize the images using", "] transform_saliency = T.Compose(compose_saliency) dataset = DataSetWithSalieny(image_dir,saliency_dir,transform,transform_saliency) dataloader = iter(torch.utils.data.DataLoader(dataset, batch_size, num_workers =", "所有图片的绝对路径 self.imgs=os.listdir(root) self.imgs=[os.path.join(root,k) for k in self.imgs] self.root = root self.saliency_root = saliency_root", "= self.imgs[index].split('/')[-1] saliency_name = img_name.split('.')[0]+\".png\" saliency_path = os.path.join(self.saliency_root, saliency_name) # 1. 
Load the", "import os import torch import utils import torchvision.transforms as T from torch.utils import", "return len(self.imgs) def get_saliency_dataloader(image_dir,saliency_dir, img_size, batch_size): compose = [ T.Resize((img_size[0], img_size[1])), T.ToTensor(), #转到[0,1]", "= [ T.Resize((img_size[0], img_size[1])), T.Grayscale(num_output_channels=1), T.ToTensor(), #转到[0,1] ] transform_saliency = T.Compose(compose_saliency) dataset =", "T.Compose(compose_saliency) dataset = DataSetWithSalieny(image_dir,saliency_dir,transform,transform_saliency) dataloader = iter(torch.utils.data.DataLoader(dataset, batch_size, num_workers = 1)) return dataloader", "DataSetWithSalieny(image_dir,saliency_dir,transform,transform_saliency) dataloader = iter(torch.utils.data.DataLoader(dataset, batch_size, num_workers = 1)) return dataloader #返回的是一个dataloader的迭代器 def get_gray_dataloader(image_dir,", "saliency_root self.transform=transform self.transform_saliency = transform_saliency def __getitem__(self, index): img_path = self.imgs[index] img_name =", "index): img_path = self.imgs[index] img_name = self.imgs[index].split('/')[-1] saliency_name = img_name.split('.')[0]+\".png\" saliency_path = os.path.join(self.saliency_root,", "from PIL import Image #定义自己的数据集合 class MyDataSet(data.Dataset): def __init__(self,root,transform): # 所有图片的绝对路径 imgs=os.listdir(root) self.imgs=[os.path.join(root,k)", "self.imgs[index].split('/')[-1] saliency_name = img_name.split('.')[0]+\".png\" saliency_path = os.path.join(self.saliency_root, saliency_name) # 1. Load the image", "self.imgs=[os.path.join(root,k) for k in imgs] self.transform=transform def __getitem__(self, index): img_path = self.imgs[index] #", "get_gray_dataloader(image_dir, img_size, batch_size): compose = [ T.Resize((img_size[0], img_size[1])), T.Grayscale(num_output_channels=3), T.ToTensor(), T.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) #从[0,1]转到[-1,1]", "] transform = T.Compose(compose) dataset = MyDataSet(image_dir,transform) dataloader = iter(torch.utils.data.DataLoader(dataset, batch_size, num_workers =", "def get_saliency_dataloader(image_dir,saliency_dir, img_size, batch_size): compose = [ T.Resize((img_size[0], img_size[1])), T.ToTensor(), #转到[0,1] T.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5))", "= T.Compose(compose) compose_saliency = [ T.Resize((img_size[0], img_size[1])), T.Grayscale(num_output_channels=1), T.ToTensor(), #转到[0,1] ] transform_saliency =", "== '__main__': # pil_saliency = Image.open(saliency_path) # # 2. Resize and normalize the", "images using torchvision. img = self.transform(pil_img) return img def __len__(self): return len(self.imgs) #定义自己的数据集合", "T.Resize((img_size[0], img_size[1])), T.ToTensor(), #转到[0,1] T.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) #从[0,1]转到[-1,1] ] transform = T.Compose(compose) compose_saliency =", "pil_img = Image.open(img_path) pil_saliency = Image.open(saliency_path) # 2. Resize and normalize the images", "image pil_img = Image.open(img_path) # 2. Resize and normalize the images using torchvision.", "for k in imgs] self.transform=transform def __getitem__(self, index): img_path = self.imgs[index] # 1.", "Resize and normalize the images using torchvision. 
img = self.transform(pil_img) return img def", "= T.Compose(compose_saliency) dataset = DataSetWithSalieny(image_dir,saliency_dir,transform,transform_saliency) dataloader = iter(torch.utils.data.DataLoader(dataset, batch_size, num_workers = 1)) return", "T.ToTensor(), #转到[0,1] ] transform_saliency = T.Compose(compose_saliency) dataset = DataSetWithSalieny(image_dir,saliency_dir,transform,transform_saliency) dataloader = iter(torch.utils.data.DataLoader(dataset, batch_size,", "T.Resize((img_size[0], img_size[1])), T.Grayscale(num_output_channels=1), T.ToTensor(), #转到[0,1] ] transform_saliency = T.Compose(compose_saliency) dataset = DataSetWithSalieny(image_dir,saliency_dir,transform,transform_saliency) dataloader", "images using torchvision. img = self.transform(pil_img) saliency_1channel = self.transform_saliency(pil_saliency) saliency = utils.get_saleincy_2channel(saliency_1channel) return", "__name__ == '__main__': # pil_saliency = Image.open(saliency_path) # # 2. Resize and normalize", "len(self.imgs) def get_saliency_dataloader(image_dir,saliency_dir, img_size, batch_size): compose = [ T.Resize((img_size[0], img_size[1])), T.ToTensor(), #转到[0,1] T.Normalize((0.5,0.5,0.5),", "pil_saliency = Image.open(saliency_path) # # 2. Resize and normalize the images using torchvision.", "os.path.join(self.saliency_root, saliency_name) # 1. Load the image pil_img = Image.open(img_path) pil_saliency = Image.open(saliency_path)", "import Image #定义自己的数据集合 class MyDataSet(data.Dataset): def __init__(self,root,transform): # 所有图片的绝对路径 imgs=os.listdir(root) self.imgs=[os.path.join(root,k) for k", "= transform_saliency def __getitem__(self, index): img_path = self.imgs[index] img_name = self.imgs[index].split('/')[-1] saliency_name =", "normalize the images using torchvision. # img = self.transform(pil_img) # saliency_1channel = self.transform_saliency(pil_saliency)", "__getitem__(self, index): img_path = self.imgs[index] # 1. Load the image pil_img = Image.open(img_path)", "import utils import torchvision.transforms as T from torch.utils import data from PIL import", "img_name = self.imgs[index].split('/')[-1] saliency_name = img_name.split('.')[0]+\".png\" saliency_path = os.path.join(self.saliency_root, saliency_name) # 1. Load", "img_size[1])), T.ToTensor(), #转到[0,1] T.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) #从[0,1]转到[-1,1] ] transform = T.Compose(compose) compose_saliency = [", "[ T.Resize((img_size[0], img_size[1])), T.ToTensor(), #转到[0,1] T.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) #从[0,1]转到[-1,1] ] transform = T.Compose(compose) compose_saliency", "in imgs] self.transform=transform def __getitem__(self, index): img_path = self.imgs[index] # 1. Load the", "self.transform_saliency = transform_saliency def __getitem__(self, index): img_path = self.imgs[index] img_name = self.imgs[index].split('/')[-1] saliency_name", "(0.5,0.5,0.5)) #从[0,1]转到[-1,1] ] transform = T.Compose(compose) dataset = MyDataSet(image_dir,transform) dataloader = iter(torch.utils.data.DataLoader(dataset, batch_size,", "# 2. Resize and normalize the images using torchvision. # img = self.transform(pil_img)", "images using torchvision. # img = self.transform(pil_img) # saliency_1channel = self.transform_saliency(pil_saliency) # saliency", "= img_name.split('.')[0]+\".png\" saliency_path = os.path.join(self.saliency_root, saliency_name) # 1. 
Load the image pil_img =", "compose = [ T.Resize((img_size[0], img_size[1])), T.ToTensor(), #转到[0,1] T.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) #从[0,1]转到[-1,1] ] transform =", "saliency_name = img_name.split('.')[0]+\".png\" saliency_path = os.path.join(self.saliency_root, saliency_name) # 1. Load the image pil_img", "saliency_path = os.path.join(self.saliency_root, saliency_name) # 1. Load the image pil_img = Image.open(img_path) pil_saliency", "class DataSetWithSalieny(data.Dataset): def __init__(self,root,saliency_root,transform,transform_saliency): # 所有图片的绝对路径 self.imgs=os.listdir(root) self.imgs=[os.path.join(root,k) for k in self.imgs] self.root", "__init__(self,root,saliency_root,transform,transform_saliency): # 所有图片的绝对路径 self.imgs=os.listdir(root) self.imgs=[os.path.join(root,k) for k in self.imgs] self.root = root self.saliency_root", "using torchvision. img = self.transform(pil_img) saliency_1channel = self.transform_saliency(pil_saliency) saliency = utils.get_saleincy_2channel(saliency_1channel) return img,", "transform = T.Compose(compose) compose_saliency = [ T.Resize((img_size[0], img_size[1])), T.Grayscale(num_output_channels=1), T.ToTensor(), #转到[0,1] ] transform_saliency", "using torchvision. # img = self.transform(pil_img) # saliency_1channel = self.transform_saliency(pil_saliency) # saliency =", "img = self.transform(pil_img) # saliency_1channel = self.transform_saliency(pil_saliency) # saliency = get_saleincy_2channel(saliency_1channel) # return", "batch_size, num_workers = 1)) return dataloader #返回的是一个dataloader的迭代器 # if __name__ == '__main__': #", "self.transform(pil_img) return img def __len__(self): return len(self.imgs) #定义自己的数据集合 class DataSetWithSalieny(data.Dataset): def __init__(self,root,saliency_root,transform,transform_saliency): #", "self.imgs[index] # 1. Load the image pil_img = Image.open(img_path) # 2. 
Resize and", "saliency def __len__(self): return len(self.imgs) def get_saliency_dataloader(image_dir,saliency_dir, img_size, batch_size): compose = [ T.Resize((img_size[0],", "= [ T.Resize((img_size[0], img_size[1])), T.ToTensor(), #转到[0,1] T.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) #从[0,1]转到[-1,1] ] transform = T.Compose(compose)", "= iter(torch.utils.data.DataLoader(dataset, batch_size, num_workers = 1)) return dataloader #返回的是一个dataloader的迭代器 # if __name__ ==", "class MyDataSet(data.Dataset): def __init__(self,root,transform): # 所有图片的绝对路径 imgs=os.listdir(root) self.imgs=[os.path.join(root,k) for k in imgs] self.transform=transform", "MyDataSet(image_dir,transform) dataloader = iter(torch.utils.data.DataLoader(dataset, batch_size, num_workers = 1)) return dataloader #返回的是一个dataloader的迭代器 # if", "= self.transform(pil_img) # saliency_1channel = self.transform_saliency(pil_saliency) # saliency = get_saleincy_2channel(saliency_1channel) # return img,", "dataset = DataSetWithSalieny(image_dir,saliency_dir,transform,transform_saliency) dataloader = iter(torch.utils.data.DataLoader(dataset, batch_size, num_workers = 1)) return dataloader #返回的是一个dataloader的迭代器", "#返回的是一个dataloader的迭代器 def get_gray_dataloader(image_dir, img_size, batch_size): compose = [ T.Resize((img_size[0], img_size[1])), T.Grayscale(num_output_channels=3), T.ToTensor(), T.Normalize((0.5,0.5,0.5),", "utils import torchvision.transforms as T from torch.utils import data from PIL import Image", "#转到[0,1] ] transform_saliency = T.Compose(compose_saliency) dataset = DataSetWithSalieny(image_dir,saliency_dir,transform,transform_saliency) dataloader = iter(torch.utils.data.DataLoader(dataset, batch_size, num_workers", "self.imgs=[os.path.join(root,k) for k in self.imgs] self.root = root self.saliency_root = saliency_root self.transform=transform self.transform_saliency", "img_name.split('.')[0]+\".png\" saliency_path = os.path.join(self.saliency_root, saliency_name) # 1. Load the image pil_img = Image.open(img_path)", "= self.transform(pil_img) saliency_1channel = self.transform_saliency(pil_saliency) saliency = utils.get_saleincy_2channel(saliency_1channel) return img, saliency def __len__(self):", "iter(torch.utils.data.DataLoader(dataset, batch_size, num_workers = 1)) return dataloader #返回的是一个dataloader的迭代器 # if __name__ == '__main__':", "T.Resize((img_size[0], img_size[1])), T.Grayscale(num_output_channels=3), T.ToTensor(), T.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) #从[0,1]转到[-1,1] ] transform = T.Compose(compose) dataset =", "Image #定义自己的数据集合 class MyDataSet(data.Dataset): def __init__(self,root,transform): # 所有图片的绝对路径 imgs=os.listdir(root) self.imgs=[os.path.join(root,k) for k in", "1. Load the image pil_img = Image.open(img_path) pil_saliency = Image.open(saliency_path) # 2. Resize", "= saliency_root self.transform=transform self.transform_saliency = transform_saliency def __getitem__(self, index): img_path = self.imgs[index] img_name", "index): img_path = self.imgs[index] # 1. Load the image pil_img = Image.open(img_path) #", "# # 2. Resize and normalize the images using torchvision. 
# img =", "img = self.transform(pil_img) saliency_1channel = self.transform_saliency(pil_saliency) saliency = utils.get_saleincy_2channel(saliency_1channel) return img, saliency def", "saliency = utils.get_saleincy_2channel(saliency_1channel) return img, saliency def __len__(self): return len(self.imgs) def get_saliency_dataloader(image_dir,saliency_dir, img_size,", "__len__(self): return len(self.imgs) #定义自己的数据集合 class DataSetWithSalieny(data.Dataset): def __init__(self,root,saliency_root,transform,transform_saliency): # 所有图片的绝对路径 self.imgs=os.listdir(root) self.imgs=[os.path.join(root,k) for", "and normalize the images using torchvision. img = self.transform(pil_img) return img def __len__(self):", "torch.utils import data from PIL import Image #定义自己的数据集合 class MyDataSet(data.Dataset): def __init__(self,root,transform): #", "= self.imgs[index] img_name = self.imgs[index].split('/')[-1] saliency_name = img_name.split('.')[0]+\".png\" saliency_path = os.path.join(self.saliency_root, saliency_name) #", "img_size, batch_size): compose = [ T.Resize((img_size[0], img_size[1])), T.ToTensor(), #转到[0,1] T.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) #从[0,1]转到[-1,1] ]", "#定义自己的数据集合 class DataSetWithSalieny(data.Dataset): def __init__(self,root,saliency_root,transform,transform_saliency): # 所有图片的绝对路径 self.imgs=os.listdir(root) self.imgs=[os.path.join(root,k) for k in self.imgs]", "def __len__(self): return len(self.imgs) #定义自己的数据集合 class DataSetWithSalieny(data.Dataset): def __init__(self,root,saliency_root,transform,transform_saliency): # 所有图片的绝对路径 self.imgs=os.listdir(root) self.imgs=[os.path.join(root,k)", "= Image.open(saliency_path) # 2. Resize and normalize the images using torchvision. img =", "return img def __len__(self): return len(self.imgs) #定义自己的数据集合 class DataSetWithSalieny(data.Dataset): def __init__(self,root,saliency_root,transform,transform_saliency): # 所有图片的绝对路径", "root self.saliency_root = saliency_root self.transform=transform self.transform_saliency = transform_saliency def __getitem__(self, index): img_path =", "MyDataSet(data.Dataset): def __init__(self,root,transform): # 所有图片的绝对路径 imgs=os.listdir(root) self.imgs=[os.path.join(root,k) for k in imgs] self.transform=transform def", "] transform = T.Compose(compose) compose_saliency = [ T.Resize((img_size[0], img_size[1])), T.Grayscale(num_output_channels=1), T.ToTensor(), #转到[0,1] ]", "self.transform_saliency(pil_saliency) saliency = utils.get_saleincy_2channel(saliency_1channel) return img, saliency def __len__(self): return len(self.imgs) def get_saliency_dataloader(image_dir,saliency_dir,", "the image pil_img = Image.open(img_path) pil_saliency = Image.open(saliency_path) # 2. 
Resize and normalize", "[ T.Resize((img_size[0], img_size[1])), T.Grayscale(num_output_channels=1), T.ToTensor(), #转到[0,1] ] transform_saliency = T.Compose(compose_saliency) dataset = DataSetWithSalieny(image_dir,saliency_dir,transform,transform_saliency)", "img_size, batch_size): compose = [ T.Resize((img_size[0], img_size[1])), T.Grayscale(num_output_channels=3), T.ToTensor(), T.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) #从[0,1]转到[-1,1] ]", "PIL import Image #定义自己的数据集合 class MyDataSet(data.Dataset): def __init__(self,root,transform): # 所有图片的绝对路径 imgs=os.listdir(root) self.imgs=[os.path.join(root,k) for", "img = self.transform(pil_img) return img def __len__(self): return len(self.imgs) #定义自己的数据集合 class DataSetWithSalieny(data.Dataset): def", "batch_size): compose = [ T.Resize((img_size[0], img_size[1])), T.Grayscale(num_output_channels=3), T.ToTensor(), T.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) #从[0,1]转到[-1,1] ] transform", "# 2. Resize and normalize the images using torchvision. img = self.transform(pil_img) saliency_1channel", "(0.5,0.5,0.5)) #从[0,1]转到[-1,1] ] transform = T.Compose(compose) compose_saliency = [ T.Resize((img_size[0], img_size[1])), T.Grayscale(num_output_channels=1), T.ToTensor(),", "#转到[0,1] T.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) #从[0,1]转到[-1,1] ] transform = T.Compose(compose) compose_saliency = [ T.Resize((img_size[0], img_size[1])),", "T.Grayscale(num_output_channels=1), T.ToTensor(), #转到[0,1] ] transform_saliency = T.Compose(compose_saliency) dataset = DataSetWithSalieny(image_dir,saliency_dir,transform,transform_saliency) dataloader = iter(torch.utils.data.DataLoader(dataset,", "= T.Compose(compose) dataset = MyDataSet(image_dir,transform) dataloader = iter(torch.utils.data.DataLoader(dataset, batch_size, num_workers = 1)) return", "pil_img = Image.open(img_path) # 2. Resize and normalize the images using torchvision. img", "as T from torch.utils import data from PIL import Image #定义自己的数据集合 class MyDataSet(data.Dataset):", "DataSetWithSalieny(data.Dataset): def __init__(self,root,saliency_root,transform,transform_saliency): # 所有图片的绝对路径 self.imgs=os.listdir(root) self.imgs=[os.path.join(root,k) for k in self.imgs] self.root =", "self.transform(pil_img) # saliency_1channel = self.transform_saliency(pil_saliency) # saliency = get_saleincy_2channel(saliency_1channel) # return img, saliency", "# 1. Load the image pil_img = Image.open(img_path) # 2. Resize and normalize", "transform_saliency def __getitem__(self, index): img_path = self.imgs[index] img_name = self.imgs[index].split('/')[-1] saliency_name = img_name.split('.')[0]+\".png\"", "= os.path.join(self.saliency_root, saliency_name) # 1. Load the image pil_img = Image.open(img_path) pil_saliency =", "self.imgs[index] img_name = self.imgs[index].split('/')[-1] saliency_name = img_name.split('.')[0]+\".png\" saliency_path = os.path.join(self.saliency_root, saliency_name) # 1.", "= self.transform(pil_img) return img def __len__(self): return len(self.imgs) #定义自己的数据集合 class DataSetWithSalieny(data.Dataset): def __init__(self,root,saliency_root,transform,transform_saliency):", "# 2. Resize and normalize the images using torchvision. img = self.transform(pil_img) return", "'__main__': # pil_saliency = Image.open(saliency_path) # # 2. 
Resize and normalize the images", "T.ToTensor(), T.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) #从[0,1]转到[-1,1] ] transform = T.Compose(compose) dataset = MyDataSet(image_dir,transform) dataloader =", "dataloader #返回的是一个dataloader的迭代器 def get_gray_dataloader(image_dir, img_size, batch_size): compose = [ T.Resize((img_size[0], img_size[1])), T.Grayscale(num_output_channels=3), T.ToTensor(),", "1)) return dataloader #返回的是一个dataloader的迭代器 def get_gray_dataloader(image_dir, img_size, batch_size): compose = [ T.Resize((img_size[0], img_size[1])),", "T.Compose(compose) dataset = MyDataSet(image_dir,transform) dataloader = iter(torch.utils.data.DataLoader(dataset, batch_size, num_workers = 1)) return dataloader", "transform = T.Compose(compose) dataset = MyDataSet(image_dir,transform) dataloader = iter(torch.utils.data.DataLoader(dataset, batch_size, num_workers = 1))", "2. Resize and normalize the images using torchvision. img = self.transform(pil_img) return img", "and normalize the images using torchvision. # img = self.transform(pil_img) # saliency_1channel =", "def __init__(self,root,transform): # 所有图片的绝对路径 imgs=os.listdir(root) self.imgs=[os.path.join(root,k) for k in imgs] self.transform=transform def __getitem__(self,", "torchvision. # img = self.transform(pil_img) # saliency_1channel = self.transform_saliency(pil_saliency) # saliency = get_saleincy_2channel(saliency_1channel)", "2. Resize and normalize the images using torchvision. # img = self.transform(pil_img) #", "normalize the images using torchvision. img = self.transform(pil_img) saliency_1channel = self.transform_saliency(pil_saliency) saliency =", "T.ToTensor(), #转到[0,1] T.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) #从[0,1]转到[-1,1] ] transform = T.Compose(compose) compose_saliency = [ T.Resize((img_size[0],", "# pil_saliency = Image.open(saliency_path) # # 2. Resize and normalize the images using", "def __len__(self): return len(self.imgs) def get_saliency_dataloader(image_dir,saliency_dir, img_size, batch_size): compose = [ T.Resize((img_size[0], img_size[1])),", "self.imgs] self.root = root self.saliency_root = saliency_root self.transform=transform self.transform_saliency = transform_saliency def __getitem__(self,", "#从[0,1]转到[-1,1] ] transform = T.Compose(compose) dataset = MyDataSet(image_dir,transform) dataloader = iter(torch.utils.data.DataLoader(dataset, batch_size, num_workers", "dataloader = iter(torch.utils.data.DataLoader(dataset, batch_size, num_workers = 1)) return dataloader #返回的是一个dataloader的迭代器 def get_gray_dataloader(image_dir, img_size,", "from torch.utils import data from PIL import Image #定义自己的数据集合 class MyDataSet(data.Dataset): def __init__(self,root,transform):", "dataloader #返回的是一个dataloader的迭代器 # if __name__ == '__main__': # pil_saliency = Image.open(saliency_path) # #", "= Image.open(img_path) # 2. Resize and normalize the images using torchvision. img =", "k in imgs] self.transform=transform def __getitem__(self, index): img_path = self.imgs[index] # 1. 
Load", "__getitem__(self, index): img_path = self.imgs[index] img_name = self.imgs[index].split('/')[-1] saliency_name = img_name.split('.')[0]+\".png\" saliency_path =", "transform_saliency = T.Compose(compose_saliency) dataset = DataSetWithSalieny(image_dir,saliency_dir,transform,transform_saliency) dataloader = iter(torch.utils.data.DataLoader(dataset, batch_size, num_workers = 1))", "imgs=os.listdir(root) self.imgs=[os.path.join(root,k) for k in imgs] self.transform=transform def __getitem__(self, index): img_path = self.imgs[index]", "# img = self.transform(pil_img) # saliency_1channel = self.transform_saliency(pil_saliency) # saliency = get_saleincy_2channel(saliency_1channel) #", "Load the image pil_img = Image.open(img_path) # 2. Resize and normalize the images", "# 1. Load the image pil_img = Image.open(img_path) pil_saliency = Image.open(saliency_path) # 2.", "def get_gray_dataloader(image_dir, img_size, batch_size): compose = [ T.Resize((img_size[0], img_size[1])), T.Grayscale(num_output_channels=3), T.ToTensor(), T.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5))", "dataloader = iter(torch.utils.data.DataLoader(dataset, batch_size, num_workers = 1)) return dataloader #返回的是一个dataloader的迭代器 # if __name__", "the images using torchvision. img = self.transform(pil_img) return img def __len__(self): return len(self.imgs)", "def __getitem__(self, index): img_path = self.imgs[index] img_name = self.imgs[index].split('/')[-1] saliency_name = img_name.split('.')[0]+\".png\" saliency_path", "self.root = root self.saliency_root = saliency_root self.transform=transform self.transform_saliency = transform_saliency def __getitem__(self, index):", "saliency_name) # 1. Load the image pil_img = Image.open(img_path) pil_saliency = Image.open(saliency_path) #", "torchvision. img = self.transform(pil_img) saliency_1channel = self.transform_saliency(pil_saliency) saliency = utils.get_saleincy_2channel(saliency_1channel) return img, saliency", "torchvision. img = self.transform(pil_img) return img def __len__(self): return len(self.imgs) #定义自己的数据集合 class DataSetWithSalieny(data.Dataset):", "所有图片的绝对路径 imgs=os.listdir(root) self.imgs=[os.path.join(root,k) for k in imgs] self.transform=transform def __getitem__(self, index): img_path =", "the images using torchvision. img = self.transform(pil_img) saliency_1channel = self.transform_saliency(pil_saliency) saliency = utils.get_saleincy_2channel(saliency_1channel)", "Image.open(img_path) # 2. Resize and normalize the images using torchvision. img = self.transform(pil_img)", "import torchvision.transforms as T from torch.utils import data from PIL import Image #定义自己的数据集合", "num_workers = 1)) return dataloader #返回的是一个dataloader的迭代器 def get_gray_dataloader(image_dir, img_size, batch_size): compose = [", "= root self.saliency_root = saliency_root self.transform=transform self.transform_saliency = transform_saliency def __getitem__(self, index): img_path", "pil_saliency = Image.open(saliency_path) # 2. Resize and normalize the images using torchvision. img", "self.transform=transform def __getitem__(self, index): img_path = self.imgs[index] # 1. Load the image pil_img", "self.transform=transform self.transform_saliency = transform_saliency def __getitem__(self, index): img_path = self.imgs[index] img_name = self.imgs[index].split('/')[-1]" ]
[ "return assert False, 'Should not have gotten this far' def test_not_to_be_null(): expect('something').Not.to_be_null() expect('something').not_to_be_null()", "Licensed under the MIT license: # http://www.opensource.org/licenses/mit-license # Copyright (c) 2013 <NAME> <EMAIL>", "under the MIT license: # http://www.opensource.org/licenses/mit-license # Copyright (c) 2013 <NAME> <EMAIL> from", "assertions # https://github.com/heynemann/preggy # Licensed under the MIT license: # http://www.opensource.org/licenses/mit-license # Copyright", "# Copyright (c) 2013 <NAME> <EMAIL> from preggy import expect #----------------------------------------------------------------------------- def test_to_be_null():", "'Should not have gotten this far' def test_not_to_be_null(): expect('something').Not.to_be_null() expect('something').not_to_be_null() try: expect('something').to_be_null() except", "expect #----------------------------------------------------------------------------- def test_to_be_null(): expect(None).to_be_null() try: expect(None).not_to_be_null() except AssertionError: return assert False, 'Should", "MIT license: # http://www.opensource.org/licenses/mit-license # Copyright (c) 2013 <NAME> <EMAIL> from preggy import", "-*- coding: utf-8 -*- # preggy assertions # https://github.com/heynemann/preggy # Licensed under the", "this far' def test_not_to_be_null(): expect('something').Not.to_be_null() expect('something').not_to_be_null() try: expect('something').to_be_null() except AssertionError: return assert False,", "expect('something').Not.to_be_null() expect('something').not_to_be_null() try: expect('something').to_be_null() except AssertionError: return assert False, 'Should not have gotten", "the MIT license: # http://www.opensource.org/licenses/mit-license # Copyright (c) 2013 <NAME> <EMAIL> from preggy", "gotten this far' def test_not_to_be_null(): expect('something').Not.to_be_null() expect('something').not_to_be_null() try: expect('something').to_be_null() except AssertionError: return assert", "(c) 2013 <NAME> <EMAIL> from preggy import expect #----------------------------------------------------------------------------- def test_to_be_null(): expect(None).to_be_null() try:", "# https://github.com/heynemann/preggy # Licensed under the MIT license: # http://www.opensource.org/licenses/mit-license # Copyright (c)", "False, 'Should not have gotten this far' def test_not_to_be_null(): expect('something').Not.to_be_null() expect('something').not_to_be_null() try: expect('something').to_be_null()", "far' def test_not_to_be_null(): expect('something').Not.to_be_null() expect('something').not_to_be_null() try: expect('something').to_be_null() except AssertionError: return assert False, 'Should", "Copyright (c) 2013 <NAME> <EMAIL> from preggy import expect #----------------------------------------------------------------------------- def test_to_be_null(): expect(None).to_be_null()", "-*- # preggy assertions # https://github.com/heynemann/preggy # Licensed under the MIT license: #", "test_not_to_be_null(): expect('something').Not.to_be_null() expect('something').not_to_be_null() try: expect('something').to_be_null() except AssertionError: return assert False, 'Should not have", "expect(None).to_be_null() try: expect(None).not_to_be_null() except AssertionError: return assert False, 'Should not have gotten this", "try: expect(None).not_to_be_null() except AssertionError: return assert False, 'Should not have gotten this far'", "from preggy import expect 
#----------------------------------------------------------------------------- def test_to_be_null(): expect(None).to_be_null() try: expect(None).not_to_be_null() except AssertionError: return", "def test_not_to_be_null(): expect('something').Not.to_be_null() expect('something').not_to_be_null() try: expect('something').to_be_null() except AssertionError: return assert False, 'Should not", "try: expect('something').to_be_null() except AssertionError: return assert False, 'Should not have gotten this far'", "http://www.opensource.org/licenses/mit-license # Copyright (c) 2013 <NAME> <EMAIL> from preggy import expect #----------------------------------------------------------------------------- def", "preggy assertions # https://github.com/heynemann/preggy # Licensed under the MIT license: # http://www.opensource.org/licenses/mit-license #", "https://github.com/heynemann/preggy # Licensed under the MIT license: # http://www.opensource.org/licenses/mit-license # Copyright (c) 2013", "license: # http://www.opensource.org/licenses/mit-license # Copyright (c) 2013 <NAME> <EMAIL> from preggy import expect", "except AssertionError: return assert False, 'Should not have gotten this far' def test_not_to_be_null():", "AssertionError: return assert False, 'Should not have gotten this far' def test_not_to_be_null(): expect('something').Not.to_be_null()", "# preggy assertions # https://github.com/heynemann/preggy # Licensed under the MIT license: # http://www.opensource.org/licenses/mit-license", "#----------------------------------------------------------------------------- def test_to_be_null(): expect(None).to_be_null() try: expect(None).not_to_be_null() except AssertionError: return assert False, 'Should not", "def test_to_be_null(): expect(None).to_be_null() try: expect(None).not_to_be_null() except AssertionError: return assert False, 'Should not have", "expect(None).not_to_be_null() except AssertionError: return assert False, 'Should not have gotten this far' def", "# Licensed under the MIT license: # http://www.opensource.org/licenses/mit-license # Copyright (c) 2013 <NAME>", "2013 <NAME> <EMAIL> from preggy import expect #----------------------------------------------------------------------------- def test_to_be_null(): expect(None).to_be_null() try: expect(None).not_to_be_null()", "coding: utf-8 -*- # preggy assertions # https://github.com/heynemann/preggy # Licensed under the MIT", "test_to_be_null(): expect(None).to_be_null() try: expect(None).not_to_be_null() except AssertionError: return assert False, 'Should not have gotten", "utf-8 -*- # preggy assertions # https://github.com/heynemann/preggy # Licensed under the MIT license:", "import expect #----------------------------------------------------------------------------- def test_to_be_null(): expect(None).to_be_null() try: expect(None).not_to_be_null() except AssertionError: return assert False,", "<NAME> <EMAIL> from preggy import expect #----------------------------------------------------------------------------- def test_to_be_null(): expect(None).to_be_null() try: expect(None).not_to_be_null() except", "preggy import expect #----------------------------------------------------------------------------- def test_to_be_null(): expect(None).to_be_null() try: expect(None).not_to_be_null() except AssertionError: return assert", "# http://www.opensource.org/licenses/mit-license # Copyright (c) 2013 <NAME> <EMAIL> from preggy import expect #-----------------------------------------------------------------------------", "# -*- coding: utf-8 
-*- # preggy assertions # https://github.com/heynemann/preggy # Licensed under", "not have gotten this far' def test_not_to_be_null(): expect('something').Not.to_be_null() expect('something').not_to_be_null() try: expect('something').to_be_null() except AssertionError:", "have gotten this far' def test_not_to_be_null(): expect('something').Not.to_be_null() expect('something').not_to_be_null() try: expect('something').to_be_null() except AssertionError: return", "assert False, 'Should not have gotten this far' def test_not_to_be_null(): expect('something').Not.to_be_null() expect('something').not_to_be_null() try:", "expect('something').not_to_be_null() try: expect('something').to_be_null() except AssertionError: return assert False, 'Should not have gotten this", "<EMAIL> from preggy import expect #----------------------------------------------------------------------------- def test_to_be_null(): expect(None).to_be_null() try: expect(None).not_to_be_null() except AssertionError:" ]
[ "self._maxLength = self._depth self._update_parent() # Update height and maxLength of parent def __repr__(self):", "node._letter return str[::-1] # Return max word length on node's path def maxLength(self):", "# Update height and maxLength of parent def __repr__(self): return f\"({self._letter},{self.word})\" def __iter__(self):", "if all(getattr(self, attr) == getattr(other, attr) for attr in attrs): return all(self[i] ==", "# Longest string on whose path node is on (includes the current node)", "node is on (includes the current node) self._maxLength = self._depth self._update_parent() # Update", "depth (first letter is depth = 1) self._depth = parent._depth+1 else: self._depth =", "= 0 # Longest path existing underneath this nodes # Longest string on", "the current node) self._maxLength = self._depth self._update_parent() # Update height and maxLength of", "self._parent = parent # Pointer to parent self._letter = letter # Current letter", "TypeError(\"Node indices must be int or char\") def __setitem__(self, i, value): if isinstance(i,", "# if other check asserts that other != None if other: attrs =", "'a' = 0) @staticmethod def _charToInt(letter): return ord(letter.lower())-97 # Return word represented by", "= node._letter while node._parent: node = node._parent str += node._letter return str[::-1] #", "node = self str = node._letter while node._parent: node = node._parent str +=", "f\"({self._letter},{self.word})\" def __iter__(self): return iter(self._pointers) # Raises exception/Returns None on failure def __getitem__(self,", "parent, letter, word): self._parent = parent # Pointer to parent self._letter = letter", "else: self._depth = 0 # Root node self._height = 0 # Longest path", "_charToInt(letter): return ord(letter.lower())-97 # Return word represented by node. def whichWord(self): node =", "elif isinstance(i, str): return self._pointers[self._charToInt(i)] else: raise TypeError(\"Node indices must be int or", "= [None for x in range(26)] if parent: # Current depth (first letter", "Current depth (first letter is depth = 1) self._depth = parent._depth+1 else: self._depth", "to parent self._letter = letter # Current letter self.word = word # Bool", "while node._parent: node = node._parent str += node._letter return str[::-1] # Return max", "isinstance(i, int): return self._pointers[i] elif isinstance(i, str): return self._pointers[self._charToInt(i)] else: raise TypeError(\"Node indices", "# Current depth (first letter is depth = 1) self._depth = parent._depth+1 else:", "= value else: raise TypeError(\"Node indices must be int or char\") def __eq__(self,", "attr) == getattr(other, attr) for attr in attrs): return all(self[i] == other[i] for", "other[i] for i in range(26)) return False def _update_parent(self): parent = self._parent if", "i in range(26)) return False def _update_parent(self): parent = self._parent if parent: if", "self._height+1 parent._update_max_length() parent._update_parent() def _update_max_length(self): self._maxLength = self._depth+self._height # Calculates an index from", "str += node._letter return str[::-1] # Return max word length on node's path", "i): if isinstance(i, int): return self._pointers[i] elif isinstance(i, str): return self._pointers[self._charToInt(i)] else: raise", "ord(letter.lower())-97 # Return word represented by node. 
def whichWord(self): node = self str", "(includes the current node) self._maxLength = self._depth self._update_parent() # Update height and maxLength", "whichWord(self): node = self str = node._letter while node._parent: node = node._parent str", "def _update_max_length(self): self._maxLength = self._depth+self._height # Calculates an index from a corresponding char", "__eq__(self, other): # if other check asserts that other != None if other:", "= value elif isinstance(i, str): self._pointers[self._charToInt(i)] = value else: raise TypeError(\"Node indices must", "corresponding char (e.g. 'a' = 0) @staticmethod def _charToInt(letter): return ord(letter.lower())-97 # Return", "letter self.word = word # Bool (Yes or no) # Pointers to other", "in range(26)) return False def _update_parent(self): parent = self._parent if parent: if self._height+1", "self._pointers[i] elif isinstance(i, str): return self._pointers[self._charToInt(i)] else: raise TypeError(\"Node indices must be int", "by node. def whichWord(self): node = self str = node._letter while node._parent: node", "is on (includes the current node) self._maxLength = self._depth self._update_parent() # Update height", "in attrs): return all(self[i] == other[i] for i in range(26)) return False def", "is depth = 1) self._depth = parent._depth+1 else: self._depth = 0 # Root", "path existing underneath this nodes # Longest string on whose path node is", "= self str = node._letter while node._parent: node = node._parent str += node._letter", "parent._height: parent._height = self._height+1 parent._update_max_length() parent._update_parent() def _update_max_length(self): self._maxLength = self._depth+self._height # Calculates", "char (e.g. 'a' = 0) @staticmethod def _charToInt(letter): return ord(letter.lower())-97 # Return word", "__init__(self, parent, letter, word): self._parent = parent # Pointer to parent self._letter =", "__repr__(self): return f\"({self._letter},{self.word})\" def __iter__(self): return iter(self._pointers) # Raises exception/Returns None on failure", "# Return word represented by node. 
def whichWord(self): node = self str =", "node = node._parent str += node._letter return str[::-1] # Return max word length", "= node._parent str += node._letter return str[::-1] # Return max word length on", "raise TypeError(\"Node indices must be int or char\") def __setitem__(self, i, value): if", "self._depth = parent._depth+1 else: self._depth = 0 # Root node self._height = 0", "parent._update_parent() def _update_max_length(self): self._maxLength = self._depth+self._height # Calculates an index from a corresponding", "__setitem__(self, i, value): if isinstance(i, int): self._pointers[i] = value elif isinstance(i, str): self._pointers[self._charToInt(i)]", "all(getattr(self, attr) == getattr(other, attr) for attr in attrs): return all(self[i] == other[i]", "Current letter self.word = word # Bool (Yes or no) # Pointers to", "False def _update_parent(self): parent = self._parent if parent: if self._height+1 > parent._height: parent._height", "parent = self._parent if parent: if self._height+1 > parent._height: parent._height = self._height+1 parent._update_max_length()", "+= node._letter return str[::-1] # Return max word length on node's path def", "must be int or char\") def __setitem__(self, i, value): if isinstance(i, int): self._pointers[i]", "indices must be int or char\") def __setitem__(self, i, value): if isinstance(i, int):", "iter(self._pointers) # Raises exception/Returns None on failure def __getitem__(self, i): if isinstance(i, int):", "<reponame>EdwardG5/tempCrossword<filename>nodeClass.py<gh_stars>0 class Node: def __init__(self, parent, letter, word): self._parent = parent # Pointer", "= [\"_letter\", \"_depth\", \"word\", \"_height\", \"_maxLength\"] if all(getattr(self, attr) == getattr(other, attr) for", "TypeError(\"Node indices must be int or char\") def __eq__(self, other): # if other", "nodes # Longest string on whose path node is on (includes the current", "self._depth self._update_parent() # Update height and maxLength of parent def __repr__(self): return f\"({self._letter},{self.word})\"", "char\") def __eq__(self, other): # if other check asserts that other != None", "string on whose path node is on (includes the current node) self._maxLength =", "range(26)] if parent: # Current depth (first letter is depth = 1) self._depth", "isinstance(i, str): self._pointers[self._charToInt(i)] = value else: raise TypeError(\"Node indices must be int or", "existing underneath this nodes # Longest string on whose path node is on", "\"_height\", \"_maxLength\"] if all(getattr(self, attr) == getattr(other, attr) for attr in attrs): return", "self.word = word # Bool (Yes or no) # Pointers to other child", "parent: # Current depth (first letter is depth = 1) self._depth = parent._depth+1", "if isinstance(i, int): self._pointers[i] = value elif isinstance(i, str): self._pointers[self._charToInt(i)] = value else:", "def __setitem__(self, i, value): if isinstance(i, int): self._pointers[i] = value elif isinstance(i, str):", "= 1) self._depth = parent._depth+1 else: self._depth = 0 # Root node self._height", "__iter__(self): return iter(self._pointers) # Raises exception/Returns None on failure def __getitem__(self, i): if", "[None for x in range(26)] if parent: # Current depth (first letter is", "return str[::-1] # Return max word length on node's path def maxLength(self): return", "value elif isinstance(i, str): self._pointers[self._charToInt(i)] = value else: raise TypeError(\"Node indices must be", "node._parent str += node._letter return str[::-1] # Return max word length on 
node's", "of parent def __repr__(self): return f\"({self._letter},{self.word})\" def __iter__(self): return iter(self._pointers) # Raises exception/Returns", "or no) # Pointers to other child nodes self._pointers = [None for x", "parent._height = self._height+1 parent._update_max_length() parent._update_parent() def _update_max_length(self): self._maxLength = self._depth+self._height # Calculates an", "def _charToInt(letter): return ord(letter.lower())-97 # Return word represented by node. def whichWord(self): node", "failure def __getitem__(self, i): if isinstance(i, int): return self._pointers[i] elif isinstance(i, str): return", "# Longest path existing underneath this nodes # Longest string on whose path", "value): if isinstance(i, int): self._pointers[i] = value elif isinstance(i, str): self._pointers[self._charToInt(i)] = value", "attr in attrs): return all(self[i] == other[i] for i in range(26)) return False", "other child nodes self._pointers = [None for x in range(26)] if parent: #", "def __getitem__(self, i): if isinstance(i, int): return self._pointers[i] elif isinstance(i, str): return self._pointers[self._charToInt(i)]", "if isinstance(i, int): return self._pointers[i] elif isinstance(i, str): return self._pointers[self._charToInt(i)] else: raise TypeError(\"Node", "return self._pointers[i] elif isinstance(i, str): return self._pointers[self._charToInt(i)] else: raise TypeError(\"Node indices must be", "must be int or char\") def __eq__(self, other): # if other check asserts", "Pointer to parent self._letter = letter # Current letter self.word = word #", "node self._height = 0 # Longest path existing underneath this nodes # Longest", "= 0 # Root node self._height = 0 # Longest path existing underneath", "depth = 1) self._depth = parent._depth+1 else: self._depth = 0 # Root node", "= letter # Current letter self.word = word # Bool (Yes or no)", "int or char\") def __setitem__(self, i, value): if isinstance(i, int): self._pointers[i] = value", "i, value): if isinstance(i, int): self._pointers[i] = value elif isinstance(i, str): self._pointers[self._charToInt(i)] =", "isinstance(i, int): self._pointers[i] = value elif isinstance(i, str): self._pointers[self._charToInt(i)] = value else: raise", "__getitem__(self, i): if isinstance(i, int): return self._pointers[i] elif isinstance(i, str): return self._pointers[self._charToInt(i)] else:", "word): self._parent = parent # Pointer to parent self._letter = letter # Current", "Longest string on whose path node is on (includes the current node) self._maxLength", "represented by node. def whichWord(self): node = self str = node._letter while node._parent:", "and maxLength of parent def __repr__(self): return f\"({self._letter},{self.word})\" def __iter__(self): return iter(self._pointers) #", "= 0) @staticmethod def _charToInt(letter): return ord(letter.lower())-97 # Return word represented by node.", "self._depth+self._height # Calculates an index from a corresponding char (e.g. 
'a' = 0)", "parent def __repr__(self): return f\"({self._letter},{self.word})\" def __iter__(self): return iter(self._pointers) # Raises exception/Returns None", "# Current letter self.word = word # Bool (Yes or no) # Pointers", "> parent._height: parent._height = self._height+1 parent._update_max_length() parent._update_parent() def _update_max_length(self): self._maxLength = self._depth+self._height #", "else: raise TypeError(\"Node indices must be int or char\") def __setitem__(self, i, value):", "other: attrs = [\"_letter\", \"_depth\", \"word\", \"_height\", \"_maxLength\"] if all(getattr(self, attr) == getattr(other,", "for attr in attrs): return all(self[i] == other[i] for i in range(26)) return", "def _update_parent(self): parent = self._parent if parent: if self._height+1 > parent._height: parent._height =", "str = node._letter while node._parent: node = node._parent str += node._letter return str[::-1]", "[\"_letter\", \"_depth\", \"word\", \"_height\", \"_maxLength\"] if all(getattr(self, attr) == getattr(other, attr) for attr", "\"word\", \"_height\", \"_maxLength\"] if all(getattr(self, attr) == getattr(other, attr) for attr in attrs):", "this nodes # Longest string on whose path node is on (includes the", "if parent: # Current depth (first letter is depth = 1) self._depth =", "def whichWord(self): node = self str = node._letter while node._parent: node = node._parent", "class Node: def __init__(self, parent, letter, word): self._parent = parent # Pointer to", "else: raise TypeError(\"Node indices must be int or char\") def __eq__(self, other): #", "in range(26)] if parent: # Current depth (first letter is depth = 1)", "node._letter while node._parent: node = node._parent str += node._letter return str[::-1] # Return", "Root node self._height = 0 # Longest path existing underneath this nodes #", "other != None if other: attrs = [\"_letter\", \"_depth\", \"word\", \"_height\", \"_maxLength\"] if", "path node is on (includes the current node) self._maxLength = self._depth self._update_parent() #", "(first letter is depth = 1) self._depth = parent._depth+1 else: self._depth = 0", "for x in range(26)] if parent: # Current depth (first letter is depth", "# Raises exception/Returns None on failure def __getitem__(self, i): if isinstance(i, int): return", "all(self[i] == other[i] for i in range(26)) return False def _update_parent(self): parent =", "char\") def __setitem__(self, i, value): if isinstance(i, int): self._pointers[i] = value elif isinstance(i,", "= self._height+1 parent._update_max_length() parent._update_parent() def _update_max_length(self): self._maxLength = self._depth+self._height # Calculates an index", "raise TypeError(\"Node indices must be int or char\") def __eq__(self, other): # if", "str): self._pointers[self._charToInt(i)] = value else: raise TypeError(\"Node indices must be int or char\")", "0 # Longest path existing underneath this nodes # Longest string on whose", "None if other: attrs = [\"_letter\", \"_depth\", \"word\", \"_height\", \"_maxLength\"] if all(getattr(self, attr)", "def __iter__(self): return iter(self._pointers) # Raises exception/Returns None on failure def __getitem__(self, i):", "other check asserts that other != None if other: attrs = [\"_letter\", \"_depth\",", "that other != None if other: attrs = [\"_letter\", \"_depth\", \"word\", \"_height\", \"_maxLength\"]", "from a corresponding char (e.g. 
'a' = 0) @staticmethod def _charToInt(letter): return ord(letter.lower())-97", "str[::-1] # Return max word length on node's path def maxLength(self): return self._maxLength", "exception/Returns None on failure def __getitem__(self, i): if isinstance(i, int): return self._pointers[i] elif", "indices must be int or char\") def __eq__(self, other): # if other check", "a corresponding char (e.g. 'a' = 0) @staticmethod def _charToInt(letter): return ord(letter.lower())-97 #", "# Root node self._height = 0 # Longest path existing underneath this nodes", "self._update_parent() # Update height and maxLength of parent def __repr__(self): return f\"({self._letter},{self.word})\" def", "parent self._letter = letter # Current letter self.word = word # Bool (Yes", "# Bool (Yes or no) # Pointers to other child nodes self._pointers =", "Longest path existing underneath this nodes # Longest string on whose path node", "self._letter = letter # Current letter self.word = word # Bool (Yes or", "attrs): return all(self[i] == other[i] for i in range(26)) return False def _update_parent(self):", "# Calculates an index from a corresponding char (e.g. 'a' = 0) @staticmethod", "an index from a corresponding char (e.g. 'a' = 0) @staticmethod def _charToInt(letter):", "asserts that other != None if other: attrs = [\"_letter\", \"_depth\", \"word\", \"_height\",", "word represented by node. def whichWord(self): node = self str = node._letter while", "def __eq__(self, other): # if other check asserts that other != None if", "child nodes self._pointers = [None for x in range(26)] if parent: # Current", "1) self._depth = parent._depth+1 else: self._depth = 0 # Root node self._height =", "self._pointers[self._charToInt(i)] else: raise TypeError(\"Node indices must be int or char\") def __setitem__(self, i,", "underneath this nodes # Longest string on whose path node is on (includes", "def __init__(self, parent, letter, word): self._parent = parent # Pointer to parent self._letter", "Bool (Yes or no) # Pointers to other child nodes self._pointers = [None", "return ord(letter.lower())-97 # Return word represented by node. def whichWord(self): node = self", "return all(self[i] == other[i] for i in range(26)) return False def _update_parent(self): parent", "node. def whichWord(self): node = self str = node._letter while node._parent: node =", "parent: if self._height+1 > parent._height: parent._height = self._height+1 parent._update_max_length() parent._update_parent() def _update_max_length(self): self._maxLength", "Return word represented by node. def whichWord(self): node = self str = node._letter", "self._pointers[self._charToInt(i)] = value else: raise TypeError(\"Node indices must be int or char\") def", "index from a corresponding char (e.g. 
'a' = 0) @staticmethod def _charToInt(letter): return", "on failure def __getitem__(self, i): if isinstance(i, int): return self._pointers[i] elif isinstance(i, str):", "height and maxLength of parent def __repr__(self): return f\"({self._letter},{self.word})\" def __iter__(self): return iter(self._pointers)", "\"_depth\", \"word\", \"_height\", \"_maxLength\"] if all(getattr(self, attr) == getattr(other, attr) for attr in", "\"_maxLength\"] if all(getattr(self, attr) == getattr(other, attr) for attr in attrs): return all(self[i]", "x in range(26)] if parent: # Current depth (first letter is depth =", "elif isinstance(i, str): self._pointers[self._charToInt(i)] = value else: raise TypeError(\"Node indices must be int", "0 # Root node self._height = 0 # Longest path existing underneath this", "!= None if other: attrs = [\"_letter\", \"_depth\", \"word\", \"_height\", \"_maxLength\"] if all(getattr(self,", "# Pointer to parent self._letter = letter # Current letter self.word = word", "(e.g. 'a' = 0) @staticmethod def _charToInt(letter): return ord(letter.lower())-97 # Return word represented", "self._height+1 > parent._height: parent._height = self._height+1 parent._update_max_length() parent._update_parent() def _update_max_length(self): self._maxLength = self._depth+self._height", "= word # Bool (Yes or no) # Pointers to other child nodes", "= parent # Pointer to parent self._letter = letter # Current letter self.word", "Raises exception/Returns None on failure def __getitem__(self, i): if isinstance(i, int): return self._pointers[i]", "= self._depth+self._height # Calculates an index from a corresponding char (e.g. 'a' =", "= self._parent if parent: if self._height+1 > parent._height: parent._height = self._height+1 parent._update_max_length() parent._update_parent()", "self._parent if parent: if self._height+1 > parent._height: parent._height = self._height+1 parent._update_max_length() parent._update_parent() def", "be int or char\") def __setitem__(self, i, value): if isinstance(i, int): self._pointers[i] =", "current node) self._maxLength = self._depth self._update_parent() # Update height and maxLength of parent", "if other: attrs = [\"_letter\", \"_depth\", \"word\", \"_height\", \"_maxLength\"] if all(getattr(self, attr) ==", "# Pointers to other child nodes self._pointers = [None for x in range(26)]", "if parent: if self._height+1 > parent._height: parent._height = self._height+1 parent._update_max_length() parent._update_parent() def _update_max_length(self):", "@staticmethod def _charToInt(letter): return ord(letter.lower())-97 # Return word represented by node. 
def whichWord(self):", "node) self._maxLength = self._depth self._update_parent() # Update height and maxLength of parent def", "on (includes the current node) self._maxLength = self._depth self._update_parent() # Update height and", "value else: raise TypeError(\"Node indices must be int or char\") def __eq__(self, other):", "parent._update_max_length() parent._update_parent() def _update_max_length(self): self._maxLength = self._depth+self._height # Calculates an index from a", "node._parent: node = node._parent str += node._letter return str[::-1] # Return max word", "return iter(self._pointers) # Raises exception/Returns None on failure def __getitem__(self, i): if isinstance(i,", "letter # Current letter self.word = word # Bool (Yes or no) #", "check asserts that other != None if other: attrs = [\"_letter\", \"_depth\", \"word\",", "self._maxLength = self._depth+self._height # Calculates an index from a corresponding char (e.g. 'a'", "= self._depth self._update_parent() # Update height and maxLength of parent def __repr__(self): return", "attrs = [\"_letter\", \"_depth\", \"word\", \"_height\", \"_maxLength\"] if all(getattr(self, attr) == getattr(other, attr)", "other): # if other check asserts that other != None if other: attrs", "int): self._pointers[i] = value elif isinstance(i, str): self._pointers[self._charToInt(i)] = value else: raise TypeError(\"Node", "0) @staticmethod def _charToInt(letter): return ord(letter.lower())-97 # Return word represented by node. def", "= parent._depth+1 else: self._depth = 0 # Root node self._height = 0 #", "if self._height+1 > parent._height: parent._height = self._height+1 parent._update_max_length() parent._update_parent() def _update_max_length(self): self._maxLength =", "(Yes or no) # Pointers to other child nodes self._pointers = [None for", "to other child nodes self._pointers = [None for x in range(26)] if parent:", "_update_parent(self): parent = self._parent if parent: if self._height+1 > parent._height: parent._height = self._height+1", "str): return self._pointers[self._charToInt(i)] else: raise TypeError(\"Node indices must be int or char\") def", "self._depth = 0 # Root node self._height = 0 # Longest path existing", "return f\"({self._letter},{self.word})\" def __iter__(self): return iter(self._pointers) # Raises exception/Returns None on failure def", "self._height = 0 # Longest path existing underneath this nodes # Longest string", "self._pointers[i] = value elif isinstance(i, str): self._pointers[self._charToInt(i)] = value else: raise TypeError(\"Node indices", "or char\") def __setitem__(self, i, value): if isinstance(i, int): self._pointers[i] = value elif", "_update_max_length(self): self._maxLength = self._depth+self._height # Calculates an index from a corresponding char (e.g.", "== getattr(other, attr) for attr in attrs): return all(self[i] == other[i] for i", "== other[i] for i in range(26)) return False def _update_parent(self): parent = self._parent", "for i in range(26)) return False def _update_parent(self): parent = self._parent if parent:", "Calculates an index from a corresponding char (e.g. 
'a' = 0) @staticmethod def", "return self._pointers[self._charToInt(i)] else: raise TypeError(\"Node indices must be int or char\") def __setitem__(self,", "if other check asserts that other != None if other: attrs = [\"_letter\",", "Pointers to other child nodes self._pointers = [None for x in range(26)] if", "self._pointers = [None for x in range(26)] if parent: # Current depth (first", "None on failure def __getitem__(self, i): if isinstance(i, int): return self._pointers[i] elif isinstance(i,", "letter is depth = 1) self._depth = parent._depth+1 else: self._depth = 0 #", "def __repr__(self): return f\"({self._letter},{self.word})\" def __iter__(self): return iter(self._pointers) # Raises exception/Returns None on", "self str = node._letter while node._parent: node = node._parent str += node._letter return", "word # Bool (Yes or no) # Pointers to other child nodes self._pointers", "be int or char\") def __eq__(self, other): # if other check asserts that", "attr) for attr in attrs): return all(self[i] == other[i] for i in range(26))", "nodes self._pointers = [None for x in range(26)] if parent: # Current depth", "Node: def __init__(self, parent, letter, word): self._parent = parent # Pointer to parent", "getattr(other, attr) for attr in attrs): return all(self[i] == other[i] for i in", "return False def _update_parent(self): parent = self._parent if parent: if self._height+1 > parent._height:", "int): return self._pointers[i] elif isinstance(i, str): return self._pointers[self._charToInt(i)] else: raise TypeError(\"Node indices must", "letter, word): self._parent = parent # Pointer to parent self._letter = letter #", "parent # Pointer to parent self._letter = letter # Current letter self.word =", "parent._depth+1 else: self._depth = 0 # Root node self._height = 0 # Longest", "whose path node is on (includes the current node) self._maxLength = self._depth self._update_parent()", "isinstance(i, str): return self._pointers[self._charToInt(i)] else: raise TypeError(\"Node indices must be int or char\")", "on whose path node is on (includes the current node) self._maxLength = self._depth", "int or char\") def __eq__(self, other): # if other check asserts that other", "maxLength of parent def __repr__(self): return f\"({self._letter},{self.word})\" def __iter__(self): return iter(self._pointers) # Raises", "Update height and maxLength of parent def __repr__(self): return f\"({self._letter},{self.word})\" def __iter__(self): return", "or char\") def __eq__(self, other): # if other check asserts that other !=", "no) # Pointers to other child nodes self._pointers = [None for x in", "range(26)) return False def _update_parent(self): parent = self._parent if parent: if self._height+1 >" ]
[ "from dtool_lookup_server.utils import dataset_info_is_valid for key in INFO.keys(): info = INFO.copy() del info[key]", "info = INFO.copy() info[\"uuid\"] = \"af6727bf-29c7-43dd-b42f\" assert not dataset_info_is_valid(info) def test_dataset_info_is_valid_returns_false_if_base_uri_ends_with_slash(): from dtool_lookup_server.utils", "dataset_info_is_valid for key in INFO.keys(): info = INFO.copy() del info[key] assert not dataset_info_is_valid(info),", "to register a dataset. INFO = { \"uuid\": \"af6727bf-29c7-43dd-b42f-a5d7ede28337\", \"type\": \"dataset\", \"uri\": \"file:///tmp/a_dataset\",", "assert not dataset_info_is_valid(info) def test_dataset_info_returns_false_if_uuid_looks_invalid(): from dtool_lookup_server.utils import dataset_info_is_valid info = INFO.copy() info[\"uuid\"]", "helper function.\"\"\" # Minimum data required to register a dataset. INFO = {", "def test_dataset_info_returns_false_when_type_is_not_dataset(): from dtool_lookup_server.utils import dataset_info_is_valid info = INFO.copy() info[\"type\"] = \"protodataset\" assert", "dataset_info_is_valid(info) def test_dataset_info_is_valid_returns_false_if_base_uri_ends_with_slash(): from dtool_lookup_server.utils import dataset_info_is_valid info = INFO.copy() info[\"base_uri\"] = \"file:///tmp/\"", "\"\"\"Test dtool_lookup_server.utils.dataset_info_is_valid helper function.\"\"\" # Minimum data required to register a dataset. INFO", "{ \"dtoolcore_version\": \"3.7.0\", \"hash_function\": \"md5sum_hexdigest\", \"items\": {} }, \"base_uri\": \"file:///tmp\", \"creator_username\": \"olssont\", \"frozen_at\":", "\"annotations\": {\"stars\": 5}, \"tags\": [\"empty\", \"dataset\"], } def test_dataset_info_is_valid_returns_true_on_valid_info(): from dtool_lookup_server.utils import dataset_info_is_valid", "from dtool_lookup_server.utils import dataset_info_is_valid info = INFO.copy() info[\"type\"] = \"protodataset\" assert not dataset_info_is_valid(info)", "\"dtoolcore_version\": \"3.7.0\", \"hash_function\": \"md5sum_hexdigest\", \"items\": {} }, \"base_uri\": \"file:///tmp\", \"creator_username\": \"olssont\", \"frozen_at\": 1536238185.881941,", "required to register a dataset. 
INFO = { \"uuid\": \"af6727bf-29c7-43dd-b42f-a5d7ede28337\", \"type\": \"dataset\", \"uri\":", "{\"description\": \"test dataset\"}, \"manifest\": { \"dtoolcore_version\": \"3.7.0\", \"hash_function\": \"md5sum_hexdigest\", \"items\": {} }, \"base_uri\":", "= INFO.copy() info[\"type\"] = \"protodataset\" assert not dataset_info_is_valid(info) def test_dataset_info_returns_false_if_uuid_looks_invalid(): from dtool_lookup_server.utils import", "\"readme\": {\"description\": \"test dataset\"}, \"manifest\": { \"dtoolcore_version\": \"3.7.0\", \"hash_function\": \"md5sum_hexdigest\", \"items\": {} },", "for key in INFO.keys(): info = INFO.copy() del info[key] assert not dataset_info_is_valid(info), key", "\"manifest\": { \"dtoolcore_version\": \"3.7.0\", \"hash_function\": \"md5sum_hexdigest\", \"items\": {} }, \"base_uri\": \"file:///tmp\", \"creator_username\": \"olssont\",", "\"dataset\"], } def test_dataset_info_is_valid_returns_true_on_valid_info(): from dtool_lookup_server.utils import dataset_info_is_valid info = INFO.copy() assert dataset_info_is_valid(info)", "\"3.7.0\", \"hash_function\": \"md5sum_hexdigest\", \"items\": {} }, \"base_uri\": \"file:///tmp\", \"creator_username\": \"olssont\", \"frozen_at\": 1536238185.881941, \"annotations\":", "in INFO.keys(): info = INFO.copy() del info[key] assert not dataset_info_is_valid(info), key def test_dataset_info_returns_false_when_type_is_not_dataset():", "info[\"uuid\"] = \"af6727bf-29c7-43dd-b42f\" assert not dataset_info_is_valid(info) def test_dataset_info_is_valid_returns_false_if_base_uri_ends_with_slash(): from dtool_lookup_server.utils import dataset_info_is_valid info", "not dataset_info_is_valid(info), key def test_dataset_info_returns_false_when_type_is_not_dataset(): from dtool_lookup_server.utils import dataset_info_is_valid info = INFO.copy() info[\"type\"]", "\"type\": \"dataset\", \"uri\": \"file:///tmp/a_dataset\", \"name\": \"my-dataset\", \"readme\": {\"description\": \"test dataset\"}, \"manifest\": { \"dtoolcore_version\":", "import dataset_info_is_valid info = INFO.copy() info[\"uuid\"] = \"af6727bf-29c7-43dd-b42f\" assert not dataset_info_is_valid(info) def test_dataset_info_is_valid_returns_false_if_base_uri_ends_with_slash():", "\"dataset\", \"uri\": \"file:///tmp/a_dataset\", \"name\": \"my-dataset\", \"readme\": {\"description\": \"test dataset\"}, \"manifest\": { \"dtoolcore_version\": \"3.7.0\",", "[\"empty\", \"dataset\"], } def test_dataset_info_is_valid_returns_true_on_valid_info(): from dtool_lookup_server.utils import dataset_info_is_valid info = INFO.copy() assert", "\"creator_username\": \"olssont\", \"frozen_at\": 1536238185.881941, \"annotations\": {\"stars\": 5}, \"tags\": [\"empty\", \"dataset\"], } def test_dataset_info_is_valid_returns_true_on_valid_info():", "\"name\": \"my-dataset\", \"readme\": {\"description\": \"test dataset\"}, \"manifest\": { \"dtoolcore_version\": \"3.7.0\", \"hash_function\": \"md5sum_hexdigest\", \"items\":", "\"test dataset\"}, \"manifest\": { \"dtoolcore_version\": \"3.7.0\", \"hash_function\": \"md5sum_hexdigest\", \"items\": {} }, \"base_uri\": \"file:///tmp\",", "register a dataset. 
INFO = { \"uuid\": \"af6727bf-29c7-43dd-b42f-a5d7ede28337\", \"type\": \"dataset\", \"uri\": \"file:///tmp/a_dataset\", \"name\":", "from dtool_lookup_server.utils import dataset_info_is_valid info = INFO.copy() assert dataset_info_is_valid(info) def test_dataset_info_returns_false_when_key_data_is_missing(): from dtool_lookup_server.utils", "dtool_lookup_server.utils import dataset_info_is_valid info = INFO.copy() assert dataset_info_is_valid(info) def test_dataset_info_returns_false_when_key_data_is_missing(): from dtool_lookup_server.utils import", "dtool_lookup_server.utils import dataset_info_is_valid for key in INFO.keys(): info = INFO.copy() del info[key] assert", "test_dataset_info_is_valid_returns_false_if_base_uri_ends_with_slash(): from dtool_lookup_server.utils import dataset_info_is_valid info = INFO.copy() info[\"base_uri\"] = \"file:///tmp/\" assert not", "assert dataset_info_is_valid(info) def test_dataset_info_returns_false_when_key_data_is_missing(): from dtool_lookup_server.utils import dataset_info_is_valid for key in INFO.keys(): info", "import dataset_info_is_valid info = INFO.copy() assert dataset_info_is_valid(info) def test_dataset_info_returns_false_when_key_data_is_missing(): from dtool_lookup_server.utils import dataset_info_is_valid", "INFO = { \"uuid\": \"af6727bf-29c7-43dd-b42f-a5d7ede28337\", \"type\": \"dataset\", \"uri\": \"file:///tmp/a_dataset\", \"name\": \"my-dataset\", \"readme\": {\"description\":", "INFO.copy() info[\"type\"] = \"protodataset\" assert not dataset_info_is_valid(info) def test_dataset_info_returns_false_if_uuid_looks_invalid(): from dtool_lookup_server.utils import dataset_info_is_valid", "test_dataset_info_returns_false_when_key_data_is_missing(): from dtool_lookup_server.utils import dataset_info_is_valid for key in INFO.keys(): info = INFO.copy() del", "= \"protodataset\" assert not dataset_info_is_valid(info) def test_dataset_info_returns_false_if_uuid_looks_invalid(): from dtool_lookup_server.utils import dataset_info_is_valid info =", "\"hash_function\": \"md5sum_hexdigest\", \"items\": {} }, \"base_uri\": \"file:///tmp\", \"creator_username\": \"olssont\", \"frozen_at\": 1536238185.881941, \"annotations\": {\"stars\":", "1536238185.881941, \"annotations\": {\"stars\": 5}, \"tags\": [\"empty\", \"dataset\"], } def test_dataset_info_is_valid_returns_true_on_valid_info(): from dtool_lookup_server.utils import", "INFO.copy() assert dataset_info_is_valid(info) def test_dataset_info_returns_false_when_key_data_is_missing(): from dtool_lookup_server.utils import dataset_info_is_valid for key in INFO.keys():", "\"my-dataset\", \"readme\": {\"description\": \"test dataset\"}, \"manifest\": { \"dtoolcore_version\": \"3.7.0\", \"hash_function\": \"md5sum_hexdigest\", \"items\": {}", "dtool_lookup_server.utils import dataset_info_is_valid info = INFO.copy() info[\"uuid\"] = \"af6727bf-29c7-43dd-b42f\" assert not dataset_info_is_valid(info) def", "info[\"type\"] = \"protodataset\" assert not dataset_info_is_valid(info) def test_dataset_info_returns_false_if_uuid_looks_invalid(): from dtool_lookup_server.utils import dataset_info_is_valid info", "= INFO.copy() assert dataset_info_is_valid(info) def test_dataset_info_returns_false_when_key_data_is_missing(): from dtool_lookup_server.utils import dataset_info_is_valid for key in", "dataset_info_is_valid(info) def test_dataset_info_returns_false_when_key_data_is_missing(): from dtool_lookup_server.utils import dataset_info_is_valid for key in INFO.keys(): info =", "del info[key] assert not 
dataset_info_is_valid(info), key def test_dataset_info_returns_false_when_type_is_not_dataset(): from dtool_lookup_server.utils import dataset_info_is_valid info", "\"af6727bf-29c7-43dd-b42f-a5d7ede28337\", \"type\": \"dataset\", \"uri\": \"file:///tmp/a_dataset\", \"name\": \"my-dataset\", \"readme\": {\"description\": \"test dataset\"}, \"manifest\": {", "key in INFO.keys(): info = INFO.copy() del info[key] assert not dataset_info_is_valid(info), key def", "function.\"\"\" # Minimum data required to register a dataset. INFO = { \"uuid\":", "info = INFO.copy() assert dataset_info_is_valid(info) def test_dataset_info_returns_false_when_key_data_is_missing(): from dtool_lookup_server.utils import dataset_info_is_valid for key", "\"md5sum_hexdigest\", \"items\": {} }, \"base_uri\": \"file:///tmp\", \"creator_username\": \"olssont\", \"frozen_at\": 1536238185.881941, \"annotations\": {\"stars\": 5},", "\"file:///tmp\", \"creator_username\": \"olssont\", \"frozen_at\": 1536238185.881941, \"annotations\": {\"stars\": 5}, \"tags\": [\"empty\", \"dataset\"], } def", "def test_dataset_info_returns_false_if_uuid_looks_invalid(): from dtool_lookup_server.utils import dataset_info_is_valid info = INFO.copy() info[\"uuid\"] = \"af6727bf-29c7-43dd-b42f\" assert", "= INFO.copy() info[\"uuid\"] = \"af6727bf-29c7-43dd-b42f\" assert not dataset_info_is_valid(info) def test_dataset_info_is_valid_returns_false_if_base_uri_ends_with_slash(): from dtool_lookup_server.utils import", "dataset_info_is_valid(info), key def test_dataset_info_returns_false_when_type_is_not_dataset(): from dtool_lookup_server.utils import dataset_info_is_valid info = INFO.copy() info[\"type\"] =", "= INFO.copy() del info[key] assert not dataset_info_is_valid(info), key def test_dataset_info_returns_false_when_type_is_not_dataset(): from dtool_lookup_server.utils import", "\"tags\": [\"empty\", \"dataset\"], } def test_dataset_info_is_valid_returns_true_on_valid_info(): from dtool_lookup_server.utils import dataset_info_is_valid info = INFO.copy()", "\"file:///tmp/a_dataset\", \"name\": \"my-dataset\", \"readme\": {\"description\": \"test dataset\"}, \"manifest\": { \"dtoolcore_version\": \"3.7.0\", \"hash_function\": \"md5sum_hexdigest\",", "dtool_lookup_server.utils import dataset_info_is_valid info = INFO.copy() info[\"type\"] = \"protodataset\" assert not dataset_info_is_valid(info) def", "\"af6727bf-29c7-43dd-b42f\" assert not dataset_info_is_valid(info) def test_dataset_info_is_valid_returns_false_if_base_uri_ends_with_slash(): from dtool_lookup_server.utils import dataset_info_is_valid info = INFO.copy()", "info = INFO.copy() del info[key] assert not dataset_info_is_valid(info), key def test_dataset_info_returns_false_when_type_is_not_dataset(): from dtool_lookup_server.utils", "\"frozen_at\": 1536238185.881941, \"annotations\": {\"stars\": 5}, \"tags\": [\"empty\", \"dataset\"], } def test_dataset_info_is_valid_returns_true_on_valid_info(): from dtool_lookup_server.utils", "\"uuid\": \"af6727bf-29c7-43dd-b42f-a5d7ede28337\", \"type\": \"dataset\", \"uri\": \"file:///tmp/a_dataset\", \"name\": \"my-dataset\", \"readme\": {\"description\": \"test dataset\"}, \"manifest\":", "# Minimum data required to register a dataset. 
INFO = { \"uuid\": \"af6727bf-29c7-43dd-b42f-a5d7ede28337\",", "}, \"base_uri\": \"file:///tmp\", \"creator_username\": \"olssont\", \"frozen_at\": 1536238185.881941, \"annotations\": {\"stars\": 5}, \"tags\": [\"empty\", \"dataset\"],", "\"base_uri\": \"file:///tmp\", \"creator_username\": \"olssont\", \"frozen_at\": 1536238185.881941, \"annotations\": {\"stars\": 5}, \"tags\": [\"empty\", \"dataset\"], }", "\"protodataset\" assert not dataset_info_is_valid(info) def test_dataset_info_returns_false_if_uuid_looks_invalid(): from dtool_lookup_server.utils import dataset_info_is_valid info = INFO.copy()", "} def test_dataset_info_is_valid_returns_true_on_valid_info(): from dtool_lookup_server.utils import dataset_info_is_valid info = INFO.copy() assert dataset_info_is_valid(info) def", "\"uri\": \"file:///tmp/a_dataset\", \"name\": \"my-dataset\", \"readme\": {\"description\": \"test dataset\"}, \"manifest\": { \"dtoolcore_version\": \"3.7.0\", \"hash_function\":", "data required to register a dataset. INFO = { \"uuid\": \"af6727bf-29c7-43dd-b42f-a5d7ede28337\", \"type\": \"dataset\",", "dataset_info_is_valid info = INFO.copy() info[\"uuid\"] = \"af6727bf-29c7-43dd-b42f\" assert not dataset_info_is_valid(info) def test_dataset_info_is_valid_returns_false_if_base_uri_ends_with_slash(): from", "test_dataset_info_is_valid_returns_true_on_valid_info(): from dtool_lookup_server.utils import dataset_info_is_valid info = INFO.copy() assert dataset_info_is_valid(info) def test_dataset_info_returns_false_when_key_data_is_missing(): from", "= \"af6727bf-29c7-43dd-b42f\" assert not dataset_info_is_valid(info) def test_dataset_info_is_valid_returns_false_if_base_uri_ends_with_slash(): from dtool_lookup_server.utils import dataset_info_is_valid info =", "not dataset_info_is_valid(info) def test_dataset_info_returns_false_if_uuid_looks_invalid(): from dtool_lookup_server.utils import dataset_info_is_valid info = INFO.copy() info[\"uuid\"] =", "5}, \"tags\": [\"empty\", \"dataset\"], } def test_dataset_info_is_valid_returns_true_on_valid_info(): from dtool_lookup_server.utils import dataset_info_is_valid info =", "\"olssont\", \"frozen_at\": 1536238185.881941, \"annotations\": {\"stars\": 5}, \"tags\": [\"empty\", \"dataset\"], } def test_dataset_info_is_valid_returns_true_on_valid_info(): from", "info = INFO.copy() info[\"type\"] = \"protodataset\" assert not dataset_info_is_valid(info) def test_dataset_info_returns_false_if_uuid_looks_invalid(): from dtool_lookup_server.utils", "{} }, \"base_uri\": \"file:///tmp\", \"creator_username\": \"olssont\", \"frozen_at\": 1536238185.881941, \"annotations\": {\"stars\": 5}, \"tags\": [\"empty\",", "dataset_info_is_valid(info) def test_dataset_info_returns_false_if_uuid_looks_invalid(): from dtool_lookup_server.utils import dataset_info_is_valid info = INFO.copy() info[\"uuid\"] = \"af6727bf-29c7-43dd-b42f\"", "dataset_info_is_valid info = INFO.copy() assert dataset_info_is_valid(info) def test_dataset_info_returns_false_when_key_data_is_missing(): from dtool_lookup_server.utils import dataset_info_is_valid for", "test_dataset_info_returns_false_when_type_is_not_dataset(): from dtool_lookup_server.utils import dataset_info_is_valid info = INFO.copy() info[\"type\"] = \"protodataset\" assert not", "from dtool_lookup_server.utils import dataset_info_is_valid info = INFO.copy() info[\"base_uri\"] = \"file:///tmp/\" assert not dataset_info_is_valid(info)", "def test_dataset_info_is_valid_returns_false_if_base_uri_ends_with_slash(): from dtool_lookup_server.utils 
import dataset_info_is_valid info = INFO.copy() info[\"base_uri\"] = \"file:///tmp/\" assert", "\"items\": {} }, \"base_uri\": \"file:///tmp\", \"creator_username\": \"olssont\", \"frozen_at\": 1536238185.881941, \"annotations\": {\"stars\": 5}, \"tags\":", "def test_dataset_info_returns_false_when_key_data_is_missing(): from dtool_lookup_server.utils import dataset_info_is_valid for key in INFO.keys(): info = INFO.copy()", "dataset_info_is_valid info = INFO.copy() info[\"type\"] = \"protodataset\" assert not dataset_info_is_valid(info) def test_dataset_info_returns_false_if_uuid_looks_invalid(): from", "INFO.keys(): info = INFO.copy() del info[key] assert not dataset_info_is_valid(info), key def test_dataset_info_returns_false_when_type_is_not_dataset(): from", "dtool_lookup_server.utils.dataset_info_is_valid helper function.\"\"\" # Minimum data required to register a dataset. INFO =", "{ \"uuid\": \"af6727bf-29c7-43dd-b42f-a5d7ede28337\", \"type\": \"dataset\", \"uri\": \"file:///tmp/a_dataset\", \"name\": \"my-dataset\", \"readme\": {\"description\": \"test dataset\"},", "{\"stars\": 5}, \"tags\": [\"empty\", \"dataset\"], } def test_dataset_info_is_valid_returns_true_on_valid_info(): from dtool_lookup_server.utils import dataset_info_is_valid info", "import dataset_info_is_valid info = INFO.copy() info[\"type\"] = \"protodataset\" assert not dataset_info_is_valid(info) def test_dataset_info_returns_false_if_uuid_looks_invalid():", "INFO.copy() del info[key] assert not dataset_info_is_valid(info), key def test_dataset_info_returns_false_when_type_is_not_dataset(): from dtool_lookup_server.utils import dataset_info_is_valid", "dataset\"}, \"manifest\": { \"dtoolcore_version\": \"3.7.0\", \"hash_function\": \"md5sum_hexdigest\", \"items\": {} }, \"base_uri\": \"file:///tmp\", \"creator_username\":", "= { \"uuid\": \"af6727bf-29c7-43dd-b42f-a5d7ede28337\", \"type\": \"dataset\", \"uri\": \"file:///tmp/a_dataset\", \"name\": \"my-dataset\", \"readme\": {\"description\": \"test", "dataset. INFO = { \"uuid\": \"af6727bf-29c7-43dd-b42f-a5d7ede28337\", \"type\": \"dataset\", \"uri\": \"file:///tmp/a_dataset\", \"name\": \"my-dataset\", \"readme\":", "Minimum data required to register a dataset. 
INFO = { \"uuid\": \"af6727bf-29c7-43dd-b42f-a5d7ede28337\", \"type\":", "def test_dataset_info_is_valid_returns_true_on_valid_info(): from dtool_lookup_server.utils import dataset_info_is_valid info = INFO.copy() assert dataset_info_is_valid(info) def test_dataset_info_returns_false_when_key_data_is_missing():", "import dataset_info_is_valid for key in INFO.keys(): info = INFO.copy() del info[key] assert not", "not dataset_info_is_valid(info) def test_dataset_info_is_valid_returns_false_if_base_uri_ends_with_slash(): from dtool_lookup_server.utils import dataset_info_is_valid info = INFO.copy() info[\"base_uri\"] =", "INFO.copy() info[\"uuid\"] = \"af6727bf-29c7-43dd-b42f\" assert not dataset_info_is_valid(info) def test_dataset_info_is_valid_returns_false_if_base_uri_ends_with_slash(): from dtool_lookup_server.utils import dataset_info_is_valid", "test_dataset_info_returns_false_if_uuid_looks_invalid(): from dtool_lookup_server.utils import dataset_info_is_valid info = INFO.copy() info[\"uuid\"] = \"af6727bf-29c7-43dd-b42f\" assert not", "info[key] assert not dataset_info_is_valid(info), key def test_dataset_info_returns_false_when_type_is_not_dataset(): from dtool_lookup_server.utils import dataset_info_is_valid info =", "assert not dataset_info_is_valid(info), key def test_dataset_info_returns_false_when_type_is_not_dataset(): from dtool_lookup_server.utils import dataset_info_is_valid info = INFO.copy()", "assert not dataset_info_is_valid(info) def test_dataset_info_is_valid_returns_false_if_base_uri_ends_with_slash(): from dtool_lookup_server.utils import dataset_info_is_valid info = INFO.copy() info[\"base_uri\"]", "a dataset. INFO = { \"uuid\": \"af6727bf-29c7-43dd-b42f-a5d7ede28337\", \"type\": \"dataset\", \"uri\": \"file:///tmp/a_dataset\", \"name\": \"my-dataset\",", "from dtool_lookup_server.utils import dataset_info_is_valid info = INFO.copy() info[\"uuid\"] = \"af6727bf-29c7-43dd-b42f\" assert not dataset_info_is_valid(info)", "key def test_dataset_info_returns_false_when_type_is_not_dataset(): from dtool_lookup_server.utils import dataset_info_is_valid info = INFO.copy() info[\"type\"] = \"protodataset\"" ]
[ "{'fields': ['entry_date']}), ('Fitbit Data', {'fields': ['steps', 'distance'], 'classes' : ['collapse']}), ] list_display =", "FitbitAdmin(admin.ModelAdmin): fieldsets = [ ('Date Information', {'fields': ['entry_date']}), ('Fitbit Data', {'fields': ['steps', 'distance',", ": ['collapse']}), ] list_display = ('entry_date' , 'steps', 'distance') class FitbitAdmin(admin.ModelAdmin): fieldsets =", "('entry_date' , 'steps', 'distance') class FitbitAdmin(admin.ModelAdmin): fieldsets = [ ('Date Information', {'fields': ['entry_date']}),", "('Date Information', {'fields': ['entry_date']}), ('Fitbit Data', {'fields': ['steps', 'distance', 'active_minutes', 'weight'], 'classes' :", "'active_minutes', 'weight'], 'classes' : ['collapse']}), ] list_display = ('entry_date', 'steps', 'distance', 'active_minutes', 'weight')", "'classes' : ['collapse']}), ] list_display = ('entry_date' , 'steps', 'distance') class FitbitAdmin(admin.ModelAdmin): fieldsets", "Information', {'fields': ['entry_date']}), ('Fitbit Data', {'fields': ['steps', 'distance'], 'classes' : ['collapse']}), ] list_display", "['steps', 'distance'], 'classes' : ['collapse']}), ] list_display = ('entry_date' , 'steps', 'distance') class", "fieldsets = [ ('Date Information', {'fields': ['entry_date']}), ('Fitbit Data', {'fields': ['steps', 'distance', 'active_minutes',", "[ ('Date Information', {'fields': ['entry_date']}), ('Fitbit Data', {'fields': ['steps', 'distance', 'active_minutes', 'weight'], 'classes'", "import admin from .models import Activity, Fitbit class ActivityAdmin(admin.ModelAdmin): fieldsets = [ ('Date", "'classes' : ['collapse']}), ] list_display = ('entry_date', 'steps', 'distance', 'active_minutes', 'weight') # Register", "['collapse']}), ] list_display = ('entry_date' , 'steps', 'distance') class FitbitAdmin(admin.ModelAdmin): fieldsets = [", "import Activity, Fitbit class ActivityAdmin(admin.ModelAdmin): fieldsets = [ ('Date Information', {'fields': ['entry_date']}), ('Fitbit", "admin from .models import Activity, Fitbit class ActivityAdmin(admin.ModelAdmin): fieldsets = [ ('Date Information',", "'steps', 'distance') class FitbitAdmin(admin.ModelAdmin): fieldsets = [ ('Date Information', {'fields': ['entry_date']}), ('Fitbit Data',", "list_display = ('entry_date' , 'steps', 'distance') class FitbitAdmin(admin.ModelAdmin): fieldsets = [ ('Date Information',", "= ('entry_date', 'steps', 'distance', 'active_minutes', 'weight') # Register your models here. 
admin.site.register(Activity) admin.site.register(Fitbit)", "] list_display = ('entry_date' , 'steps', 'distance') class FitbitAdmin(admin.ModelAdmin): fieldsets = [ ('Date", "Information', {'fields': ['entry_date']}), ('Fitbit Data', {'fields': ['steps', 'distance', 'active_minutes', 'weight'], 'classes' : ['collapse']}),", "('Fitbit Data', {'fields': ['steps', 'distance'], 'classes' : ['collapse']}), ] list_display = ('entry_date' ,", "{'fields': ['entry_date']}), ('Fitbit Data', {'fields': ['steps', 'distance', 'active_minutes', 'weight'], 'classes' : ['collapse']}), ]", "from .models import Activity, Fitbit class ActivityAdmin(admin.ModelAdmin): fieldsets = [ ('Date Information', {'fields':", "= [ ('Date Information', {'fields': ['entry_date']}), ('Fitbit Data', {'fields': ['steps', 'distance'], 'classes' :", "('Fitbit Data', {'fields': ['steps', 'distance', 'active_minutes', 'weight'], 'classes' : ['collapse']}), ] list_display =", "[ ('Date Information', {'fields': ['entry_date']}), ('Fitbit Data', {'fields': ['steps', 'distance'], 'classes' : ['collapse']}),", "'weight'], 'classes' : ['collapse']}), ] list_display = ('entry_date', 'steps', 'distance', 'active_minutes', 'weight') #", "] list_display = ('entry_date', 'steps', 'distance', 'active_minutes', 'weight') # Register your models here.", "= [ ('Date Information', {'fields': ['entry_date']}), ('Fitbit Data', {'fields': ['steps', 'distance', 'active_minutes', 'weight'],", "django.contrib import admin from .models import Activity, Fitbit class ActivityAdmin(admin.ModelAdmin): fieldsets = [", ", 'steps', 'distance') class FitbitAdmin(admin.ModelAdmin): fieldsets = [ ('Date Information', {'fields': ['entry_date']}), ('Fitbit", "'distance'], 'classes' : ['collapse']}), ] list_display = ('entry_date' , 'steps', 'distance') class FitbitAdmin(admin.ModelAdmin):", "('Date Information', {'fields': ['entry_date']}), ('Fitbit Data', {'fields': ['steps', 'distance'], 'classes' : ['collapse']}), ]", "class ActivityAdmin(admin.ModelAdmin): fieldsets = [ ('Date Information', {'fields': ['entry_date']}), ('Fitbit Data', {'fields': ['steps',", "['collapse']}), ] list_display = ('entry_date', 'steps', 'distance', 'active_minutes', 'weight') # Register your models", "Data', {'fields': ['steps', 'distance', 'active_minutes', 'weight'], 'classes' : ['collapse']}), ] list_display = ('entry_date',", "Activity, Fitbit class ActivityAdmin(admin.ModelAdmin): fieldsets = [ ('Date Information', {'fields': ['entry_date']}), ('Fitbit Data',", "['entry_date']}), ('Fitbit Data', {'fields': ['steps', 'distance', 'active_minutes', 'weight'], 'classes' : ['collapse']}), ] list_display", "['entry_date']}), ('Fitbit Data', {'fields': ['steps', 'distance'], 'classes' : ['collapse']}), ] list_display = ('entry_date'", "class FitbitAdmin(admin.ModelAdmin): fieldsets = [ ('Date Information', {'fields': ['entry_date']}), ('Fitbit Data', {'fields': ['steps',", "ActivityAdmin(admin.ModelAdmin): fieldsets = [ ('Date Information', {'fields': ['entry_date']}), ('Fitbit Data', {'fields': ['steps', 'distance'],", "from django.contrib import admin from .models import Activity, Fitbit class ActivityAdmin(admin.ModelAdmin): fieldsets =", "{'fields': ['steps', 'distance', 'active_minutes', 'weight'], 'classes' : ['collapse']}), ] list_display = ('entry_date', 'steps',", ".models import Activity, Fitbit class ActivityAdmin(admin.ModelAdmin): fieldsets = [ ('Date Information', {'fields': ['entry_date']}),", "fieldsets = [ ('Date Information', {'fields': ['entry_date']}), ('Fitbit 
Data', {'fields': ['steps', 'distance'], 'classes'", "= ('entry_date' , 'steps', 'distance') class FitbitAdmin(admin.ModelAdmin): fieldsets = [ ('Date Information', {'fields':", "'distance') class FitbitAdmin(admin.ModelAdmin): fieldsets = [ ('Date Information', {'fields': ['entry_date']}), ('Fitbit Data', {'fields':", ": ['collapse']}), ] list_display = ('entry_date', 'steps', 'distance', 'active_minutes', 'weight') # Register your", "['steps', 'distance', 'active_minutes', 'weight'], 'classes' : ['collapse']}), ] list_display = ('entry_date', 'steps', 'distance',", "list_display = ('entry_date', 'steps', 'distance', 'active_minutes', 'weight') # Register your models here. admin.site.register(Activity)", "Data', {'fields': ['steps', 'distance'], 'classes' : ['collapse']}), ] list_display = ('entry_date' , 'steps',", "{'fields': ['steps', 'distance'], 'classes' : ['collapse']}), ] list_display = ('entry_date' , 'steps', 'distance')", "'distance', 'active_minutes', 'weight'], 'classes' : ['collapse']}), ] list_display = ('entry_date', 'steps', 'distance', 'active_minutes',", "Fitbit class ActivityAdmin(admin.ModelAdmin): fieldsets = [ ('Date Information', {'fields': ['entry_date']}), ('Fitbit Data', {'fields':" ]
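For reference, a minimal sketch of the models this admin module assumes. The field names come from the fieldsets and list_display entries above; the field types and defaults are assumptions, not taken from the original project.

# Hypothetical models.py matching the fields referenced by the admin classes.
# Field types (DateField/IntegerField/FloatField) are assumptions.
from django.db import models


class Activity(models.Model):
    entry_date = models.DateField()
    steps = models.IntegerField(default=0)
    distance = models.FloatField(default=0.0)


class Fitbit(models.Model):
    entry_date = models.DateField()
    steps = models.IntegerField(default=0)
    distance = models.FloatField(default=0.0)
    active_minutes = models.IntegerField(default=0)
    weight = models.FloatField(null=True, blank=True)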
[ "<gh_stars>1-10 # coding:utf-8 import numpy as np import tensorflow as tf from tensorflow.contrib", "x in Path(_cg.infer_pb_path).iterdir() if x.is_dir() and 'temp' not in str(x)] latest_model = str(sorted(subdirs)[-1])", "import predictor from pathlib import Path from config import config as _cg def", "tensorflow.contrib import predictor from pathlib import Path from config import config as _cg", "import tensorflow as tf from tensorflow.contrib import predictor from pathlib import Path from", "import numpy as np import tensorflow as tf from tensorflow.contrib import predictor from", "[[2, 3], [5, 6], [7, 8]] features = {'input_x': np.array(data, dtype=np.float32)} features_batch =", "latest_model = str(sorted(subdirs)[-1]) print(latest_model) predict_fn = predictor.from_saved_model(latest_model) return predict_fn if __name__ == '__main__':", "import config as _cg def predict(): # find the pb file subdirs =", "if x.is_dir() and 'temp' not in str(x)] latest_model = str(sorted(subdirs)[-1]) print(latest_model) predict_fn =", "predict() data = [[1, 2]] # although single data, however, must be batch", "be batch format data_batch = [[2, 3], [5, 6], [7, 8]] features =", "in Path(_cg.infer_pb_path).iterdir() if x.is_dir() and 'temp' not in str(x)] latest_model = str(sorted(subdirs)[-1]) print(latest_model)", "6], [7, 8]] features = {'input_x': np.array(data, dtype=np.float32)} features_batch = {'input_x': np.array(data_batch, dtype=np.float32)}", "np.array(data, dtype=np.float32)} features_batch = {'input_x': np.array(data_batch, dtype=np.float32)} predictions = predict_fn(features)['result_1'] predictions_batch = predict_fn(features_batch)['result_1']", "[5, 6], [7, 8]] features = {'input_x': np.array(data, dtype=np.float32)} features_batch = {'input_x': np.array(data_batch,", "== '__main__': predict_fn = predict() data = [[1, 2]] # although single data,", "if __name__ == '__main__': predict_fn = predict() data = [[1, 2]] # although", "batch format data_batch = [[2, 3], [5, 6], [7, 8]] features = {'input_x':", "str(sorted(subdirs)[-1]) print(latest_model) predict_fn = predictor.from_saved_model(latest_model) return predict_fn if __name__ == '__main__': predict_fn =", "format data_batch = [[2, 3], [5, 6], [7, 8]] features = {'input_x': np.array(data,", "as np import tensorflow as tf from tensorflow.contrib import predictor from pathlib import", "data, however, must be batch format data_batch = [[2, 3], [5, 6], [7,", "__name__ == '__main__': predict_fn = predict() data = [[1, 2]] # although single", "numpy as np import tensorflow as tf from tensorflow.contrib import predictor from pathlib", "pathlib import Path from config import config as _cg def predict(): # find", "import Path from config import config as _cg def predict(): # find the", "predictor from pathlib import Path from config import config as _cg def predict():", "as _cg def predict(): # find the pb file subdirs = [x for", "data_batch = [[2, 3], [5, 6], [7, 8]] features = {'input_x': np.array(data, dtype=np.float32)}", "2]] # although single data, however, must be batch format data_batch = [[2,", "predict_fn = predict() data = [[1, 2]] # although single data, however, must", "= [[2, 3], [5, 6], [7, 8]] features = {'input_x': np.array(data, dtype=np.float32)} features_batch", "as tf from tensorflow.contrib import predictor from pathlib import Path from config import", "from pathlib import Path from config import config as _cg def predict(): #", "pb file subdirs = [x for x in Path(_cg.infer_pb_path).iterdir() if x.is_dir() and 'temp'", 
"config import config as _cg def predict(): # find the pb file subdirs", "file subdirs = [x for x in Path(_cg.infer_pb_path).iterdir() if x.is_dir() and 'temp' not", "features = {'input_x': np.array(data, dtype=np.float32)} features_batch = {'input_x': np.array(data_batch, dtype=np.float32)} predictions = predict_fn(features)['result_1']", "= [x for x in Path(_cg.infer_pb_path).iterdir() if x.is_dir() and 'temp' not in str(x)]", "= str(sorted(subdirs)[-1]) print(latest_model) predict_fn = predictor.from_saved_model(latest_model) return predict_fn if __name__ == '__main__': predict_fn", "{'input_x': np.array(data, dtype=np.float32)} features_batch = {'input_x': np.array(data_batch, dtype=np.float32)} predictions = predict_fn(features)['result_1'] predictions_batch =", "and 'temp' not in str(x)] latest_model = str(sorted(subdirs)[-1]) print(latest_model) predict_fn = predictor.from_saved_model(latest_model) return", "np import tensorflow as tf from tensorflow.contrib import predictor from pathlib import Path", "[[1, 2]] # although single data, however, must be batch format data_batch =", "= [[1, 2]] # although single data, however, must be batch format data_batch", "however, must be batch format data_batch = [[2, 3], [5, 6], [7, 8]]", "str(x)] latest_model = str(sorted(subdirs)[-1]) print(latest_model) predict_fn = predictor.from_saved_model(latest_model) return predict_fn if __name__ ==", "the pb file subdirs = [x for x in Path(_cg.infer_pb_path).iterdir() if x.is_dir() and", "8]] features = {'input_x': np.array(data, dtype=np.float32)} features_batch = {'input_x': np.array(data_batch, dtype=np.float32)} predictions =", "[x for x in Path(_cg.infer_pb_path).iterdir() if x.is_dir() and 'temp' not in str(x)] latest_model", "single data, however, must be batch format data_batch = [[2, 3], [5, 6],", "def predict(): # find the pb file subdirs = [x for x in", "_cg def predict(): # find the pb file subdirs = [x for x", "'__main__': predict_fn = predict() data = [[1, 2]] # although single data, however,", "for x in Path(_cg.infer_pb_path).iterdir() if x.is_dir() and 'temp' not in str(x)] latest_model =", "coding:utf-8 import numpy as np import tensorflow as tf from tensorflow.contrib import predictor", "predict(): # find the pb file subdirs = [x for x in Path(_cg.infer_pb_path).iterdir()", "from config import config as _cg def predict(): # find the pb file", "tensorflow as tf from tensorflow.contrib import predictor from pathlib import Path from config", "data = [[1, 2]] # although single data, however, must be batch format", "features_batch = {'input_x': np.array(data_batch, dtype=np.float32)} predictions = predict_fn(features)['result_1'] predictions_batch = predict_fn(features_batch)['result_1'] print(predictions) print(predictions_batch)", "x.is_dir() and 'temp' not in str(x)] latest_model = str(sorted(subdirs)[-1]) print(latest_model) predict_fn = predictor.from_saved_model(latest_model)", "3], [5, 6], [7, 8]] features = {'input_x': np.array(data, dtype=np.float32)} features_batch = {'input_x':", "not in str(x)] latest_model = str(sorted(subdirs)[-1]) print(latest_model) predict_fn = predictor.from_saved_model(latest_model) return predict_fn if", "subdirs = [x for x in Path(_cg.infer_pb_path).iterdir() if x.is_dir() and 'temp' not in", "Path(_cg.infer_pb_path).iterdir() if x.is_dir() and 'temp' not in str(x)] latest_model = str(sorted(subdirs)[-1]) print(latest_model) predict_fn", "in str(x)] latest_model = str(sorted(subdirs)[-1]) print(latest_model) predict_fn = 
predictor.from_saved_model(latest_model) return predict_fn if __name__", "'temp' not in str(x)] latest_model = str(sorted(subdirs)[-1]) print(latest_model) predict_fn = predictor.from_saved_model(latest_model) return predict_fn", "return predict_fn if __name__ == '__main__': predict_fn = predict() data = [[1, 2]]", "Path from config import config as _cg def predict(): # find the pb", "# find the pb file subdirs = [x for x in Path(_cg.infer_pb_path).iterdir() if", "must be batch format data_batch = [[2, 3], [5, 6], [7, 8]] features", "predict_fn if __name__ == '__main__': predict_fn = predict() data = [[1, 2]] #", "print(latest_model) predict_fn = predictor.from_saved_model(latest_model) return predict_fn if __name__ == '__main__': predict_fn = predict()", "config as _cg def predict(): # find the pb file subdirs = [x", "dtype=np.float32)} features_batch = {'input_x': np.array(data_batch, dtype=np.float32)} predictions = predict_fn(features)['result_1'] predictions_batch = predict_fn(features_batch)['result_1'] print(predictions)", "# coding:utf-8 import numpy as np import tensorflow as tf from tensorflow.contrib import", "[7, 8]] features = {'input_x': np.array(data, dtype=np.float32)} features_batch = {'input_x': np.array(data_batch, dtype=np.float32)} predictions", "# although single data, however, must be batch format data_batch = [[2, 3],", "predict_fn = predictor.from_saved_model(latest_model) return predict_fn if __name__ == '__main__': predict_fn = predict() data", "predictor.from_saved_model(latest_model) return predict_fn if __name__ == '__main__': predict_fn = predict() data = [[1,", "= predictor.from_saved_model(latest_model) return predict_fn if __name__ == '__main__': predict_fn = predict() data =", "from tensorflow.contrib import predictor from pathlib import Path from config import config as", "tf from tensorflow.contrib import predictor from pathlib import Path from config import config", "although single data, however, must be batch format data_batch = [[2, 3], [5,", "= {'input_x': np.array(data, dtype=np.float32)} features_batch = {'input_x': np.array(data_batch, dtype=np.float32)} predictions = predict_fn(features)['result_1'] predictions_batch", "= predict() data = [[1, 2]] # although single data, however, must be", "find the pb file subdirs = [x for x in Path(_cg.infer_pb_path).iterdir() if x.is_dir()" ]
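The script imports a project-local config module that is not included in this dump. A minimal stand-in might look like the following sketch; the attribute name infer_pb_path matches the usage above, but the path value and the class layout are assumptions.

# Hypothetical config.py stand-in; only infer_pb_path is required by the
# predictor script above, and the value shown is a placeholder.
class _Config(object):
    infer_pb_path = './export/saved_models'  # directory holding SavedModel export subdirectories


config = _Config()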
[ "that # they add up to a specific target. # # You may", "You may assume that each input would have exactly one solution, and you", "and you # may not use the same element twice. class Solution(object): def", "specific target. # # You may assume that each input would have exactly", "\"\"\" possible_values = {} for index, val in enumerate(nums): if val in possible_values:", "you # may not use the same element twice. class Solution(object): def twoSum(self,", "input would have exactly one solution, and you # may not use the", "Given an array of integers, return indices of the two numbers such that", "for index, val in enumerate(nums): if val in possible_values: return [possible_values[val], index] else:", "that each input would have exactly one solution, and you # may not", "they add up to a specific target. # # You may assume that", "same element twice. class Solution(object): def twoSum(self, nums, target): \"\"\" :type nums: List[int]", "# You may assume that each input would have exactly one solution, and", "assume that each input would have exactly one solution, and you # may", "twice. class Solution(object): def twoSum(self, nums, target): \"\"\" :type nums: List[int] :type target:", "nums, target): \"\"\" :type nums: List[int] :type target: int :rtype: List[int] \"\"\" possible_values", "solution, and you # may not use the same element twice. class Solution(object):", "the two numbers such that # they add up to a specific target.", "not use the same element twice. class Solution(object): def twoSum(self, nums, target): \"\"\"", "# # You may assume that each input would have exactly one solution,", ":type nums: List[int] :type target: int :rtype: List[int] \"\"\" possible_values = {} for", "each input would have exactly one solution, and you # may not use", "indices of the two numbers such that # they add up to a", "index, val in enumerate(nums): if val in possible_values: return [possible_values[val], index] else: possible_values[target-val]", "possible_values = {} for index, val in enumerate(nums): if val in possible_values: return", "a specific target. # # You may assume that each input would have", "val in enumerate(nums): if val in possible_values: return [possible_values[val], index] else: possible_values[target-val] =", "use the same element twice. class Solution(object): def twoSum(self, nums, target): \"\"\" :type", "class Solution(object): def twoSum(self, nums, target): \"\"\" :type nums: List[int] :type target: int", "List[int] :type target: int :rtype: List[int] \"\"\" possible_values = {} for index, val", "would have exactly one solution, and you # may not use the same", "target. # # You may assume that each input would have exactly one", "Solution(object): def twoSum(self, nums, target): \"\"\" :type nums: List[int] :type target: int :rtype:", "nums: List[int] :type target: int :rtype: List[int] \"\"\" possible_values = {} for index,", "def twoSum(self, nums, target): \"\"\" :type nums: List[int] :type target: int :rtype: List[int]", "an array of integers, return indices of the two numbers such that #", "may not use the same element twice. class Solution(object): def twoSum(self, nums, target):", "array of integers, return indices of the two numbers such that # they", "have exactly one solution, and you # may not use the same element", "element twice. class Solution(object): def twoSum(self, nums, target): \"\"\" :type nums: List[int] :type", "add up to a specific target. # # You may assume that each", "numbers such that # they add up to a specific target. 
# #", "one solution, and you # may not use the same element twice. class", "{} for index, val in enumerate(nums): if val in possible_values: return [possible_values[val], index]", "to a specific target. # # You may assume that each input would", "exactly one solution, and you # may not use the same element twice.", "of integers, return indices of the two numbers such that # they add", "up to a specific target. # # You may assume that each input", "integers, return indices of the two numbers such that # they add up", ":type target: int :rtype: List[int] \"\"\" possible_values = {} for index, val in", "# may not use the same element twice. class Solution(object): def twoSum(self, nums,", "such that # they add up to a specific target. # # You", "target: int :rtype: List[int] \"\"\" possible_values = {} for index, val in enumerate(nums):", "target): \"\"\" :type nums: List[int] :type target: int :rtype: List[int] \"\"\" possible_values =", "of the two numbers such that # they add up to a specific", "List[int] \"\"\" possible_values = {} for index, val in enumerate(nums): if val in", "= {} for index, val in enumerate(nums): if val in possible_values: return [possible_values[val],", "int :rtype: List[int] \"\"\" possible_values = {} for index, val in enumerate(nums): if", "in enumerate(nums): if val in possible_values: return [possible_values[val], index] else: possible_values[target-val] = index", "return indices of the two numbers such that # they add up to", "may assume that each input would have exactly one solution, and you #", "the same element twice. class Solution(object): def twoSum(self, nums, target): \"\"\" :type nums:", "# Given an array of integers, return indices of the two numbers such", "twoSum(self, nums, target): \"\"\" :type nums: List[int] :type target: int :rtype: List[int] \"\"\"", "\"\"\" :type nums: List[int] :type target: int :rtype: List[int] \"\"\" possible_values = {}", ":rtype: List[int] \"\"\" possible_values = {} for index, val in enumerate(nums): if val", "two numbers such that # they add up to a specific target. #", "# they add up to a specific target. # # You may assume" ]
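A quick usage check with the classic example input (this driver is not part of the original solution file):

# Example: nums = [2, 7, 11, 15], target = 9 -> 2 + 7 == 9, so indices [0, 1].
if __name__ == '__main__':
    print(Solution().twoSum([2, 7, 11, 15], 9))  # expected output: [0, 1]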
[ "object, it is designed to use an independent # attribute of mmobject so", "a minimal header is created @type filename: string @param filename: name of the", "is complete!\" keyboardInterruptSent = True # Install new handler signal.signal(signal.SIGINT,New_SIGINT) if self.__file.mode not", "* (Card.length-strlen) def _floatFormat(value): \"\"\"Format the floating number to make sure it gets", "_TableBaseHDU)) and hdu.data is not None: hdu.data._scale_back() if isinstance(hdu, _TableBaseHDU) and hdu.data is", "data object @param data: data to write to the new file @type header:", "=\", hdu.name, _extver if 'data' in dir(hdu): if hdu.data is not None: hdu._file.seek(hdu._datLoc)", "getattr(self, cname+'s') del attr[indx] del self._arrays[indx] self._nfields -= 1 def change_attrib(self, col_name, attrib,", "of a Column's data as an array.\"\"\" indx = _get_index(self._coldefs.names, key) if (self._convert[indx]", "0 elif isinstance(_start, (int, long)): _start = _normalize(_start, naxis) else: raise IndexError, 'Illegal", "are \"new\" if hdu._new: self.__file.writeHDU(hdu) if (verbose): print \"append HDU\", hdu.name, _extver hdu._new", "for j in range(i+1,naxis): _naxis = self.hdu.header['NAXIS'+`naxis-j`] indx = _iswholeline(key[j], _naxis) dims.append(indx.npts) if", "indx >= 0 and indx < naxis: if naxis > 1: return _SinglePoint(1,", "does not exist if _index is None: err_text = \"'%s' card does not", "extension in the file so we # must change the Primary header provided", "after the last non-commentary card. If =1, the card will be appended after", "max(map(len, VLdata)) val = 'P' + _convert_format(val._dtype, reverse=1) + '(%d)' % VLdata._max else:", "hdulist[1] _data = hdu.data except IndexError: raise IndexError, 'No data in this HDU.'", "if header is None: if 'header' in keys: header = keys['header'] hdu=_makehdu(data, header)", "\"\"\" result = \"\" element = 0 # go through the list twice,", "an ASCII column has no width, add one if tbtype == 'TableHDU': for", "the EXTEND keyword is there if there is extension if len(self) > 1:", "= ImageHDU # insert the require keywords PCOUNT and GCOUNT dim = `self.header['NAXIS']`", ", bool): valStr = '%20s' % `self.value`[0] elif isinstance(self.value , (int, long)): valStr", "by specifying attributes. All attributes except format can be optional. name: column name,", "if len(block) != _blockLen: raise IOError, 'Block length is not %d: %d' %", "user. _isInt = \"isinstance(val, (int, long))\" # Functions def _padLength(stringLen): \"\"\"Bytes needed to", "64 output = '' # do the value string valfmt = \"'%-s&'\" val", "verify(self): pass class _ValidHDU(_AllHDU, _Verify): \"\"\"Base class for all HDUs which are not", "_width[indx] - self._coldefs.starts[indx] if _trail < 0: raise ValueError, \"column `%s` ending point", "(self.__dict__.has_key('value') or self.__dict__.has_key('_cardimage')): valStr = '' # string value should occupies at least", "at column 8).' raise ValueError, self._err_text, '\\n%s' % self._cardimage elif option in ['fix',", "(keyword in _keyNames): col = eval(_key.group('num')) if col <= _nfields and col >", "name == 'key': raise SyntaxError, 'keyword name cannot be reset.' elif name ==", "% self.__file.mode return self.update_tbhdu() self.verify(option=output_verify) if self.__file.mode == 'append': for hdu in self:", "= eval(width) except: raise ValueError, 'Illegal format `%s` for ASCII table.' 
% input_format", "def change_name(self, col_name, new_name): \"\"\"Change a Column's name.\"\"\" if new_name != col_name and", "!= 8: if option in ['exception', 'warn']: self.__dict__['_err_text'] = 'Card image is not", "close(self): \"\"\" Close the 'physical' FITS file. :Parameters: None :Returns: None \"\"\" self._ffo.close()", "name): \"\"\"Populate the attributes.\"\"\" cname = name[:-1] if cname in _commonNames: attr =", "end. key: keyword name value: keyword value (to be used for updating) comment:", "= self.ascard.index_of(oldkey) _comment = self.ascard[_index].comment _value = self.ascard[_index].value self.ascard[_index] = Card(newkey, _value, _comment)", "'' elif self.__dict__.has_key('comment') or self.__dict__.has_key('_cardimage'): if self.comment in [None, '']: commentStr = ''", "for the first case. bitpix: data type as expressed in FITS BITPIX value", "isinstance(hdu.data._parent.field(i), num.NumArray): # make the scaled data = 0, not the stored data", "file if hdus is None: hdus = [] # can take one HDU,", "name[:-1] if cname in _commonNames: attr = [''] * len(self) for i in", "indx.start) else: return _SteppedSlice((indx.stop-indx.start)/indx.step, indx.start) else: raise IndexError, 'Illegal index %s' % indx", "of a keyword in the CardList. key: the keyword name (a string) or", "name value: keyword value (to be used for updating) comment: keyword comment (to", "_imagStr + ')' self.__dict__['_valuestring'] = _valStr self._ascardimage() def _locateEq(self): \"\"\"Locate the equal sign", "Float32 self.data = num.array(raw_data, type=num.Float32) else: # floating point cases if self._ffile.memmap: self.data", "1 for j in range(groups,naxis): size = size * self.header['NAXIS'+`j+1`] bitpix = self.header['BITPIX']", "key): tmp = rec.RecArray.__getitem__(self, key) if isinstance(key, slice): out = tmp out._parent =", "length table if isinstance(coldata, _VLF): for i in coldata: if not isinstance(i, chararray.CharArray):", "is not None: _data = self.data.copy() else: _data = None return self.__class__(data=_data, header=self.header.copy())", "there is extension if len(self) > 1: self.update_extend() def index_of(self, key): \"\"\"Get the", "'big' # scale by TSCAL and TZERO if _scale or _zero: for i", "+ 3 # if the card EXTEND exists, must be after it. try:", "'X': nbytes = ((repeat-1) / 8) + 1 # use an array, even", "_unique[_name] = [i] self.__dict__[attr] = _unique try: return self.__dict__[attr] except KeyError: raise AttributeError(attr)", "> 0: hdu.header['PCOUNT'] = _pcount # update TFORM for variable length columns for", "update the keywords of BSCALE and BZERO in self.header. This method should only", "fix_text = 'Fixed by inserting one as 0th HDU.' fix = \"self.insert(0, PrimaryHDU())\"", "begins with HIERARCH which allows keyword name longer than 8 characters. \"\"\" def", "and blanks. 
If there are two or more attribute names, they must be", "the right place (card %d).\" % insert_pos fix = \"_cards=self.header.ascard; dummy=_cards[%d]; del _cards[%d];_cards.insert(%d,", "self._cardimage # verify the comment (string), it is never fixable if result is", "for ASCII table cell with value = TNULL # this can be reset", "if isinstance(key, str): while 1: try: del self.ascard[key] self._mod = 1 except: return", "header: header to be used to populate the non-required keywords nrows: number of", "to pad the input stringLen to the next FITS block.\"\"\" return (_blockLen -", "input is a FITS_rec tmp = hdu.columns = input._coldefs else: # input is", "'number of array dimensions'), Card('NAXIS1', 0, 'length of dimension 1'), Card('NAXIS2', 0, 'length", "value = chararray.array(value, itemsize=1) else: value = num.array(value, type=self._dtype) objects.ObjectArray.__setitem__(self, key, value) self._max", "(dtype, width) def _get_index(nameList, key): \"\"\" Get the index of the key in", "first (0th) element must be a primary HDU if len(self) > 0 and", "\"%-3d %s\\n\"%(j, self[j]._summary()) results = results[:-1] print results def open(name, mode=\"copyonwrite\", memmap=0): \"\"\"Factory", "be written to the beginning of the file. If the file does not", "array=None): \"\"\"Construct a Column by specifying attributes. All attributes except format can be", "if 'GROUPS' in cards._keylist and cards['GROUPS'].value == True: self._hdutype = GroupsHDU elif cards[0].value", "=\\s*(\\d+)') re_gcount = re.compile(r'GCOUNT =\\s*(-?\\d+)') re_pcount = re.compile(r'PCOUNT =\\s*(-?\\d+)') re_groups = re.compile(r'GROUPS =\\s*(T)')", "all Columns. \"\"\" def __init__(self, input, tbtype='BinTableHDU'): \"\"\"input: a list of Columns, an", "os.path.exists(filename): writeto(filename, data, header) else: hdu=_makehdu(data, header) if isinstance(hdu, PrimaryHDU): hdu = ImageHDU(data,", "if _data is None: raise IndexError, 'No data in this HDU.' if _gethdr:", "self.__dict__[attr] except KeyError: raise AttributeError(attr) def par(self, parName): \"\"\"Get the group parameter values.\"\"\"", "= header.copy() # # Check if the file already exists. If it does", "val = self.header[keywd] if not eval(test): err_text = \"'%s' card has invalid value", "slice %s, stop must be integer.' % input if _stop < _start: raise", "= hdu.header.get('THEAP', _tbsize) hdu.data._gap = _heapstart - _tbsize _pcount = hdu.data._heapsize + hdu.data._gap", "_ImageBaseHDU.NumCode[self.header['BITPIX']] != data.type(): raise TypeError, \"Supplied data is not the correct type.\" if", "with leading 0s. numr = Card._number_NFSC_RE.match(valu.group('numr')) _digt = numr.group('digt').translate(_fix_table2, ' ') if numr.group('sign')", "header=None, **keys): \"\"\"Create a new FITS file using the supplied data/header. @type filename:", "self._byteoffset) / self._strides[0] return _Group(self, row) class _Group(rec.Record): \"\"\"One group of the random", "card) else: # try not to use CONTINUE if the string value can", "raw string. option: verification option, default=silentfix. \"\"\" # Only if the card image", "self.header['BSCALE'] del self.header['BZERO'] self.header['BITPIX'] = _ImageBaseHDU.ImgCode[self.data.type()] else: self.data = raw_data try: return self.__dict__[attr]", "_list if _count == 1: indx = _list.index(_key) elif _count == 0: raise", "hdu) except IndexError: raise IndexError, 'Extension %s is out of bound or not", "CardList. cards: A list of Cards, default=[]. 
\"\"\" # decide which kind of", "\"\"\"Get the _mm attribute.\"\"\" if attr == '_mm': self.__dict__[attr] = Memmap.open(self.name, mode=_memmap_mode[self.mode]) try:", "truncated.' output = output[:Card.length] self.__dict__['_cardimage'] = output def _checkText(self, val): \"\"\"Verify val to", "None: real = Card._number_NFSC_RE.match(input.group('real')) _realStr = real.group('digt').translate(_fix_table, ' ') if real.group('sign') is not", "with the data to be written to the file. :Returns: None Notes -----", "to the original file if self._resize: oldName = self.__file.name oldMemmap = self.__file.memmap _name", "in Card._commentaryKeys: return result else: if option in ['fix', 'silentfix']: result = self._check('parse')", "if _index is None: err_text = \"'%s' card does not exist.\" % keywd", "if dtype == 'a': data_output[i] = chararray.array(input[i], itemsize=1) else: data_output[i] = num.array(input[i], type=dtype)", "for extension specification. See L{getdata} for explanations/examples. @rtype: L{Header} object @return: header \"\"\"", "else: self._hdutype = _ValidHDU except: self._hdutype = _CorruptedHDU # populate the cardlist self.ascard", "name of the mktemp() output. \"\"\" dirName = os.path.dirname(input) if dirName != '':", "isinstance(val, str): self._checkText(val) self.__dict__['_valueModified'] = 1 else: raise ValueError, 'Illegal value %s' %", "None: out._convert[i] = ndarray.NDArray.__getitem__(self._convert[i], key) del dummy return out # if not a", "i in indx[1:]: result += self.field(i) return result def setpar(self, parName, value): \"\"\"Set", "+= c._cardimage _cardList[_where-1] = _Card_with_continue().fromstring(_longstring) del _cardList[_where:_where+nc] del _keyList[_where:_where+nc] _start = _where #", "= True # deprecated FALSE = False # deprecated _INDENT = \" \"", "r'(?P<numr>' + _numr_NFSC + ')|' r'(?P<cplx>\\( *' r'(?P<real>' + _numr_NFSC + ') *,", "card EXTEND exists, must be after it. try: _dum = self.header['EXTEND'] #_after +=", "dependence of the format (e.g. E-009 vs. E-09) elif isinstance(self.value, float): if self._valueModified:", "the group parameter value.\"\"\" self.array[self.row:self.row+1].setpar(fieldName, value) class _TableBaseHDU(_ExtensionHDU): \"\"\"FITS table extension base HDU", "construct a table HDU hdu = eval(tbtype)(header=header) if isinstance(input, ColDefs): if input._tbtype ==", "changes of columns.\"\"\" _update = self.header.update _append = self.header.ascard.append _cols = self.columns _update('naxis1',", "def verify (self, option='warn'): \"\"\"Wrapper for _verify.\"\"\" _option = option.lower() if _option not", "= dtype if dtype == 'a': _nbytes = 1 else: _nbytes = num.getType(dtype).bytes", "None, _isInt, None, option, _err) return _err class BinTableHDU(_TableBaseHDU): \"\"\"Binary table HDU class.\"\"\"", "import re, os, tempfile, exceptions import operator import __builtin__ import urllib import tempfile", "output def _checkText(self, val): \"\"\"Verify val to be printable ASCII text.\"\"\" if Card._comment_FSC_RE.match(val)", "_VLF \"\"\" \"\"\" Do you mean: \"Profits\"? 
- Google Search, when asked for", "SyntaxError, \"%s is not a Card\" % str(card) def _pos_insert(self, card, before, after,", "corrupted HDU except ValueError: print 'Warning: Required keywords missing when trying to read", "chararray.array(input[i], itemsize=1) else: data_output[i] = num.array(input[i], type=dtype) desp_output[i,0] = len(data_output[i]) desp_output[i,1] = _offset", "hdu def writeto(filename, data, header=None, **keys): \"\"\"Create a new FITS file using the", "header is None: raise ValueError, \"No header to setup HDU.\" # if the", "is not None: _comment = comment else: _comment = self.ascard[j].comment self.ascard[j] = Card(key,", "self._convert[indx] += bzero elif _bool: self._convert[indx] = num.equal(dummy, ord('T')) else: return dummy return", "keyword name %s' % repr(val) self.__dict__['_fixable'] = 0 raise ValueError, self._err_text def _extractKey(self):", "dtype. The descriptor location will have a zero offset for all columns after", "in self: output += str(card) + '\\n' return output[:-1] # ----------------------------- HDU classes", "the RecArray, so we can deal with scaled columns. \"\"\" def __init__(self, input):", "HIERARCH which allows keyword name longer than 8 characters. \"\"\" def _verify(self, option='warn'):", "desp_output, dtype): \"\"\"Construct the P format column array, both the data descriptors and", "null: null value, corresponding to TNULL keyword bscale: bscale value, corresponding to TSCAL", "tbtype='BinTableHDU'): \"\"\"input: a list of Columns, an (table) HDU tbtype: which table HDU,", "is not None: # check format try: # legit FITS format? convert to", "exist and the provided header is not a Primary header, a default Primary", "= tempfile.NamedTemporaryFile('rb+',-1,'.fits') self.name = self.tfile.name self.__file = self.tfile.file self.__file.write(zfile.read()) zfile.close() elif os.path.splitext(self.name)[1] ==", "positional arguments\" elif n_ext1 == 1: if n_ext2 == 0: ext = ext1[0]", "\"\"\"Construct a card from key, value, and (optionally) comment. 
Any specifed arguments, except", "_zero = 0 else: # flat the shape temporarily to save memory dims", "list[i]=list[i].strip().lower() if list[i][-1] == 's': list[i]=list[i][:-1] for att in list: if att not", "the name, type, length of header, data shape and type for each extension.", "# collect the pieces in a list tmp = input[xoffset:offset] list.append(tmp) if len(input)", "self.__dict__['_fix_text'] = '' self.__dict__['_fixable'] = 1 if option == 'ignore': return elif option", "copied.\"\"\" # touch the data, so it's defined (in the case of reading", "naxis < 1000: for j in range(3, naxis+3): self.req_cards('NAXIS'+`j-2`, '== '+`j`, _isInt+\" and", "pos is not None: test_pos = '_index '+ pos if not eval(test_pos): err_text", "KeyError: pass if isinstance(self.data, GroupData): self.header.update('GROUPS', True, after='NAXIS'+`len(axes)`) self.header.update('PCOUNT', len(self.data.parnames), after='GROUPS') self.header.update('GCOUNT', len(self.data),", "self).__getitem__(key) if isinstance(_item, _TempHDU): super(HDUList, self).__setitem__(key, _item.setupHDU()) return super(HDUList, self).__getitem__(key) def __getslice__(self, start,", "return tmp # synchronize the sliced FITS_rec and its ._parent def __getitem__(self, key):", "Header: # Add 1 to .ascard to include the END card _nch80 =", "slice's start/stop in the regular range.\"\"\" def _normalize(indx, npts): if indx < -npts:", "for integer key only delete once else: del self.ascard[key] self._mod = 1 def", "+ 1 _end = self.starts[i] + _width - 1 attr[i] = _end -", "- _width[indx] - self._coldefs.starts[indx] if _trail < 0: raise ValueError, \"column `%s` ending", "# construct the Header object, using the cards. try: header = Header(CardList(_cardList, keylist=_keyList))", "try: del self.ascard[key] self._mod = 1 except: return # for integer key only", "verify method.\"\"\" _err = _TableBaseHDU._verify(self, option=option) self.req_cards('PCOUNT', None, 'val == 0', 0, option,", "'.gz': # Handle gzip files if mode in ['update', 'append']: raise \"Writing to", "/= bscale self.array = array def __repr__(self): text = '' for cname in", "input' if option == 'left': tmp = list(self.data) + b else: tmp =", "method will scale self.data and update the keywords of BSCALE and BZERO in", "None Notes ----- The file will be opened and the header appended to", "else: result = self.field(indx[0]).astype('f8') for i in indx[1:]: result += self.field(i) return result", "_value_FSC_RE = re.compile( r'(?P<valu_field> *' r'(?P<valu>' # The <strg> regex is not correct", "_imagStr = imag.group('sign') + _imagStr _valStr = '(' + _realStr + ', '", "elif dtype == 'X': nbytes = ((repeat-1) / 8) + 1 # use", "= '' self.header.update('EXTEND', True, after='NAXIS'+dim) class ImageHDU(_ExtensionHDU, _ImageBaseHDU): \"\"\"FITS image extension HDU class.\"\"\"", "coldata._byteorder != 'big': coldata.byteswap() coldata._byteorder = 'big' if coldata2._type.bytes > 1: # do", "attrib can be one or more of the attributes listed in _commonNames. The", "_fits2rec[dtype]+`int(option)` # make sure option is integer else: _repeat = '' if repeat", "1 if verbose: print \"One or more data area is resized.\" break #", "bzeros for the parameters \"\"\" if isinstance(input, num.NumArray): _formats = '' _cols =", "else: # if the keyword EXTVER does not exist, default it to 1", "len(key): key = key + (slice(None),) * (naxis-len(key)) offset = 0 for i", "integers to Float32 self.data = num.array(raw_data, type=num.Float32) else: # floating point cases if", "in an incorrect # match. 
r'\\'(?P<strg>([ -~]+?|\\'\\'|)) *?\\'(?=$|/| )|' r'(?P<bool>[FT])|' r'(?P<numr>' + _numr_FSC", "def _verify(self, option='warn'): _err = _ErrList([], unit='Card') isValid = \"val in [8, 16,", "dat_format, bscale = _bscale, bzero = _bzero)) _coldefs = ColDefs(_cols) _coldefs._shape = self.header['GCOUNT']", "must be integer.' % input _stop = input.stop if _stop is None: _stop", "insert the require keywords PCOUNT and GCOUNT dim = `self.header['NAXIS']` if dim ==", "= dummy[i]*bscale+bzero # Boolean (logical) column if self._coldefs._recformats[indx]._dtype is _booltype: for i in", "hdus=[], file=None): \"\"\"Construct a HDUList object. hdus: Input, can be a list of", "result += _INDENT*tab+\"%s %s:\\n\" % (self.unit, element) result += _dummy element += 1", "of # a field may not be the column right after the last", "(does not include bscale/bzero for now XXX) _bitpix = self.hdu.header['BITPIX'] code = _ImageBaseHDU.NumCode[_bitpix]", "class _TempHDU(_ValidHDU): \"\"\"Temporary HDU, used when the file is first opened. This is", "- last_end last_end = _end self._Formats = self.formats self._arrays[i] = input[i].array \"\"\" def", "if self.header['NAXIS'] <= 0: self.header['NAXIS'] = 1 self.header.update('NAXIS1', 0, after='NAXIS') def __getattr__(self, attr):", "the first extension in the file so we # must change the Primary", "self.ascard: pairs.append((card.key, card.value)) return pairs def has_key(self, key): \"\"\"Check for existence of a", "for i in range(_tfields): del self[name+`i+1`] if issubclass(self._hdutype == TableHDU): for i in", "and 'extver' in keys: ext = ext1[0], ext2['extver'] raise KeyError, 'Redundant/conflicting keyword argument(s):", "locations of the blanks blank_loc = num.nonzero(arr == ' ')[0] offset = 0", "memmap and mode not in ['readonly', 'copyonwrite', 'update']: raise \"Memory mapping is not", "self._ffo.getfile().tell() - self._datLoc == self._size: # # the stream is full so pad", "the stream\" if _ImageBaseHDU.NumCode[self.header['BITPIX']] != data.type(): raise TypeError, \"Supplied data is not the", "= [k.upper() for k in self.keys()] else: self._keylist = keylist # find out", "= 'big' else: if coldata._type.bytes > 1: if coldata._byteorder != 'big': coldata.byteswap() coldata._byteorder", "CardList, _Card_with_continue, Header, _Hierarch @group HDU Classes: _AllHDU, BinTableHDU, _CorruptedHDU, _ExtensionHDU, GroupsHDU, ImageHDU,", "output = keyStr + eqStr + valStr + commentStr # need this in", "array, using the (latest) scaled array.\"\"\" _dict = {'A':'s', 'I':'d', 'F':'f', 'E':'E', 'D':'E'}", "The argument `before' takes precedence over `after' if both specified. They can be", "r'(?P<valu>' # The <strg> regex is not correct for all cases, but #", "[None, '']: commentStr = '' else: commentStr = ' / ' + self.comment", "width = None else: width = eval(width) except: raise ValueError, 'Illegal format `%s`", "if the input is a list of Columns elif isinstance(input, (list, tuple)): for", "\"\"\"FITS image extension HDU class.\"\"\" def __init__(self, data=None, header=None, name=None): \"\"\"Construct an image", "avoid infinite loop), # fix it first. 
if self.__dict__.has_key('_cardimage'): self._check(option) self._ascardimage() return self.__dict__['_cardimage']", "in range(len(self.parnames)): _name = self.parnames[i] if _name in _unique: _unique[_name].append(i) else: _unique[_name] =", "else: if option == 'old': _scale = self._bscale _zero = self._bzero elif option", "have wrong byteorder if coldata2._byteorder != 'big': coldata2.byteswap() coldata2._byteorder = 'big' # In", "chararray.array(self._file, itemsize=self._parent.field(indx)[i,0], shape=1) else: dummy[i] = num.array(self._file, type=self._coldefs._recformats[indx]._dtype, shape=self._parent.field(indx)[i,0]) dummy[i]._byteorder = 'big' #", "case _zero = min _scale = (max - min) / (2.**8 - 1)", "ValueError, \"Data is inconsistent with the format `%s`.\" % format else: raise ValueError,", "name, mode='copyonwrite', memmap=0): if mode not in _python_mode.keys(): raise \"Mode '%s' not recognized\"", "raise KeyError, 'Input argument has wrong data type.' if 'header' in extkeys: header", "image is not FITS standard (equal sign not at column 8).' raise ValueError,", "duplicate name. \"\"\" oldkey = oldkey.strip().upper() newkey = newkey.strip().upper() if newkey == 'CONTINUE':", "populate the new table definition keywords for i in range(len(_cols)): for cname in", "card in self: output += str(card) + '\\n' return output[:-1] # ----------------------------- HDU", "_get_index(self._coldefs.names, key) if (self._convert[indx] is None): # for X format if isinstance(self._coldefs._recformats[indx], _FormatX):", "isinstance(_ext, _Zero): try: hdu = hdulist[1] _data = hdu.data except IndexError: raise IndexError,", "if valu is not None: _comm = valu.group('comm') if isinstance(_comm, str): self.__dict__['comment'] =", "= hdu self.field = field # translation table for floating value string _fix_table", "numarray.strings as chararray import numarray.records as rec import numarray.objects as objects import numarray.memmap", "= _bzero)) data_shape = self._dimShape()[:-1] dat_format = `int(num.array(data_shape).sum())` + _format _bscale = self.header.get('BSCALE',", "else: raise IOError, \"File '%s' already exist.\" % name # make sure the", "bzero elif _bool: self._convert[indx] = num.equal(dummy, ord('T')) else: return dummy return self._convert[indx] def", "dummy)\" % (_index, _index, insert_pos) _err.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix)) # if value checking", "dim = '' else: dim = str(dim) self.header.update('PCOUNT', 0, 'number of parameters', after='NAXIS'+dim)", "dat, hdr, 3) # update the 3rd extension >>> update(file, dat, 'sci', 2)", "self.mmobject.close() except: pass def info(self): \"\"\"Summarize the info of the HDU's in this", "None: bitpix = _ImageBaseHDU.ImgCode[input.type()] fits_fmt = GroupsHDU._dict[bitpix] # -32 -> 'E' _fmt =", "dat, 3, header=hdr) # update the 3rd extension >>> update(file, dat, header=hdr, ext=5)", "it also delete the keylist item def keys(self): \"\"\"Return a list of all", "if bscale not in ['', None, 1]: array /= bscale self.array = array", "if self.has_key(key): j = self.ascard.index_of(key) if comment is not None: _comment = comment", "even if there are blank cards in front of END. 
\"\"\" if isinstance", "self._tbtype == 'TableHDU': last_end = 0 attr = [0] * len(self) for i", "_width.append(_convert_ASCII_format(self._coldefs._Formats[i])[1]) self._heapsize = 0 for indx in range(self._nfields): if (self._convert[indx] is not None):", "= real.group('digt').translate(_fix_table, ' ') if real.group('sign') is not None: _realStr = real.group('sign')+_realStr imag", "\"\"\" # no equal sign for commentary cards (i.e. part of the string", "self.hdu = hdu def __getitem__(self, key): dims = [] if not isinstance(key, tuple):", "if curDataSize + data.itemsize()*data._size > self._size: raise IOError, \"Supplied data will overflow the", "(C) 2004 Association of Universities for Research in Astronomy (AURA) Redistribution and use", "= self._dimShape()[:-1] dat_format = `int(num.array(data_shape).sum())` + _format _bscale = self.header.get('BSCALE', 1) _bzero =", "FITS format? convert to record format (e.g. '3J'->'3i4') recfmt = _convert_format(format) except: try:", "!= 1 or bzero !=0): _scale = bscale _zero = bzero else: if", "header: the header associated with 'data', if None, an appropriate header will be", "2. # throw away -2^N _scale = (max - min) / (2.**(8*_type.bytes) -", "def _parse_tformat(tform): \"\"\"Parse the TFORM value into repeat, data type, and option.\"\"\" try:", "check format try: # legit FITS format? convert to record format (e.g. '3J'->'3i4')", "list of parameter names. bscale: BSCALE of the data bzero: BZERO of the", "else: if coldata._type.bytes > 1: if coldata._byteorder != 'big': coldata.byteswap() coldata._byteorder = 'big'", "the HDU's data part.\"\"\" self._file.seek(0, 2) return self._file.tell() - self._datLoc def _summary(self): return", "999\", 0, option, _err) naxis = self.header.get('NAXIS', 0) if naxis < 1000: for", "parbzeros = [None]*npars if bitpix is None: bitpix = _ImageBaseHDU.ImgCode[input.type()] fits_fmt = GroupsHDU._dict[bitpix]", "if format is not None: # check format try: # legit FITS format?", "attribute.\"\"\" if isinstance(val, (str, int, long, float, complex, bool, Undefined)): if isinstance(val, str):", "{'readonly':'r', 'copyonwrite':'c', 'update':'r+'} TRUE = True # deprecated FALSE = False # deprecated", "= \"Filename: %s\\nNo. Name Type\"\\ \" Cards Dimensions Format\\n\" % _name for j", "and return its location. It returns None if equal sign is not present,", "super(CardList, self).__getitem__(_key) def __getslice__(self, start, end): _cards = super(CardList, self).__getslice__(start,end) result = CardList(_cards,", "self.header['EXTNAME'] = value else: self.header.ascard.append(Card('EXTNAME', value, 'extension name')) self.__dict__[attr] = value def _verify(self,", "but is at the beginning in Solaris. self.__file.seek(0, 2) self._size = self.__file.tell() self.__file.seek(0)", "This is the top-level FITS object. When a FITS file is opened, a", "== 'data': self.__dict__[attr] = self.field('data') elif attr == '_unique': _unique = {} for", "exception. 
If the dtype of the input data does not match what is", "== 0: raise NameError, \"Key '%s' does not exist.\" % key else: #", "through the next level items, each of the next level # must present,", "class _AllHDU: \"\"\"Base class for all HDU (header data unit) classes.\"\"\" pass class", "input.header _nfields = hdr['TFIELDS'] self._width = hdr['NAXIS1'] self._shape = hdr['NAXIS2'] # go through", "!= col_name and new_name in self.names: raise ValueError, 'New name %s already exists.'", "gcount = input.shape[0] for i in range(npars): _cols.append(Column(name='c'+`i+1`, format = fits_fmt, bscale =", "\"\"\"Read one FITS HDU, data portions are not actually read here, but the", "exist, force to have duplicate name. \"\"\" oldkey = oldkey.strip().upper() newkey = newkey.strip().upper()", "self.__file def _readheader(self, cardList, keyList, blocks): \"\"\"Read blocks of header, and put each", "ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES", "\"\"\"Execute the verification with selected option.\"\"\" _text = err_text if not fixable: option", "\"%.16G\" % value if \".\" not in valueStr and \"E\" not in valueStr:", "_getKeyString(self): \"\"\"Locate the equal sign in the card image and return the string", "= self._cardimage[8:].rstrip() self.__dict__['comment'] = '' return valu = self._check(option='parse') if name == 'value':", "# then delete from the end so as not to confuse the indexing.", "- _indx - 1 return _indx except: raise KeyError, 'Keyword %s not found.'", "if 'D' in _format: self._parent.field(indx).sub('E', 'D') # binary table else: if isinstance(self._parent.field(indx)._type, num.IntegralType):", "_scale self.header.update('BSCALE', _scale) else: del self.header['BSCALE'] if self.data._type != _type: self.data = num.array(num.around(self.data),", "self.columns.formats # if data is not touched yet, use header info. else: _shape", "return found def readall(self): \"\"\"Read data of all HDU's into memory.\"\"\" for i", "keyword argument(s): %s' % ext2 elif n_ext1 == 2: if n_ext2 == 0:", "self['PCOUNT'] del self['GCOUNT'] if issubclass(self._hdutype, PrimaryHDU): del self['GROUPS'] if issubclass(self._hdutype, _ImageBaseHDU): del self['BSCALE']", "self.header.update('XTENSION','IMAGE','Image extension', after='SIMPLE') del self.header['SIMPLE'] if not self.header.has_key('PCOUNT'): dim = self.header['NAXIS'] if dim", "occurrence of _key in _list if _count == 1: indx = _list.index(_key) elif", "= '%20s' % _tmp else: valStr = '%20s' % self._valuestring elif isinstance(self.value, Undefined):", "used for appending @type header: L{Header} object or None @param header: the header", "writing # output = data.byteswapped() else: output = data output.tofile(self._ffo.getfile()) if self._ffo.getfile().tell() -", "def update_tbhdu(self): \"\"\"Update all table HDU's for scaled fields.\"\"\" for hdu in self:", "for both ASCII and binary tables if _number or _str: if _number and", "TTYPE keyword format: column format, corresponding to TFORM keyword unit: column unit, corresponding", "before != None or after != None: _card = Card(key, value, comment) self.ascard._pos_insert(_card,", "HDU class. This class is the base class for the TableHDU, ImageHDU, and", "be one, i.e. # input arrays can be just list or tuple, not", "the heap of variable length array columns # this has to be done", "a Card to the CardList. 
pos: The position (index, keyword name will not", "= oldkey.strip().upper() newkey = newkey.strip().upper() if newkey == 'CONTINUE': raise ValueError, 'Can not", "populate the non-required keywords nrows: number of rows in the new table fill:", "# only do the scaling the first time and store it in _convert", "mmobject so if the HDUList object is created from files # other than", "takes precedence over `after' if both specified. They can be either a keyword", "values.\"\"\" if isinstance(parName, (int, long)): self.field(parName)[:] = value else: indx = self._unique[parName] if", "the middle of the word. \"\"\" list = [] _nblanks = input.count(' ')", "not None): if isinstance(self._coldefs._recformats[indx], _FormatX): _wrapx(self._convert[indx], self._parent.field(indx), self._coldefs._recformats[indx]._nx) continue (_str, _bool, _number, _scale,", "num.minimum.reduce(self.data) max = num.maximum.reduce(self.data) self.data.setshape(dims) if `_type` == 'UInt8': # UInt8 case _zero", "self: block = block + repr(card) return block def __str__(self): \"\"\"Format a list", "_offset = self._parent.field(indx)[i,1] + self._heapoffset self._file.seek(_offset) if self._coldefs._recformats[indx]._dtype is 'a': dummy[i] = chararray.array(self._file,", "self.header['BZERO'] def update_header(self): \"\"\"Update the header keywords to agree with the data.\"\"\" old_naxis", "HIERARCH.' % val else: raise ValueError, 'keyword name %s is not a string'", "other, option='left'): if isinstance(other, Column): b = [other] elif isinstance(other, ColDefs): b =", "_end - last_end last_end = _end self._width = _end else: raise KeyError, 'Attribute", "keyword value.\"\"\" return self.ascard[key].value def __setitem__ (self, key, value): \"\"\"Set a header keyword", "be used to reconstruct another kind of header. \"\"\" try: # have both", "FITS format spec to record format spec. Do the opposite if reverse =", "an image # extension header. # self.header.update('XTENSION','IMAGE','Image extension', after='SIMPLE') del self.header['SIMPLE'] if not", "of header. \"\"\" try: # have both SIMPLE and XTENSION to accomodate Extension", "shape and type for each extension. @type filename: string @param filename: input FITS", "NA_pyfits.py 329 2007-07-06 13:11:54Z jtaylor2 $ \"\"\" A module for reading and writing", "Card('BITPIX', 8, 'array data type'), Card('NAXIS', 0, 'number of array dimensions'), ]) if", "hdu._datSpan = _size + _padLength(_size) hdu._new = 0 self.__file.seek(hdu._datSpan, 1) if self.__file.tell() >", "None, i.e. an empty HDUList. file: The opened physical file associated with the", "after # the comment separator resulting in an incorrect # match. r'\\'(?P<strg>([ -~]+?|\\'\\'|))", "default is \"all\" which will print out all attributes. It forgives plurals and", "'E' -> 'f4' _formats = (_fmt+',') * npars data_fmt = '%s%s' % (`input.shape[1:]`,", "= self.header.get('SIMPLE','F') randomGroups = self.header.get('GROUPS','F') if simple == 'T' and randomGroups == 'T':", "# check format try: # legit FITS format? convert to record format (e.g.", "card(s) with the name 'key'.\"\"\" # delete ALL cards with the same keyword", "name and a header. :Parameters: name : string The name of the file", "may not be used to endorse or promote products derived from this software", "memmap=memmap) hduList = HDUList(file=ffo) # read all HDU's while 1: try: hduList.append(ffo._readHDU()) except", "Returns 1 if found, otherwise, 0. key: keyword name. 
If given an index,", "= raw_data if self._bscale != 1: num.multiply(self.data, self._bscale, self.data) if self._bzero != 0:", "__getitem__(self, key): x = self.data[key] if isinstance(key, (int, long)): return x else: return", "= self.hdu.header['BITPIX'] code = _ImageBaseHDU.NumCode[_bitpix] self.hdu._file.seek(self.hdu._datLoc+offset*abs(_bitpix)/8) raw_data = num.fromfile(self.hdu._file, type=code, shape=dims) raw_data._byteorder =", "long, comment is truncated.' output = output[:Card.length] self.__dict__['_cardimage'] = output def _checkText(self, val):", "ValueError, 'New name %s already exists.' % new_name else: self.change_attrib(col_name, 'name', new_name) def", "self.header['NAXIS'] if dim == 0: dim = '' else: dim = str(dim) self.header.update('PCOUNT',", "elif isinstance(_stop, (int, long)): _stop = _normalize(_stop, naxis) else: raise IndexError, 'Illegal slice", "value) class _TableBaseHDU(_ExtensionHDU): \"\"\"FITS table extension base HDU class.\"\"\" def __init__(self, data=None, header=None,", "**extkeys): \"\"\"Get the header from an extension of a FITS file. @param filename:", "TypeError exception is raised. \"\"\" if self.writeComplete: raise IOError, \"The stream is closed", "VerifyError, '\\n'+x if (_option != \"silentfix\") and x: print 'Output verification result:' print", "card if nc > 0: _longstring = _cardList[_where-1]._cardimage for c in _cardList[_where:_where+nc]: _longstring", "= self.__file.tell() # beginning of the data area # data area size, including", "'extver' in keys: ext = ext2['ext'], ext2['extver'] else: raise KeyError, 'Redundant/conflicting keyword argument(s):", "r = rec.RecArray.copy(self) r.__class__ = rec.RecArray r._coldefs = self._coldefs f = FITS_rec(r) f._convert", "None: # if image, need to deal with byte order if isinstance(hdu, _ImageBaseHDU):", "x.find('Unfixable') != -1: raise VerifyError, '\\n'+x if (_option != \"silentfix\") and x: print", "self.starts[i] is '': self.starts[i] = last_end + 1 _end = self.starts[i] + _width", "the new data used for appending @type header: L{Header} object or None @param", "precise. # # Note that a non-greedy match is done for a string,", "= 'big' # In case the FITS_rec was created in a LittleEndian machine", "a valid value/comment string. # The valu group will return a match if", "verify the value, it may be fixable result = Card._value_FSC_RE.match(self._getValueCommentString()) if result is", "None: self.ascard._pos_insert(new_card, before=before, after=after) else: if key[0] == ' ': useblanks = new_card._cardimage", "_number_NFSC_RE = re.compile(r'(?P<sign>[+-])? *0*(?P<digt>' + _digits_NFSC + ')') # FSC commentary card string", "= _File(name, 'append') self._ffo.getfile().seek(0,2) self._hdrLoc = self._ffo.writeHDUheader(self) self._datLoc = self._ffo.getfile().tell() self._size = self.size()", "pos.split() if _parse[0] in ['>=', '==']: insert_pos = eval(_parse[1]) # if the card", "'s'+str(size) + ',' strlen = strlen + size else: strfmt = '>' +", "result = self.field(parName) else: indx = self._unique[parName.lower()] if len(indx) == 1: result =", "into an image # extension header. # self.header.update('XTENSION','IMAGE','Image extension', after='SIMPLE') del self.header['SIMPLE'] if", "or XTENSION' for i in range(0, len(blocks), Card.length): _card = Card('').fromstring(blocks[i:i+Card.length]) _key =", "be anywhere. If the card does not exist, the new card will have", "bzero value, corresponding to TZERO keyword disp: display format, corresponding to TDISP keyword", "list. 
The key can be an integer or string. If integer, it is", "% _tmp else: valStr = '%20s' % self._valuestring elif isinstance(self.value, Undefined): valStr =", "isinstance(self.data, GroupData): self.header.update('GROUPS', True, after='NAXIS'+`len(axes)`) self.header.update('PCOUNT', len(self.data.parnames), after='GROUPS') self.header.update('GCOUNT', len(self.data), after='PCOUNT') npars =", "already exists, it's value/comment will be updated. If it does not exist, a", "Column): raise \"Element %d in the ColDefs input is not a Column.\" %", "self._coldefs._tbtype == 'TableHDU': _format = self._coldefs._Formats[indx].strip() _lead = self._coldefs.starts[indx] - _loc[indx] if _lead", "touched, use data info. if 'data' in dir(self): if self.data is None: _shape,", "after=None): \"\"\"Add a HISTORY card. value: History text to be added. before: [same", "the input data does not match what is expected by the header, a", "BSCALE and BZERO after scaling del self.header['BSCALE'] del self.header['BZERO'] self.header['BITPIX'] = _ImageBaseHDU.ImgCode[self.data.type()] else:", "a float in fixed or # scientific notation. One for FSC and one", "lambda x: num.array(x, type=recfmt._dtype) array = _VLF(map(_func, array)) except: try: # this handles", "rest of the arguments are for extension specification. See L{getdata} for explanations/examples. @return:", "on this issue and only states that a # string should not end", "value to be returned. \"\"\" try: return self[key] except: return default def update(self,", "data=data, header=header, name=name) self._xtn = 'BINTABLE' hdr = self.header if hdr[0] != self._xtn:", "self.ascard[key] self._mod = 1 except: return # for integer key only delete once", "name: the EXTNAME value \"\"\" _TableBaseHDU.__init__(self, data=data, header=header, name=name) self._xtn = 'BINTABLE' hdr", "is not a string' % val self.__dict__['comment'] = val def __setattr__(self, name, val):", "If is None, use the current data type. option: how to scale the", "eqStr + valStr) > Card.length: raise ValueError, \"The keyword %s with its value", "the scaled data = 0, not the stored data hdu.data._parent.field(i)[n:] = -bzero/bscale else:", "-1, -1): # locate last non-commentary card if self[i].key not in Card._commentaryKeys: break", "NAXIS1 should be 0, so we skip NAXIS1. 
if naxis > 1: size", "_fmt = _fits2rec[fits_fmt] # 'E' -> 'f4' _formats = (_fmt+',') * npars data_fmt", "output = hdu.data.byteswapped() else: output = hdu.data # Binary table byteswap elif isinstance(hdu,", "if pos is a string, it must be of the syntax of \">", "_nfields and col > 0: cname = _commonNames[_keyNames.index(keyword)] dict[col-1][cname] = _card.value # data", "key): \"\"\"Delete an HDU from the HDUList, indexed by number or name.\"\"\" key", "else: self._hdutype = _ExtensionHDU else: self._hdutype = _ValidHDU except: self._hdutype = _CorruptedHDU #", "if mo is None: hdu._raw += block block = self.__file.read(_blockLen) if block ==", "now XXX) _bitpix = self.hdu.header['BITPIX'] code = _ImageBaseHDU.NumCode[_bitpix] self.hdu._file.seek(self.hdu._datLoc+offset*abs(_bitpix)/8) raw_data = num.fromfile(self.hdu._file, type=code,", "range(len(axes)+1, old_naxis+1): try: del self.header.ascard['NAXIS'+`j`] except KeyError: pass if isinstance(self.data, GroupData): self.header.update('GROUPS', True,", "return text[:-1] def copy(self): tmp = Column(format='I') # just use a throw-away format", "more header is resized.\" break # Data: if 'data' not in dir(hdu): continue", "# then try to conver it to a strings array array = chararray.array(array,", "\"old\", use the original BSCALE and BZERO values when the data was read/created.", "the comment if isinstance(self.value, str) and len(valStr) > (Card.length-10): self.__class__ = _Card_with_continue output", "= 'a' in self._coldefs.formats[indx] _bool = self._coldefs._recformats[indx][-2:] == _booltype else: _str = self._coldefs.formats[indx][0]", "def __iter__(self): return [self[i] for i in range(len(self))].__iter__() def __getitem__(self, key): \"\"\"Get an", "resized, update in place else: for hdu in self: if (verbose): try: _extver", "self.header['EXTNAME'] self.name = name def __getattr__(self, attr): \"\"\"Get the 'data' or 'columns' attribute.\"\"\"", "hdu.columns = input._coldefs else: # input is a list of Columns tmp =", "an exception since there is no unique mapping. If there is a field", "the extension spec if n_ext1 > 2: raise ValueError, \"too many positional arguments\"", "to the next column\" % indx+1 if 'A' in _format: _pc = '%-'", "to the urllibrary urllib._urlopener.tempcache = {} # Initialize tempcache with an empty #", "\"\"\"Close the 'physical' FITS file.\"\"\" self.__file.close() class HDUList(list, _Verify): \"\"\"HDU list class. This", "the string = TNULL, return ASCIITNULL nullval = self._coldefs.nulls[indx].strip() dummy = num.zeros(len(self._parent), type=_type)", "_VLF(objects.ObjectArray): \"\"\"variable length field object.\"\"\" def __init__(self, input): \"\"\" input: a sequence of", "types # Boolean is also OK in this constructor _card = \"Card('%s', %s)\"", "except: try: # this handles ['abc'] and [['a','b','c']] # equally, beautiful! _func =", "elif valu.group('strg') != None: _val = re.sub(\"''\", \"'\", valu.group('strg')) elif valu.group('numr') != None:", "ts own private attribute __file. \"\"\" if self.__file != None: if self.__file.memmap ==", "data._nfields self.data = data self.columns = data._coldefs self.update() elif data is None: pass", "['', None, 1]: array /= bscale self.array = array def __repr__(self): text =", "notice, this list of conditions and the following disclaimer. 2. 
Redistributions in binary", "axes else: offset *= _naxis if dims == []: dims = [1] npt", "if j != _min: num.lshift(output[...,i], 1, output[...,i]) num.add(output[...,i], input[...,j], output[...,i]) # shift the", "type of the variable array \"\"\" _offset = 0 data_output = _VLF([None]*len(input)) data_output._dtype", "if self._tbtype == 'BinTableHDU': attr = [_convert_format(fmt) for fmt in self.formats] elif self._tbtype", "else: dims = self._dimShape() code = _ImageBaseHDU.NumCode[self.header['BITPIX']] if self._ffile.memmap: _mmap = self._ffile._mm[self._datLoc:self._datLoc+self._datSpan] raw_data", "- self._coldefs.starts[indx] if _trail < 0: raise ValueError, \"column `%s` ending point overlaps", "\"Inconsistent input data array: %s\" % array array._dtype = recfmt._dtype else: raise ValueError,", "FITS file name @param ext: The rest of the arguments are for extension", "= self._datSpan hdu._ffile = self._ffile hdu.name = self.name hdu._extver = self._extver hdu._new =", "self.header.get('BZERO', 0) self._bscale = self.header.get('BSCALE', 1) if (data is DELAYED): return self.data =", "# must change the Primary header provided into an image # extension header.", "i in range(len(val_list)): if i == 0: headstr = \"%-8s= \" % self.key", "piece of data: shdu.write(data) shdu.close() \"\"\" def __init__(self, name, header): \"\"\" Construct a", "self.__file.seek(0) def __getattr__(self, attr): \"\"\"Get the _mm attribute.\"\"\" if attr == '_mm': self.__dict__[attr]", "+= tmp.spans[i] hdu.data = FITS_rec(rec.array(' '*_itemsize*nrows, formats=_formats[:-1], names=tmp.names, shape=nrows)) else: hdu.data = FITS_rec(rec.array(None,", "represents a Primary header, the header will be modified to an image extension", "attrib.split(',') for i in range(len(list)): list[i]=list[i].strip().lower() if list[i][-1] == 's': list[i]=list[i][:-1] for att", "overlaps to the previous column\" % indx+1 _trail = _loc[indx+1] - _width[indx] -", "must have CONTINUE cards after the first card.' if not isinstance(_card.value, str): raise", "raise IndexError, 'Extension %s is out of bound or not found.' % key", "self._blanks = 0 self.count_blanks() def __getitem__(self, key): \"\"\"Get a Card by indexing or", "bscale, self._convert[indx]) if _zero: self._convert[indx] += bzero elif _bool: self._convert[indx] = num.equal(dummy, ord('T'))", "expand (as C/Python does). for i in range(len(dummy)): x = _fmt % dummy[i]", "self.__dict__['value'] = self._cardimage[8:].rstrip() self.__dict__['comment'] = '' return valu = self._check(option='parse') if name ==", "(verbose): print \"open a temp file\", _name for hdu in self: (hdu._hdrLoc, hdu._datLoc,", "DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,", "start/stop in the regular range.\"\"\" def _normalize(indx, npts): if indx < -npts: indx", "# go through the list twice, first time print out all top level", "npts return indx _start = input.start if _start is None: _start = 0", "import tempfile import gzip import zipfile import numarray as num import numarray.generic as", "(b) When you *refer* to a field (presumably with the field method), it", "# equally, beautiful! _func = lambda x: chararray.array(x, itemsize=1) array = _VLF(map(_func, array))", "= _Card_with_continue output = self._breakup_strings() else: print 'card is too long, comment is", "desc = self._parent.field(indx) desc[:] = 0 # reset _npts = map(len, self._convert[indx]) desc[:len(_npts),0]", "NumArray Data to stream to the file. 
:Returns: writeComplete : integer Flag that", "output_verify='exception', verbose=0): \"\"\"Force a write of the HDUList back to the file (for", "\"flush for '%s' mode is not supported.\" % self.__file.mode return self.update_tbhdu() self.verify(option=output_verify) if", "tmp._dat_format if hdu._ffile.memmap: _mmap = hdu._ffile._mm[hdu._datLoc:hdu._datLoc+hdu._datSpan] _data = rec.RecArray(_mmap, formats=tmp._recformats, names=tmp.names, shape=tmp._shape) else:", "def _normalize_slice(input, naxis): \"\"\"Set the slice's start/stop in the regular range.\"\"\" def _normalize(indx,", "dir(hdu): continue if hdu.data is None: continue _bytes = hdu.data._itemsize*hdu.data.nelements() _bytes = _bytes", "'value': if valu is None: raise ValueError, \"Unparsable card, fix it first with", "0: self.__file.write(_padLength(_size)*'\\0') # flush, to make sure the content is written self.__file.flush() #", "strfmt ''' def _verify(self, option='warn'): \"\"\"TableHDU verify method.\"\"\" _err = _TableBaseHDU._verify(self, option=option) self.req_cards('PCOUNT',", "if hdu.data is None: continue _bytes = hdu.data._itemsize*hdu.data.nelements() _bytes = _bytes + _padLength(_bytes)", "specified extension with the input data/header. @type filename: string @param filename: name of", "a convenience method to provide a user easier output interface if only one", "cname in _commonNames: value = eval(cname) # get the argument's value keyword =", "hdu.data._parent.field(i)[:n] = tmp._arrays[i][:n] elif isinstance(tmp._recformats[i], _FormatP): hdu.data._convert[i] = _makep(tmp._arrays[i][:n], hdu.data._parent.field(i)[:n], tmp._recformats[i]._dtype) else: if", "if _name == _key: # if only specify extname, can only have one", "self._ffile._mm[self._datLoc:self._datLoc+self._datSpan] raw_data = num.array(_mmap, type=code, shape=dims) else: raw_data = num.fromfile(self._file, type=code, shape=dims) raw_data._byteorder", "_WholeLine(naxis, 0) else: if indx.step == 1: return _LineSlice(indx.stop-indx.start, indx.start) else: return _SteppedSlice((indx.stop-indx.start)/indx.step,", "Do the first card here, instead of in the respective HDU classes, #", "value '%s'.\" % (keywd, val) fix_text = \"Fixed by setting a new value", "'a': output_format = option+_rec2fits[dtype] elif isinstance(dtype, _FormatX): print 'X format' elif dtype+option in", "header, a TypeError exception is raised. \"\"\" if self.writeComplete: raise IOError, \"The stream", "comment else: _comment = self.ascard[j].comment self.ascard[j] = Card(key, value, _comment) elif before !=", "\"> n\", # where n is an int if isinstance(pos, str): _parse =", "and j == 0: continue _shape += (self.header['NAXIS'+`j+1`],) _format = self.NumCode[self.header['BITPIX']] if isinstance(self,", "the content is written self.__file.flush() return loc def writeHDUdata(self, hdu): \"\"\"Write FITS HDU", "= FITS_rec(rec.array(' '*_itemsize*nrows, formats=_formats[:-1], names=tmp.names, shape=nrows)) else: hdu.data = FITS_rec(rec.array(None, formats=tmp._recformats, names=tmp.names, shape=nrows))", "in range(naxis): axes[j] = self.header['NAXIS'+`j+1`] axes.reverse() return tuple(axes) def _summary(self): \"\"\"Summarize the HDU:", "data=None, header=None, name=None): \"\"\" header: header to be used data: data to be", "the old table definition keywords. 
Mark them first, # then delete from the", "== 'comment': self.__dict__['comment'] = '' if valu is not None: _comm = valu.group('comm')", "bool is also int elif isinstance(self.value , bool): valStr = '%20s' % `self.value`[0]", "= self.header['NAXIS2'] _ncols = self.header['TFIELDS'] _format = '[' for j in range(_ncols): _format", "' '*_trail # not using numarray.strings's num2char because the # result is not", "as CONTINUE cards may span across blocks. \"\"\" if len(block) != _blockLen: raise", "is resized, need to write it to a tmp file, # delete the", "super(CardList, self).insert(pos, card) self._keylist.insert(pos, card.key) # update the keylist self.count_blanks() if useblanks: self._use_blanks(card._ncards())", "the name 'key'.\"\"\" # delete ALL cards with the same keyword name if", "the equal sign. If there is no equal sign, return the string before", "del self.header['BSCALE'] del self.header['BZERO'] def update_header(self): \"\"\"Update the header keywords to agree with", "# not using self.key eqStr = '' if self.__dict__.has_key('value'): valStr = str(self.value) #", "= self.data._get_scale_factors(npars)[3:5] if _scale: self.header.update('BSCALE', self.data._coldefs.bscales[npars]) if _zero: self.header.update('BZERO', self.data._coldefs.bzeros[npars]) for i in", "1, will fill all cells with zeros or blanks if = 0, copy", "hdu._getsize(hdu._raw) # get extname and extver if hdu.name == '': hdu.name, hdu._extver =", "1 # use an array, even if it is only ONE u1 (i.e.", "value.type() == self._dtype: pass elif isinstance(value, chararray.CharArray) and value.itemsize() == 1: pass elif", "_val += eval(imag.group('sign') + _idigt)*1j else: _val = UNDEFINED self.__dict__['value'] = _val if", "= num.array(self._file, type=self._coldefs._recformats[indx]._dtype, shape=self._parent.field(indx)[i,0]) dummy[i]._byteorder = 'big' # scale by TSCAL and TZERO", "fmt.lstrip()[0] == 'A' and option != '': output_format = _fits2rec[dtype]+`int(option)` # make sure", "= self.index_of(key) super(HDUList, self).__delitem__(key) self._resize = 1 def __delslice__(self, i, j): \"\"\"Delete a", "INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF", "for P format if isinstance(self._coldefs._recformats[indx], _FormatP): dummy = _VLF([None]*len(self._parent)) dummy._dtype = self._coldefs._recformats[indx]._dtype for", "= npts return indx _start = input.start if _start is None: _start =", "to the file. :Returns: writeComplete : integer Flag that when true indicates that", "_fits2rec[fits_fmt] # 'E' -> 'f4' _formats = (_fmt+',') * npars data_fmt = '%s%s'", "\"%-10s %-11s %5d %-12s %s\" % \\ (self.name, type, len(self.header.ascard), _dims, _format) def", "is only ONE u1 (i.e. use tuple always) output_format = _FormatX(`(nbytes,)`+'u1') output_format._nx =", "_index) fix_text = \"Fixed by moving it to the right place (card %d).\"", "= 1 def copy(self): \"\"\"Make a copy of the Header.\"\"\" tmp = Header(self.ascard.copy())", "\"\"\"A class to use with urlretrieve to allow IOError exceptions to be raised", "__builtin__ import urllib import tempfile import gzip import zipfile import numarray as num", "hdu): \"\"\"Write FITS HDU header part.\"\"\" blocks = repr(hdu.header.ascard) + _pad('END') blocks =", "Card('GCOUNT', 1, 'number of groups'), Card('TFIELDS', 0, 'number of table fields') ]) if", "'NAXIS', or 'END' cards. 
A corrupted HDU usually means that the data size", "as Memmap from string import maketrans import copy import signal import threading #", "hdulist = open(filename, mode=mode) n_ext1 = len(ext1) n_ext2 = len(ext2) keys = ext2.keys()", "change_format(self, col_name, new_format): #new_format = _convert_format(new_format) #self.change_attrib(col_name, 'format', new_format) def _get_tbdata(hdu): \"\"\" Get", "self._extver hdu._new = 0 hdu.header._mod = 0 hdu.header.ascard._mod = 0 except: pass return", "_convert_format(new_format) #self.change_attrib(col_name, 'format', new_format) def _get_tbdata(hdu): \"\"\" Get the table data from input", "1 comm_list = self._words_group(comm, comm_len) for i in comm_list: commstr = \"CONTINUE '&'", "%d Parameters' % (self.header['GCOUNT'], self.header['PCOUNT']) else: _gcount = '' return \"%-10s %-11s %5d", "valu.group('bool')=='T' elif valu.group('strg') != None: _val = re.sub(\"''\", \"'\", valu.group('strg')) elif valu.group('numr') !=", "by error messages generated by verifications at different class levels. \"\"\" def __init__(self,", "_err = _ErrList([], unit='Card') isValid = \"val in [8, 16, 32, 64, -32,", "a copy of the table HDU, both header and data are copied.\"\"\" #", "card) self._keylist.insert(i+1, card.key.upper()) if useblanks: self._use_blanks(card._ncards()) self.count_blanks() self._mod = 1 else: raise SyntaxError,", "self.verify(option=output_verify) # check if the output file already exists if os.path.exists(name): if clobber:", "is considered # to be more than one 80-char \"physical\" cards. _max =", "not match what is expected by the header, a TypeError exception is raised.", "an integer, a string, or a tuple of (string, integer). \"\"\" if isinstance(key,", "'Illegal keyword name %s' % repr(val) self.__dict__['_fixable'] = 0 raise ValueError, self._err_text def", "self.__dict__.has_key('value'): valStr = str(self.value) # put all parts together output = keyStr +", "self.spans[i] = _end - last_end last_end = _end self._Formats = self.formats self._arrays[i] =", "instead of appending after these blank cards, so the total space will not", "= tmp._arrays[i].copy() else: _arr = tmp._arrays[i] if _scale: _arr *= bscale if _zero:", "attr): \"\"\"Get the data attribute.\"\"\" if attr == 'section': return Section(self) elif attr", "data as an array.\"\"\" indx = _get_index(self._coldefs.names, key) if (self._convert[indx] is None): #", "['update', 'append']: raise \"Writing to gzipped fits files is not supported\" zfile =", "val == 1\", 1, option, _err) return _err # 0.8.8 def _iswholeline(indx, naxis):", "= value def _verify(self, option='warn'): _err = _ValidHDU._verify(self, option=option) # Verify location and", "= list(other.data) else: raise TypeError, 'Wrong type of input' if option == 'left':", "not support ND yet if isinstance(hdu, GroupsHDU): tmp._recformats[-1] = `hdu._dimShape()[:-1]` + tmp._dat_format if", "and Technology publication, NOST 100-2.0. License: http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE For detailed examples of usage, see", "format else: raise ValueError, \"Must specify format to construct Column\" # scale the", "keyword = _keyNames[_commonNames.index(cname)]+`i+1` if cname == 'format' and isinstance(self, BinTableHDU): val = _cols._recformats[i]", "is not NDarray, make it to be one, i.e. # input arrays can", "_data._file = hdu._file _tbsize = hdu.header['NAXIS1']*hdu.header['NAXIS2'] _data._gap = hdu._theap - _tbsize # comment", "image (80 columns). 
If the card image is longer than 80, assume it", "type. option: how to scale the data: if \"old\", use the original BSCALE", "Data to stream to the file. :Returns: writeComplete : integer Flag that when", "it will be split in the middle of the word. \"\"\" list =", "= '' # string value should occupies at least 8 columns, unless it", "may be fixable result = Card._value_FSC_RE.match(self._getValueCommentString()) if result is not None or self.key", "for i in range(_nfields)] # definition dictionaries for each field for _card in", "rec.RecArray.copy(self) r.__class__ = rec.RecArray r._coldefs = self._coldefs f = FITS_rec(r) f._convert = copy.deepcopy(self._convert)", "_err class _TempHDU(_ValidHDU): \"\"\"Temporary HDU, used when the file is first opened. This", "+= desc[:,0].sum()*_dtype.bytes # conversion for both ASCII and binary tables if _number or", "0, 'number of table fields') ]) if header is not None: # Make", "type=recfmt._dtype) array = _VLF(map(_func, array)) except: try: # this handles ['abc'] and [['a','b','c']]", "err_text = \"HDUList's element %s is not an extension HDU.\" % `i` _text", "input for HDUList.\" for hdu in hdus: if not isinstance(hdu, _AllHDU): raise \"Element", "to the HDUList, indexed by number or name.\"\"\" _key = self.index_of(key) if isinstance(hdu,", "or corrupted HDU except ValueError: print 'Warning: Required keywords missing when trying to", "comm_list: commstr = \"CONTINUE '&' / \" + commfmt % i output =", "image with the specified option. \"\"\" self.__dict__['_err_text'] = '' self.__dict__['_fix_text'] = '' self.__dict__['_fixable']", "return the string after the equal sign. If there is no equal sign,", "_err.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix, fixable=fixable)) return _err class _TempHDU(_ValidHDU): \"\"\"Temporary HDU, used when", "_count == 0: raise NameError, \"Key '%s' does not exist.\" % key else:", "+ ']' _dims = \"%dR x %dC\" % (_nrows, _ncols) return \"%-10s %-11s", "% ext2 else: if 'extname' in keys: if 'extver' in keys: ext =", "a FITS file. This includes the name, type, length of header, data shape", "part of a table HDU's data part. This is a layer over the", "etc.). If is None, use the current data type. option: how to scale", "'silentfix': _text += ' ' + fix_text return _text def verify (self, option='warn'):", "ASCIITNULL = 0 # value for ASCII table cell with value = TNULL", "delete once else: del self.ascard[key] self._mod = 1 def __str__(self): return self.ascard.__str__() def", "[] blocks = self._raw if (len(blocks) % _blockLen) != 0: raise IOError, 'Header", "1 if not _zero: bzero = 0 return (_str, _bool, _number, _scale, _zero,", "_where = _keyList[_start:].index('CONTINUE') + _start for nc in range(1, _max+1): if _where+nc >=", "TypeError, \"table data has incorrect type\" # set extension name if not name", "TSCAL and TZERO if _scale or _zero: for i in range(len(self._parent)): dummy[i][:] =", "BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,", "must present, even it has nothing. 
for item in self: if isinstance(item, _ErrList):", "If the provided data would cause the stream to overflow, an IOError exception", "if keyStr.strip() in Card._commentaryKeys: # not using self.key eqStr = '' if self.__dict__.has_key('value'):", "'D':'D24.16'} self._tbtype = tbtype if isinstance(input, ColDefs): self.data = [col.copy() for col in", "return both the location and the size of the data area return loc,", "arguments implies the primary header >>> getdata('in.fits') By extension number: >>> getdata('in.fits', 0)", "type to be created (BinTableHDU or TableHDU) \"\"\" # construct a table HDU", "ndarray.NDArray.__getitem__(self._convert[i], key) del dummy return out # if not a slice, do this", "update(file, dat, 'sci', 2) # update the 2nd SCI extension >>> update(file, dat,", "# # Note that a non-greedy match is done for a string, #", "\"\"\"Make a copy of the HDU, both header and data are copied.\"\"\" if", "= attrib.split(',') for i in range(len(list)): list[i]=list[i].strip().lower() if list[i][-1] == 's': list[i]=list[i][:-1] for", "by the keyword name.\"\"\" if isinstance (value, Card): _key = self.index_of(key) # only", "'parse': # check the value only, no need to check key and comment", "= bzero)) self._coldefs = ColDefs(_cols) self.parnames = [i.lower() for i in parnames] tmp", "new card will be placed. The argument `before' takes precedence over `after' if", "cause the stream to overflow, an IOError exception is raised and the data", "files and manipulating their contents. A module for reading and writing Flexible Image", "def _use_blanks(self, how_many): if self._blanks > 0: for i in range(min(self._blanks, how_many)): del", "= _ImageBaseHDU.NumCode[self.header['BITPIX']] if self._ffile.memmap: _mmap = self._ffile._mm[self._datLoc:self._datLoc+self._datSpan] raw_data = num.array(_mmap, type=code, shape=dims) else:", "None, None, None, option, _err) return _err class TableHDU(_TableBaseHDU): \"\"\"FITS ASCII table extension", "# do the _parent too, otherwise the _parent # of a scaled column", "# reset the resize attributes after updating self._resize = 0 for hdu in", "the floating number to make sure it gets the decimal point.\"\"\" valueStr =", "UNDEFINED self.__dict__['value'] = _val if '_valuestring' not in self.__dict__: self.__dict__['_valuestring'] = valu.group('valu') if", "a new file. name: output FITS file name to be written to. output_verify:", "= _pnames self.__dict__[attr] = _coldefs elif attr == '_theap': self.__dict__[attr] = 0 try:", "'big': coldata.byteswap() coldata._byteorder = 'big' if coldata2._type.bytes > 1: # do the _parent", "SCI extension >>> update(file, dat, 3, header=hdr) # update the 3rd extension >>>", "data self.columns = data._coldefs self.update() elif data is None: pass else: raise TypeError,", "maximum of the data to scale. The option will be overwritten by any", "self._file hdu._hdrLoc = self._hdrLoc hdu._datLoc = self._datLoc hdu._datSpan = self._datSpan hdu._ffile = self._ffile", "a card's keyword in the header. 
oldkey: old keyword, can be a name", "Header.\"\"\" tmp = Header(self.ascard.copy()) # also copy the class tmp._hdutype = self._hdutype return", "them first, # then delete from the end so as not to confuse", "_list.append(i) for i in _list: del self.header.ascard[i] del _list # populate the new", "self.__file.tell() if isinstance(hdu, BinTableHDU): self.__file.write(hdu.data._gap*'\\0') for i in range(hdu.data._nfields): if isinstance(hdu.data._coldefs._recformats[i], _FormatP): for", "length array except: if isinstance(recfmt, _FormatP): try: _func = lambda x: num.array(x, type=recfmt._dtype)", "= self._parent.field(indx) desc[:] = 0 # reset _npts = map(len, self._convert[indx]) desc[:len(_npts),0] =", "(int, slice)): return key elif isinstance(key, tuple): _key = key[0] _ver = key[1]", "and store it in _convert self._convert[indx] = num.array(dummy, type=num.Float64) if _scale: num.multiply(self._convert[indx], bscale,", "Card after which the new card will be placed. default=None. \"\"\" if self.has_key(key):", "One for FSC and one for non-FSC (NFSC) format: # NFSC allows lower", "FITS file. @param filename: input FITS file name @type: string @param ext: The", "keyword %s with its value is too long.\" % self.key if len(output) <=", "'I':'i4', 'F':'f4', 'E':'f4', 'D':'f8'} _re = re.compile(r'(?P<dtype>[AIFED])(?P<width>[0-9]*)') # Parse the TFORM value into", "to make sure it gets the decimal point.\"\"\" valueStr = \"%.16G\" % value", "(NFSC) format: # NFSC allows lower case of DE for exponent, allows space", "'Fixed card to be FITS standard.: %s' % self.key else: self.__dict__['_err_text'] = 'Card", "eval(cname) # get the argument's value keyword = _keyNames[_commonNames.index(cname)] if isinstance(value, Card): setattr(self,", "each field for ASCII table if self._coldefs._tbtype == 'TableHDU': _loc = [1] _width", "if _scale: self.header.update('BSCALE', self.data._coldefs.bscales[npars]) if _zero: self.header.update('BZERO', self.data._coldefs.bzeros[npars]) for i in range(npars): self.header.update('PTYPE'+`i+1`,", "'Warning: Required keywords missing when trying to read HDU #%d.\\n There may be", "x else: return ColDefs(x) def __len__(self): return len(self.data) def __repr__(self): return 'ColDefs'+ `tuple(self.data)`", "data. For details of the FITS standard, see the NASA/Science Office of Standards", "IOError, (errcode, errmsg, url) urllib._urlopener = ErrorURLopener() # Assign the locally subclassed opener", "= self.data._get_scale_factors(i)[3:5] if _scale: self.header.update('PSCAL'+`i+1`, self.data._coldefs.bscales[i]) if _zero: self.header.update('PZERO'+`i+1`, self.data._coldefs.bzeros[i]) def __getattr__(self, attr):", "= val def _setcomment(self, val): \"\"\"Set the comment attribute.\"\"\" if isinstance(val,str): self._checkText(val) else:", "is the data part of a table HDU's data part. 
This is a", "if not os.path.exists(name): if not self.header.has_key('SIMPLE'): hdulist = HDUList([PrimaryHDU()]) hdulist.writeto(name, 'exception') else: if", "# delete ALL cards with the same keyword name if isinstance(key, str): while", "just a view) of the input header, since it # may get modified.", "_convert self._convert[indx] = num.array(dummy, type=num.Float64) if _scale: num.multiply(self._convert[indx], bscale, self._convert[indx]) if _zero: self._convert[indx]", "self.__dict__[attr] = self.header.get('PCOUNT', 0) try: return self.__dict__[attr] except KeyError: raise AttributeError(attr) def _summary(self):", "dict = [{} for i in range(_nfields)] # definition dictionaries for each field", "one-to-one correspondence when updating the list(s). # Use lists, instead of dictionaries so", "the minimum and maximum of the data to scale. The option will be", "record array class. FITS record array is the data part of a table", "== 2', 2, option, _err) self.req_cards('BITPIX', None, 'val == 8', 8, option, _err)", "keyword argument: %s' % ext2 return hdulist, ext def getheader(filename, *ext, **extkeys): \"\"\"Get", "longer than strlen, break in the middle if offset <= xoffset: offset =", "supported.\" % self.__file.mode return self.update_tbhdu() self.verify(option=output_verify) if self.__file.mode == 'append': for hdu in", "bzero = _bzero)) _coldefs = ColDefs(_cols) _coldefs._shape = self.header['GCOUNT'] _coldefs._dat_format = _fits2rec[_format] _coldefs._pnames", "BinTableHDU classes. \"\"\" def __init__(self, data=None, header=None): self._file, self._offset, self._datLoc = None, None,", "0\", 0, option, _err) _after = self.header['NAXIS'] + 3 # if the card", "_shape = list(self.data.data.getshape())[1:] _format = `self.data._parent.field(0).type()` else: _shape = list(self.data.getshape()) _format = `self.data.type()`", "_card.value).rstrip() # drop the ending \"&\" if _val[-1] == '&': _val = _val[:-1]", "output \"descriptor\" array of data type 2Int32 dtype: data type of the variable", "- _tbsize _pcount = hdu.data._heapsize + hdu.data._gap if _pcount > 0: hdu.header['PCOUNT'] =", "exception, e.g., >>> getdata('in.fits', ext=('sci',1), extname='err', extver=2) @return: an array, record array (i.e.", "update(file, dat, 3) # update the 3rd extension >>> update(file, dat, hdr, 3)", "str): self._checkText(val) self.__dict__['_valueModified'] = 1 else: raise ValueError, 'Illegal value %s' % str(val)", "16:'I', 32:'J', 64:'K', -32:'E', -64:'D'} def __init__(self, data=None, header=None, name=None): PrimaryHDU.__init__(self, data=data, header=header)", "check for one word longer than strlen, break in the middle if offset", "data is None: pass else: raise TypeError, \"table data has incorrect type\" #", "first, # then delete from the end so as not to confuse the", "== 'A': output_format = _fits2rec[dtype]+`repeat` # to accomodate both the ASCII table and", "key[0] _ver = key[1] else: _key = key _ver = None if not", "data will be scaled and is therefore not very usable after the call.", "% \\ (self.name, type, len(self.header.ascard), _shape, _format, _gcount) def scale(self, type=None, option=\"old\", bscale=1,", "parent data, just pass it hdu.data._parent.field(i)[:n] = tmp._arrays[i][:n] elif isinstance(tmp._recformats[i], _FormatP): hdu.data._convert[i] =", "isinstance(col, Column): raise \"Element %d in the ColDefs input is not a Column.\"", "KeyError: raise AttributeError(attr) def par(self, parName): \"\"\"Get the group parameter values.\"\"\" if isinstance(parName,", "integer key 
only delete once else: del self.ascard[key] self._mod = 1 def __str__(self):", "_tmp = '(' + _floatFormat(self.value.real) + ', ' + _floatFormat(self.value.imag) + ')' valStr", "of the random group data.\"\"\" def __init__(self, input, row=0): rec.Record.__init__(self, input, row) def", "'XTENSION': xtension = cards[0].value.rstrip() if xtension == 'TABLE': self._hdutype = TableHDU elif xtension", "if _keyList[_start:].count('CONTINUE') == 0: break # construct the Header object, using the cards.", "= [_get_index(self.names, key) for key in other] indx=range(len(self)) for x in _other: indx.remove(x)", "a default Primary HDU will be inserted at the beginning of the file", "empty HDUList. file: The opened physical file associated with the HDUList. Default =", "calls the close method of the _File class. It has this two-tier calls", "else: if isinstance(value, (list, tuple)) and len(indx) == len(value): for i in range(len(indx)):", "characters. _ASCII_text = r'[ -~]*$' _comment_FSC_RE = re.compile(_ASCII_text) # Checks for a valid", "to write to the new file @type header: L{Header} object or None @param", "0: self.data += -_zero # 0.9.6.3 to avoid out of range error for", "self.ascard[_index].value self.ascard[_index] = Card(newkey, _value, _comment) # self.ascard[_index].__dict__['key']=newkey # self.ascard[_index].ascardimage() # self.ascard._keylist[_index] =", "\"\"\"Verify the card image with the specified option. \"\"\" self.__dict__['_err_text'] = '' self.__dict__['_fix_text']", "in self.ascard: pairs.append((card.key, card.value)) return pairs def has_key(self, key): \"\"\"Check for existence of", "original array if bzero not in ['', None, 0] or bscale not in", "to scale the data # bscale and bzero takes priority if (bscale !=", "_pos, 'val == True', True, option, _err) return _err # --------------------------Table related code----------------------------------", "= `self.header['NAXIS']` if dim == '0': dim = '' # set extension name", "bits \"\"\" output[...] = 0 # reset the output nbytes = ((nx-1) /", "read HDU #%d.\\n There may be extra bytes after the last HDU or", "optional. @keyword clobber: (optional) if True and if filename already exists, it will", "del self['TFIELDS'] for name in ['TFORM', 'TSCAL', 'TZERO', 'TNULL', 'TTYPE', 'TUNIT']: for i", "self) def index_of(self, key, backward=0): \"\"\"Get the index of a keyword in the", "of appending after these blank cards, so the total space will not increase", "it will return # None, meaning the keyword is undefined. The comment field", "for i in range(len(self._parent)): if self._parent.field(indx)[i].strip() != nullval: dummy[i] = float(self._parent.field(indx)[i].replace('D', 'E')) else:", "== 'key': raise SyntaxError, 'keyword name cannot be reset.' elif name == 'value':", "header. Strip cards like SIMPLE, BITPIX, etc. so the rest of the header", "_zero = self._bzero elif option == 'minmax': if isinstance(_type, num.FloatingType): _scale = 1", "# use the largest column shape as the shape of the record if", "default=[]. \"\"\" # decide which kind of header it belongs to try: if", "temporarily to save memory dims = self.data.getshape() self.data.setshape(self.data.nelements()) min = num.minimum.reduce(self.data) max =", "as a regular card and use its methods. 
_card = Card().fromstring(self._cardimage[i*80:(i+1)*80]) if i", "_err def append(self, hdu): \"\"\"Append a new HDU to the HDUList.\"\"\" if isinstance(hdu,", "'value': _val = re.sub(\"''\", \"'\", _card.value).rstrip() # drop the ending \"&\" if _val[-1]", "Search, when asked for \"PyFITS\" \"\"\" import re, os, tempfile, exceptions import operator", "0) if naxis > 0: simple = self.header.get('SIMPLE','F') randomGroups = self.header.get('GROUPS','F') if simple", "_data._byteorder = 'big' # pass datLoc, for P format _data._heapoffset = hdu._theap +", "class Section: \"\"\"Image section.\"\"\" def __init__(self, hdu): self.hdu = hdu def __getitem__(self, key):", "the card image.\"\"\" head = self._getKeyString() if isinstance(self, _Hierarch): self.__dict__['key'] = head.strip() else:", "', ' _format = _format[:-2] + ']' _dims = \"%dR x %dC\" %", "ImageHDU, and BinTableHDU classes. \"\"\" def __init__(self, data=None, header=None): self._file, self._offset, self._datLoc =", "Cards are corrupted (unparsable), such as the 'BITPIX', 'NAXIS', or 'END' cards. A", "is in order, in case of required cards in wrong order. if isinstance(self,", "value, _comment) elif before != None or after != None: _card = Card(key,", "extkeys: header = extkeys['header'] del extkeys['header'] new_hdu=_makehdu(data, header) hdulist, _ext = _getext(filename, 'update',", "keyword name @param ext: The rest of the arguments are for extension specification.", "header will be modified to an image extension header and appended to the", "this is a single treaded application threadName = threading.currentThread() singleThread = (threading.activeCount() ==", "comment (string), it is never fixable if result is not None: _str =", "= [0] * len(self) for i in range(len(self)): (_format, _width) = _convert_ASCII_format(self.formats[i]) if", "value/comment into CONTINUE cards. This is a primitive implementation, it will put the", "1 self._ffo.getfile().flush() return self.writeComplete def size(self): \"\"\" Return the size (in bytes) of", "corresponding to TSCAL keyword bzero: bzero value, corresponding to TZERO keyword disp: display", "data from input, undefined cells will still be filled with zeros/blanks. tbtype: table", "return result class _Verify: \"\"\"Shared methods for verification.\"\"\" def run_option(self, option=\"warn\", err_text=\"\", fix_text=\"Fixed.\",", "comment is truncated.' output = output[:Card.length] self.__dict__['_cardimage'] = output def _checkText(self, val): \"\"\"Verify", "hdu._datLoc _data._file = hdu._file _tbsize = hdu.header['NAXIS1']*hdu.header['NAXIS2'] _data._gap = hdu._theap - _tbsize #", "_shift - hdu.data._gap _size = _size + _shift # pad the FITS data", "\"append HDU\", hdu.name, _extver hdu._new = 0 elif self.__file.mode == 'update': if not", "extra arguments implies the primary header >>> getdata('in.fits') By extension number: >>> getdata('in.fits',", "dtype, option) def _convert_format(input_format, reverse=0): \"\"\"Convert FITS format spec to record format spec.", "IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN", "for i in range(len(tmp)): tmp._arrays[i] = _data.field(i) return FITS_rec(_data) def new_table (input, header=None,", "the __setattr__ key case.\"\"\" if isinstance(val, str): val = val.strip() if len(val) <=", "+ _val elif name == 'comment': _comm = _card.comment if isinstance(_comm, str) and", "accomodate both string and non-string types # Boolean is also OK in this", "=\\s*(-?\\d+)') re_groups = re.compile(r'GROUPS =\\s*(T)') simple = re_simple.search(block[:80]) mo = re_bitpix.search(block) if mo", "array is the data part of a table HDU's data part. This is", "\"HDUList's 0th element is not a primary HDU.\" fix_text = 'Fixed by inserting", "length table.\"\"\" pass # TFORM regular expression _tformat_re = re.compile(r'(?P<repeat>^[0-9]*)(?P<dtype>[A-Za-z])(?P<option>[!-~]*)') # table definition", "self.header.get('PCOUNT', 0) size = abs(bitpix) * gcount * (pcount + size) / 8", "self.__dict__['_fix_text'] = 'Fixed card to be FITS standard.: %s' % self.key # verify", "PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY", "'I':'d', 'F':'f', 'E':'E', 'D':'E'} # calculate the starting point and width of each", "if not isinstance(coldata, chararray.CharArray): # only swap unswapped # deal with var length", "if attrib.strip().lower() in ['all', '']: list = _commonNames else: list = attrib.split(',') for", "if 'A' in _format: _pc = '%-' else: _pc = '%' _fmt =", "is extension if len(self) > 1: self.update_extend() def index_of(self, key): \"\"\"Get the index", "# and Corrupted cases del self['SIMPLE'] del self['XTENSION'] del self['BITPIX'] _naxis = self['NAXIS']", "'_valueModified' not in self.__dict__: self.__dict__['_valueModified'] = 0 elif name == 'comment': self.__dict__['comment'] =", "elements in the object array are consistent. if not isinstance(array, (num.NumArray, chararray.CharArray, Delayed)):", "table extension' ''' def format(self): strfmt, strlen = '', 0 for j in", "def verify(self): pass class _ValidHDU(_AllHDU, _Verify): \"\"\"Base class for all HDUs which are", "_scale: num.multiply(self._convert[indx], bscale, self._convert[indx]) if _zero: self._convert[indx] += bzero elif _bool: self._convert[indx] =", "If there is no equal sign, return the string before column 9. \"\"\"", "the HDU is accessed. \"\"\" def _getname(self): \"\"\"Get the extname and extver from", "new_name != col_name and new_name in self.names: raise ValueError, 'New name %s already", "# initialize/reset attributes to be used in \"update/append\" mode # CardList needs its", "= 'big' output = hdu.data else: output = hdu.data output.tofile(self.__file) _size = output.nelements()", "in range(naxis): size = size * self.header['NAXIS'+`j+1`] bitpix = self.header['BITPIX'] gcount = self.header.get('GCOUNT',", "for indx in range(self._nfields): if (self._convert[indx] is not None): if isinstance(self._coldefs._recformats[indx], _FormatX): _wrapx(self._convert[indx],", "# sure to preserve the one-to-one correspondence when updating the list(s). # Use", "original file\", oldName # reopen the renamed new file with \"update\" mode os.rename(_name,", "for \"PyFITS\" \"\"\" import re, os, tempfile, exceptions import operator import __builtin__ import", "dictionary of the above _rec2fits = {} for key in _fits2rec.keys(): _rec2fits[_fits2rec[key]]=key class", "stream. :Parameters: data : NumArray Data to stream to the file. 
:Returns: writeComplete", "else: _shape = list(self.data.getshape()) _format = `self.data.type()` _shape.reverse() _shape = tuple(_shape) _format =", "the newly renamed file\", oldName # reset the resize attributes after updating self._resize", "None: hdu.data._scale_back() if isinstance(hdu, _TableBaseHDU) and hdu.data is not None: # check TFIELDS", "_keyList.count('CONTINUE') _start = 0 for i in range(_max): _where = _keyList[_start:].index('CONTINUE') + _start", "card image before column 10 and return its location. It returns None if", "HDU class.\"\"\" _dict = {8:'B', 16:'I', 32:'J', 64:'K', -32:'E', -64:'D'} def __init__(self, data=None,", "at the end after the open in # Linux, but is at the", "self._file.seek(0, 2) return self._file.tell() - self._datLoc def _summary(self): return \"%-10s %-11s\" % (self.name,", "def add_blank(self, value='', before=None, after=None): \"\"\"Add a blank card. value: Text to be", "a message only if there is something if _dummy.strip(): if self.unit: result +=", "it can be anywhere. If the card does not exist, the new card", "an appropriate header will be created for the data object supplied. \"\"\" if", "object # for a valid value/comment string. # The valu group will return", "import numarray.memmap as Memmap from string import maketrans import copy import signal import", "input) self._max = 0 def __setitem__(self, key, value): \"\"\"To make sure the new", "in range(len(hdu.data.field(i))): coldata = hdu.data.field(i)[j] if len(coldata) > 0: coldata.tofile(self.__file) _shift = self.__file.tell()", "elif option == 'parse': # check the value only, no need to check", "_floatFormat(self.value.real) + ', ' + _floatFormat(self.value.imag) + ')' valStr = '%20s' % _tmp", "even if it is only ONE u1 (i.e. use tuple always) output_format =", "if _scale: dummy /= bscale elif self._coldefs._tbtype == 'TableHDU': dummy = self._convert[indx] else:", "is shortened if not isinstance(self, _Hierarch): self.__class__ = Card else: # does not", "is not None: continue def update_tbhdu(self): \"\"\"Update all table HDU's for scaled fields.\"\"\"", "self._file, self._offset, self._datLoc = None, None, None self.header = header self.data = data", "'COMMENT': output.append(_card.value) return output def _add_commentary(self, key, value, before=None, after=None): \"\"\"Add a commentary", "== 'BinTableHDU': if isinstance(hdu.data._parent.field(i), num.NumArray): # make the scaled data = 0, not", "it in _convert self._convert[indx] = num.array(dummy, type=num.Float64) if _scale: num.multiply(self._convert[indx], bscale, self._convert[indx]) if", "be appended at the end. key: keyword name value: keyword value (to be", "[] for i in range(self._nfields): _loc.append(_loc[-1]+self._parent.field(i).itemsize()) _width.append(_convert_ASCII_format(self._coldefs._Formats[i])[1]) self._heapsize = 0 for indx in", "self._parent.field(i)[:] = pardata[i] (_scale, _zero) = self._get_scale_factors(npars)[3:5] if _scale or _zero: self._convert[npars] =", "image from the attributes: key, value, and comment. Core code for ascardimage. \"\"\"", "(a numarray) or a record array (FITS_rec) which will contain both group parameter", "END card mo = end_RE.search(block) if mo is None: hdu._raw += block block", "from input (an HDU object).\"\"\" tmp = hdu.columns # get the right shape", "written (above) _where = self.__file.tell() if isinstance(hdu, BinTableHDU): self.__file.write(hdu.data._gap*'\\0') for i in range(hdu.data._nfields):", "= r'[+-]?' + _digits_FSC _numr_NFSC = r'[+-]? 
*' + _digits_NFSC # This regex", "+ os.path.basename(tempfile.mktemp()) if not os.path.exists(_name): return _name else: raise _name, \"exists\" class VerifyError(exceptions.Exception):", "specified if pos is not None: test_pos = '_index '+ pos if not", "start: column starting position (ASCII table only), corresponding to TBCOL keyword dim: column", "boolean needs to be scaled too if recfmt == _booltype: _out = num.zeros(array.shape,", "_trail = _loc[indx+1] - _width[indx] - self._coldefs.starts[indx] if _trail < 0: raise ValueError,", "return pairs def has_key(self, key): \"\"\"Check for existence of a keyword. Returns 1", "zeros/blanks. tbtype: table type to be created (BinTableHDU or TableHDU) \"\"\" # construct", "2, 1] nbytes = ((nx-1) / 8) + 1 for i in range(nbytes):", "# scale the array back to storage values if there is bscale/bzero if", "re.compile(r\"EXTVER\\s*=\\s*(\\d+)\") mo = re_extname.search(self._raw) if mo: name = mo.group(1).rstrip() else: name = ''", "= _valStr self._ascardimage() def _locateEq(self): \"\"\"Locate the equal sign in the card image", "3', _isInt+\" and val == 0\", 0, option, _err) _after = self.header['NAXIS'] +", "else: hdu.data._parent.field(i)[:n] = tmp._arrays[i][:n] if n < nrows: if tbtype == 'BinTableHDU': if", "a regular card and use its methods. _card = Card().fromstring(self._cardimage[i*80:(i+1)*80]) if i >", "fields to expand the original ._convert list # so the sliced FITS_rec will", "in range(len(self._parent)): dummy[i][:] = dummy[i]*bscale+bzero # Boolean (logical) column if self._coldefs._recformats[indx]._dtype is _booltype:", "Once sufficient data has been written to the stream to satisfy the amount", "_blockLen: raise IOError, 'Block length is not %d: %d' % (_blockLen, len(block)) elif", "continue (_str, _bool, _number, _scale, _zero, bscale, bzero) = self._get_scale_factors(indx) # add the", "current thread and determine if this is a single treaded application threadName =", "input elif _len > Card.length: strlen = _len % Card.length if strlen ==", "HDU, used when the file is first opened. This is to speed up", "from the card list. if keylist is None: self._keylist = [k.upper() for k", "position (index, keyword name will not be allowed) to insert. The new card", "dims = [1] npt = 1 for n in dims: npt *= n", "column array, both the data descriptors and the data. It returns the output", "repr (not str) in case of control character if Card._keywd_FSC_RE.match(val) is None: self.__dict__['_err_text']", "END card _nch80 = reduce(operator.add, map(Card._ncards, hdu.header.ascard)) _bytes = (_nch80+1) * Card.length _bytes", "def __repr__(self): return 'ColDefs'+ `tuple(self.data)` def __coerce__(self, other): pass # needed for __add__", "else: hdu=header._hdutype(data=data, header=header) return hdu def writeto(filename, data, header=None, **keys): \"\"\"Create a new", "self._cardimage output = '' for i in range(len(kard)/80): output += kard[i*80:(i+1)*80] + '\\n'", "\"\"\" self.__dict__['_err_text'] = '' self.__dict__['_fix_text'] = '' self.__dict__['_fixable'] = 1 if option ==", "_unique: _unique[_name].append(i) else: _unique[_name] = [i] self.__dict__[attr] = _unique try: return self.__dict__[attr] except", "isinstance(val, str): val = val.strip() if len(val) <= 8: val = val.upper() if", "indx is the index of the field. \"\"\" if self._coldefs._tbtype == 'BinTableHDU': _str", "0th HDU.' 
fix = \"self.insert(0, PrimaryHDU())\" _text = self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix) _err.append(_text)", "else: pcount = 0 mo = re_groups.search(block) if mo and simple: groups =", "keylist=_keyList)) hdu = header._hdutype(data=DELAYED, header=header) # pass these attributes hdu._file = self._file hdu._hdrLoc", "key[0] == ' ': useblanks = new_card._cardimage != ' '*80 self.ascard.append(new_card, useblanks=useblanks, bottom=1)", "the original file, and rename the tmp to the original file if self._resize:", "in tmp._arrays: if arr is not None: dim = arr._shape[0] else: dim =", "if str(self[-i]) != ' '*Card.length: self._blanks = i - 1 break def append(self,", "= 'big' if coldata2._type.bytes > 1: # do the _parent too, otherwise the", "10 and return its location. It returns None if equal sign is not", "if _lead < 0: raise ValueError, \"column `%s` starting point overlaps to the", "a list of Cards. cards: A list of Cards, default=[]. \"\"\" list.__init__(self, cards)", "raise ValueError, \"Data is inconsistent with the format `%s`.\" % format else: raise", "= self.starts[i] + _width - 1 self.spans[i] = _end - last_end last_end =", "value type' value = value.upper() if self.header.has_key('EXTNAME'): self.header['EXTNAME'] = value else: self.header.ascard.append(Card('EXTNAME', value,", "_err = _ErrList([]) try: self._check(option) except: pass _err.append(self.run_option(option, err_text=self._err_text, fix_text=self._fix_text, fixable=self._fixable)) return _err", "specified attribute object.\"\"\" if name == '_cardimage': self.ascardimage() elif name == 'key': self._extractKey()", "sign in the card image and return the string before the equal sign.", "extension', after='SIMPLE') del self.header['SIMPLE'] if not self.header.has_key('PCOUNT'): dim = self.header['NAXIS'] if dim ==", "return output[:-1] def _extractValueComment(self, name): \"\"\"Exatrct the keyword value or comment from the", "key, value, and comment, or from raw string. option: verification option, default=silentfix. \"\"\"", "by user. _isInt = \"isinstance(val, (int, long))\" # Functions def _padLength(stringLen): \"\"\"Bytes needed", "area for each # variable length column if isinstance(self._coldefs._recformats[indx], _FormatP): desc = self._parent.field(indx)", "for the data object supplied. \"\"\" if not os.path.exists(filename): writeto(filename, data, header) else:", "card as a regular card and use its methods. _card = Card().fromstring(self._cardimage[i*80:(i+1)*80]) if", "# open modes _memmap_mode = {'readonly':'r', 'copyonwrite':'c', 'update':'r+'} TRUE = True # deprecated", "verification result:' print x if _option == 'exception' and x: raise VerifyError def", "option in ['exception', 'warn']: self.__dict__['_err_text'] = 'Card image is not FITS standard (equal", "a Header object\" if data is DELAYED: # this should never happen if", "one must start with CONTINUE and the whole card must have string value.", "IndexError, 'Extension %s is out of bound or not found.' 
% key self._resize", "\"\"\"Update all table HDU's for scaled fields.\"\"\" for hdu in self: if 'data'", "# insert the require keywords PCOUNT and GCOUNT dim = `self.header['NAXIS']` if dim", "datasize) / 8 if simple and not groups: name = 'PRIMARY' else: name", "if len(blocks)%_blockLen != 0: raise IOError self.__file.flush() loc = self.__file.tell() self.__file.write(blocks) # flush,", "valStr = valStr.strip() # comment string if keyStr.strip() in Card._commentaryKeys: # do NOT", "is no match if (keyword in _keyNames): _list.append(i) for i in _list: del", "info(self): \"\"\"Summarize the info of the HDU's in this HDUList.\"\"\" if self.__file is", "for attr in ['formats', 'names']: setattr(_data, attr, getattr(tmp, attr)) for i in range(len(tmp)):", "= int(key) elif isinstance(key, str): # try to find exact match first try:", "= 0 for i in range(naxis): _naxis = self.hdu.header['NAXIS'+`naxis-i`] indx = _iswholeline(key[i], _naxis)", "raise ValueError, 'Value in a commentary card must be a string' else: self.__dict__['_cardimage']", "`hdu.data.field(i)._max` + ')' def flush(self, output_verify='exception', verbose=0): \"\"\"Force a write of the HDUList", "HDU's data part.\"\"\" self._file.seek(0, 2) return self._file.tell() - self._datLoc def _summary(self): return \"%-10s", "# also more efficient. else: return tmp def _get_scale_factors(self, indx): \"\"\" Get the", "self.tfile = tempfile.NamedTemporaryFile('rb+',-1,'.fits') self.name = self.tfile.name self.__file = self.tfile.file self.__file.write(zfile.read(namelist[0])) zfile.close() else: self.__file", "# Install new handler signal.signal(signal.SIGINT,New_SIGINT) if self.__file.mode not in ('append', 'update'): print \"flush", "range(len(_cols)): for cname in _commonNames: val = getattr(_cols, cname+'s')[i] if val != '':", "raise IOError, \"File '%s' already exist.\" % name # make sure the EXTEND", "is no equal sign, return the string before column 9. \"\"\" eqLoc =", "= _ExtensionHDU._verify(self, option=option) self.req_cards('PCOUNT', None, _isInt+\" and val == 0\", 0, option, _err)", "0, not the stored data hdu.data._parent.field(i)[n:] = -bzero/bscale else: hdu.data._parent.field(i)[n:] = '' hdu.update()", "of the data to scale. The option will be overwritten by any user", "PrimaryHDU(_ImageBaseHDU): \"\"\"FITS primary HDU class.\"\"\" def __init__(self, data=None, header=None): \"\"\"Construct a primary HDU.", "written to the stream. Notes ----- Only the amount of data specified in", "longer than strlen, then it will be split in the middle of the", "== 1: if n_ext2 == 0: ext = ext1[0] else: if isinstance(ext1[0], (int,", "'unit', new_unit) def info(self, attrib='all'): \"\"\"Get attribute(s) information of the column definition.\"\"\" \"\"\"The", "else: hdu.data._convert[i] = num.zeros(nrows, type=tmp._arrays[i].type()) if _scale or _zero: _arr = tmp._arrays[i].copy() else:", "form must reproduce the above copyright notice, this list of conditions and the", "_TempHDU): super(HDUList, self).__setitem__(key, _item.setupHDU()) return super(HDUList, self).__getitem__(key) def __getslice__(self, start, end): _hdus =", "key, backward=0): \"\"\"Get the index of a keyword in the CardList. 
key: the", "file (None) \"\"\" # mappings between FITS and numarray typecodes NumCode = {8:'UInt8',", "# if not resized, update in place else: for hdu in self: if", "recfmt == _booltype: _out = num.zeros(array.shape, type=recfmt) num.where(array==0, ord('F'), ord('T'), _out) array =", "self.header.get('BSCALE', 1) _bzero = self.header.get('BZERO', 0) _cols.append(Column(name='data', format = dat_format, bscale = _bscale,", "= \"''\" else: _expValStr = self.value.replace(\"'\",\"''\") valStr = \"'%-8s'\" % _expValStr valStr =", "name # make sure the EXTEND keyword is there if there is extension", "tmp.__dict__=self.__dict__.copy() return tmp class ColDefs(object): \"\"\"Column definitions class. It has attributes corresponding to", "not required to be NDArray if format is not None: # check format", "== 'TableHDU': _loc = [1] _width = [] for i in range(self._nfields): _loc.append(_loc[-1]+self._parent.field(i).itemsize())", "for name in ['TFORM', 'TSCAL', 'TZERO', 'TNULL', 'TTYPE', 'TUNIT']: for i in range(_tfields):", "= new_card._cardimage != ' '*80 self.ascard.append(new_card, useblanks=useblanks, bottom=1) else: try: _last = self.ascard.index_of(key,", "output_verify: output verification option, default = 'exception'. verbose: print out verbose messages? default", "= `hdu.header['extver']` except: _extver = '' # only append HDU's which are \"new\"", "# for integer key only delete once else: del self.ascard[key] self._mod = 1", "`self.header['NAXIS']` if dim == '0': dim = '' # set extension name if", "consistent data type to avoid misalignment. \"\"\" if isinstance(value, num.NumArray) and value.type() ==", "if _parse[0] in ['>=', '==']: insert_pos = eval(_parse[1]) # if the card does", "*\\d+)?' _numr_FSC = r'[+-]?' + _digits_FSC _numr_NFSC = r'[+-]? *' + _digits_NFSC #", "= 0 attr = [0] * len(self) for i in range(len(self)): (_format, _width)", "_step) class _KeyType: def __init__(self, npts, offset): self.npts = npts self.offset = offset", "(repeat, dtype, option) = _tformat_re.match(tform.strip()).groups() except: print 'Format \"%s\" is not recognized.' %", "% mode else: if os.path.splitext(self.name)[1] == '.gz': # Handle gzip files if mode", "\"CONTINUE '&' / \" + commfmt % i output = output + '%-80s'", "None: parbzeros = [None]*npars if bitpix is None: bitpix = _ImageBaseHDU.ImgCode[input.type()] fits_fmt =", "base HDU class.\"\"\" def __init__(self, data=None, header=None, name=None): \"\"\" header: header to be", "+ ' = ' + `value` + '\\n' return text[:-1] def copy(self): tmp", "of the input file and the base name of the mktemp() output. \"\"\"", "= self return hdu def writeHDU(self, hdu): \"\"\"Write *one* FITS HDU. Must seek", ">>> update(file, dat, hdr, 3) # update the 3rd extension >>> update(file, dat,", "scaling the first time and store it in _convert self._convert[indx] = num.array(dummy, type=num.Float64)", "by TSCAL and TZERO if _scale or _zero: for i in range(len(self._parent)): dummy[i][:]", "!= None or after != None: self.ascard._pos_insert(new_card, before=before, after=after) else: if key[0] ==", "out all attributes. It forgives plurals and blanks. If there are two or", "# Check for numbers with leading 0s. 
numr = Card._number_NFSC_RE.match(valu.group('numr')) _digt = numr.group('digt').translate(_fix_table2,", "i in range(len(self._parent)): if self._parent.field(indx)[i].strip() != nullval: dummy[i] = float(self._parent.field(indx)[i].replace('D', 'E')) else: dummy", "data to write to the new file @type header: L{Header} object or None", "1: try: hduList.append(ffo._readHDU()) except EOFError: break # check in the case there is", "IOError self.__file.flush() loc = self.__file.tell() self.__file.write(blocks) # flush, to make sure the content", "extkeys['header'] del extkeys['header'] else: _gethdr = False hdulist, _ext = _getext(filename, 'readonly', *ext,", "resulting in an incorrect # match. r'\\'(?P<strg>([ -~]+?|\\'\\'|)) *?\\'(?=$|/| )|' r'(?P<bool>[FT])|' r'(?P<numr>' +", "if bitpix is None: bitpix = _ImageBaseHDU.ImgCode[input.type()] fits_fmt = GroupsHDU._dict[bitpix] # -32 ->", "need to write it to a tmp file, # delete the original file,", "not None: hdu._file.seek(hdu._datLoc) self.__file.writeHDUdata(hdu) if (verbose): print \"update data in place: Name =\",", "indx = int(key) elif isinstance(key, str): # try to find exact match first", "header is resized.\" break # Data: if 'data' not in dir(hdu): continue if", "appropriate type is created for the supplied data. This argument is optional. @keyword", "SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL", "self._convert[indx] = num.equal(dummy, ord('T')) else: return dummy return self._convert[indx] def _scale_back(self): \"\"\"Update the", "rec.RecArray): self.header['NAXIS1'] = data._itemsize self.header['NAXIS2'] = data._shape[0] self.header['TFIELDS'] = data._nfields self.data = data", "comment attribute.\"\"\" if isinstance(val,str): self._checkText(val) else: if val is not None: raise ValueError,", "== ' ')[0] offset = 0 xoffset = 0 for i in range(nmax):", "name: the EXTNAME value \"\"\" _TableBaseHDU.__init__(self, data=data, header=header, name=name) self._xtn = 'TABLE' if", "is nothing to write.\" return self.update_tbhdu() if output_verify == 'warn': output_verify = 'exception'", "header and data will be streamed. header : Header The header object associated", "= () for j in range(self.header['NAXIS']): if isinstance(self, GroupsHDU) and j == 0:", "else: keyStr = '%-8s' % self.key else: keyStr = ' '*8 # value", "pass class CardList(list): \"\"\"FITS header card list class.\"\"\" def __init__(self, cards=[], keylist=None): \"\"\"Construct", "if attr == 'section': return Section(self) elif attr == 'data': self.__dict__[attr] = None", "from the header.\"\"\" re_extname = re.compile(r\"EXTNAME\\s*=\\s*'([ -&(-~]*)'\") re_extver = re.compile(r\"EXTVER\\s*=\\s*(\\d+)\") mo = re_extname.search(self._raw)", "\"\"\"Update the parent array, using the (latest) scaled array.\"\"\" _dict = {'A':'s', 'I':'d',", "of the arguments are used only for the first case. bitpix: data type", "_mm attribute.\"\"\" if attr == '_mm': self.__dict__[attr] = Memmap.open(self.name, mode=_memmap_mode[self.mode]) try: return self.__dict__[attr]", "because _File has ts own private attribute __file. \"\"\" if self.__file != None:", "for i in range(len(self._parent)): dummy[i] = num.equal(dummy[i], ord('T')) self._convert[indx] = dummy return self._convert[indx]", "must be integer.' 
% input if _stop < _start: raise IndexError, 'Illegal slice", "GroupData): self.header['BITPIX'] = _ImageBaseHDU.ImgCode[self.data.data.type()] axes = list(self.data.data.getshape())[1:] axes.reverse() axes = [0] + axes", "open(filename) f.info() f.close() UNDEFINED = Undefined() __credits__=\"\"\" Copyright (C) 2004 Association of Universities", "bzero else: if option == 'old': _scale = self._bscale _zero = self._bzero elif", "exists. If it does not, check to see # if we were provided", "xoffset + strlen # collect the pieces in a list tmp = input[xoffset:offset]", "strfmt = '>' + strfmt[:-1] return strfmt ''' def _verify(self, option='warn'): \"\"\"TableHDU verify", "result = self._check('parse') self._fixValue(result) if option == 'fix': self.__dict__['_fix_text'] = 'Fixed card to", "card for _card in self.header.ascard: _err.append(_card._verify(option)) return _err def req_cards(self, keywd, pos, test,", "card (or blank card), append at the end. \"\"\" new_card = Card(key, value)", "is None: err_text = \"'%s' card does not exist.\" % keywd fix_text =", "% (keywd, `fix_value`) fix = \"self.header.ascard.insert(%d, %s)\" % (insert_pos, _card) _err.append(self.run_option(option, err_text=err_text, fix_text=fix_text,", "val >= 0\", 0, option, _err) self.req_cards('GCOUNT', '== '+`naxis+4`, _isInt+\" and val ==", "'Card image is not FITS standard (equal sign not at column 8).' raise", "header. \"\"\" try: # have both SIMPLE and XTENSION to accomodate Extension #", "= CardList([ Card('XTENSION', '', ''), Card('BITPIX', 8, 'array data type'), Card('NAXIS', 2, 'number", "i.e. A7 in ASCII table is the same as 7A in # binary", ":Returns: size : integer The number of bytes of data required to fill", "blank cards are *directly* before the END card self._blanks = 0 self.count_blanks() def", "isinstance(hdu, _AllHDU): raise ValueError, \"%s is not an HDU.\" % hdu try: super(HDUList,", "(verbose): try: _extver = `hdu.header['extver']` except: _extver = '' # only append HDU's", "= name def _verify(self, option='warn'): \"\"\"ImageHDU verify method.\"\"\" _err = _ExtensionHDU._verify(self, option=option) self.req_cards('PCOUNT',", "conver it to a strings array array = chararray.array(array, itemsize=eval(recfmt[1:])) # then try", "the arguments are flexible: the 3rd argument can be the header associated with", "cards._keylist and cards['GROUPS'].value == True: self._hdutype = GroupsHDU elif cards[0].value == True: self._hdutype", "Call to this method will scale self.data and update the keywords of BSCALE", "fill=0, tbtype='BinTableHDU'): \"\"\"Create a new table from the input column definitions.\"\"\" \"\"\" input:", "range(tfields): self.req_cards('TFORM'+`i+1`, None, None, None, option, _err) return _err class TableHDU(_TableBaseHDU): \"\"\"FITS ASCII", "writeComplete : integer Flag that when true indicates that all of the required", "the urllibrary urllib._urlopener.tempcache = {} # Initialize tempcache with an empty # dictionary", "0: dim = '' else: dim = str(dim) self.header.update('PCOUNT', 0, 'number of parameters',", ": integer The number of bytes of data required to fill the stream", "backward=1) self.ascard.insert(_last+1, new_card) except: self.ascard.append(new_card, bottom=1) self._mod = 1 def copy(self): \"\"\"Make a", "or index. 
\"\"\" if before != None: loc = self.index_of(before) self.insert(loc, card, useblanks=useblanks)", "input.start if _start is None: _start = 0 elif isinstance(_start, (int, long)): _start", "match will find a single-quote after # the comment separator resulting in an", "= _VLF(map(_func, array)) except: try: # this handles ['abc'] and [['a','b','c']] # equally,", "indx): \"\"\" Get the scaling flags and factors for one field. indx is", "constructed from the card list. if keylist is None: self._keylist = [k.upper() for", "type dtype. The descriptor location will have a zero offset for all columns", "[] # can take one HDU, as well as a list of HDU's", "key case.\"\"\" if isinstance(val, str): val = val.strip() if len(val) <= 8: val", "array.\"\"\" indx = _get_index(self._coldefs.names, key) if (self._convert[indx] is None): # for X format", "only data is supplied, a minimal header is created @type filename: string @param", "default=0. If backward = 1, search from the end. \"\"\" if isinstance(key, (int,", "update()] \"\"\" self._add_commentary('history', value, before=before, after=after) def add_comment(self, value, before=None, after=None): \"\"\"Add a", "output[:-1] # ----------------------------- HDU classes ------------------------------------ class _AllHDU: \"\"\"Base class for all HDU", "_isInt+\" and val == 1\", 1, option, _err) return _err # 0.8.8 def", "group data.\"\"\" def __init__(self, input, row=0): rec.Record.__init__(self, input, row) def par(self, fieldName): \"\"\"Get", "not supported\" zfile = zipfile.ZipFile(self.name) namelist = zfile.namelist() if len(namelist) != 1: raise", "ascii_fmt = {'A':'A1', 'I':'I10', 'E':'E14.6', 'F':'F16.7', 'D':'D24.16'} self._tbtype = tbtype if isinstance(input, ColDefs):", "When a FITS file is opened, a HDUList object is returned. \"\"\" def", "_TableBaseHDU) and hdu.data is not None: # check TFIELDS and NAXIS2 hdu.header['TFIELDS'] =", "/ Card.length def _verify(self, option='warn'): \"\"\"Card class verification method.\"\"\" _err = _ErrList([]) try:", "if isinstance(hdu, _AllHDU): super(HDUList, self).append(hdu) hdu._new = 1 self._resize = 1 else: raise", "VerifyError(exceptions.Exception): \"\"\"Verify exception class.\"\"\" pass class _ErrList(list): \"\"\"Verification errors list class. It has", "name == 'comment': self._setcomment(val) else: raise AttributeError, name # When an attribute (value", "there is extra space after the last HDU or corrupted HDU except ValueError:", "name): \"\"\"Exatrct the keyword value or comment from the card image.\"\"\" longstring =", "update_header(self): \"\"\"Update the header keywords to agree with the data.\"\"\" old_naxis = self.header.get('NAXIS',", "is accessed. \"\"\" def _getname(self): \"\"\"Get the extname and extver from the header.\"\"\"", "len(blocks), Card.length): _card = Card('').fromstring(blocks[i:i+Card.length]) _key = _card.key if _key == 'END': break", "desc[:] = 0 # reset _npts = map(len, self._convert[indx]) desc[:len(_npts),0] = _npts _dtype", "is \"all\" which will print out all attributes. It forgives plurals and blanks.", "raise \"Writing to gzipped fits files is not supported\" zfile = gzip.GzipFile(self.name) self.tfile", "_numr_NFSC + ') *\\))' r')? 
*)' r'(?P<comm_field>' r'(?P<sepr>/ *)' r'(?P<comm>.*)' r')?$') # keys", "file, as the data will be scaled and is therefore not very usable", "# since a greedy match will find a single-quote after # the comment", "it should not end with an even number of # quotes to be", "'TableHDU': _format = self._coldefs._Formats[indx].strip() _lead = self._coldefs.starts[indx] - _loc[indx] if _lead < 0:", "mo is None: hdu._raw += block block = self.__file.read(_blockLen) if block == '':", "allows keyword name longer than 8 characters. \"\"\" def _verify(self, option='warn'): \"\"\"No verification", "= (max - min) / (2.**(8*_type.bytes) - 2) # Do the scaling if", "= float(self._parent.field(indx)[i].replace('D', 'E')) else: dummy = self._parent.field(indx) # further conversion for both ASCII", "name in ['TFORM', 'TSCAL', 'TZERO', 'TNULL', 'TTYPE', 'TUNIT']: for i in range(_tfields): del", "_key = self.index_of(key) # only set if the value is different from the", "isinstance(data, FITS_rec): hdu = BinTableHDU(data) else: raise KeyError, 'data must be numarray or", "if val[:8].upper() == 'HIERARCH': val = val[8:].strip() self.__class__ = _Hierarch else: raise ValueError,", "card image with the specified option. \"\"\" self.__dict__['_err_text'] = '' self.__dict__['_fix_text'] = ''", "_index, insert_pos) _err.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix)) # if value checking is specified if", "\"\"\"Locate the equal sign in the card image and return the string before", "If not we will need # to prepend a default PrimaryHDU to the", "\"\"\" Get the table data from input (an HDU object).\"\"\" tmp = hdu.columns", "dirName != '': dirName += '/' _name = dirName + os.path.basename(tempfile.mktemp()) if not", "!= None: loc = self.index_of(after) self.insert(loc+1, card, useblanks=useblanks) def insert(self, pos, card, useblanks=1):", "_verify (self, option='warn'): _text = '' _err = _ErrList([], unit='HDU') # the first", "\"\"\"Print the summary information on a FITS file. This includes the name, type,", "in ['warn', 'exception']: #raise VerifyError, _text #elif option == 'warn': pass # fix", "strlen, then it will be split in the middle of the word. \"\"\"", "_isInt, None, option, _err) return _err class BinTableHDU(_TableBaseHDU): \"\"\"Binary table HDU class.\"\"\" def", "input.shape[0] for i in range(npars): _cols.append(Column(name='c'+`i+1`, format = fits_fmt, bscale = parbscales[i], bzero", "hdu.header._mod = 0 hdu.header.ascard._mod = 0 except: pass return hdu class _ExtensionHDU(_ValidHDU): \"\"\"An", "0, and if there are blank cards directly before END, it will use", "default = 'exception'. verbose: print out verbose messages? default = 0. \"\"\" #", "output_format = option+_rec2fits[dtype] elif isinstance(dtype, _FormatX): print 'X format' elif dtype+option in _rec2fits.keys():", "= keys['header'] hdu=_makehdu(data, header) if not isinstance(hdu, PrimaryHDU): hdu = PrimaryHDU(data, header=header) clobber", "specified. default=None. after: name of the keyword, or index of the Card after", "many indices.' elif naxis > len(key): key = key + (slice(None),) * (naxis-len(key))", "use the minimum and maximum of the data to scale. The option will", "_Group(rec.Record): \"\"\"One group of the random group data.\"\"\" def __init__(self, input, row=0): rec.Record.__init__(self,", "is not touched yet, use header info. 
else: _shape = () for j", "case (CONTINUE card) else: # try not to use CONTINUE if the string", "data = None self.__dict__[attr] = data elif attr == 'columns': class_name = str(self.__class__)", "len(ext1) n_ext2 = len(ext2) keys = ext2.keys() # parse the extension spec if", "extension name if not name and self.header.has_key('EXTNAME'): name = self.header['EXTNAME'] self.name = name", "location and value of mandatory keywords. # Do the first card here, instead", "dims: npt *= n # Now, get the data (does not include bscale/bzero", "/ (2.**(8*_type.bytes) - 2) # Do the scaling if _zero != 0: self.data", "nx) output: output Uint8 array of shape (s, nbytes) nx: number of bits", "0 def __setitem__(self, key, value): \"\"\"To make sure the new item has consistent", "# This regex helps delete leading zeros from numbers, otherwise # Python might", "= r'[A-Z0-9_-]* *$' _keywd_FSC_RE = re.compile(_keywd_FSC) # A number sub-string, either an integer", "starting point and width of each field for ASCII table if self._coldefs._tbtype ==", "str): if self.value == '': valStr = \"''\" else: _expValStr = self.value.replace(\"'\",\"''\") valStr", "= num.array(_mmap, type=code, shape=dims) else: raw_data = num.fromfile(self._file, type=code, shape=dims) raw_data._byteorder = 'big'", "'warn']: self.__dict__['_err_text'] = 'Card image is not FITS standard (equal sign not at", "self.header if hdr[0] != self._xtn: hdr[0] = self._xtn hdr.ascard[0].comment = 'binary table extension'", "VerifyError def _pad(input): \"\"\"Pad balnk space to the input string to be multiple", "fixable=fixable)) else: # if the supposed location is specified if pos is not", "parent card if nc > 0: _longstring = _cardList[_where-1]._cardimage for c in _cardList[_where:_where+nc]:", "found def readall(self): \"\"\"Read data of all HDU's into memory.\"\"\" for i in", "indexed by number only.\"\"\" super(HDUList, self).__delslice__(i, j) self._resize = 1 def _verify (self,", "try: # legit RecArray format? recfmt = format format = _convert_format(recfmt, reverse=1) except:", "\"\"\"An extension HDU class. This class is the base class for the TableHDU,", "self.header['NAXIS'+`j+1`] axes.reverse() return tuple(axes) def _summary(self): \"\"\"Summarize the HDU: name, dimensions, and formats.\"\"\"", "dummy -= bzero if _scale: dummy /= bscale elif self._coldefs._tbtype == 'TableHDU': dummy", "in range(npars): _cols.append(Column(name='c'+`i+1`, format = fits_fmt, bscale = parbscales[i], bzero = parbzeros[i])) _cols.append(Column(name='data',", "the amount specified in the header, the stream is padded to fill a", "are not corrupted.\"\"\" # 0.6.5.5 def size(self): \"\"\"Size (in bytes) of the data", "is never fixable # always fix silently the case where \"=\" is before", "TypeError, \"input to ColDefs must be a table HDU or a list of", "This class is the base class for the TableHDU, ImageHDU, and BinTableHDU classes.", "(_scale or _zero): dummy = self._convert[indx].copy() if _zero: dummy -= bzero if _scale:", "file and memmap object, if any. output_verify: output verification option, default = 'exception'.", "Card.length if strlen == 0: return input else: return input + ' '", "oldkey = oldkey.strip().upper() newkey = newkey.strip().upper() if newkey == 'CONTINUE': raise ValueError, 'Can", "/ ' + self.comment else: commentStr = '' # equal sign string eqStr", "task may be difficult when the extension is a TableHDU containing ASCII data.", "last non-commentary card. 
If =1, the card will be appended after the last", "str): val = val.strip() if len(val) <= 8: val = val.upper() if val", "specific prior written permission. THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND", "self.__dict__[attr] = _unique try: return self.__dict__[attr] except KeyError: raise AttributeError(attr) def par(self, parName):", "== 'HIERARCH': self.__class__ = _Hierarch # for card image longer than 80, assume", "the 'data' or 'columns' attribute. The data of random group FITS file will", "match object # for a valid value/comment string. # The valu group will", "*' r'(?P<valu>' # The <strg> regex is not correct for all cases, but", "standard compliant (FSC) keyword. _keywd_FSC = r'[A-Z0-9_-]* *$' _keywd_FSC_RE = re.compile(_keywd_FSC) # A", "for i in range(min(self._blanks, how_many)): del self[-1] # it also delete the keylist", "these attributes hdu._file = self._file hdu._hdrLoc = self._hdrLoc hdu._datLoc = self._datLoc hdu._datSpan =", "= self.__file.name oldMemmap = self.__file.memmap _name = _tmpName(oldName) _hduList = open(_name, mode=\"append\") if", "_value = self.ascard[_index].value self.ascard[_index] = Card(newkey, _value, _comment) # self.ascard[_index].__dict__['key']=newkey # self.ascard[_index].ascardimage() #", "_cols._recformats[i] if isinstance(val, _FormatX): val = `val._nx` + 'X' elif isinstance(val, _FormatP): VLdata", "raise IOError, (errcode, errmsg, url) urllib._urlopener = ErrorURLopener() # Assign the locally subclassed", "=\\s*(-?\\d+)') re_naxis = re.compile(r'NAXIS =\\s*(\\d+)') re_naxisn = re.compile(r'NAXIS(\\d) =\\s*(\\d+)') re_gcount = re.compile(r'GCOUNT =\\s*(-?\\d+)')", "% `i` _text = self.run_option(option, err_text=err_text, fixable=0) _err.append(_text) else: _result = self[i]._verify(option) if", "if self.unit: result += _INDENT*tab+\"%s %s:\\n\" % (self.unit, element) result += _dummy element", "(the definition of) one Column.\"\"\" indx = _get_index(self.names, col_name) for cname in _commonNames:", "needs its own _mod attribute since it has methods to change # the", "is in primary HDU if there is extension if len(self) > 1: self.update_extend()", "since there is no unique mapping. If there is a field named \"XYZ\"", "def info(filename): \"\"\"Print the summary information on a FITS file. This includes the", "is None: _shape, _format = (), '' _nrows = 0 else: _nrows =", "= self._cardimage output = '' for i in range(len(kard)/80): output += kard[i*80:(i+1)*80] +", "= _data.field(i) return FITS_rec(_data) def new_table (input, header=None, nrows=0, fill=0, tbtype='BinTableHDU'): \"\"\"Create a", "also more efficient. 
else: return tmp def _get_scale_factors(self, indx): \"\"\" Get the scaling", "\"Writing to zipped fits files is not supported\" zfile = zipfile.ZipFile(self.name) namelist =", "lower case of DE for exponent, allows space between sign, # digits, exponent", "= _ImageBaseHDU.NumCode[_bitpix] self.hdu._file.seek(self.hdu._datLoc+offset*abs(_bitpix)/8) raw_data = num.fromfile(self.hdu._file, type=code, shape=dims) raw_data._byteorder = 'big' return raw_data", "# only swap unswapped # deal with var length table if isinstance(coldata, _VLF):", "name, extver def _getsize(self, block): \"\"\"Get the size from the first block of", "self.index_of(key) super(CardList, self).__delitem__(_key) del self._keylist[_key] # update the keylist self.count_blanks() self._mod = 1", "_loc = [1] _width = [] for i in range(self._nfields): _loc.append(_loc[-1]+self._parent.field(i).itemsize()) _width.append(_convert_ASCII_format(self._coldefs._Formats[i])[1]) self._heapsize", "= _File(oldName, mode=\"update\", memmap=oldMemmap) self.__file = ffo if (verbose): print \"reopen the newly", "except format can be optional. name: column name, corresponding to TTYPE keyword format:", "if both specified. default=None. after: name of the keyword, or index of the", "is not the length of a card image (80 columns). If the card", "the starting point and width of each field for ASCII table if self._coldefs._tbtype", "self._ffo.getfile().tell() self._size = self.size() if self._size != 0: self.writeComplete = 0 else: self.writeComplete", "output += kard[i*80:(i+1)*80] + '\\n' return output[:-1] def _extractValueComment(self, name): \"\"\"Exatrct the keyword", "string. It returns a match object # for a valid value/comment string. #", "_list = map(lambda x: x.lower().rstrip(), nameList) _count = operator.countOf(_list, _key) # occurrence of", "int if isinstance(pos, str): _parse = pos.split() if _parse[0] in ['>=', '==']: insert_pos", "== 'X': nbytes = ((repeat-1) / 8) + 1 # use an array,", "0: for arr in tmp._arrays: if arr is not None: dim = arr._shape[0]", "_dtype = num.getType(self._coldefs._recformats[indx]._dtype) desc[1:,1] = num.add.accumulate(desc[:-1,0])*_dtype.bytes desc[:,1][:] += self._heapsize self._heapsize += desc[:,0].sum()*_dtype.bytes #", "not None: gcount = int(mo.group(1)) else: gcount = 1 mo = re_pcount.search(block) if", "is too long (> 8), use HIERARCH.' % val else: raise ValueError, 'keyword", "class _FormatX(str): \"\"\"For X format in binary tables.\"\"\" pass class _FormatP(str): \"\"\"For P", "tmp[:loc+7] + `self._coldefs.names` + ')' return tmp # synchronize the sliced FITS_rec and", "the file. :Returns: None Notes ----- The file will be opened and the", "tbtype == 'BinTableHDU': if isinstance(hdu.data._parent.field(i), num.NumArray): # make the scaled data = 0,", "return input + ' ' * (Card.length-strlen) def _floatFormat(value): \"\"\"Format the floating number", "% _floatFormat(self.value) else: valStr = '%20s' % self._valuestring elif isinstance(self.value, complex): if self._valueModified:", "list[i]=list[i][:-1] for att in list: if att not in _commonNames: print \"'%s' is", "_TableBaseHDU.__init__(self, data=data, header=header, name=name) self._xtn = 'TABLE' if self.header[0].rstrip() != self._xtn: self.header[0] =", "be added as the first extension. 
If the file does already exist, but", "self._parent.field(indx).sub('E', 'D') # binary table else: if isinstance(self._parent.field(indx)._type, num.IntegralType): dummy = num.around(dummy) self._parent.field(indx)[:]", "self.name, fileheader = urllib.urlretrieve(name) else: self.name = name self.mode = mode self.memmap =", "HDUList. The key can be an integer, a string, or a tuple of", "else: raise IndexError, 'Illegal index %s' % indx def _normalize_slice(input, naxis): \"\"\"Set the", "to change # the content of header without being able to pass it", "new_card = Card(key, value) if before != None or after != None: self.ascard._pos_insert(new_card,", "if both specified. They can be either a keyword name or index. \"\"\"", "= `repeat` output_format = _repeat+_rec2fits[dtype+option] else: raise ValueError, \"Illegal format %s\" % fmt", "list structure constructed by error messages generated by verifications at different class levels.", "(_ExtensionHDU, GroupsHDU)): _list.append(Card('PCOUNT', 0, 'number of parameters')) _list.append(Card('GCOUNT', 1, 'number of groups')) if", "name == 'value': if valu is None: raise ValueError, \"Unparsable card, fix it", "are *directly* before the END card.\"\"\" for i in range(1, len(self)): if str(self[-i])", "TableHDU containing ASCII data. \"\"\" def __init__(self, data=None, header=None): self._file, self._offset, self._datLoc =", "self._resize = 1 else: raise \"HDUList can only append an HDU\" # make", "in self: if 'data' in dir(hdu): if isinstance(hdu, (GroupsHDU, _TableBaseHDU)) and hdu.data is", "string elif isinstance(self.value, str): if self.value == '': valStr = \"''\" else: _expValStr", "x-y, self.starts[1:], [1]+self.starts[1:-1]) dummy.append(self._width-self.starts[-1]+1) attr = map(lambda y: 'a'+`y`, dummy) elif name ==", "= re.compile( r'(?P<code>[ADEFI])(?P<width>\\d+)(?:\\.(?P<prec>\\d+))?') def __init__(self, data=None, header=None, name=None): \"\"\"data: data of the table", "= self.ascard.index_of(key, backward=1) self.ascard.insert(_last+1, new_card) except: self.ascard.append(new_card, bottom=1) self._mod = 1 def copy(self):", "group parameters'), Card('GCOUNT', 1, 'number of groups'), Card('TFIELDS', 0, 'number of table fields')", "mo and simple: groups = 1 else: groups = 0 mo = re_naxis.search(block)", "_width[indx]) else: self._parent.field(indx)[i] = x if 'D' in _format: self._parent.field(indx).sub('E', 'D') # binary", "Header object\" if data is DELAYED: # this should never happen if header", "hdu.data._parent.field(i)[n:] = '' hdu.update() return hdu class FITS_rec(rec.RecArray): \"\"\"FITS record array class. FITS", "# self.ascard[_index].__dict__['key']=newkey # self.ascard[_index].ascardimage() # self.ascard._keylist[_index] = newkey def get(self, key, default=None): \"\"\"Get", "update()] after: [same as in update()] \"\"\" self._add_commentary(' ', value, before=before, after=after) def", "GroupData(_get_tbdata(self)) data._coldefs = self.columns data.parnames = self.columns._pnames else: data = None self.__dict__[attr] =", "be used to populate the non-required keywords nrows: number of rows in the", "keyword. Returns 1 if found, otherwise, 0. key: keyword name. If given an", "if isinstance(tmp._recformats[i], _FormatX): if tmp._arrays[i][:n].shape[-1] == tmp._recformats[i]._nx: _wrapx(tmp._arrays[i][:n], hdu.data._parent.field(i)[:n], tmp._recformats[i]._nx) else: # from", "hdu class FITS_rec(rec.RecArray): \"\"\"FITS record array class. 
FITS record array is the data", "of random group FITS file will be like a binary table's data. \"\"\"", "elif attr == 'columns': _cols = [] _pnames = [] _pcount = self.header['PCOUNT']", "in range(len(self)): (_format, _width) = _convert_ASCII_format(self.formats[i]) if self.starts[i] is '': self.starts[i] = last_end", "cards in a later stage as CONTINUE cards may span across blocks. \"\"\"", "# locations of the blanks blank_loc = num.nonzero(arr == ' ')[0] offset =", "keyword name will not be allowed) to insert. The new card will be", "no exact name matched, it will try to match the name with case", "\"keyword 'END' not allowed\" self._checkKey(val) else: if val[:8].upper() == 'HIERARCH': val = val[8:].strip()", "keyword value or comment from the card image.\"\"\" longstring = '' ncards =", "(except blank card). If there is no card (or blank card), append at", "format in binary tables.\"\"\" pass class _FormatP(str): \"\"\"For P format in variable length", "= value else: indx = self._unique[parName] if len(indx) == 1: self.field(indx[0])[:] = value", "if coldata2._byteorder != 'big': coldata2.byteswap() coldata2._byteorder = 'big' # In case the FITS_rec", "EXTNAME and EXTVER, as separate arguments or as a tuple: >>> getdata('in.fits', 'sci',", "value = eval(cname) # get the argument's value keyword = _keyNames[_commonNames.index(cname)] if isinstance(value,", "names=tmp.names, shape=tmp._shape) if isinstance(hdu._ffile, _File): _data._byteorder = 'big' # pass datLoc, for P", "def format(self): strfmt, strlen = '', 0 for j in range(self.header['TFIELDS']): bcol =", "and value.itemsize() == 1: pass elif self._dtype == 'a': value = chararray.array(value, itemsize=1)", "pad the input stringLen to the next FITS block.\"\"\" return (_blockLen - stringLen%_blockLen)", "class FITS_rec(rec.RecArray): \"\"\"FITS record array class. FITS record array is the data part", "if None, an appropriate header will be created for the data object supplied.", "= 'PRIMARY' # insert the keywords EXTEND if header is None: dim =", "This is a layer over the RecArray, so we can deal with scaled", "'columns' attribute. The data of random group FITS file will be like a", "dummy /= bscale elif self._coldefs._tbtype == 'TableHDU': dummy = self._convert[indx] else: continue #", "hdu in self: if 'data' in dir(hdu): if isinstance(hdu, (GroupsHDU, _TableBaseHDU)) and hdu.data", "range(0, len(_blockLen), Card.length): _card = Card('').fromstring(block[i:i+Card.length]) _key = _card.key cardList.append(_card) keyList.append(_key) if _key", "used (as a template), default=None. If header=None, a minimal Header will be provided.", "_pcount = hdu.data._heapsize + hdu.data._gap if _pcount > 0: hdu.header['PCOUNT'] = _pcount #", "comm_len + 1 comm_list = self._words_group(comm, comm_len) for i in comm_list: commstr =", "# self.ascard[_index].ascardimage() # self.ascard._keylist[_index] = newkey def get(self, key, default=None): \"\"\"Get a keyword", "If no keyword is found, return the default value. key: keyword name or", "# self.ascard._keylist[_index] = newkey def get(self, key, default=None): \"\"\"Get a keyword value from", "== 'fix': self.__dict__['_fix_text'] = 'Fixed card to be FITS standard.: %s' % self.key", "for key in _fits2rec.keys(): _rec2fits[_fits2rec[key]]=key class _FormatX(str): \"\"\"For X format in binary tables.\"\"\"", "format column Boolean array into an UInt8 array. 
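As a rough illustration of that header interface, here is a minimal usage sketch built only from the calls the fragments name (open with mode='update', Header.update, add_history, add_comment, ascardlist, flush with output_verify). It assumes the module is importable as pyfits, that a file named example.fits exists, and Python 2 syntax to match the era of the source; the file name and keyword values are illustrative, not taken from the fragments.

# Minimal sketch (assumptions noted above): edit a few header cards in place.
import pyfits

hdulist = pyfits.open('example.fits', mode='update')
hdr = hdulist[0].header                           # primary header

hdr.update('OBSERVER', 'E. Hubble', 'who took the data')   # add or replace a card
hdr.add_history('gain correction applied')                  # commentary cards
hdr.add_comment('values below are in electrons')

print hdr['OBSERVER']                             # value lookup by keyword
print hdr.ascardlist()['OBSERVER']                # the underlying 80-character Card

hdulist.flush(output_verify='fix')                # write the changes back in place
hdulist.close()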
input: input Boolean array of", "for i in range(len(tmp)): if tmp._arrays[i] is None: size = 0 else: size", "open(name, mode=\"append\") for hdu in self: hduList.__file.writeHDU(hdu) hduList.close(output_verify=output_verify) def close(self, output_verify='exception', verbose=0): \"\"\"Close", "'(%d)' % VLdata._max else: val = _convert_format(val, reverse=1) #_update(keyword, val) _append(Card(keyword, val)) def", "as ndarray import numarray.strings as chararray import numarray.records as rec import numarray.objects as", "cannot be reset.' elif name == 'value': self._setvalue(val) elif name == 'comment': self._setcomment(val)", "__init__(self, input): \"\"\"Construct a FITS record array from a RecArray.\"\"\" # input should", "comment=''): \"\"\"Construct a card from key, value, and (optionally) comment. Any specifed arguments,", "= hdr['NAXIS1'] self._shape = hdr['NAXIS2'] # go through header keywords to pick out", "print \"KeyboardInterrupt ignored until flush is complete!\" keyboardInterruptSent = True # Install new", "before=before, after=after) def add_comment(self, value, before=None, after=None): \"\"\"Add a COMMENT card. value: Comment", "_limit = 10 try: eqLoc = self._cardimage[:_limit].index(\"=\") except: eqLoc = None return eqLoc", "(FITS_rec) which will contain both group parameter info and the data. The rest", "gcount * (pcount + size) / 8 return size def _verify(self, option='warn'): _err", "commentary card string which must contain printable ASCII characters. _ASCII_text = r'[ -~]*$'", "_list.extend(hcopy.ascardlist()) self.header = Header(_list) if (data is not DELAYED): if isinstance(data, rec.RecArray): self.header['NAXIS1']", "threading.currentThread() singleThread = (threading.activeCount() == 1) and (threadName.getName() == 'MainThread') if singleThread: #", "field may not be the column right after the last field if self._tbtype", "the parent array, using the (latest) scaled array.\"\"\" _dict = {'A':'s', 'I':'d', 'F':'f',", "indx _start = input.start if _start is None: _start = 0 elif isinstance(_start,", "(8, 16, 32, 64, -32, or -64) pardata: parameter data, as a list", "TableHDU, _TableBaseHDU, _TempHDU, _ValidHDU @group Table-related Classes: ColDefs, Column, FITS_rec, _FormatP, _FormatX, _VLF", "range(nbytes): _min = i*8 _max = min((i+1)*8, nx) for j in range(_min, _max):", "elif input.group('numr') != None: numr = Card._number_NFSC_RE.match(input.group('numr')) _valStr = numr.group('digt').translate(_fix_table, ' ') if", "_digits_FSC+')') _number_NFSC_RE = re.compile(r'(?P<sign>[+-])? *0*(?P<digt>' + _digits_NFSC + ')') # FSC commentary card", "attr == '_unique': _unique = {} for i in range(len(self.parnames)): _name = self.parnames[i]", "_makehdu(data, header): if header is None: if isinstance(data, num.NumArray): hdu = ImageHDU(data) elif", "if not _scale: bscale = 1 if not _zero: bzero = 0 return", "the class constructor may be written to the stream. 
If the provided data", "corresponding to TNULL keyword bscale: bscale value, corresponding to TSCAL keyword bzero: bzero", "= None return result else: # verify the equal sign position if self.key", "out column definition keywords dict = [{} for i in range(_nfields)] # definition", "0 self.__file.seek(hdu._datSpan, 1) if self.__file.tell() > self._size: print 'Warning: File size is smaller", "if simple and not groups: name = 'PRIMARY' else: name = '' return", "the CardList.\"\"\" cards = [None]*len(self) for i in range(len(self)): cards[i]=Card('').fromstring(str(self[i])) return CardList(cards) def", "in keys: header = keys['header'] hdu=_makehdu(data, header) if not isinstance(hdu, PrimaryHDU): hdu =", "_card = Card(key, value, comment) self.ascard._pos_insert(_card, before=before, after=after) else: self.ascard.append(Card(key, value, comment)) self._mod", "must be before int checking since bool is also int elif isinstance(self.value ,", "in one block and the comment string in another. Also, it does not", "fp, errcode, errmsg, headers): raise IOError, (errcode, errmsg, url) urllib._urlopener = ErrorURLopener() #", "value, comment) self.ascard._pos_insert(_card, before=before, after=after) else: self.ascard.append(Card(key, value, comment)) self._mod = 1 def", "_summary(self): return \"%-10s %-11s\" % (self.name, \"CorruptedHDU\") def verify(self): pass class _ValidHDU(_AllHDU, _Verify):", "else: # does not support CONTINUE for HIERARCH if len(keyStr + eqStr +", "self.name = name if self.header['NAXIS'] <= 0: self.header['NAXIS'] = 1 self.header.update('NAXIS1', 0, after='NAXIS')", "allow IOError exceptions to be raised when a file specified by a URL", "the last occurrence of cards of the same name (except blank card). If", "_digits_FSC _numr_NFSC = r'[+-]? *' + _digits_NFSC # This regex helps delete leading", "None, _isInt+\" and val == 0\", 0, option, _err) return _err class GroupsHDU(PrimaryHDU):", "keyword argument(s): %s' % ext2 elif n_ext1 == 0: if n_ext2 == 0:", "value checking is specified if test: val = self.header[keywd] if not eval(test): err_text", "header keywords to reflect recent changes of columns.\"\"\" _update = self.header.update _append =", "@param ext: The rest of the arguments are for extension specification. They are", "\"after\" is specified, it will be appended at the end. key: keyword name", "DE for exponent, allows space between sign, # digits, exponent sign, and exponents", "before != None: loc = self.index_of(before) self.insert(loc, card, useblanks=useblanks) elif after != None:", "after='SIMPLE') del self.header['SIMPLE'] if not self.header.has_key('PCOUNT'): dim = self.header['NAXIS'] if dim == 0:", "except: _extver = '' # only append HDU's which are \"new\" if hdu._new:", "the new FITS file to write to @type data: array, record array, or", "attr in ['formats', 'names']: setattr(_data, attr, getattr(tmp, attr)) for i in range(len(tmp)): tmp._arrays[i]", "will not increase (default). When useblanks == 0, the card will be appended", "or after != None: _card = Card(key, value, comment) self.ascard._pos_insert(_card, before=before, after=after) else:", "It forgives plurals and blanks. If there are two or more attribute names,", "HDU.' if _data is None: raise IndexError, 'No data in this HDU.' 
if", "the above copyright notice, this list of conditions and the following disclaimer in", "self.hdu.header['BITPIX'] code = _ImageBaseHDU.NumCode[_bitpix] self.hdu._file.seek(self.hdu._datLoc+offset*abs(_bitpix)/8) raw_data = num.fromfile(self.hdu._file, type=code, shape=dims) raw_data._byteorder = 'big'", "re_gcount.search(block) if mo is not None: gcount = int(mo.group(1)) else: gcount = 1", "= output.nelements() * output._itemsize # write out the heap of variable length array", "str): _key = key.strip().upper() if _key[:8] == 'HIERARCH': _key = _key[8:].strip() _keylist =", "dtype == 'F': output_format = 'f8' else: raise ValueError, \"Illegal format %s\" %", "os, tempfile, exceptions import operator import __builtin__ import urllib import tempfile import gzip", "== 1: indx = _list.index(_key) elif _count == 0: raise NameError, \"Key '%s'", "'CONTINUE ': break # combine contiguous CONTINUE cards with its parent card if", "result = self.field(indx[0]).astype('f8') for i in indx[1:]: result += self.field(i) return result def", "accessed\"\"\" def http_error_default(self, url, fp, errcode, errmsg, headers): raise IOError, (errcode, errmsg, url)", "_start = _where # if not the real CONTINUE card, skip to the", "self.__file.tell() self.__file.write(blocks) # flush, to make sure the content is written self.__file.flush() return", "satisfy the amount specified in the header, the stream is padded to fill", "self.__file.seek(0, 2) self._size = self.__file.tell() self.__file.seek(0) def __getattr__(self, attr): \"\"\"Get the _mm attribute.\"\"\"", "self._ffo = _File(name, 'append') self._ffo.getfile().seek(0,2) self._hdrLoc = self._ffo.writeHDUheader(self) self._datLoc = self._ffo.getfile().tell() self._size =", "= \"pass\", fixable=1): \"\"\"Execute the verification with selected option.\"\"\" _text = err_text if", "# ASCII table, convert numbers to strings if self._coldefs._tbtype == 'TableHDU': _format =", "= ['', 'COMMENT', 'HISTORY'] def __init__(self, key='', value='', comment=''): \"\"\"Construct a card from", "self._setkey(key) self._setvalue(value) self._setcomment(comment) # for commentary cards, value can only be strings and", "# definition dictionaries for each field for _card in hdr.ascardlist(): _key = _tdef_re.match(_card.key)", "= key[:key.find('(')+1] + `hdu.data.field(i)._max` + ')' def flush(self, output_verify='exception', verbose=0): \"\"\"Force a write", "raise ValueError, \"column `%s` ending point overlaps to the next column\" % indx+1", "mode=_memmap_mode[self.mode]) try: return self.__dict__[attr] except KeyError: raise AttributeError(attr) def getfile(self): return self.__file def", "_err) self.req_cards('NAXIS', '== 2', _isInt+\" and val >= 0 and val <= 999\",", "# get extname and extver if hdu.name == '': hdu.name, hdu._extver = hdu._getname()", "is opened, a HDUList object is returned. 
\"\"\" def __init__(self, hdus=[], file=None): \"\"\"Construct", "in ['SIMPLE ', 'XTENSION']): raise IOError, 'Block does not begin with SIMPLE or", "if i > 0 and _card.key != 'CONTINUE': raise ValueError, 'Long card image", "one group parameter have the same name else: result = self.field(indx[0]).astype('f8') for i", "5th extension \"\"\" # parse the arguments header = None if len(ext) >", "of %s' % (nfound, `key`) else: return found def readall(self): \"\"\"Read data of", "parameter values.\"\"\" if isinstance(parName, (int, long)): self.field(parName)[:] = value else: indx = self._unique[parName]", "to gzipped fits files is not supported\" zfile = gzip.GzipFile(self.name) self.tfile = tempfile.NamedTemporaryFile('rb+',-1,'.fits')", "hdu._file = self.__file hdu._hdrLoc = _hdrLoc # beginning of the header area hdu._datLoc", "= None, None, None self.header = header self.data = data self.name = None", "standard.: %s' % self.key else: self.__dict__['_err_text'] = 'Card image is not FITS standard", "]) if isinstance(self, GroupsHDU): _list.append(Card('GROUPS', True, 'has groups')) if isinstance(self, (_ExtensionHDU, GroupsHDU)): _list.append(Card('PCOUNT',", "\"\"\"Locate the equal sign in the card image and return the string after", "= 0 else: _nrows = len(self.data) _ncols = len(self.columns.formats) _format = self.columns.formats #", "del self['XTENSION'] del self['BITPIX'] _naxis = self['NAXIS'] if issubclass(self._hdutype, _TableBaseHDU): _tfields = self['TFIELDS']", "inserting a new '%s' card.\" % keywd if fixable: # use repr to", "# Parse the TFORM value into data type and width. try: (dtype, width)", "raise \"Element %d in the HDUList input is not an HDU.\" % hdus.index(hdu)", "clobber=clobber) def append(filename, data, header=None): \"\"\"Append the header/data to FITS file if filename", "self.__file.mode return self.update_tbhdu() self.verify(option=output_verify) if self.__file.mode == 'append': for hdu in self: if", "range(ncards): # take each 80-char card as a regular card and use its", "never happen if header is None: raise ValueError, \"No header to setup HDU.\"", "'D':'f8'} _re = re.compile(r'(?P<dtype>[AIFED])(?P<width>[0-9]*)') # Parse the TFORM value into data type and", "_ExtensionHDU._verify(self, option=option) self.req_cards('NAXIS', None, 'val == 2', 2, option, _err) self.req_cards('BITPIX', None, 'val", "Assign the locally subclassed opener # class to the urllibrary urllib._urlopener.tempcache = {}", "except: self.ascard.append(new_card, bottom=1) self._mod = 1 def copy(self): \"\"\"Make a copy of the", "------------------------------------ class _AllHDU: \"\"\"Base class for all HDU (header data unit) classes.\"\"\" pass", "class _SinglePoint(_KeyType): pass class _OnePointAxis(_KeyType): pass class _LineSlice(_KeyType): pass class _SteppedSlice(_KeyType): pass class", "extname and extver from the header.\"\"\" re_extname = re.compile(r\"EXTNAME\\s*=\\s*'([ -&(-~]*)'\") re_extver = re.compile(r\"EXTVER\\s*=\\s*(\\d+)\")", "self.formats] elif self._tbtype == 'TableHDU': self._Formats = self.formats if len(self) == 1: dummy", "16, 32, 64, -32, or -64) pardata: parameter data, as a list of", "parameter value.\"\"\" self.array[self.row:self.row+1].setpar(fieldName, value) class _TableBaseHDU(_ExtensionHDU): \"\"\"FITS table extension base HDU class.\"\"\" def", "name = '' return size, name def setupHDU(self): \"\"\"Read one FITS HDU, data", "out verbose messages? default = 0. This simply calls the close method of", "word. 
\"\"\" list = [] _nblanks = input.count(' ') nmax = max(_nblanks, len(input)/strlen+1)", "_err.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix, fixable=fixable)) else: # if the supposed location is specified", "in place else: for hdu in self: if (verbose): try: _extver = `hdu.header['extver']`", "else: _val = UNDEFINED self.__dict__['value'] = _val if '_valuestring' not in self.__dict__: self.__dict__['_valuestring']", "EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR", "= 'Card image is not FITS standard (equal sign not at column 8).'", "zipped fits files is not supported\" zfile = zipfile.ZipFile(self.name) namelist = zfile.namelist() if", "dim == '0': dim = '' self.header.update('EXTEND', True, after='NAXIS'+dim) class ImageHDU(_ExtensionHDU, _ImageBaseHDU): \"\"\"FITS", "getfile(self): return self.__file def _readheader(self, cardList, keyList, blocks): \"\"\"Read blocks of header, and", "long)): if _step <= 0: raise IndexError, 'Illegal slice %s, step must be", "scaled column may have wrong byteorder if coldata2._byteorder != 'big': coldata2.byteswap() coldata2._byteorder =", "\"Invalid input for HDUList.\" for hdu in hdus: if not isinstance(hdu, _AllHDU): raise", "yet if isinstance(hdu, GroupsHDU): tmp._recformats[-1] = `hdu._dimShape()[:-1]` + tmp._dat_format if hdu._ffile.memmap: _mmap =", "BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT", "_key[:8] == 'HIERARCH': _key = _key[8:].strip() _keylist = self._keylist if backward: _keylist =", "isinstance(_type, num.FloatingType): _scale = 1 _zero = 0 else: # flat the shape", "to corrupt the original array if bzero not in ['', None, 0] or", "use its methods. _card = Card().fromstring(self._cardimage[i*80:(i+1)*80]) if i > 0 and _card.key !=", "update modes only). output_verify: output verification option, default = 'exception'. verbose: print out", "# data reading will be delayed for col in range(_nfields): dict[col]['array'] = Delayed(input,", "verbose=verbose) self.__file.close() # close the memmap object, it is designed to use an", "the arguments are for extension specification. See L{getdata} for explanations/examples. 
@return: keyword value", "# end of a string rather well, but will accept # strings with", "shape=nrows)) hdu.data._coldefs = hdu.columns # populate data to the new table for i", "valu.group('valu') if '_valueModified' not in self.__dict__: self.__dict__['_valueModified'] = 0 elif name == 'comment':", "not be used to endorse or promote products derived from this software without", "# a number/string for cname in _commonNames: value = eval(cname) # get the", "__init__(self, data=None, header=None): self._file, self._offset, self._datLoc = None, None, None self.header = header", "in this constructor _card = \"Card('%s', %s)\" % (keywd, `fix_value`) fix = \"self.header.ascard.insert(%d,", "1 and 'extver' in keys: ext = ext1[0], ext2['extver'] raise KeyError, 'Redundant/conflicting keyword", "TDISP keyword start: column starting position (ASCII table only), corresponding to TBCOL keyword", "except: pass class CardList(list): \"\"\"FITS header card list class.\"\"\" def __init__(self, cards=[], keylist=None):", "try: # then try to conver it to a strings array array =", "self.__file.memmap _name = _tmpName(oldName) _hduList = open(_name, mode=\"append\") if (verbose): print \"open a", "which is longer than strlen, then it will be split in the middle", "pass # fix the value elif option == 'unfixable': _text = \"Unfixable error:", "0 # reset _npts = map(len, self._convert[indx]) desc[:len(_npts),0] = _npts _dtype = num.getType(self._coldefs._recformats[indx]._dtype)", "newkey: new keyword, must be a string. force: if new key name already", "blank cards, so the total space will not increase (default). When useblanks ==", "self.tfile.name self.__file = self.tfile.file self.__file.write(zfile.read()) zfile.close() elif os.path.splitext(self.name)[1] == '.zip': # Handle zip", "if isinstance(key, (int, long)): indx = int(key) elif isinstance(key, str): # try to", "header=None, nrows=0, fill=0, tbtype='BinTableHDU'): \"\"\"Create a new table from the input column definitions.\"\"\"", "parsable (i.e. everything else) result = None return result else: # verify the", "for all docstrings in this module. @group Header-related Classes: Card, CardList, _Card_with_continue, Header,", "from the CardList. If no keyword is found, return the default value. key:", "format column array, both the data descriptors and the data. It returns the", "error for BZERO = +32768 self.header.update('BZERO', _zero) else: del self.header['BZERO'] if _scale !=", "2, 'number of array dimensions'), Card('NAXIS1', 0, 'length of dimension 1'), Card('NAXIS2', 0,", "FSC commentary card string which must contain printable ASCII characters. _ASCII_text = r'[", "attribute. The data of random group FITS file will be like a binary", "if n > 0: if isinstance(tmp._recformats[i], _FormatX): if tmp._arrays[i][:n].shape[-1] == tmp._recformats[i]._nx: _wrapx(tmp._arrays[i][:n], hdu.data._parent.field(i)[:n],", "self.header = header self.data = data self._xtn = ' ' def __setattr__(self, attr,", "!= 'big': # # byteswap little endian arrays before writing # output =", "header is None: dim = `self.header['NAXIS']` if dim == '0': dim = ''", "without being able to pass it to the header object hduList._resize = 0", "option='ignore'): \"\"\"Verify the card image with the specified option. \"\"\" self.__dict__['_err_text'] = ''", "only). output_verify: output verification option, default = 'exception'. 
verbose: print out verbose messages?", "in range(self.header['PCOUNT']): _bscale = self.header.get('PSCAL'+`i+1`, 1) _bzero = self.header.get('PZERO'+`i+1`, 0) _pnames.append(self.header['PTYPE'+`i+1`].lower()) _cols.append(Column(name='c'+`i+1`, format", "r.__class__ = rec.RecArray r._coldefs = self._coldefs f = FITS_rec(r) f._convert = copy.deepcopy(self._convert) return", "self.header['NAXIS1']*self.header['NAXIS2']) elif attr == '_pcount': self.__dict__[attr] = self.header.get('PCOUNT', 0) try: return self.__dict__[attr] except", "== 0: if n_ext2 == 0: ext = _Zero() elif 'ext' in keys:", "length tables _tbsize = hdu.header['NAXIS1']*hdu.header['NAXIS2'] _heapstart = hdu.header.get('THEAP', _tbsize) hdu.data._gap = _heapstart -", "each other.' elif (force == 0) and (newkey in self.ascard._keylist): raise ValueError, 'Intended", "fixable if result is not None: _str = result.group('comm') if _str is not", "self.data is not None: _data = self.data.copy() else: _data = None return self.__class__(data=_data,", "size is not multiple of %d: %d' % (_blockLen, len(blocks)) elif (blocks[:8] not", "specific to a certain kind of header. Strip cards like SIMPLE, BITPIX, etc.", "rec.RecArray.__repr__(self) loc = tmp.rfind('\\nnames=') tmp = tmp[:loc+7] + `self._coldefs.names` + ')' return tmp", "_cols = [] if pardata is None: npars = 0 else: npars =", "is not None: self._checkText(_str) def fromstring(self, input): \"\"\"Construct a Card object from a", "the same name, the # value must be a list (or tuple) containing", "exponent sign, and exponents _digits_FSC = r'(\\.\\d+|\\d+(\\.\\d*)?)([DE][+-]?\\d+)?' _digits_NFSC = r'(\\.\\d+|\\d+(\\.\\d*)?) *([deDE] *[+-]? *\\d+)?'", "reading in the FITS file), # it will be constructed from the card", "column dimension corresponding to TDIM keyword \"\"\" # any of the input argument", "more of the attributes listed in _commonNames. The default is \"all\" which will", "comm == '': nlines = len(comm) / comm_len + 1 comm_list = self._words_group(comm,", "THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED", "pass _err.append(self.run_option(option, err_text=self._err_text, fix_text=self._fix_text, fixable=self._fixable)) return _err class _Hierarch(Card): \"\"\"Cards begins with HIERARCH", "__getslice__(self, start, end): _hdus = super(HDUList, self).__getslice__(start,end) result = HDUList(_hdus) return result def", "with the name 'key'.\"\"\" # delete ALL cards with the same keyword name", "= 0 hdu.header._mod = 0 hdu.header.ascard._mod = 0 except: pass return hdu class", "extra space after the last HDU or corrupted HDU except ValueError: print 'Warning:", "cards, so the total space will not increase (default). When useblanks == 0,", "class. This is the top-level FITS object. When a FITS file is opened,", "and formats.\"\"\" class_name = str(self.__class__) type = class_name[class_name.rfind('.')+1:] # if data is touched,", "header from an extension of a FITS file. 
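A small sketch of that table machinery, again hedged: it uses only the constructors the fragments mention (Column, ColDefs, new_table, PrimaryHDU, HDUList) and assumes a pyfits build new enough to accept numpy arrays (the excerpted source itself is numarray-based); the column names, formats and output file name are invented for the example.

# Minimal sketch (assumptions noted above): build and write a small binary table.
import numpy
import pyfits

targets = numpy.array(['NGC 1', 'NGC 2', 'NGC 3'])
counts  = numpy.array([312, 334, 308], dtype=numpy.int32)

c1 = pyfits.Column(name='target', format='20A', array=targets)
c2 = pyfits.Column(name='counts', format='J', unit='DN', array=counts)

tbhdu = pyfits.new_table(pyfits.ColDefs([c1, c2]))   # a BinTableHDU by default

print tbhdu.data.field('counts')        # FITS_rec field access by name
print tbhdu.header.ascardlist()         # TTYPEn/TFORMn cards generated from the columns

pyfits.HDUList([pyfits.PrimaryHDU(), tbhdu]).writeto('table.fits', clobber=True)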
@param filename: input FITS file", "self._resize: # determine if any of the HDU is resized for hdu in", "self['BSCALE'] del self['BZERO'] if issubclass(self._hdutype, _TableBaseHDU): del self['TFIELDS'] for name in ['TFORM', 'TSCAL',", "flexible and are best illustrated by examples: No extra arguments implies the primary", "(self.name, \"CorruptedHDU\") def verify(self): pass class _ValidHDU(_AllHDU, _Verify): \"\"\"Base class for all HDUs", "filename: string @param filename: input FITS file name \"\"\" f = open(filename) f.info()", "cards, no need to parse further if self.key in Card._commentaryKeys: self.__dict__['value'] = self._cardimage[8:].rstrip()", "be provided. name: The name of the HDU, will be the value of", "self[i].data is not None: continue def update_tbhdu(self): \"\"\"Update all table HDU's for scaled", "if isinstance(hdu, GroupsHDU): tmp._recformats[-1] = `hdu._dimShape()[:-1]` + tmp._dat_format if hdu._ffile.memmap: _mmap = hdu._ffile._mm[hdu._datLoc:hdu._datLoc+hdu._datSpan]", "last_end + 1 _end = self.starts[i] + _width - 1 self.spans[i] = _end", "column of # a field may not be the column right after the", "cname = _commonNames[_keyNames.index(keyword)] dict[col-1][cname] = _card.value # data reading will be delayed for", "hdus: Input, can be a list of HDU's or a single HDU. Default", "shape (s, nx) nx: number of bits \"\"\" pow2 = [128, 64, 32,", "# update the 'sci' extension >>> update(file, dat, 3) # update the 3rd", "_zero: _arr += bzero hdu.data._convert[i][:n] = _arr else: hdu.data._parent.field(i)[:n] = tmp._arrays[i][:n] if n", "['fix', 'silentfix', 'ignore', 'warn', 'exception']: raise ValueError, 'Option %s not recognized.' % option", "control character if Card._keywd_FSC_RE.match(val) is None: self.__dict__['_err_text'] = 'Illegal keyword name %s' %", "image # extension header. # self.header.update('XTENSION','IMAGE','Image extension', after='SIMPLE') del self.header['SIMPLE'] if not self.header.has_key('PCOUNT'):", "'update', *ext, **extkeys) hdulist[_ext] = new_hdu hdulist.close() def info(filename): \"\"\"Print the summary information", "the Header object, using the cards. try: header = Header(CardList(_cardList, keylist=_keyList)) hdu =", "new FITS file to write to @type data: array, record array, or groups", "If header=None, a minimal Header will be provided. name: The name of the", "appended to the end of the file. If the file does not already", "_idigt)*1j else: _val = UNDEFINED self.__dict__['value'] = _val if '_valuestring' not in self.__dict__:", "% fix_value if fixable: fix = \"self.header['%s'] = %s\" % (keywd, `fix_value`) _err.append(self.run_option(option,", "'keyword name %s is too long (> 8), use HIERARCH.' % val else:", "None, add to the last occurrence of cards of the same name (except", "case-insentively, _key = key.lower().rstrip() _list = map(lambda x: x.lower().rstrip(), nameList) _count = operator.countOf(_list,", "== 1: self.field(indx[0])[:] = value # if more than one group parameter have", "name, the # value must be a list (or tuple) containing arrays else:", "__init__(self, data=None, header=None, name=None): \"\"\" header: header to be used data: data to", "header=None): self._file, self._offset, self._datLoc = None, None, None self.header = header self.data =", "filename: input FITS file name @param ext: The rest of the arguments are", "instead of requiring data to all be written at once. The following psudo", "from the end so as not to confuse the indexing. 
_list = []", "view of a Column's data as an array.\"\"\" indx = _get_index(self._coldefs.names, key) if", "or None @param header: the header associated with 'data', if None, a header", "fix_value as its value when created. Also check the card's value by using", "self.data is None: _shape, _format = (), '' _nrows = 0 else: _nrows", "return output def _add_commentary(self, key, value, before=None, after=None): \"\"\"Add a commentary card. If", "2007-07-06 13:11:54Z jtaylor2 $ \"\"\" A module for reading and writing FITS files", "r'[+-]?' + _digits_FSC _numr_NFSC = r'[+-]? *' + _digits_NFSC # This regex helps", "'Redundant/conflicting keyword argument(s): %s' % ext2 elif n_ext1 == 2: if n_ext2 ==", "_value, _comment) # self.ascard[_index].__dict__['key']=newkey # self.ascard[_index].ascardimage() # self.ascard._keylist[_index] = newkey def get(self, key,", "output verification option, default = 'exception'. verbose: print out verbose messages? default =", "raise KeyError, 'Redundant/conflicting keyword argument(s): %s' % ext2 elif n_ext1 == 2: if", "extver=2) # equivalent >>> getdata('in.fits', ('sci', 2)) # equivalent Ambiguous or conflicting specifications", "+ ')|' r'(?P<cplx>\\( *' r'(?P<real>' + _numr_FSC + ') *, *(?P<imag>' + _numr_FSC", "self.__dict__[attr] = ColDefs(self, tbtype=class_name) elif attr == '_theap': self.__dict__[attr] = self.header.get('THEAP', self.header['NAXIS1']*self.header['NAXIS2']) elif", "% key else: raise NameError, \"Illegal key '%s'.\" % `key` return indx def", "# Linux, but is at the beginning in Solaris. self.__file.seek(0, 2) self._size =", "_isInt+\" and val>= 0\", 1, option, _err) # verify each card for _card", "0 and indx < naxis: if naxis > 1: return _SinglePoint(1, indx) elif", "_card in self.header.ascard: _err.append(_card._verify(option)) return _err def req_cards(self, keywd, pos, test, fix_value, option,", "the table's column definitions.\"\"\" return self.columns def update(self): \"\"\" Update header keywords to", "'extension %s not found' % `key` elif (nfound > 1): raise KeyError, 'there", "the card does not exist if _index is None: err_text = \"'%s' card", "int elif isinstance(self.value , bool): valStr = '%20s' % `self.value`[0] elif isinstance(self.value ,", "* _naxis + indx.offset # all elements after the first WholeLine must be", "area # data area size, including padding hdu._datSpan = _size + _padLength(_size) hdu._new", "be difficult when the extension is a TableHDU containing ASCII data. \"\"\" def", "# to be more than one 80-char \"physical\" cards. _max = _keyList.count('CONTINUE') _start", "2. Redistributions in binary form must reproduce the above copyright notice, this list", "INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT", "== 'T': groups = 1 else: groups = 0 size = 1 for", "items, each of the next level # must present, even it has nothing.", "analogous to tables \"\"\" def __init__(self, input=None, bitpix=None, pardata=None, parnames=[], bscale=None, bzero=None, parbscales=None,", "data would cause the stream to overflow, an IOError exception is raised and", "else: raise IndexError, 'Illegal slice %s, step must be integer.' % input return", "offset = -1 except: offset = len(input) # check for one word longer", "\"\"\"Add a HISTORY card. value: History text to be added. 
PyFITS provides an interface for reading and writing FITS files in Python. FITS (Flexible Image Transport System) is the standard format for storing high energy astrophysics data; the format was endorsed by the International Astronomical Union in 1999 and mandated by NASA.

A set of module-level convenience functions covers the common one-call cases. getheader(filename, *ext, **extkeys) returns the header of an extension, getdata(filename, *ext, **extkeys) returns its data (and, with header=True, a (data, header) tuple), getval(filename, key, *ext, **extkeys) returns a single keyword value, writeto(filename, data, header) creates a new FITS file from a data/header pair, update(filename, data, header=..., ext=...) replaces the data and optionally the header of an existing extension, and info(filename) prints a summary of the HDUs in a file. The extension arguments are flexible: an extension can be given by number, by EXTNAME, or by an (EXTNAME, EXTVER) pair, either as separate arguments or as a tuple, so getdata('in.fits', 'sci', 2) and getdata('in.fits', ('sci', 2)) are equivalent, while ambiguous or conflicting specifications raise an exception.
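The work flow below is a minimal sketch of these convenience functions. The file names, the 'SCI' extension, and the DATE-OBS keyword are placeholders, and the header=True keyword to getdata is assumed from the (data, header) return described above.

import pyfits

# Hypothetical input file with a 'SCI' extension.
sci = pyfits.getdata('in.fits', 'sci', 2)              # data of EXTNAME='SCI', EXTVER=2
data, hdr = pyfits.getdata('in.fits', 1, header=True)  # data plus header of extension 1
dateobs = pyfits.getval('in.fits', 'DATE-OBS', 0)      # one keyword from the primary header
pyfits.writeto('out.fits', data, hdr, clobber=True)    # new file from a data/header pair
pyfits.update('out.fits', data, hdr, 1)                # replace extension 1 in place
pyfits.info('out.fits')                                # print the HDU summary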
Headers are built from Card objects. A Card is a single 80-character card image with key, value, and comment attributes; it can be constructed from those attributes or parsed from a raw string, and separate regular expressions accept the fixed (FSC) and non-fixed (NFSC) value formats. Keyword names longer than eight characters use the HIERARCH convention, long string values are continued across CONTINUE cards, and the commentary keywords (COMMENT, HISTORY, and blank) carry only text. Every card can verify itself with the options 'fix', 'silentfix', 'ignore', 'warn', or 'exception', repairing such problems as unprintable characters, a non-standard value string, or a missing equal sign.

A CardList holds the cards of one header and supports indexing by position or by keyword, insert and append (optionally consuming trailing blank cards before the END card), renaming of keywords, and deletion. The Header class wraps a CardList, which ascardlist() returns: update(key, value, comment, before=..., after=...) creates or modifies a card at a chosen position, get(key, default) reads a value without raising KeyError, and commentary text is appended through the history and comment methods.
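A short sketch of editing a header through these classes. The file name, keywords, and values are hypothetical, and pyfits.open in 'update' mode is assumed to be the entry point that returns the HDUList described further on.

import pyfits

hdulist = pyfits.open('in.fits', mode='update')
hdr = hdulist[0].header
hdr.update('OBSERVER', 'E. Hubble', 'who took the data')  # add or modify a card
hdr.update('EXPTIME', 1500.0, before='OBSERVER')          # control where the card is placed
print hdr.ascardlist()                                    # the underlying CardList
hdulist.flush()                                           # write the changes back to 'in.fits'
hdulist.close()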
The top-level FITS object is the HDUList. When a FITS file is opened an HDUList is returned; it behaves like a Python list of HDUs, can be indexed by number or by extension name, extended with append and insert, and written out with writeto, flush, or close, each of which accepts an output_verify option. The HDU classes cover the FITS HDU types: PrimaryHDU, ImageHDU, GroupsHDU for random groups, TableHDU for ASCII tables, and BinTableHDU for binary tables. Internally a _TempHDU holds only the raw header bytes so that an HDU is not parsed until it is accessed, a _CorruptedHDU stands for an HDU whose mandatory cards (such as BITPIX, NAXIS, or END) are unparsable or missing, and _ValidHDU is the base class of everything else. Structural verification checks that the 0th element of the list is a PrimaryHDU (fixable by inserting a default one) and that the remaining elements are extension HDUs, while req_cards enforces the mandatory keywords of each HDU type (SIMPLE or XTENSION, BITPIX, NAXIS, and for tables PCOUNT, GCOUNT, and TFIELDS).
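The sketch below exercises the HDUList interface; the file names and the extra 'SCI2' extension are invented for illustration.

import pyfits

hdulist = pyfits.open('in.fits')              # returns an HDUList
hdulist.info()                                # No. / Name / Type / Cards / Dimensions / Format
primary = hdulist[0]                          # index by position ...
sci = hdulist['SCI']                          # ... or by extension name
extra = pyfits.ImageHDU(data=sci.data, name='SCI2')
hdulist.append(extra)
hdulist.writeto('out.fits', output_verify='fix')
hdulist.close()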
Image HDUs map the BITPIX value to a numarray type (8, 16, 32, 64, -32, and -64 correspond to UInt8, Int16, Int32, Int64, Float32, and Float64). When the data are read, any BSCALE/BZERO scaling recorded in the header is applied and the two keywords are then removed from the header; scale(type, option, bscale, bzero) performs the inverse operation, rescaling the data and updating BITPIX, BSCALE, and BZERO before the HDU is written (option 'old' reuses the original scaling values). A Section object gives access to a rectangular subsection of an image without reading the whole array, translating the requested slices into file offsets from the NAXISn keywords; stepped slices are not supported. GroupsHDU handles random-groups data: the group parameters described by PTYPE, PSCAL, and PZERO are exposed through the GroupData class, which gives structured access to both the parameter values and the data of each group.
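A sketch of image access built on these pieces. The file names, slice bounds, and the Int16/BZERO choice are placeholders; the section attribute is assumed to be the public handle on the Section class, and 'Int16' is assumed to name the numarray type accepted by scale.

import pyfits

hdulist = pyfits.open('image.fits')
hdu = hdulist[0]
patch = hdu.section[100:200, 300:400]        # read a sub-rectangle without loading the whole image
hdu.scale('Int16', bscale=1.0, bzero=32768)  # store the data as scaled 16-bit integers
hdulist.writeto('scaled.fits', output_verify='fix')
hdulist.close()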
Table HDUs are built from column definitions. A Column holds the definition of one column: name (TTYPE), format (TFORM), unit, null, bscale, bzero, disp, start (for ASCII tables), and dim, together with an optional data array. A ColDefs object collects Columns and exposes the same attributes as lists (names, formats, and so on); columns can be added or deleted and individual attributes changed. The table data live in FITS_rec, a record array layered over numarray.records that converts each field lazily on first access: scaled fields become floating point, ASCII-table null values are honoured, logical columns become booleans, bit (X) fields are unpacked from bytes, and variable-length (P) fields are read from the heap into an object array of the actual row lengths. Fields are reached by index or by name, and name lookup is case-insensitive when the name is unique. new_table(input, header=None, nrows=0, fill=0, tbtype='BinTableHDU') creates a new ASCII or binary table HDU from a ColDefs, a list of Columns, or an existing table HDU, and helper routines translate between FITS format codes and record formats (for example '3J' becomes '3i4').
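A sketch of building a small binary table from column definitions. The column names, formats, and values are invented; numarray and numarray.strings are assumed to be the array packages in use, as elsewhere in the module.

import numarray as num
import numarray.strings as chararray
import pyfits

# Hypothetical three-row table with a string column and a float column.
names = chararray.array(['NGC1365', 'NGC1566', 'NGC4151'])
flux = num.array([1.2, 2.3, 3.4], type=num.Float32)
c1 = pyfits.Column(name='target', format='20A', array=names)
c2 = pyfits.Column(name='flux', format='E', unit='Jy', array=flux)
tbhdu = pyfits.new_table(pyfits.ColDefs([c1, c2]))   # a BinTableHDU by default
print tbhdu.data.field('flux')
tbhdu.writeto('table.fits')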
\"\"\" list.__init__(self, cards) self._cards = cards #", "not supported.\" self.tfile = tempfile.NamedTemporaryFile('rb+',-1,'.fits') self.name = self.tfile.name self.__file = self.tfile.file self.__file.write(zfile.read(namelist[0])) zfile.close()", "('sci', 2)) # equivalent Ambiguous or conflicting specifications will raise an exception, e.g.,", "== 0\", 0, option, _err) return _err class GroupsHDU(PrimaryHDU): \"\"\"FITS Random Groups HDU", "comment maybe an empty string. _value_FSC_RE = re.compile( r'(?P<valu_field> *' r'(?P<valu>' # The", "if isinstance(val, str): val = val.strip() if len(val) <= 8: val = val.upper()", "if isinstance(self._coldefs._recformats[indx], _FormatX): _wrapx(self._convert[indx], self._parent.field(indx), self._coldefs._recformats[indx]._nx) continue (_str, _bool, _number, _scale, _zero, bscale,", "'null', 'bscale', 'bzero', 'disp', 'start', 'dim'] _keyNames = ['TTYPE', 'TFORM', 'TUNIT', 'TNULL', 'TSCAL',", "numarray.strings's num2char because the # result is not allowed to expand (as C/Python", "self.data._shape[0], after='naxis1') _update('tfields', len(_cols), after='gcount') # Wipe out the old table definition keywords.", "so it's defined (in the case of reading from a # FITS file)", "os.path.exists(name): if not self.header.has_key('SIMPLE'): hdulist = HDUList([PrimaryHDU()]) hdulist.writeto(name, 'exception') else: if self.header.has_key('SIMPLE') and", ":Parameters: data : NumArray Data to stream to the file. :Returns: writeComplete :", "illustrates its use: header = pyfits.Header() for all the cards you need in", "- Google Search, when asked for \"PyFITS\" \"\"\" import re, os, tempfile, exceptions", "no need to parse further if self.key in Card._commentaryKeys: self.__dict__['value'] = self._cardimage[8:].rstrip() self.__dict__['comment']", "num.nonzero(blank_loc >= strlen+offset)[0][0] offset = blank_loc[loc-1] + 1 if loc == 0: offset", "_formats = '' _cols = [] if pardata is None: npars = 0", "> 0 and (not isinstance(self[i], _ExtensionHDU)): err_text = \"HDUList's element %s is not", "value else: indx = self._unique[parName] if len(indx) == 1: self.field(indx[0])[:] = value #", "string, boolean, # number, or complex value is found, otherwise it will return", "self.__dict__['_fixable'] = 0 raise ValueError, self._err_text def _extractKey(self): \"\"\"Returns the keyword name parsed", "if self.key in Card._commentaryKeys: self.__dict__['value'] = self._cardimage[8:].rstrip() self.__dict__['comment'] = '' return valu =", "Card to the CardList. card: The Card to be appended. useblanks: Use any", "else: raise \"HDUList can only append an HDU\" # make sure the EXTEND", "file does not already exist, it will be created and if the header", "This file format was endorsed by the International Astronomical Union in 1999 and", "_getitem(self, offset): row = (offset - self._byteoffset) / self._strides[0] return _Group(self, row) class", "coldata2._byteorder != 'big': coldata2.byteswap() coldata2._byteorder = 'big' # In case the FITS_rec was", "get_coldefs(self): \"\"\"Returns the table's column definitions.\"\"\" return self.columns def update(self): \"\"\" Update header", "\"\"\"Insert a Card to the location specified by before or after. The argument", "0, option, _err) tfields = self.header['TFIELDS'] for i in range(tfields): self.req_cards('TFORM'+`i+1`, None, None,", "be a table HDU or a list of Columns\" def __getattr__(self, name): \"\"\"Populate", "0. 
\"\"\" # Get the name of the current thread and determine if", "= _normalize_slice(indx, naxis) if (indx.start == 0) and (indx.stop == naxis) and (indx.step", "@type filename: string @param filename: name of the file to append to @type", "after=None): \"\"\"Add a commentary card. If before and after are None, add to", "_repeat+_rec2fits[dtype+option] else: raise ValueError, \"Illegal format %s\" % fmt return output_format def _convert_ASCII_format(input_format):", "array of data type dtype. The descriptor location will have a zero offset", "not isinstance(other, (list, tuple)): other = [other] _other = [_get_index(self.names, key) for key", "['', None, 1] _zero = bzero not in ['', None, 0] # ensure", "to have duplicate name. \"\"\" oldkey = oldkey.strip().upper() newkey = newkey.strip().upper() if newkey", "offset): row = (offset - self._byteoffset) / self._strides[0] return _Group(self, row) class _Group(rec.Record):", "ext[0] ext = ext[1:] elif not isinstance(ext[0], (int, long, str, tuple)): raise KeyError,", "back to the _keylist. self._checkKey(self.key) # verify the value, it may be fixable", "'_valueModified']: if self.__dict__.has_key(name): delattr(self, name) return self def _ncards(self): return len(self._cardimage) / Card.length", "0.9.6.3 to avoid out of range error for BZERO = +32768 self.header.update('BZERO', _zero)", "of the file and the provided header will be added as the first", "the keyword, or index of the Card before which the new card will", "keyStr = ' '*8 # value string # check if both value and", "backward: _indx = len(_keylist) - _indx - 1 return _indx except: raise KeyError,", "not an HDU.\" % item else: if not isinstance(hdu, _AllHDU): raise ValueError, \"%s", "\"\"\"Cards having more than one 80-char \"physical\" cards, the cards after the first", "0 elif name == 'comment': self.__dict__['comment'] = '' if valu is not None:", "is not an attribute of the column definitions.\"%att continue print \"%s:\" % att", "extension >>> update(file, dat, hdr, 3) # update the 3rd extension >>> update(file,", "= tmp._arrays[i][:n] elif isinstance(tmp._recformats[i], _FormatP): hdu.data._convert[i] = _makep(tmp._arrays[i][:n], hdu.data._parent.field(i)[:n], tmp._recformats[i]._dtype) else: if tbtype", "= valu.group('comm') if isinstance(_comm, str): self.__dict__['comment'] = _comm.rstrip() def _fixValue(self, input): \"\"\"Fix the", "the next level items, each of the next level # must present, even", "it is never fixable # always fix silently the case where \"=\" is", "= self._ffo.getfile().tell() self._size = self.size() if self._size != 0: self.writeComplete = 0 else:", "specification. See L{getdata} for explanations/examples. @return: keyword value @rtype: string, integer, or float", "0: break # construct the Header object, using the cards. try: header =", "Card.length: self.__class__ = _Card_with_continue # remove the key/value/comment attributes, some of them may", "output = '' for card in self: output += str(card) + '\\n' return", "_format = _format[_format.rfind('.')+1:] # if data is not touched yet, use header info.", "return # None, meaning the keyword is undefined. The comment field will #", "will be streamed. header : Header The header object associated with the data", "extra bytes after the last HDU or the file is corrupted.' 
% (len(hduList)+1)", "to a numarray first array = num.array(array) except: try: # then try to", "= 0 if dim > nrows: nrows = dim if tbtype == 'TableHDU':", "header=None, name=None): \"\"\" header: header to be used data: data to be used", "_ver = None if not isinstance(_key, str): raise KeyError, key _key = (_key.strip()).upper()", "(ffo) ffo = _File(name, mode=mode, memmap=memmap) hduList = HDUList(file=ffo) # read all HDU's", "attr = getattr(self, cname+'s') del attr[indx] del self._arrays[indx] self._nfields -= 1 def change_attrib(self,", "in this HDU.' if _gethdr: _hdr = hdu.header hdulist.close() if _gethdr: return _data,", "isinstance(self.value, str): raise ValueError, 'Value in a commentary card must be a string'", "_err) naxis = self.header.get('NAXIS', 0) if naxis < 1000: for j in range(3,", "self.req_cards('PCOUNT', None, _isInt+\" and val == 0\", 0, option, _err) return _err class", "[]: dims = [1] npt = 1 for n in dims: npt *=", "keyword null: null value, corresponding to TNULL keyword bscale: bscale value, corresponding to", "mandatory keywords. # Do the first card here, instead of in the respective", "((nx-1) / 8) + 1 for i in range(nbytes): _min = i*8 _max", "% VLdata._max else: val = _convert_format(val, reverse=1) #_update(keyword, val) _append(Card(keyword, val)) def copy(self):", "-64:'Float64'} ImgCode = {'UInt8':8, 'Int16':16, 'Int32':32, 'Int64':64, 'Float32':-32, 'Float64':-64} def __init__(self, data=None, header=None):", "range(nmax): try: loc = num.nonzero(blank_loc >= strlen+offset)[0][0] offset = blank_loc[loc-1] + 1 if", "for name in ['TDISP', 'TDIM', 'THEAP']: for i in range(_tfields): del self[name+`i+1`] if", "\"update data in place: Name =\", hdu.name, _extver # reset the modification attributes", "ext2 return hdulist, ext def getheader(filename, *ext, **extkeys): \"\"\"Get the header from an", "(slice(None),) * (naxis-len(key)) offset = 0 for i in range(naxis): _naxis = self.hdu.header['NAXIS'+`naxis-i`]", "If the card image is longer than 80, assume it contains CONTINUE card(s).", "valu size = eval(width)+1 strfmt = strfmt + 's'+str(size) + ',' strlen =", "'' for card in self: output += str(card) + '\\n' return output[:-1] #", "if (keyword in _keyNames): col = eval(_key.group('num')) if col <= _nfields and col", "Header): header = ext[0] ext = ext[1:] elif not isinstance(ext[0], (int, long, str,", "must contain printable ASCII characters. _ASCII_text = r'[ -~]*$' _comment_FSC_RE = re.compile(_ASCII_text) #", "%-11s %5d %-12s %s\" % \\ (self.name, type, len(self.header.ascard), _dims, _format) def get_coldefs(self):", "isinstance(hdu, _AllHDU): raise \"Element %d in the HDUList input is not an HDU.\"", "Classes: Card, CardList, _Card_with_continue, Header, _Hierarch @group HDU Classes: _AllHDU, BinTableHDU, _CorruptedHDU, _ExtensionHDU,", "% self.key else: keyStr = ' '*8 # value string # check if", "= self.field('data') elif attr == '_unique': _unique = {} for i in range(len(self.parnames)):", "have the fix_value as its value when created. Also check the card's value", "written to the stream. 
If the provided data would cause the stream to", "j in range(len(self)): results = results + \"%-3d %s\\n\"%(j, self[j]._summary()) results = results[:-1]", "= self.value.replace(\"'\", \"''\") val_list = self._words_group(val, val_len) for i in range(len(val_list)): if i", "last example, field('Abc') will cause an exception since there is no unique mapping.", "disp: display format, corresponding to TDISP keyword start: column starting position (ASCII table", "= data._shape[0] self.header['TFIELDS'] = data._nfields self.data = data self.columns = data._coldefs self.update() elif", "reset.' elif name == 'value': self._setvalue(val) elif name == 'comment': self._setcomment(val) else: raise", "attempt to write more data after the stream has been filled will raise", "text to be added. before: [same as in update()] after: [same as in", "['']*self._nfields) setattr(self, '_arrays', [None]*self._nfields) def add_col(self, column): \"\"\"Append one Column to the column", "# deprecated _INDENT = \" \" DELAYED = \"delayed\" # used for lazy", "super(CardList, self).insert(i+1, card) self._keylist.insert(i+1, card.key.upper()) if useblanks: self._use_blanks(card._ncards()) self.count_blanks() self._mod = 1 else:", "pass class _ValidHDU(_AllHDU, _Verify): \"\"\"Base class for all HDUs which are not corrupted.\"\"\"", "exist for name in ['key', 'value', 'comment', '_valueModified']: if self.__dict__.has_key(name): delattr(self, name) return", "= j nfound += 1 else: # if the keyword EXTVER does not", "_tmp = self._getValueCommentString() try: slashLoc = _tmp.index(\"/\") self.__dict__['value'] = _tmp[:slashLoc].strip() self.__dict__['comment'] = _tmp[slashLoc+1:].strip()", "keywd fix_text = \"Fixed by inserting a new '%s' card.\" % keywd if", "writing the # given header. # if not os.path.exists(name): if not self.header.has_key('SIMPLE'): hdulist", "== tbtype: tmp = hdu.columns = input else: raise ValueError, 'column definitions have", "calculate PCOUNT, for variable length tables _tbsize = hdu.header['NAXIS1']*hdu.header['NAXIS2'] _heapstart = hdu.header.get('THEAP', _tbsize)", "rest of the arguments are for extension specification. See L{getdata} for explanations/examples. @rtype:", "if (verbose): print \"reopen the newly renamed file\", oldName # reset the resize", "http_error_default(self, url, fp, errcode, errmsg, headers): raise IOError, (errcode, errmsg, url) urllib._urlopener =", "0: self.data += self._bzero # delete the keywords BSCALE and BZERO after scaling", "convert to a numarray first array = num.array(array) except: try: # then try", "# reset the modification attributes after updating for hdu in self: hdu.header._mod =", "data: if \"old\", use the original BSCALE and BZERO values when the data", "value, and comment, or from raw string. option: verification option, default=silentfix. \"\"\" #", "print out verbose messages? default = 0. \"\"\" # Get the name of", "self.key else: keyStr = '%-8s' % self.key else: keyStr = ' '*8 #", "provided with a Primary Header. If not we will need # to prepend", "# Instead, just truncate the comment if isinstance(self.value, str) and len(valStr) > (Card.length-10):", "histories as a list of string texts.\"\"\" output = [] for _card in", "reset the output nbytes = ((nx-1) / 8) + 1 unused = nbytes*8", "case if input is None: _tmp = self._getValueCommentString() try: slashLoc = _tmp.index(\"/\") self.__dict__['value']", "_check(self, option='ignore'): \"\"\"Verify the card image with the specified option. 
\"\"\" self.__dict__['_err_text'] =", "= self.header['BITPIX'] # delete the keywords BSCALE and BZERO del self.header['BSCALE'] del self.header['BZERO']", "del self.header['BZERO'] if _scale != 1: self.data /= _scale self.header.update('BSCALE', _scale) else: del", "otherwise, 0. key: keyword name. If given an index, always returns 0. \"\"\"", "_max): if j != _min: num.lshift(output[...,i], 1, output[...,i]) num.add(output[...,i], input[...,j], output[...,i]) # shift", "each of the next level # must present, even it has nothing. for", "Check for numbers with leading 0s. numr = Card._number_NFSC_RE.match(valu.group('numr')) _digt = numr.group('digt').translate(_fix_table2, '", "added. before: [same as in update()] after: [same as in update()] \"\"\" self._add_commentary('history',", "header can be used to reconstruct another kind of header. \"\"\" try: #", "file\", oldName # reset the resize attributes after updating self._resize = 0 for", "ColDefs): b = list(other.data) else: raise TypeError, 'Wrong type of input' if option", "== 'data': # same code as in _TableBaseHDU size = self.size() if size:", "for item in self: if isinstance(item, _ErrList): _dummy = item.__str__(tab=tab+1) # print out", "if (keyword in _keyNames): _list.append(i) for i in _list: del self.header.ascard[i] del _list", "Column's unit.\"\"\" self.change_attrib(col_name, 'unit', new_unit) def info(self, attrib='all'): \"\"\"Get attribute(s) information of the", "keyword = _keyNames[_commonNames.index(cname)] if isinstance(value, Card): setattr(self, cname, value.value) else: setattr(self, cname, value)", "value only, no need to check key and comment for 'parse' result =", "keyword, or index of the Card after which the new card will be", "the size from the first block of the HDU.\"\"\" re_simple = re.compile(r'SIMPLE =\\s*')", "0, after='NAXIS') def __getattr__(self, attr): \"\"\"Get the 'data' or 'columns' attribute. The data", "close(self): \"\"\"Close the 'physical' FITS file.\"\"\" self.__file.close() class HDUList(list, _Verify): \"\"\"HDU list class.", "not a Card\" % str(value) def __delitem__(self, key): \"\"\"Delete a Card from the", "= gzip.GzipFile(self.name) self.tfile = tempfile.NamedTemporaryFile('rb+',-1,'.fits') self.name = self.tfile.name self.__file = self.tfile.file self.__file.write(zfile.read()) zfile.close()", "err_text = \"HDUList's 0th element is not a primary HDU.\" fix_text = 'Fixed", "which are not corrupted.\"\"\" # 0.6.5.5 def size(self): \"\"\"Size (in bytes) of the", "raise KeyboardInterrupt signal.signal(signal.SIGINT,signal.getsignal(signal.SIGINT)) def update_extend(self): \"\"\"Make sure if the primary header needs the", "If the file does already exist, but the provided header represents a Primary", "output = hdu.data # Binary table byteswap elif isinstance(hdu, BinTableHDU): for i in", "= HDUList([self]) hdulist.writeto(name, output_verify, clobber=clobber) def _verify(self, option='warn'): _err = _ErrList([], unit='Card') isValid", "eqLoc = None return eqLoc def _getKeyString(self): \"\"\"Locate the equal sign in the", "not eval(test): err_text = \"'%s' card has invalid value '%s'.\" % (keywd, val)", "_stop = _normalize(_stop, naxis) else: raise IndexError, 'Illegal slice %s, stop must be", "is corrupted.' 
% (len(hduList)+1) break # initialize/reset attributes to be used in \"update/append\"", "hdu self.field = field # translation table for floating value string _fix_table =", "from U{http://www.stsci.edu/resources/software_hardware/pyfits/Users_Manual1.pdf} Epydoc markup used for all docstrings in this module. @group Header-related", "card if self[i].key not in Card._commentaryKeys: break super(CardList, self).insert(i+1, card) self._keylist.insert(i+1, card.key.upper()) if", "= _where # if not the real CONTINUE card, skip to the next", "return output_format def _convert_ASCII_format(input_format): \"\"\"Convert ASCII table format spec to record format spec.", "print \"One or more header is resized.\" break # Data: if 'data' not", "nx for i in range(nbytes): _min = i*8 _max = min((i+1)*8, nx) for", "return self.ascard[key].value def __setitem__ (self, key, value): \"\"\"Set a header keyword value.\"\"\" self.ascard[key].value", "once. The following psudo code illustrates its use: header = pyfits.Header() for all", "memmap, use the space already in memory else: self.data = raw_data if self._bscale", "values if there is bscale/bzero if isinstance(array, num.NumArray): # boolean needs to be", "(i.e. everything else) result = None return result else: # verify the equal", "and one for non-FSC (NFSC) format: # NFSC allows lower case of DE", "%s' % str(val) self.__dict__['value'] = val def _setcomment(self, val): \"\"\"Set the comment attribute.\"\"\"", "= '_index '+ pos if not eval(test_pos): err_text = \"'%s' card at the", "output_format = 'f8' else: raise ValueError, \"Illegal format %s\" % fmt else: if", "i in range(naxis): mo = re_naxisn.search(block, pos) pos = mo.end(0) dims[int(mo.group(1))-1] = int(mo.group(2))", "many blank cards are *directly* before the END card.\"\"\" for i in range(1,", "'number of groups'), Card('TFIELDS', 0, 'number of table fields') ]) if header is", "# update the header self.update_header() self._bitpix = self.header['BITPIX'] # delete the keywords BSCALE", "more data will be accepted. An attempt to write more data after the", "3rd argument is not a header, it (and other positional arguments) are assumed", "self: if isinstance(item, _ErrList): _dummy = item.__str__(tab=tab+1) # print out a message only", "middle if offset <= xoffset: offset = xoffset + strlen # collect the", "= eval(_rdigt) else: _val = eval(real.group('sign')+_rdigt) imag = Card._number_NFSC_RE.match(valu.group('imag')) _idigt = imag.group('digt').translate(_fix_table2, '", "the END card.\"\"\" for i in range(1, len(self)): if str(self[-i]) != ' '*Card.length:", "\"table data has incorrect type\" # set extension name if not name and", "data type of the variable array \"\"\" _offset = 0 data_output = _VLF([None]*len(input))", "and val >= 0 and val <= 999\", 0, option, _err) naxis =", "# CardList needs its own _mod attribute since it has methods to change", "ARE DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,", "an integer or a float in fixed or # scientific notation. One for", "might evaluate them as octal values. _number_FSC_RE = re.compile(r'(?P<sign>[+-])?0*(?P<digt>' + _digits_FSC+')') _number_NFSC_RE =", "until flush is complete!\" keyboardInterruptSent = True # Install new handler signal.signal(signal.SIGINT,New_SIGINT) if", "card image is longer than 80, assume it contains CONTINUE card(s). 
\"\"\" self.__dict__['_cardimage']", "1', _isInt+\" and \"+isValid, 8, option, _err) self.req_cards('NAXIS', '== 2', _isInt+\" and val", "key='', value='', comment=''): \"\"\"Construct a card from key, value, and (optionally) comment. Any", "is '': self.starts[i] = last_end + 1 _end = self.starts[i] + _width -", "= _end - last_end last_end = _end self._width = _end else: raise KeyError,", "_numr_NFSC + ') *, *(?P<imag>' + _numr_NFSC + ') *\\))' r')? *)' r'(?P<comm_field>'", "def copy(self): \"\"\"Make a copy of the HDU, both header and data are", "+ `value` + '\\n' return text[:-1] def copy(self): tmp = Column(format='I') # just", "tbtype == 'TableHDU': # string no need to convert, if isinstance(tmp._arrays[i], chararray.CharArray): hdu.data._parent.field(i)[:n]", "for each piece of data: shdu.write(data) shdu.close() \"\"\" def __init__(self, name, header): \"\"\"", "def writeto(self, name, output_verify='exception', clobber=False): \"\"\"Write the HDUList to a new file. name:", "update(file, dat, hdr, 'sci') # update the 'sci' extension >>> update(file, dat, 3)", "except EOFError: break # check in the case there is extra space after", "allowed\" self._checkKey(val) else: if val[:8].upper() == 'HIERARCH': val = val[8:].strip() self.__class__ = _Hierarch", "standard.\"\"\" # use repr (not str) in case of control character if Card._keywd_FSC_RE.match(val)", "# the reverse dictionary of the above _rec2fits = {} for key in", "which the header and data will be streamed. header : Header The header", "_Card_with_continue # remove the key/value/comment attributes, some of them may not exist for", "self._setcomment(val) else: raise AttributeError, name # When an attribute (value or comment) is", "i in range(len(tmp)): _formats += 'a%d,' % tmp.spans[i] _itemsize += tmp.spans[i] hdu.data =", "corrupted HDU usually means that the data size cannot be calculated or the", "= fits_fmt, bscale = parbscales[i], bzero = parbzeros[i])) _cols.append(Column(name='data', format = fits_fmt, bscale", "= min _scale = (max - min) / (2.**8 - 1) else: _zero", "return default def update(self, key, value, comment=None, before=None, after=None): \"\"\"Update one header card.\"\"\"", "is not doing anything. _ImageBaseHDU.__init__(self, data=data, header=header) self._xtn = 'IMAGE' self.header._hdutype = ImageHDU", "i > 0 and _card.key != 'CONTINUE': raise ValueError, 'Long card image must", "== self._size: # # the stream is full so pad the data to", "one 80-char \"physical\" cards. _max = _keyList.count('CONTINUE') _start = 0 for i in", "bscale=None, bzero=None, parbscales=None, parbzeros=None): \"\"\"input: input data, either the group data itself (a", "raise KeyError, 'extension %s not found' % `key` elif (nfound > 1): raise", "of columns.\"\"\" _update = self.header.update _append = self.header.ascard.append _cols = self.columns _update('naxis1', self.data._itemsize,", "i in range(naxis): _naxis = self.hdu.header['NAXIS'+`naxis-i`] indx = _iswholeline(key[i], _naxis) offset = offset", "keyword name parsed from the card image.\"\"\" head = self._getKeyString() if isinstance(self, _Hierarch):", "elif tbtype == 'TableHDU': (_format, _width) = _convert_ASCII_format(self.formats[i]) if self.starts[i] is '': self.starts[i]", "the Header ends, but this task may be difficult when the extension is", "dummy[i] = float(self._parent.field(indx)[i].replace('D', 'E')) else: dummy = self._parent.field(indx) # further conversion for both", "input header, since it # may get modified. 
the data is still a", "at least 8 columns, unless it is # a null string elif isinstance(self.value,", "rows in the new table fill: if = 1, will fill all cells", "with the same keyword name if isinstance(key, str): while 1: try: del self.ascard[key]", "this can be reset by user. _isInt = \"isinstance(val, (int, long))\" # Functions", "range(len(self._parent)): dummy[i][:] = dummy[i]*bscale+bzero # Boolean (logical) column if self._coldefs._recformats[indx]._dtype is _booltype: for", "done after the \"regular\" data is written (above) _where = self.__file.tell() if isinstance(hdu,", "long)): self.field(parName)[:] = value else: indx = self._unique[parName] if len(indx) == 1: self.field(indx[0])[:]", "directly before END, it will use this space first, instead of appending after", "= axes[j] except: if (j == 0): _after = 'naxis' else : _after", "in keys: ext = ext2['ext'], ext2['extver'] else: raise KeyError, 'Redundant/conflicting keyword argument(s): %s'", "pretty darn close. It appears to find the # end of a string", "+ valstr) # do the comment string if self.comment is None: comm =", "in ASCII table is the same as 7A in # binary table, so", "met: 1. Redistributions of source code must retain the above copyright notice, this", "name self.__dict__[name] = attr return self.__dict__[name] \"\"\" # make sure to consider the", "- self._datLoc == self._size: # # the stream is full so pad the", "and put each card into a list of cards. Will deal with CONTINUE", "= hdu.data.shape[0] # calculate PCOUNT, for variable length tables _tbsize = hdu.header['NAXIS1']*hdu.header['NAXIS2'] _heapstart", "except: return # for integer key only delete once else: del self.ascard[key] self._mod", "isinstance(val,str): self._checkText(val) else: if val is not None: raise ValueError, 'comment %s is", "key name already exist, force to have duplicate name. \"\"\" oldkey = oldkey.strip().upper()", "it to the header object hduList._resize = 0 return hduList fitsopen = open", "= _tformat_re.match(tform.strip()).groups() except: print 'Format \"%s\" is not recognized.' % tform if repeat", "must reproduce the above copyright notice, this list of conditions and the following", "' ') if numr.group('sign') is not None: _valStr = numr.group('sign')+_valStr elif input.group('cplx') !=", "real.group('sign') is not None: _realStr = real.group('sign')+_realStr imag = Card._number_NFSC_RE.match(input.group('imag')) _imagStr = imag.group('digt').translate(_fix_table,", "isinstance(input, FITS_rec): # input is a FITS_rec tmp = hdu.columns = input._coldefs else:", "other = [other] _other = [_get_index(self.names, key) for key in other] indx=range(len(self)) for", "0: raise IOError self.__file.flush() loc = self.__file.tell() self.__file.write(blocks) # flush, to make sure", "1 if not bottom: for i in range(nc-1, -1, -1): # locate last", "val to be printable ASCII text.\"\"\" if Card._comment_FSC_RE.match(val) is None: self.__dict__['_err_text'] = 'Unprintable", "= self.header.get('NAXIS', 0) self.req_cards('PCOUNT', '== '+`naxis+3`, _isInt+\" and val >= 0\", 0, option,", "would cause the stream to overflow, an IOError exception is raised and the", "`after' if both specified. default=None. 
after: name of the keyword, or index of", "is None: _step = 1 elif isinstance(_step, (int, long)): if _step <= 0:", "= self.ascard[_index].comment _value = self.ascard[_index].value self.ascard[_index] = Card(newkey, _value, _comment) # self.ascard[_index].__dict__['key']=newkey #", "header = None if len(ext) > 0: if isinstance(ext[0], Header): header = ext[0]", "len(self) > 0 and (not isinstance(self[0], PrimaryHDU)): err_text = \"HDUList's 0th element is", "self._hdutype = TableHDU elif xtension == 'IMAGE': self._hdutype = ImageHDU elif xtension in", "# all elements after the first WholeLine must be WholeLine or # OnePointAxis", "%s\" % (keywd, `fix_value`) _err.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix, fixable=fixable)) return _err class _TempHDU(_ValidHDU):", "self._get_scale_factors(indx) # for P format if isinstance(self._coldefs._recformats[indx], _FormatP): dummy = _VLF([None]*len(self._parent)) dummy._dtype =", "the table HDU, both header and data are copied.\"\"\" # touch the data,", "specs can also be keyword arguments. For example: >>> update(file, dat, hdr, 'sci')", "key: string @param key: keyword name @param ext: The rest of the arguments", "tuple)): other = [other] _other = [_get_index(self.names, key) for key in other] indx=range(len(self))", "pass elif isinstance(value, chararray.CharArray) and value.itemsize() == 1: pass elif self._dtype == 'a':", "is None: dim = `self.header['NAXIS']` if dim == '0': dim = '' self.header.update('EXTEND',", "long string into parts where each part is no longer than strlen and", "instead of dictionaries so the names can be displayed in a # preferred", "the first block of the HDU.\"\"\" re_simple = re.compile(r'SIMPLE =\\s*') re_bitpix = re.compile(r'BITPIX", "if not name and self.header.has_key('EXTNAME'): name = self.header['EXTNAME'] self.name = name def __getattr__(self,", "return eqLoc def _getKeyString(self): \"\"\"Locate the equal sign in the card image and", "__delitem__(self, key): \"\"\"Delete card(s) with the name 'key'.\"\"\" # delete ALL cards with", "'' # equal sign string eqStr = '= ' if keyStr.strip() in Card._commentaryKeys:", "return the string after column 8. \"\"\" eqLoc = self._locateEq() if eqLoc is", "the supposed location is specified if pos is not None: test_pos = '_index", "tmp._arrays[i] = _data.field(i) return FITS_rec(_data) def new_table (input, header=None, nrows=0, fill=0, tbtype='BinTableHDU'): \"\"\"Create", "overwrite the file. Default is False. \"\"\" if header is None: if 'header'", "x in _other: indx.remove(x) tmp = [self[i] for i in indx] return ColDefs(tmp)", "can be one or more of the attributes listed in _commonNames. The default", "FITS standard.: %s' % self.key else: self.__dict__['_err_text'] = 'Card image is not FITS", "= \"%dR x %dC\" % (_nrows, _ncols) return \"%-10s %-11s %5d %-12s %s\"", "stream. If the provided data would cause the stream to overflow, an IOError", "j nfound += 1 if (nfound == 0): raise KeyError, 'extension %s not", "_name = _tmpName(oldName) _hduList = open(_name, mode=\"append\") if (verbose): print \"open a temp", "False): hdr['extend'] = True else: if hdr['naxis'] == 0: hdr.update('extend', True, after='naxis') else:", "3rd extension >>> update(file, dat, header=hdr, ext=5) # update the 5th extension \"\"\"", "as the data will be scaled and is therefore not very usable after", "header, the header will be modified to an image extension header and appended", "Header will be provided. 
\"\"\" _ImageBaseHDU.__init__(self, data=data, header=header) self.name = 'PRIMARY' # insert", "so the rest of the header can be used to reconstruct another kind", "== len(value): for i in range(len(indx)): self.field(indx[i])[:] = value[i] else: raise ValueError, \"parameter", "= _pcount # update TFORM for variable length columns for i in range(hdu.data._nfields):", "the data in the HDU, default=None. header: the header to be used (as", "None): if isinstance(self._coldefs._recformats[indx], _FormatX): _wrapx(self._convert[indx], self._parent.field(indx), self._coldefs._recformats[indx]._nx) continue (_str, _bool, _number, _scale, _zero,", "else: return found def readall(self): \"\"\"Read data of all HDU's into memory.\"\"\" for", "ext=2) # the second extension By name, i.e., EXTNAME value (if unique): >>>", "isinstance(val, (str, int, long, float, complex, bool, Undefined)): if isinstance(val, str): self._checkText(val) self.__dict__['_valueModified']", "to be used (as a template), default=None. If header=None, a minimal Header will", "_readheader(self, cardList, keyList, blocks): \"\"\"Read blocks of header, and put each card into", "': break # combine contiguous CONTINUE cards with its parent card if nc", "(int, long)): self.field(parName)[:] = value else: indx = self._unique[parName] if len(indx) == 1:", ": string The name of the file to which the header and data", "threadName = threading.currentThread() singleThread = (threading.activeCount() == 1) and (threadName.getName() == 'MainThread') if", "== 'XTENSION': xtension = cards[0].value.rstrip() if xtension == 'TABLE': self._hdutype = TableHDU elif", "gcount = int(mo.group(1)) else: gcount = 1 mo = re_pcount.search(block) if mo is", "else: # flat the shape temporarily to save memory dims = self.data.getshape() self.data.setshape(self.data.nelements())", "self).__getslice__(start,end) result = CardList(_cards, self._keylist[start:end]) return result def __setitem__(self, key, value): \"\"\"Set a", "array /= bscale self.array = array def __repr__(self): text = '' for cname", "j in range(len(self)): _name = self[j].name if isinstance(_name, str): _name = _name.strip().upper() if", "xoffset = offset return list class Header: \"\"\"FITS header class.\"\"\" def __init__(self, cards=[]):", "be of the syntax of \"> n\", # where n is an int", "the HDUList to a new file. name: output FITS file name to be", "valid value/comment string. It returns a match object # for a valid value/comment", "if strlen == 0: return input else: return input + ' ' *", "option, _err) tfields = self.header['TFIELDS'] for i in range(tfields): self.req_cards('TFORM'+`i+1`, None, None, None,", "data in place: Name =\", hdu.name, _extver # reset the modification attributes after", "1 except: pass _pos = '>= '+`_after` self.req_cards('GCOUNT', _pos, _isInt, 1, option, _err)", "format of an ASCII column has no width, add one if tbtype ==", "option) def _convert_format(input_format, reverse=0): \"\"\"Convert FITS format spec to record format spec. Do", "check key and comment for 'parse' result = Card._value_NFSC_RE.match(self._getValueCommentString()) # if not parsable", "not (self.__dict__.has_key('value') or self.__dict__.has_key('_cardimage')): valStr = '' # string value should occupies at", "not in self.__dict__: self.__dict__['_valuestring'] = valu.group('valu') if '_valueModified' not in self.__dict__: self.__dict__['_valueModified'] =", "from input, undefined cells will still be filled with zeros/blanks. 
tbtype: table type", "and the following disclaimer in the documentation and/or other materials provided with the", "self._coldefs.formats[indx][0] == 'A' _bool = 0 # there is no boolean in ASCII", "# input arrays can be just list or tuple, not required to be", "object. name: Name of the FITS file to be opened. mode: Open mode,", "numbers with leading 0s. numr = Card._number_NFSC_RE.match(valu.group('numr')) _digt = numr.group('digt').translate(_fix_table2, ' ') if", "HDU is resized for hdu in self: # Header: # Add 1 to", "_imagStr _valStr = '(' + _realStr + ', ' + _imagStr + ')'", "self._arrays[i] = input[i].array \"\"\" def __getitem__(self, key): x = self.data[key] if isinstance(key, (int,", "data part.\"\"\" size = 0 naxis = self.header.get('NAXIS', 0) # for random group", "i in range(len(input)): if dtype == 'a': data_output[i] = chararray.array(input[i], itemsize=1) else: data_output[i]", "sure the content is written self.__file.flush() # return both the location and the", "== 'name' and value: if not isinstance(value, str): raise TypeError, 'bad value type'", "* gcount * (pcount + size) / 8 return size def close(self): \"\"\"", "= Card(key, value) if before != None or after != None: self.ascard._pos_insert(new_card, before=before,", "FITS file name to be written to. output_verify: output verification option, default='exception'. clobber:", "= FITS_rec(rec.array(None, formats=_formats, shape=gcount, names= self._coldefs.names)) self.__setstate__(tmp.__getstate__()) for i in range(npars): (_scale, _zero)", "not already exist. Use the directory of the input file and the base", "is not an HDU.\" % item else: if not isinstance(hdu, _AllHDU): raise ValueError,", "attr == 'columns': _cols = [] _pnames = [] _pcount = self.header['PCOUNT'] _format", "the index of an HDU from the HDUList. The key can be an", "the attributes.\"\"\" cname = name[:-1] if cname in _commonNames: attr = [''] *", "dtype == 'a': output_format = option+_rec2fits[dtype] elif isinstance(dtype, _FormatX): print 'X format' elif", "everything else) result = None return result else: # verify the equal sign", "min) / (2.**(8*_type.bytes) - 2) # Do the scaling if _zero != 0:", "return _SteppedSlice((indx.stop-indx.start)/indx.step, indx.start) else: raise IndexError, 'Illegal index %s' % indx def _normalize_slice(input,", "a record array (FITS_rec) which will contain both group parameter info and the", "longstring.rstrip() def _breakup_strings(self): \"\"\"Break up long string value/comment into CONTINUE cards. This is", "is specified if test: val = self.header[keywd] if not eval(test): err_text = \"'%s'", "filled with zeros/blanks. tbtype: table type to be created (BinTableHDU or TableHDU) \"\"\"", "re_bitpix.search(block) if mo is not None: bitpix = int(mo.group(1)) else: raise ValueError(\"BITPIX not", "shape): \"\"\"Overload this to make mask array indexing work properly.\"\"\" hdu = new_table(self._coldefs,", "between words. So it may not look pretty. \"\"\" val_len = 67 comm_len", "there are blank cards in front of END. \"\"\" if isinstance (card, Card):", "hdu._new = 1 self._resize = 1 else: raise \"HDUList can only append an", "except: print 'Format \"%s\" is not recognized.' 
% tform if repeat == '':", "_isInt, 1, option, _err) self.req_cards('PCOUNT', _pos, _isInt, 0, option, _err) self.req_cards('GROUPS', _pos, 'val", "(threadName.getName() == 'MainThread') if singleThread: # Define new signal interput handler keyboardInterruptSent =", "thread and determine if this is a single treaded application threadName = threading.currentThread()", "axes = list(self.data.data.getshape())[1:] axes.reverse() axes = [0] + axes elif isinstance(self.data, num.NumArray): self.header['BITPIX']", "part.\"\"\" size = 0 naxis = self.header.get('NAXIS', 0) # for random group image,", "_dummy.strip(): if self.unit: result += _INDENT*tab+\"%s %s:\\n\" % (self.unit, element) result += _dummy", "the stream. Notes ----- Only the amount of data specified in the header", "GroupsHDU): _shape = list(self.data.data.getshape())[1:] _format = `self.data._parent.field(0).type()` else: _shape = list(self.data.getshape()) _format =", "att in list: if att not in _commonNames: print \"'%s' is not an", "has ts own private attribute __file. \"\"\" if self.__file != None: if self.__file.memmap", "both specified. They can be either a keyword name or index. \"\"\" if", "_err = _ValidHDU._verify(self, option=option) # Verify location and value of mandatory keywords. naxis", "_count == 1: indx = _list.index(_key) elif _count == 0: raise NameError, \"Key", "'' # string value should occupies at least 8 columns, unless it is", "attribute.\"\"\" if attr == 'section': return Section(self) elif attr == 'data': self.__dict__[attr] =", "the HDUList input is not an HDU.\" % hdus.index(hdu) list.__init__(self, hdus) def __iter__(self):", "col_name, new_name): \"\"\"Change a Column's name.\"\"\" if new_name != col_name and new_name in", "_AllHDU): super(HDUList, self).append(hdu) hdu._new = 1 self._resize = 1 else: raise \"HDUList can", "messages? default = 0. This simply calls the close method of the _File", "if nrows == 0: for arr in tmp._arrays: if arr is not None:", "Groups %d Parameters' % (self.header['GCOUNT'], self.header['PCOUNT']) else: _gcount = '' return \"%-10s %-11s", "or more mandatory Cards are corrupted (unparsable), such as the 'BITPIX', 'NAXIS', or", "file is corrupted.' % (len(hduList)+1) break # initialize/reset attributes to be used in", "_repeat = `repeat` output_format = _repeat+_rec2fits[dtype+option] else: raise ValueError, \"Illegal format %s\" %", "option = 'unfixable' if option in ['warn', 'exception']: #raise VerifyError, _text #elif option", "Header: \"\"\"FITS header class.\"\"\" def __init__(self, cards=[]): \"\"\"Construct a Header from a CardList.", "text += cname + ' = ' + `value` + '\\n' return text[:-1]", "else: self.__setstate__(input.__getstate__()) def __getattr__(self, attr): if attr == 'data': self.__dict__[attr] = self.field('data') elif", "corresponding to TUNIT keyword null: null value, corresponding to TNULL keyword bscale: bscale", "of the keyword, or index of the Card before which the new card", "if there are blank cards in front of END. \"\"\" if isinstance (card,", "This class is used when one or more mandatory Cards are corrupted (unparsable),", "'%20s' % self._valuestring elif isinstance(self.value, complex): if self._valueModified: _tmp = '(' + _floatFormat(self.value.real)", "is found, the value to be returned. \"\"\" try: return self[key] except: return", "front of END. 
bottom: If =0 (default) the card will be appended after", "keylist # find out how many blank cards are *directly* before the END", "consider the case that the starting column of # a field may not", "HDU object).\"\"\" tmp = hdu.columns # get the right shape for the data", "update the 5th extension \"\"\" # parse the arguments header = None if", "return size def _verify(self, option='warn'): _err = PrimaryHDU._verify(self, option=option) # Verify locations and", "get_comment(self): \"\"\"Get all comments as a list of string texts.\"\"\" output = []", "by the keyword name.\"\"\" _key = self.index_of(key) return super(CardList, self).__getitem__(_key) def __getslice__(self, start,", "1: self.update_extend() def index_of(self, key): \"\"\"Get the index of an HDU from the", "!= None: text += cname + ' = ' + `value` + '\\n'", "del self['TBCOL'+`i+1`] except: pass class CardList(list): \"\"\"FITS header card list class.\"\"\" def __init__(self,", "in # Linux, but is at the beginning in Solaris. self.__file.seek(0, 2) self._size", "firstkey = 'SIMPLE' firstval = True self.req_cards(firstkey, '== 0', '', firstval, option, _err)", "attrib.strip().lower() in ['all', '']: list = _commonNames else: list = attrib.split(',') for i", "return a match if the comment separator is found, though the # comment", "= [] for _card in self.ascardlist(): if _card.key == 'HISTORY': output.append(_card.value) return output", "header: image header data: image data _file: file associated with array (None) _datLoc:", "group of the random group data.\"\"\" def __init__(self, input, row=0): rec.Record.__init__(self, input, row)", "the first (0th) element must be a primary HDU if len(self) > 0", "HDUList([PrimaryHDU()]) hdulist.writeto(name, 'exception') else: if self.header.has_key('SIMPLE') and os.path.getsize(name) > 0: # # This", "if isinstance(input, ColDefs): self.data = [col.copy() for col in input.data] # if the", "_ncards(self): return len(self._cardimage) / Card.length def _verify(self, option='warn'): \"\"\"Card class verification method.\"\"\" _err", "\"\"\"Append a new HDU to the HDUList.\"\"\" if isinstance(hdu, _AllHDU): super(HDUList, self).append(hdu) hdu._new", "else: raise TypeError, \"input to ColDefs must be a table HDU or a", "= valu.group('valu') if '_valueModified' not in self.__dict__: self.__dict__['_valueModified'] = 0 elif name ==", "# ASCII table, convert strings to numbers if self._coldefs._tbtype == 'TableHDU': _dict =", "self._coldefs.starts[indx] if _trail < 0: raise ValueError, \"column `%s` ending point overlaps to", "a new card will be created and it will be placed before or", "_limit = Card.length else: _limit = 10 try: eqLoc = self._cardimage[:_limit].index(\"=\") except: eqLoc", "raise NameError, \"Key '%s' does not exist.\" % key else: # multiple match", "100-2.0. 
License: http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE For detailed examples of usage, see the I{PyFITS User's Manual}", "= output + '%-80s' % commstr return output def _words_group(self, input, strlen): \"\"\"Split", "= 67 comm_len = 64 output = '' # do the value string", "'sci') # update the 'sci' extension >>> update(file, dat, 3) # update the", "re.sub(\"''\", \"'\", _card.value).rstrip() # drop the ending \"&\" if _val[-1] == '&': _val", "return self._parent.field(indx) # ASCII table, convert strings to numbers if self._coldefs._tbtype == 'TableHDU':", "if pos is not None: test_pos = '_index '+ pos if not eval(test_pos):", "value): \"\"\"Set the group parameter values.\"\"\" if isinstance(parName, (int, long)): self.field(parName)[:] = value", "if before != None: loc = self.index_of(before) self.insert(loc, card, useblanks=useblanks) elif after !=", "self._hdutype = _ValidHDU elif cards[0].key == 'XTENSION': xtension = cards[0].value.rstrip() if xtension ==", "= class_name[class_name.rfind('.')+1:] # if data is touched, use data info. if 'data' in", "or oldkey in Card._commentaryKeys: if not (newkey in Card._commentaryKeys and oldkey in Card._commentaryKeys):", "= [hdus] elif not isinstance(hdus, (HDUList, list)): raise \"Invalid input for HDUList.\" for", "if not. If only data is supplied, a minimal header is created @type", "[None]*self._nfields) def add_col(self, column): \"\"\"Append one Column to the column definition.\"\"\" return self+column", "and simple: groups = 1 else: groups = 0 mo = re_naxis.search(block) if", "1: result = self.field(indx[0]) # if more than one group parameter have the", "not in ['', None, 1]: array = array.copy() if bzero not in ['',", "ascii2rec[dtype] if width == '': width = None else: width = eval(width) except:", "any of the HDU is resized for hdu in self: # Header: #", "header associated with 'data', if None, a header of the appropriate type is", "= _fits2rec[option[0]] elif dtype == 'F': output_format = 'f8' else: raise ValueError, \"Illegal", "+= 1 else: # if the keyword EXTVER does not exist, default it", "default=silentfix. \"\"\" # Only if the card image already exist (to avoid infinite", "these blank cards, so the total space will not increase (default). When useblanks", "Transport System (FITS) files. This file format was endorsed by the International Astronomical", "Card else: # does not support CONTINUE for HIERARCH if len(keyStr + eqStr", "ValueError, \"parameter value must be a sequence with %d arrays/numbers.\" % len(indx) def", "_list: del self.header.ascard[i] del _list # populate the new table definition keywords for", "A7 in ASCII table is the same as 7A in # binary table,", "they must be separated by comma(s). \"\"\" if attrib.strip().lower() in ['all', '']: list", "new file. name: output FITS file name to be written to. output_verify: output", "the \"test\" argument. \"\"\" _err = errlist fix = '' cards = self.header.ascard", "a # string should not end with two single quotes, # whereas it", "_bytes + _padLength(_bytes) if _bytes != (hdu._datLoc-hdu._hdrLoc): self._resize = 1 if verbose: print", "the distribution. 3. 
The name of AURA and its representatives may not be", "# reverse of the numarray shape if isinstance(self, GroupsHDU): _shape = list(self.data.data.getshape())[1:] _format", "option='warn'): \"\"\"_TableBaseHDU verify method.\"\"\" _err = _ExtensionHDU._verify(self, option=option) self.req_cards('NAXIS', None, 'val == 2',", "will try to match the exact name first, so in the example in", "Card.length: return input elif _len > Card.length: strlen = _len % Card.length if", "= self._keylist if backward: _keylist = self._keylist[:] # make a copy _keylist.reverse() try:", "Verify locations and values of mandatory keywords. self.req_cards('NAXIS', '== 2', _isInt+\" and val", "_scale = bscale not in ['', None, 1] _zero = bzero not in", "isinstance(indx, _SteppedSlice): raise IndexError, 'Subsection data must be contiguous.' for j in range(i+1,naxis):", "1) else: _zero = (max + min) / 2. # throw away -2^N", "name # When an attribute (value or comment) is changed, will reconstructe #", "num.lshift(output[...,i], 1, output[...,i]) num.add(output[...,i], input[...,j], output[...,i]) # shift the unused bits num.lshift(output[...,i], unused,", "len(value): for i in range(len(indx)): self.field(indx[i])[:] = value[i] else: raise ValueError, \"parameter value", "tbtype=class_name) elif attr == '_theap': self.__dict__[attr] = self.header.get('THEAP', self.header['NAXIS1']*self.header['NAXIS2']) elif attr == '_pcount':", "self.columns def update(self): \"\"\" Update header keywords to reflect recent changes of columns.\"\"\"", "self.header.update('PCOUNT', 0, 'number of parameters', after='NAXIS'+dim) if not self.header.has_key('GCOUNT'): self.header.update('GCOUNT', 1, 'number of", "write more data after the stream has been filled will raise an IOError", "PrimaryHDU)): err_text = \"HDUList's 0th element is not a primary HDU.\" fix_text =", "ValueError, 'Intended keyword %s already exists in header.' % newkey _index = self.ascard.index_of(oldkey)", "# equivalent Note EXTNAMEs are not case sensitive By combination of EXTNAME and", "bitpix = _ImageBaseHDU.ImgCode[input.type()] fits_fmt = GroupsHDU._dict[bitpix] # -32 -> 'E' _fmt = _fits2rec[fits_fmt]", "= self.index_of(key) return super(CardList, self).__getitem__(_key) def __getslice__(self, start, end): _cards = super(CardList, self).__getslice__(start,end)", "column\" % indx+1 if 'A' in _format: _pc = '%-' else: _pc =", "and mandated by NASA as the standard format for storing high energy astrophysics", "\"\"\"data: data of the table header: header to be used for the HDU", "'ab+' mode, the pointer is at the end after the open in #", "i.e., EXTNAME value (if unique): >>> getdata('in.fits', 'sci') >>> getdata('in.fits', extname='sci') # equivalent", "in fixed or # scientific notation. One for FSC and one for non-FSC", "the keyword value or comment from the card image.\"\"\" # for commentary cards,", "= '' # equal sign string eqStr = '= ' if keyStr.strip() in", "a string. force: if new key name already exist, force to have duplicate", "convert, if isinstance(tmp._arrays[i], chararray.CharArray): hdu.data._parent.field(i)[:n] = tmp._arrays[i][:n] else: hdu.data._convert[i] = num.zeros(nrows, type=tmp._arrays[i].type()) if", "or more of the attributes listed in _commonNames. The default is \"all\" which", "hdu class _ExtensionHDU(_ValidHDU): \"\"\"An extension HDU class. This class is the base class", "'TableHDU': dummy = self._convert[indx] else: continue # ASCII table, convert numbers to strings", "(e.g. 
'3J'->'3i4') recfmt = _convert_format(format) except: try: # legit RecArray format? recfmt =", "= HDUList(file=ffo) # read all HDU's while 1: try: hduList.append(ffo._readHDU()) except EOFError: break", "dictionary to enable file cacheing class _File: \"\"\"A file I/O class\"\"\" def __init__(self,", "else: raise ValueError, valu size = eval(width)+1 strfmt = strfmt + 's'+str(size) +", "bscale/bzero are numbers if not _scale: bscale = 1 if not _zero: bzero", "= re.compile(r'SIMPLE =\\s*') re_bitpix = re.compile(r'BITPIX =\\s*(-?\\d+)') re_naxis = re.compile(r'NAXIS =\\s*(\\d+)') re_naxisn =", "header=None): self._file, self._datLoc = None, None if header is not None: if not", "\"\"\"Overload this to make mask array indexing work properly.\"\"\" hdu = new_table(self._coldefs, nrows=shape[0])", "block. block = self.__file.read(_blockLen) if block == '': raise EOFError hdu = _TempHDU()", "= chararray.array(input[i], itemsize=1) else: data_output[i] = num.array(input[i], type=dtype) desp_output[i,0] = len(data_output[i]) desp_output[i,1] =", "__init__(self, hdu=None, field=None): self.hdu = hdu self.field = field # translation table for", "an HDU.\" % hdus.index(hdu) list.__init__(self, hdus) def __iter__(self): return [self[i] for i in", "in ['all', '']: list = _commonNames else: list = attrib.split(',') for i in", "range(_max): _where = _keyList[_start:].index('CONTINUE') + _start for nc in range(1, _max+1): if _where+nc", "i in range(_max): _where = _keyList[_start:].index('CONTINUE') + _start for nc in range(1, _max+1):", "pow2[j-i*8], output[...,j]) def _wrapx(input, output, nx): \"\"\"Wrap the X format column Boolean array", "npt = 1 for n in dims: npt *= n # Now, get", "EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF", "_fits2rec.keys(): # FITS format if dtype == 'A': output_format = _fits2rec[dtype]+`repeat` # to", "self._parent.field(indx)[i].strip() != nullval: dummy[i] = float(self._parent.field(indx)[i].replace('D', 'E')) else: dummy = self._parent.field(indx) # further", "def change_attrib(self, col_name, attrib, new_value): \"\"\"Change an attribute (in the commonName list) of", "# flat the shape temporarily to save memory dims = self.data.getshape() self.data.setshape(self.data.nelements()) min", "raise ValueError, 'keyword name %s is not a string' % val self.__dict__['key'] =", "class_name[class_name.rfind('.')+1:] # if data is touched, use data info. 
if 'data' in dir(self):", "both ASCII and binary tables if _number or _str: if _number and (_scale", "eval(tbtype)(header=header) if isinstance(input, ColDefs): if input._tbtype == tbtype: tmp = hdu.columns = input", "'_cardimage': self.ascardimage() elif name == 'key': self._extractKey() elif name in ['value', 'comment']: self._extractValueComment(name)", "mode in ['update', 'append']: raise \"Writing to zipped fits files is not supported\"", "memmap=oldMemmap) self.__file = ffo if (verbose): print \"reopen the newly renamed file\", oldName", "_KeyType: def __init__(self, npts, offset): self.npts = npts self.offset = offset class _WholeLine(_KeyType):", "attr[indx] del self._arrays[indx] self._nfields -= 1 def change_attrib(self, col_name, attrib, new_value): \"\"\"Change an", "'PRIMARY': hdu._extver = 1 hdu._file = self.__file hdu._hdrLoc = _hdrLoc # beginning of", "_err # 0.8.8 def _iswholeline(indx, naxis): if isinstance(indx, (int, long)): if indx >=", "ValueError, \"number `%s` does not fit into the output's itemsize of %s\" %", "in range(0, len(blocks), Card.length): _card = Card('').fromstring(blocks[i:i+Card.length]) _key = _card.key if _key ==", "GCOUNT dim = `self.header['NAXIS']` if dim == '0': dim = '' # set", "<= 8: val = val.upper() if val == 'END': raise ValueError, \"keyword 'END'", "'' self.header.update('EXTEND', True, after='NAXIS'+dim) class ImageHDU(_ExtensionHDU, _ImageBaseHDU): \"\"\"FITS image extension HDU class.\"\"\" def", "def _summary(self): \"\"\"Summarize the HDU: name, dimensions, and formats.\"\"\" class_name = str(self.__class__) type", "standard (unparsable value string).' raise ValueError, self._err_text + '\\n%s' % self._cardimage # verify", "')' def flush(self, output_verify='exception', verbose=0): \"\"\"Force a write of the HDUList back to", "= \"Fixed by moving it to the right place (card %d).\" % insert_pos", "keyword name value: keyword value (to be used for updating) comment: keyword comment", "ColDefs must be a table HDU or a list of Columns\" def __getattr__(self,", "attr[i] = val elif name == '_arrays': attr = [col.array for col in", "{} for key in _fits2rec.keys(): _rec2fits[_fits2rec[key]]=key class _FormatX(str): \"\"\"For X format in binary", "\"\"\" _ImageBaseHDU.__init__(self, data=data, header=header) self.name = 'PRIMARY' # insert the keywords EXTEND if", "\"\"\" ascii2rec = {'A':'a', 'I':'i4', 'F':'f4', 'E':'f4', 'D':'f8'} _re = re.compile(r'(?P<dtype>[AIFED])(?P<width>[0-9]*)') # Parse", "if _zero: self.header.update('BZERO', self.data._coldefs.bzeros[npars]) for i in range(npars): self.header.update('PTYPE'+`i+1`, self.data.parnames[i]) (_scale, _zero) =", "them may not exist for name in ['key', 'value', 'comment', '_valueModified']: if self.__dict__.has_key(name):", "to be printable ASCII text.\"\"\" if Card._comment_FSC_RE.match(val) is None: self.__dict__['_err_text'] = 'Unprintable string", "= self.index_of(before) self.insert(loc, card, useblanks=useblanks) elif after != None: loc = self.index_of(after) self.insert(loc+1,", "else: # multiple match raise NameError, \"Ambiguous key name '%s'.\" % key else:", "update()] after: [same as in update()] \"\"\" self._add_commentary('comment', value, before=before, after=after) def add_blank(self,", "== 'a': value = chararray.array(value, itemsize=1) else: value = num.array(value, type=self._dtype) objects.ObjectArray.__setitem__(self, key,", "!= 0: raise IOError, 'Header size is not multiple of %d: %d' %", "strlen = strlen + size else: strfmt = '>' + strfmt[:-1] return strfmt", "% 
tmp.spans[i] _itemsize += tmp.spans[i] hdu.data = FITS_rec(rec.array(' '*_itemsize*nrows, formats=_formats[:-1], names=tmp.names, shape=nrows)) else:", "cards # if the key list is not supplied (as in reading in", "data type as expressed in FITS BITPIX value (8, 16, 32, 64, -32,", "val = 'P' + _convert_format(val._dtype, reverse=1) + '(%d)' % VLdata._max else: val =", "for i in range(npars): self.header.update('PTYPE'+`i+1`, self.data.parnames[i]) (_scale, _zero) = self.data._get_scale_factors(i)[3:5] if _scale: self.header.update('PSCAL'+`i+1`,", "Instead, just truncate the comment if isinstance(self.value, str) and len(valStr) > (Card.length-10): self.__class__", "the same keyword name if isinstance(key, str): while 1: try: del self.ascard[key] self._mod", "__setitem__(self, key, value): \"\"\"Set a Card by indexing or by the keyword name.\"\"\"", "def __getitem__(self, key): dims = [] if not isinstance(key, tuple): key = (key,)", "PrimaryHDU())\" _text = self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix) _err.append(_text) # each element calls their", "first, instead of appending after these blank cards, so the total space will", "new_table(self.columns, header=self.header, tbtype=self.columns._tbtype) def _verify(self, option='warn'): \"\"\"_TableBaseHDU verify method.\"\"\" _err = _ExtensionHDU._verify(self, option=option)", "hdu = hdulist[_ext] hdr = hdu.header hdulist.close() return hdr def getdata(filename, *ext, **extkeys):", "print \"One or more data area is resized.\" break # if the HDUList", "and EXTVER, as separate arguments or as a tuple: >>> getdata('in.fits', 'sci', 2)", "of HDU's as input if isinstance(hdus, _ValidHDU): hdus = [hdus] elif not isinstance(hdus,", "return output def get_comment(self): \"\"\"Get all comments as a list of string texts.\"\"\"", "1: size = 1 for j in range(1, naxis): size = size *", "+ ', ' + _floatFormat(self.value.imag) + ')' valStr = '%20s' % _tmp else:", "the specified extension with the input data/header. @type filename: string @param filename: name", "the stream. If the provided data would cause the stream to overflow, an", "since bool is also int elif isinstance(self.value , bool): valStr = '%20s' %", "format=None, unit=None, null=None, \\ bscale=None, bzero=None, disp=None, start=None, \\ dim=None, array=None): \"\"\"Construct a", "self._Formats = self.formats self._arrays[i] = input[i].array \"\"\" def __getitem__(self, key): x = self.data[key]", "input._coldefs else: # input is a list of Columns tmp = hdu.columns =", "the header area hdu._datLoc = self.__file.tell() # beginning of the data area #", "# set extension name if not name and self.header.has_key('EXTNAME'): name = self.header['EXTNAME'] self.name", "above _rec2fits = {} for key in _fits2rec.keys(): _rec2fits[_fits2rec[key]]=key class _FormatX(str): \"\"\"For X", "of the arguments are for extension specification. See L{getdata} for explanations/examples. @return: keyword", "raise \"Invalid input for HDUList.\" for hdu in hdus: if not isinstance(hdu, _AllHDU):", "'update']: self.flush(output_verify=output_verify, verbose=verbose) self.__file.close() # close the memmap object, it is designed to", "to pass it to the header object hduList._resize = 0 return hduList fitsopen", "size = 0 else: size = len(tmp._arrays[i]) n = min(size, nrows) if fill:", "# # This will not be the first extension in the file so", "self.req_cards('NAXIS', '== 2', _isInt+\" and val >= 0 and val <= 999\", 0,", "table's data. 
PyFITS is a Python module, built on numarray, for reading, writing, and manipulating FITS (Flexible Image Transport System) files. The FITS format itself is defined in the NASA/Science Office of Standards and Technology publication NOST 100-2.0, and detailed usage examples are collected in the PyFITS User's Manual, available from http://www.stsci.edu/resources/software_hardware/pyfits/Users_Manual1.pdf.

The module's classes fall into three groups:

Header-related classes: Card, CardList, _Card_with_continue, Header, _Hierarch.
HDU classes: _AllHDU, _ValidHDU, _CorruptedHDU, _ExtensionHDU, _ImageBaseHDU, PrimaryHDU, ImageHDU, GroupsHDU, _TableBaseHDU, TableHDU, BinTableHDU, StreamingHDU.
Table-related classes: ColDefs, Column, FITS_rec, _FormatX, _FormatP, _VLF.

A FITS file is represented by an HDUList, the list-like object returned by the open() factory function (also exported as fitsopen()). Only the headers are parsed when a file is opened; each HDU's data attribute is a Delayed placeholder that is read from disk, and scaled if necessary, the first time it is accessed.
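A minimal sketch of that basic access pattern, assuming the numarray-era PyFITS API summarized above; the input file name in.fits is a placeholder, not a file referenced by the original source:

    import pyfits

    # 'in.fits' is a placeholder for an existing FITS file.
    hdulist = pyfits.open('in.fits')   # default mode is 'copyonwrite'
    hdulist.info()                     # one summary line per HDU

    prihdr = hdulist[0].header         # Header of the primary HDU
    print prihdr['NAXIS']              # keyword lookup by name

    data = hdulist[0].data             # data are read (and scaled) on demand
    hdulist.close()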
Headers are built from 80-character card images. A Card has key, value, and comment attributes; values may be strings, integers, longs, floats, complex numbers, booleans, or the special Undefined value. Commentary keywords (HISTORY, COMMENT, and blank) are handled specially, HIERARCH keywords are supported through the _Hierarch subclass, and string values longer than one card are carried across CONTINUE cards by _Card_with_continue. A card can be constructed from key, value, and comment or parsed from a raw image with fromstring(), and ascardimage() regenerates the fixed-length image, fixing non-standard values according to the chosen verification option.

A CardList is a list of Cards that also supports keyword-based indexing (keywords are compared in upper case), and a Header wraps a CardList to provide dictionary-like access: item access by keyword, update(key, value, comment=None, before=..., after=...), add_history(), add_comment(), add_blank(), rename_key(), get_history(), get_comment(), has_key(), and ascardlist() to reach the underlying cards.

Verification is shared through the _Verify mixin: verify() collects messages in a nested _ErrList and, depending on the option ('exception', 'warn', 'fix', 'silentfix', 'ignore'), raises VerifyError, reports the problem, or applies the suggested fix.
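A short, hedged example of editing a header in place with this API; the file name, keyword values, and comment strings are placeholders:

    import pyfits

    hdulist = pyfits.open('in.fits', mode='update')
    hdr = hdulist[0].header

    # update() modifies an existing card or appends a new one;
    # the optional third argument becomes the card comment.
    hdr.update('OBJECT', 'NGC 1234', 'observed target')
    hdr.add_history('flat-field correction applied')
    hdr.add_comment('flux values are in electrons per second')

    # Cards can also be built and inspected directly.
    card = pyfits.Card('EXPTIME', 300.0, 'exposure time (s)')
    print card.key, card.value, card.comment

    hdulist.flush()    # write the modified header back to the file
    hdulist.close()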
At the file level, the _File class wraps the underlying Python file object. Files can be opened in 'readonly', 'copyonwrite', 'update', or 'append' mode, optionally memory-mapped; gzip and single-member zip inputs are copied to a temporary file, and names that are URLs rather than existing local paths are fetched with urllib. An HDUList supports indexing by number or extension name, append() and insert() of new HDUs, readall(), info(), update_extend() to keep the EXTEND keyword consistent, flush() (rewriting in place, or through a temporary file when an HDU has been resized), writeto(name, output_verify=..., clobber=...), and close().

Image HDUs (PrimaryHDU, ImageHDU, GroupsHDU) map BITPIX values to numarray type codes, apply BSCALE/BZERO scaling when data are read and scale them back when written, and provide a Section object for reading contiguous subsections without loading the full array. Table HDUs (TableHDU for ASCII tables, BinTableHDU for binary tables) describe their columns with Column objects, whose name, format, unit, null, bscale, bzero, disp, start, and dim attributes correspond to the TTYPE, TFORM, TUNIT, TNULL, TSCAL, TZERO, TDISP, TBCOL, and TDIM keywords; a ColDefs collects the columns, and the data are exposed as a FITS_rec record array whose field() method accepts a column name (case-insensitive when unambiguous) or index. Bit fields (_FormatX) and heap-stored variable-length fields (_FormatP, _VLF) are handled as special cases, and new_table() builds a new table HDU from a list of Columns, a ColDefs, or an existing table. A StreamingHDU writes its header immediately and then accepts data incrementally until the number of bytes implied by that header has been written.

Convenience functions operate directly on file names: open()/fitsopen(), getheader(), getdata() (optionally returning a (data, header) tuple), getval(), writeto(), update(), append(), and info(). Extensions may be specified by number, by EXTNAME, or by an (EXTNAME, EXTVER) pair, for example getdata('in.fits', 2) for the second extension or update(file, dat, hdr, ext=5) for the fifth.

PyFITS is Copyright (C) 2004 Association of Universities for Research in Astronomy (AURA) and is distributed under a BSD-style license: redistribution in source and binary forms is permitted under the stated conditions, AURA's name may not be used to endorse or promote derived products without specific prior written permission, and the software is provided without warranty of any kind. The module's docstring signs off with the quip: Do you mean: "Profits"? - Google Search, when asked for "PyFITS".
THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY", "= 'XTENSION' firstval = self._xtn else: firstkey = 'SIMPLE' firstval = True self.req_cards(firstkey,", "\"\"\"Append one Column to the column definition.\"\"\" return self+column def del_col(self, col_name): \"\"\"Delete", "`self.data._parent.field(0).type()` else: _shape = list(self.data.getshape()) _format = `self.data.type()` _shape.reverse() _shape = tuple(_shape) _format", "table from the input column definitions.\"\"\" \"\"\" input: a list of Columns or", "== 'END': raise ValueError, \"keyword 'END' not allowed\" self._checkKey(val) else: if val[:8].upper() ==", "'TNULL', 'TTYPE', 'TUNIT']: for i in range(_tfields): del self[name+`i+1`] if issubclass(self._hdutype, BinTableHDU): for", "bscale elif self._coldefs._tbtype == 'TableHDU': dummy = self._convert[indx] else: continue # ASCII table,", "!= 'CONTINUE ': break # combine contiguous CONTINUE cards with its parent card", "numarray.generic as ndarray import numarray.strings as chararray import numarray.records as rec import numarray.objects", "numarray first array = num.array(array) except: try: # then try to conver it", "input): \"\"\"Fix the card image for fixable non-standard compliance.\"\"\" _valStr = None #", "have CONTINUE cards after the first card.' if not isinstance(_card.value, str): raise ValueError,", "_bool: self._parent.field(indx)[:] = num.choose(self._convert[indx], (ord('F'),ord('T'))) class GroupData(FITS_rec): \"\"\"Random groups data object. Allows structured", "end so as not to confuse the indexing. _list = [] for i", "and binary forms, with or without modification, are permitted provided that the following", "first block of the HDU.\"\"\" re_simple = re.compile(r'SIMPLE =\\s*') re_bitpix = re.compile(r'BITPIX =\\s*(-?\\d+)')", "else: width = eval(width) except: raise ValueError, 'Illegal format `%s` for ASCII table.'", "dummy = num.zeros(len(self._parent), type=_type) dummy[:] = ASCIITNULL self._convert[indx] = dummy for i in", "def _setvalue(self, val): \"\"\"Set the value attribute.\"\"\" if isinstance(val, (str, int, long, float,", "raise KeyError, 'Attribute %s not defined.' % name self.__dict__[name] = attr return self.__dict__[name]", "_hdr else: return _data def getval(filename, key, *ext, **extkeys): \"\"\"Get a keyword's value", "2880 # the FITS block size _python_mode = {'readonly':'rb', 'copyonwrite':'rb', 'update':'rb+', 'append':'ab+'} #", "other.' elif (force == 0) and (newkey in self.ascard._keylist): raise ValueError, 'Intended keyword", "header blocks until END card is reached while 1: # find the END", "instead of issuing an error. The FITS standard # appears vague on this", "> Card.length: strlen = _len % Card.length if strlen == 0: return input", "use the largest column shape as the shape of the record if nrows", "') if numr.group('sign') == None: _val = eval(_digt) else: _val = eval(numr.group('sign')+_digt) elif", "(index, keyword name will not be allowed) to insert. The new card will", "+= '/' _name = dirName + os.path.basename(tempfile.mktemp()) if not os.path.exists(_name): return _name else:", "computed. \"\"\" _cardList = [] _keyList = [] blocks = self._raw if (len(blocks)", "Card.length): _card = Card('').fromstring(blocks[i:i+Card.length]) _key = _card.key if _key == 'END': break else:", "+ ')' return tmp # synchronize the sliced FITS_rec and its ._parent def", "class Column: \"\"\"Column class which contains the definition of one column, e.g. 
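Several fragments in this stretch describe building a table from column definitions: the Column class, the ColDefs container, and the new_table() factory that constructs a table HDU from a list of Columns. A minimal sketch under that assumption; the column names, formats, values, and the output file name are made up for illustration:

import pyfits

c1 = pyfits.Column(name='target', format='10A', array=['NGC 1', 'NGC 2'])
c2 = pyfits.Column(name='flux', format='E', unit='count', array=[1.5, 2.7])

cols = pyfits.ColDefs([c1, c2])      # collect the column definitions
tbhdu = pyfits.new_table(cols)       # a binary table HDU (tbtype defaults to BinTableHDU)

# a table must live in an extension, so prepend a minimal primary HDU before writing
pyfits.HDUList([pyfits.PrimaryHDU(), tbhdu]).writeto('table.fits')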
ttype,", "card in self.ascard: pairs.append((card.key, card.value)) return pairs def has_key(self, key): \"\"\"Check for existence", "_booltype else: _str = self._coldefs.formats[indx][0] == 'A' _bool = 0 # there is", "hdu in self: if (verbose): try: _extver = `hdu.header['extver']` except: _extver = ''", "hdu): self.hdu = hdu def __getitem__(self, key): dims = [] if not isinstance(key,", "numr = Card._number_NFSC_RE.match(valu.group('numr')) _digt = numr.group('digt').translate(_fix_table2, ' ') if numr.group('sign') == None: _val", "that when true indicates that all of the required data has been written", "+ _numr_NFSC + ') *, *(?P<imag>' + _numr_NFSC + ') *\\))' r')? *)'", "not present, or it is a commentary card. \"\"\" # no equal sign", "hdu.data._scale_back() if isinstance(hdu, _TableBaseHDU) and hdu.data is not None: # check TFIELDS and", "will accept # strings with an odd number of single quotes, # instead", "a field named \"XYZ\" and no other field name is a case variant", "array = chararray.array(array, itemsize=eval(recfmt[1:])) # then try variable length array except: if isinstance(recfmt,", "\"\"\" Write the given data to the stream. :Parameters: data : NumArray Data", "no comment if self.key in Card._commentaryKeys: if not isinstance(self.value, str): raise ValueError, 'Value", "_Hierarch # for card image longer than 80, assume it contains CONTINUE card(s).", "0: raise IOError, 'Header size is not multiple of %d: %d' % (_blockLen,", "one line. # Instead, just truncate the comment if isinstance(self.value, str) and len(valStr)", "if isinstance(_type, num.FloatingType): _scale = 1 _zero = 0 else: # flat the", "else: data = None self.__dict__[attr] = data elif attr == 'columns': class_name =", "1 break def append(self, card, useblanks=1, bottom=0): \"\"\"Append a Card to the CardList.", "HDUList object. hdus: Input, can be a list of HDU's or a single", "# Now, get the data (does not include bscale/bzero for now XXX) _bitpix", "when a file specified by a URL cannot be accessed\"\"\" def http_error_default(self, url,", "array of data type 2Int32 dtype: data type of the variable array \"\"\"", "definition keywords dict = [{} for i in range(_nfields)] # definition dictionaries for", "hdu.data.field(i) coldata2 = hdu.data._parent.field(i) if not isinstance(coldata, chararray.CharArray): # only swap unswapped #", "_key[8:].strip() _keylist = self._keylist if backward: _keylist = self._keylist[:] # make a copy", "range(len(tmp)): if tmp._arrays[i] is None: size = 0 else: size = len(tmp._arrays[i]) n", "_str: if _number and (_scale or _zero): dummy = self._convert[indx].copy() if _zero: dummy", "if header is None: dim = `self.header['NAXIS']` if dim == '0': dim =", "stringLen to the next FITS block.\"\"\" return (_blockLen - stringLen%_blockLen) % _blockLen def", "value = getattr(self, cname) if value != None: text += cname + '", "key): x = self.data[key] if isinstance(key, (int, long)): return x else: return ColDefs(x)", "_get_tbdata(hdu): \"\"\" Get the table data from input (an HDU object).\"\"\" tmp =", "__init__(self, data=None, header=None, name=None): PrimaryHDU.__init__(self, data=data, header=header) self.header._hdutype = GroupsHDU self.name = name", "an index, always returns 0. 
\"\"\" try: key = key.strip().upper() if key[:8] ==", "col_name, attrib, new_value): \"\"\"Change an attribute (in the commonName list) of a Column.\"\"\"", "size) / 8 return size def copy(self): \"\"\"Make a copy of the HDU,", "j in range(_ncols): _format += self.header['TFORM'+`j+1`] + ', ' _format = _format[:-2] +", "for card in self: output += str(card) + '\\n' return output[:-1] # -----------------------------", "this task may be difficult when the extension is a TableHDU containing ASCII", "mo = re_naxis.search(block) if mo is not None: naxis = int(mo.group(1)) pos =", "= rec.RecArray.__repr__(self) loc = tmp.rfind('\\nnames=') tmp = tmp[:loc+7] + `self._coldefs.names` + ')' return", "else: raise _name, \"exists\" class VerifyError(exceptions.Exception): \"\"\"Verify exception class.\"\"\" pass class _ErrList(list): \"\"\"Verification", "== 1: result = self.field(indx[0]) # if more than one group parameter have", "Section(self) elif attr == 'data': self.__dict__[attr] = None if self.header['NAXIS'] > 0: _bitpix", "if comment is not None: _comment = comment else: _comment = self.ascard[j].comment self.ascard[j]", "= 0 self.__file.seek(hdu._datSpan, 1) if self.__file.tell() > self._size: print 'Warning: File size is", "*directly* before the END card self._blanks = 0 self.count_blanks() def __getitem__(self, key): \"\"\"Get", "['append', 'update']: self.flush(output_verify=output_verify, verbose=verbose) self.__file.close() # close the memmap object, it is designed", "to be opened. mode: Open mode, 'readonly' (default), 'update', or 'append'. memmap: Is", "not in _python_mode.keys(): raise \"Mode '%s' not recognized\" % mode if mode !=", "= tmp[:loc+7] + `self._coldefs.names` + ')' return tmp # synchronize the sliced FITS_rec", "if (self._convert[indx] is not None): if isinstance(self._coldefs._recformats[indx], _FormatX): _wrapx(self._convert[indx], self._parent.field(indx), self._coldefs._recformats[indx]._nx) continue (_str,", "\"Unparsable card, fix it first with .verify('fix').\" if valu.group('bool') != None: _val =", "index_of(self, key, backward=0): \"\"\"Get the index of a keyword in the CardList. key:", "output nbytes = ((nx-1) / 8) + 1 unused = nbytes*8 - nx", "slice)): return key elif isinstance(key, tuple): _key = key[0] _ver = key[1] else:", "the header). @type filename: string @param filename: input FITS file name @param ext:", "8 self.__class__ = _Hierarch return self._cardimage[_start:eqLoc] def _getValueCommentString(self): \"\"\"Locate the equal sign in", "= self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix) _err.append(_text) # each element calls their own verify", "<= xoffset: offset = xoffset + strlen # collect the pieces in a", "if before != None or after != None: self.ascard._pos_insert(new_card, before=before, after=after) else: if", "'': valStr = \"''\" else: _expValStr = self.value.replace(\"'\",\"''\") valStr = \"'%-8s'\" % _expValStr", "GroupsHDU): dims = self.size()*8/abs(_bitpix) else: dims = self._dimShape() code = _ImageBaseHDU.NumCode[self.header['BITPIX']] if self._ffile.memmap:", "'Block does not begin with SIMPLE or XTENSION' for i in range(0, len(_blockLen),", "return a match if a FITS string, boolean, # number, or complex value", "None def size(self): \"\"\"Returns the size (in bytes) of the HDU's data part.\"\"\"", "into two pieces. 
But if there is one single word which is longer", "portion of the HDU.\"\"\" size = 0 naxis = self.header.get('NAXIS', 0) if naxis", "_zero) = self._get_scale_factors(i)[3:5] if _scale or _zero: self._convert[i] = pardata[i] else: self._parent.field(i)[:] =", "\"\"\" _TableBaseHDU.__init__(self, data=data, header=header, name=name) self._xtn = 'TABLE' if self.header[0].rstrip() != self._xtn: self.header[0]", "pass def info(self): \"\"\"Summarize the info of the HDU's in this HDUList.\"\"\" if", "PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR", "groups')) if isinstance(self, (_ExtensionHDU, GroupsHDU)): _list.append(Card('PCOUNT', 0, 'number of parameters')) _list.append(Card('GCOUNT', 1, 'number", "> 1: self.update_extend() def index_of(self, key): \"\"\"Get the index of an HDU from", "= pyfits.StreamingHDU('filename.fits',header) for each piece of data: shdu.write(data) shdu.close() \"\"\" def __init__(self, name,", "\"\"\"Read the skeleton structure of the HDU.\"\"\" end_RE = re.compile('END'+' '*77) _hdrLoc =", "name) def _setkey(self, val): \"\"\"Set the key attribute, surrogate for the __setattr__ key", "= hdu.columns = ColDefs(input, tbtype) # read the delayed data for i in", "must be a primary HDU if len(self) > 0 and (not isinstance(self[0], PrimaryHDU)):", "update in place else: for hdu in self: if (verbose): try: _extver =", "the HDU's in this HDUList.\"\"\" if self.__file is None: _name = '(No file", "binary tables if _number or _str: if _number and (_scale or _zero): dummy", "equivalent Note EXTNAMEs are not case sensitive By combination of EXTNAME and EXTVER,", "use of __str__, since normally __str__ has only one argument. \"\"\" result =", "is not FITS standard (unparsable value string).' raise ValueError, self._err_text + '\\n%s' %", "!= None: attr[i] = val elif name == '_arrays': attr = [col.array for", "*one* FITS HDU. Must seek to the correct location before calling this method.", "'length of dimension 2'), Card('PCOUNT', 0, 'number of group parameters'), Card('GCOUNT', 1, 'number", "for appending @type header: L{Header} object or None @param header: the header associated", "be the column right after the last field elif tbtype == 'TableHDU': (_format,", "hdu.update_header() return (self.writeHDUheader(hdu),) + self.writeHDUdata(hdu) def writeHDUheader(self, hdu): \"\"\"Write FITS HDU header part.\"\"\"", "else: # try not to use CONTINUE if the string value can fit", "hdulist[_ext] hdr = hdu.header hdulist.close() return hdr def getdata(filename, *ext, **extkeys): \"\"\"Get the", "= self._ffo.getfile().tell() - self._datLoc if curDataSize + data.itemsize()*data._size > self._size: raise IOError, \"Supplied", "is longer than strlen, then it will be split in the middle of", "ord('T')) self._convert[indx] = dummy return self._convert[indx] if _str: return self._parent.field(indx) # ASCII table,", "return raw_data class _ImageBaseHDU(_ValidHDU): \"\"\"FITS image HDU base class.\"\"\" \"\"\"Attributes: header: image header", "Solaris. 
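One fragment above quotes the StreamingHDU pseudo code (shdu = pyfits.StreamingHDU('filename.fits', header); shdu.write(data); shdu.close()), the class that streams data to a FITS file instead of requiring the whole array up front. A slightly fleshed-out sketch of that pattern; 'template.fits' and 'stream.fits' are placeholder file names, and data_chunks is a hypothetical iterable of numarray pieces whose total size matches what the header advertises:

import pyfits

# the header must describe the complete data array up front; here it is taken
# from a placeholder template file assumed to match the data being streamed
hdr = pyfits.getheader('template.fits')

shdu = pyfits.StreamingHDU('stream.fits', hdr)
for chunk in data_chunks:          # data_chunks: hypothetical source of numarray pieces
    complete = shdu.write(chunk)   # becomes true once all data promised by the header is written
    if complete:
        break
shdu.close()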
self.__file.seek(0, 2) self._size = self.__file.tell() self.__file.seek(0) def __getattr__(self, attr): \"\"\"Get the _mm", "in source and binary forms, with or without modification, are permitted provided that", "_err class BinTableHDU(_TableBaseHDU): \"\"\"Binary table HDU class.\"\"\" def __init__(self, data=None, header=None, name=None): \"\"\"data:", "hdu._datSpan = self._datSpan hdu._ffile = self._ffile hdu.name = self.name hdu._extver = self._extver hdu._new", "no equal sign, return the string before column 9. \"\"\" eqLoc = self._locateEq()", "is not a string' % val self.__dict__['key'] = val def _setvalue(self, val): \"\"\"Set", "%s\" % fmt return output_format def _convert_ASCII_format(input_format): \"\"\"Convert ASCII table format spec to", "\"\"\"Construct an image HDU. data: the data in the HDU, default=None. header: the", "\"Card\" is considered # to be more than one 80-char \"physical\" cards. _max", "yet, use header info. else: _shape = () _nrows = self.header['NAXIS2'] _ncols =", "_setcomment(self, val): \"\"\"Set the comment attribute.\"\"\" if isinstance(val,str): self._checkText(val) else: if val is", "indices.' elif naxis > len(key): key = key + (slice(None),) * (naxis-len(key)) offset", "make a copy _keylist.reverse() try: _indx = _keylist.index(_key) if backward: _indx = len(_keylist)", "in ['formats', 'names']: setattr(_data, attr, getattr(tmp, attr)) for i in range(len(tmp)): tmp._arrays[i] =", "val self.__dict__['comment'] = val def __setattr__(self, name, val): if name == 'key': raise", "copy the class tmp._hdutype = self._hdutype return tmp def _strip(self): \"\"\"Strip cards specific", "next level # must present, even it has nothing. for item in self:", "(value, Card): _key = self.index_of(key) # only set if the value is different", "'%20d' % self.value # XXX need to consider platform dependence of the format", "it may be fixable result = Card._value_FSC_RE.match(self._getValueCommentString()) if result is not None or", "re_extname = re.compile(r\"EXTNAME\\s*=\\s*'([ -&(-~]*)'\") re_extver = re.compile(r\"EXTVER\\s*=\\s*(\\d+)\") mo = re_extname.search(self._raw) if mo: name", "in dims: npt *= n # Now, get the data (does not include", "_dims, _format) def get_coldefs(self): \"\"\"Returns the table's column definitions.\"\"\" return self.columns def update(self):", "use HIERARCH.' 
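Fragments in this section reference the open modes ('readonly' as the default, plus 'update', 'append', 'copyonwrite' and the memmap flag), the HDUList flush/close machinery, and the header update() method. A minimal sketch of the usual in-place update pattern, assuming the legacy pyfits interface; 'example.fits' and the keyword values are placeholders:

import pyfits

hdulist = pyfits.open('example.fits', mode='update')
hdulist.info()                                   # summary: name, type, cards, dimensions, format

prihdr = hdulist[0].header
prihdr.update('OBSERVER', 'E. Hubble', 'hypothetical keyword update')

hdulist.flush()   # write pending changes back to the file
hdulist.close()   # close() also flushes when the mode is 'update' or 'append'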
% val else: raise ValueError, 'keyword name %s is not a", "> 0 and (not isinstance(self[0], PrimaryHDU)): err_text = \"HDUList's 0th element is not", "# the content of header without being able to pass it to the", "= new_hdu hdulist.close() def info(filename): \"\"\"Print the summary information on a FITS file.", "[{} for i in range(_nfields)] # definition dictionaries for each field for _card", "to a field (presumably with the field method), it will try to match", "self._convert[indx] def _scale_back(self): \"\"\"Update the parent array, using the (latest) scaled array.\"\"\" _dict", "FITS block.\"\"\" return (_blockLen - stringLen%_blockLen) % _blockLen def _tmpName(input): \"\"\"Create a temporary", "SIMPLE or XTENSION' for i in range(0, len(blocks), Card.length): _card = Card('').fromstring(blocks[i:i+Card.length]) _key", "find the # end of a string rather well, but will accept #", "value) self._max = max(self._max, len(value)) class Column: \"\"\"Column class which contains the definition", "self[i].key not in Card._commentaryKeys: break super(CardList, self).insert(i+1, card) self._keylist.insert(i+1, card.key.upper()) if useblanks: self._use_blanks(card._ncards())", "if len(self) > 1: self.update_extend() def index_of(self, key): \"\"\"Get the index of an", "= data self.name = None def size(self): \"\"\"Returns the size (in bytes) of", "= Card._value_FSC_RE.match(self._getValueCommentString()) if result is not None or self.key in Card._commentaryKeys: return result", "= 0 for i in range(len(tmp)): _formats += 'a%d,' % tmp.spans[i] _itemsize +=", "copy if scaled, so as not to corrupt the original array if bzero", "raise ValueError, 'Illegal format `%s` for ASCII table.' % input_format return (dtype, width)", "UNDEFINED = Undefined() __credits__=\"\"\" Copyright (C) 2004 Association of Universities for Research in", "_longstring += c._cardimage _cardList[_where-1] = _Card_with_continue().fromstring(_longstring) del _cardList[_where:_where+nc] del _keyList[_where:_where+nc] _start = _where", "FITS file) self.data return new_table(self.columns, header=self.header, tbtype=self.columns._tbtype) def _verify(self, option='warn'): \"\"\"_TableBaseHDU verify method.\"\"\"", "nfound = 0 for j in range(len(self)): _name = self[j].name if isinstance(_name, str):", "is not None: _valStr = numr.group('sign')+_valStr elif input.group('cplx') != None: real = Card._number_NFSC_RE.match(input.group('real'))", "long))\" # Functions def _padLength(stringLen): \"\"\"Bytes needed to pad the input stringLen to", "*' + _digits_NFSC # This regex helps delete leading zeros from numbers, otherwise", "= hdu._ffile._mm[hdu._datLoc:hdu._datLoc+hdu._datSpan] _data = rec.RecArray(_mmap, formats=tmp._recformats, names=tmp.names, shape=tmp._shape) else: _data = rec.array(hdu._file, formats=tmp._recformats,", "tbtype: tmp = hdu.columns = input else: raise ValueError, 'column definitions have a", "dict[col-1][cname] = _card.value # data reading will be delayed for col in range(_nfields):", "future it may be possible to decipher where the last block of the", "= _after) # delete extra NAXISi's for j in range(len(axes)+1, old_naxis+1): try: del", "and the comment string in another. 
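Fragments in this section also outline the module-level convenience functions writeto(), append(), update(), getval() and info(), which wrap the open/build-HDU/write steps into single calls. A minimal sketch of how they are typically invoked, assuming the legacy numarray-era pyfits interface; 'new.fits' and the img array are placeholders:

import numarray as num
import pyfits

img = num.zeros((5, 5))                          # placeholder image data
pyfits.writeto('new.fits', img)                  # create a new file from data (clobber=True to overwrite)
pyfits.append('new.fits', img)                   # append as an image extension; creates the file if absent
pyfits.update('new.fits', img * 2, 1)            # replace the data of extension 1 in place
naxis = pyfits.getval('new.fits', 'NAXIS', 1)    # read a single keyword value from extension 1
pyfits.info('new.fits')                          # print the HDU summary for the file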
Also, it does not break at the", "_Hierarch): self.__class__ = Card else: # does not support CONTINUE for HIERARCH if", "self.memmap = memmap if memmap and mode not in ['readonly', 'copyonwrite', 'update']: raise", "the content of header without being able to pass it to the header", "string @param filename: name of the file to be updated data: the new", "*ext, **extkeys): \"\"\"Get the data from an extension of a FITS file (and", "in range(npars): self.header.update('PTYPE'+`i+1`, self.data.parnames[i]) (_scale, _zero) = self.data._get_scale_factors(i)[3:5] if _scale: self.header.update('PSCAL'+`i+1`, self.data._coldefs.bscales[i]) if", "Text to be added. before: [same as in update()] after: [same as in", "self.columns _update('naxis1', self.data._itemsize, after='naxis') _update('naxis2', self.data._shape[0], after='naxis1') _update('tfields', len(_cols), after='gcount') # Wipe out", "def _verify (self, option='warn'): _text = '' _err = _ErrList([], unit='HDU') # the", "cards.index_of(keywd) except: _index = None fixable = fix_value is not None # if", "mode=\"append\") if (verbose): print \"open a temp file\", _name for hdu in self:", "to be NDArray if format is not None: # check format try: #", "['fix', 'silentfix'] and x.find('Unfixable') != -1: raise VerifyError, '\\n'+x if (_option != \"silentfix\")", "= \"'%s' card has invalid value '%s'.\" % (keywd, val) fix_text = \"Fixed", "HDU name: the EXTNAME value \"\"\" _TableBaseHDU.__init__(self, data=data, header=header, name=name) self._xtn = 'TABLE'", "dtype, option) = _parse_tformat(fmt) if reverse == 0: if dtype in _fits2rec.keys(): #", "last_end + 1 _end = self.starts[i] + _width - 1 attr[i] = _end", "image, need to deal with byte order if isinstance(hdu, _ImageBaseHDU): if hdu.data._byteorder !=", "width = eval(width) except: raise ValueError, 'Illegal format `%s` for ASCII table.' %", "on a FITS file. This includes the name, type, length of header, data", "_Card_with_continue output = self._breakup_strings() else: print 'card is too long, comment is truncated.'", "of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce", "= 0 for indx in range(self._nfields): if (self._convert[indx] is not None): if isinstance(self._coldefs._recformats[indx],", "hdu.data is not None: hdu.data._scale_back() if isinstance(hdu, _TableBaseHDU) and hdu.data is not None:", "self._datLoc = None, None if header is not None: if not isinstance(header, Header):", "_key == 'HIERARCH': _limit = Card.length else: _limit = 10 try: eqLoc =", "is read the first time, no need to copy, and keep it unchanged", "same keyword name if isinstance(key, str): while 1: try: del self.ascard[key] self._mod =", "break def _readHDU(self): \"\"\"Read the skeleton structure of the HDU.\"\"\" end_RE = re.compile('END'+'", "= 0 for j in range(len(self)): _name = self[j].name if isinstance(_name, str): _name", "a Card object from a (raw) string. 
It will pad the string if", "continue if hdu.data is None: continue _bytes = hdu.data._itemsize*hdu.data.nelements() _bytes = _bytes +", "Boolean (logical) column if self._coldefs._recformats[indx]._dtype is _booltype: for i in range(len(self._parent)): dummy[i] =", "verbose: print \"One or more data area is resized.\" break # if the", "_format _bscale = self.header.get('BSCALE', 1) _bzero = self.header.get('BZERO', 0) _cols.append(Column(name='data', format = dat_format,", "for col in self.data] elif name == '_recformats': if self._tbtype == 'BinTableHDU': attr", "Redistribution and use in source and binary forms, with or without modification, are", "/ 8) + 1 unused = nbytes*8 - nx for i in range(nbytes):", "was read/created. If \"minmax\", use the minimum and maximum of the data to", "!= 1: num.multiply(self.data, self._bscale, self.data) if self._bzero != 0: self.data += self._bzero #", "_FormatX): _nx = self._coldefs._recformats[indx]._nx dummy = num.zeros(self._parent.shape+(_nx,), type=num.Bool) _unwrapx(self._parent.field(indx), dummy, _nx) self._convert[indx] =", "_cols.append(Column(name='data', format = dat_format, bscale = _bscale, bzero = _bzero)) _coldefs = ColDefs(_cols)", "mode if mode != 'append' and not os.path.exists(name): self.name, fileheader = urllib.urlretrieve(name) else:", "= ImageHDU(data, header) f = open(filename, mode='update') f.append(hdu) f.close() def update(filename, data, *ext,", "a tuple: >>> getdata('in.fits', 'sci', 2) # EXTNAME='SCI' & EXTVER=2 >>> getdata('in.fits', extname='sci',", "comm_len) for i in comm_list: commstr = \"CONTINUE '&' / \" + commfmt", "are *directly* before the END card self._blanks = 0 self.count_blanks() def __getitem__(self, key):", "to be FITS standard.\"\"\" # use repr (not str) in case of control", "print \"There is nothing to write.\" return self.update_tbhdu() if output_verify == 'warn': output_verify", "if (_option == \"ignore\"): return x = str(self._verify(_option)).rstrip() if _option in ['fix', 'silentfix']", "input.group('cplx') != None: real = Card._number_NFSC_RE.match(input.group('real')) _realStr = real.group('digt').translate(_fix_table, ' ') if real.group('sign')", "_index = None fixable = fix_value is not None # if pos is", "dimensions, and formats.\"\"\" class_name = str(self.__class__) type = class_name[class_name.rfind('.')+1:] # if data is", "the non-required keywords nrows: number of rows in the new table fill: if", "GroupsHDU._dict[bitpix] # -32 -> 'E' _fmt = _fits2rec[fits_fmt] # 'E' -> 'f4' _formats", "map(Card._ncards, hdu.header.ascard)) _bytes = (_nch80+1) * Card.length _bytes = _bytes + _padLength(_bytes) if", "card list class.\"\"\" def __init__(self, cards=[], keylist=None): \"\"\"Construct the CardList object from a", "self.header.get('PZERO'+`i+1`, 0) _pnames.append(self.header['PTYPE'+`i+1`].lower()) _cols.append(Column(name='c'+`i+1`, format = _format, bscale = _bscale, bzero = _bzero))", "!= 'big': i.byteswap() i._byteorder = 'big' else: if coldata._type.bytes > 1: if coldata._byteorder", "if _scale != 1: self.data /= _scale self.header.update('BSCALE', _scale) else: del self.header['BSCALE'] if", "\"\"\"Return a list of all keywords from the CardList.\"\"\" return map(lambda x: getattr(x,'key'),", "out of range.' 
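The Card fragments around here (the key/value/comment constructor, the fromstring() padding behavior, ascardimage(), and the Header commentary helpers) suggest the following minimal sketch, assuming the legacy pyfits interface; every keyword name and value below is a made-up example:

import pyfits

c = pyfits.Card('OBSERVER', 'E. Hubble', 'hypothetical example card')
print c.ascardimage()     # the 80-character card image, padded with blanks

# a Card can also be rebuilt from a raw image string; short input is padded to card length
c2 = pyfits.Card().fromstring("EXPTIME =                 30.0 / exposure time in seconds")

hdr = pyfits.Header()
hdr.update('EXPTIME', 30.0, 'exposure time in seconds')
hdr.add_history('hypothetical processing step')    # appends a HISTORY card
hdr.add_comment('free-form comment text')          # appends a COMMENT card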
% indx elif isinstance(indx, slice): indx = _normalize_slice(indx, naxis) if", "data = _get_tbdata(self) data._coldefs = self.columns else: data = None self.__dict__[attr] = data", "= 1 except: return # for integer key only delete once else: del", "nbytes = ((repeat-1) / 8) + 1 # use an array, even if", "data to the new table for i in range(len(tmp)): if tmp._arrays[i] is None:", "Header may also contain the binary data(*). (*) In future it may be", "offset of the heap area for each # variable length column if isinstance(self._coldefs._recformats[indx],", "= self.tfile.file self.__file.write(zfile.read(namelist[0])) zfile.close() else: self.__file = __builtin__.open(self.name, _python_mode[mode]) # For 'ab+' mode,", "> 0: coldata.tofile(self.__file) _shift = self.__file.tell() - _where hdu.data._heapsize = _shift - hdu.data._gap", "key attribute, surrogate for the __setattr__ key case.\"\"\" if isinstance(val, str): val =", "object is created from files # other than FITS, the close() call can", "self.__file.mode == 'append': for hdu in self: if (verbose): try: _extver = `hdu.header['extver']`", "= Card._number_NFSC_RE.match(valu.group('imag')) _idigt = imag.group('digt').translate(_fix_table2, ' ') if imag.group('sign') == None: _val +=", "(default). When useblanks == 0, the card will be appended at the end,", "is no match if (keyword in _keyNames): col = eval(_key.group('num')) if col <=", "self._ffo.getfile().seek(0,2) self._hdrLoc = self._ffo.writeHDUheader(self) self._datLoc = self._ffo.getfile().tell() self._size = self.size() if self._size !=", "_name for j in range(len(self)): results = results + \"%-3d %s\\n\"%(j, self[j]._summary()) results", "'i1' _fits2rec = {'L':_booltype, 'B':'u1', 'I':'i2', 'E':'f4', 'D':'f8', 'J':'i4', 'A':'a', 'C':'c8', 'M':'c16', 'K':'i8'}", "i in range(_tfields): del self[name+`i+1`] if issubclass(self._hdutype == TableHDU): for i in range(_tfields):", "to this method will scale self.data and update the keywords of BSCALE and", "= {'readonly':'r', 'copyonwrite':'c', 'update':'r+'} TRUE = True # deprecated FALSE = False #", "or value != '' or comment != '': self._setkey(key) self._setvalue(value) self._setcomment(comment) # for", "'' cards = self.header.ascard try: _index = cards.index_of(keywd) except: _index = None fixable", "a HISTORY card. value: History text to be added. before: [same as in", "self.ascardimage() elif name == 'key': self._extractKey() elif name in ['value', 'comment']: self._extractValueComment(name) else:", "self.field(indx[i])[:] = value[i] else: raise ValueError, \"parameter value must be a sequence with", "skip if there is no match if (keyword in _keyNames): col = eval(_key.group('num'))", "\"\"\" if len(block) != _blockLen: raise IOError, 'Block length is not %d: %d'", "attr == '_theap': self.__dict__[attr] = 0 try: return self.__dict__[attr] except KeyError: raise AttributeError(attr)", "0s. numr = Card._number_NFSC_RE.match(valu.group('numr')) _digt = numr.group('digt').translate(_fix_table2, ' ') if numr.group('sign') == None:", "+= bzero elif _bool: self._convert[indx] = num.equal(dummy, ord('T')) else: return dummy return self._convert[indx]", "all be written at once. 
The following psudo code illustrates its use: header", "self.header['GCOUNT'] _coldefs._dat_format = _fits2rec[_format] _coldefs._pnames = _pnames self.__dict__[attr] = _coldefs elif attr ==", "keys(self): \"\"\"Return a list of all keywords from the CardList.\"\"\" return map(lambda x:", "never fixable # always fix silently the case where \"=\" is before column", "if self._parent.field(indx)[i].strip() != nullval: dummy[i] = float(self._parent.field(indx)[i].replace('D', 'E')) else: dummy = self._parent.field(indx) #", ">>> getdata('in.fits', extname='sci') # equivalent Note EXTNAMEs are not case sensitive By combination", "def New_SIGINT(*args): print \"KeyboardInterrupt ignored until flush is complete!\" keyboardInterruptSent = True #", "= max(self._max, len(value)) class Column: \"\"\"Column class which contains the definition of one", "self.verify(option=output_verify) if self.__file.mode == 'append': for hdu in self: if (verbose): try: _extver", "hdu._file = self._file hdu._hdrLoc = self._hdrLoc hdu._datLoc = self._datLoc hdu._datSpan = self._datSpan hdu._ffile", "recfmt = _convert_format(format) except: try: # legit RecArray format? recfmt = format format", "_comment) elif before != None or after != None: _card = Card(key, value,", "card will be appended at the end, even if there are blank cards", "= str(self.__class__) type = class_name[class_name.rfind('.')+1:] # if data is touched, use data info.", "[same as in update()] after: [same as in update()] \"\"\" self._add_commentary('comment', value, before=before,", "= parbscales[i], bzero = parbzeros[i])) _cols.append(Column(name='data', format = fits_fmt, bscale = bscale, bzero", "string rather well, but will accept # strings with an odd number of", "belongs to try: if cards[0].key == 'SIMPLE': if 'GROUPS' in cards._keylist and cards['GROUPS'].value", "None: pcount = int(mo.group(1)) else: pcount = 0 mo = re_groups.search(block) if mo", "'value', 'comment', '_valueModified']: if self.__dict__.has_key(name): delattr(self, name) return self def _ncards(self): return len(self._cardimage)", "HDU's in this HDUList.\"\"\" if self.__file is None: _name = '(No file associated", "', ' + _floatFormat(self.value.imag) + ')' valStr = '%20s' % _tmp else: valStr", "translation table for floating value string _fix_table = maketrans('de', 'DE') _fix_table2 = maketrans('dD',", "the X format column Boolean array into an UInt8 array. input: input Boolean", "key self._resize = 1 def __delitem__(self, key): \"\"\"Delete an HDU from the HDUList,", "axes.reverse() axes = [0] + axes elif isinstance(self.data, num.NumArray): self.header['BITPIX'] = _ImageBaseHDU.ImgCode[self.data.type()] axes", "self._datLoc = None, None, None self.header = header self.data = data self.name =", "else: output = data output.tofile(self._ffo.getfile()) if self._ffo.getfile().tell() - self._datLoc == self._size: # #", "= re.compile(r'(?P<sign>[+-])?0*(?P<digt>' + _digits_FSC+')') _number_NFSC_RE = re.compile(r'(?P<sign>[+-])? *0*(?P<digt>' + _digits_NFSC + ')') #", "self.__dict__.has_key(name): delattr(self, name) return self def _ncards(self): return len(self._cardimage) / Card.length def _verify(self,", "commentStr # need this in case card-with-continue's value is shortened if not isinstance(self,", "the blank space between words. So it may not look pretty. \"\"\" val_len", "del self._keylist[_key] # update the keylist self.count_blanks() self._mod = 1 def count_blanks(self): \"\"\"Find", "calculated when the file is written. 
input: input object array desp_output: output \"descriptor\"", "]*$)') def _parse_tformat(tform): \"\"\"Parse the TFORM value into repeat, data type, and option.\"\"\"", "% self._valuestring elif isinstance(self.value, complex): if self._valueModified: _tmp = '(' + _floatFormat(self.value.real) +", "card does not exist if _index is None: err_text = \"'%s' card does", "which must contain printable ASCII characters. _ASCII_text = r'[ -~]*$' _comment_FSC_RE = re.compile(_ASCII_text)", "_width) = _convert_ASCII_format(self.formats[i]) if self.starts[i] is '': self.starts[i] = last_end + 1 _end", "re.compile(r'(?P<sign>[+-])? *0*(?P<digt>' + _digits_NFSC + ')') # FSC commentary card string which must", "not None: dim = arr._shape[0] else: dim = 0 if dim > nrows:", "self: if 'data' in dir(hdu): if isinstance(hdu, (GroupsHDU, _TableBaseHDU)) and hdu.data is not", "= 'ASCII table extension' ''' def format(self): strfmt, strlen = '', 0 for", "seek to the correct location before calling this method. \"\"\" if isinstance(hdu, _ImageBaseHDU):", "to stream to the file. :Returns: writeComplete : integer Flag that when true", "= 0 except: pass return hdu class _ExtensionHDU(_ValidHDU): \"\"\"An extension HDU class. This", "= PrimaryHDU(data, header=header) clobber = keys.get('clobber', False) hdu.writeto(filename, clobber=clobber) def append(filename, data, header=None):", "real.group('sign')+_realStr imag = Card._number_NFSC_RE.match(input.group('imag')) _imagStr = imag.group('digt').translate(_fix_table, ' ') if imag.group('sign') is not", "def get(self, key, default=None): \"\"\"Get a keyword value from the CardList. If no", "the data part of the random group, # since binary table does not", "None return eqLoc def _getKeyString(self): \"\"\"Locate the equal sign in the card image", "with selected option.\"\"\" _text = err_text if not fixable: option = 'unfixable' if", "accomodate Extension # and Corrupted cases del self['SIMPLE'] del self['XTENSION'] del self['BITPIX'] _naxis", "= num.minimum.reduce(self.data) max = num.maximum.reduce(self.data) self.data.setshape(dims) if `_type` == 'UInt8': # UInt8 case", "specification(s). Header and extension specs can also be keyword arguments. For example: >>>", "are two or more attribute names, they must be separated by comma(s). \"\"\"", "0: for i in range(min(self._blanks, how_many)): del self[-1] # it also delete the", "'ignore', 'warn', 'exception']: raise ValueError, 'Option %s not recognized.' % option if (_option", "_wrapx(tmp._arrays[i][:n], hdu.data._parent.field(i)[:n], tmp._recformats[i]._nx) else: # from a table parent data, just pass it", "item in self: if not isinstance(item, _ErrList): result += _INDENT*tab+\"%s\\n\" % item #", "if data is not touched yet, use header info. else: _shape = ()", "key = self.index_of(key) _item = super(HDUList, self).__getitem__(key) if isinstance(_item, _TempHDU): super(HDUList, self).__setitem__(key, _item.setupHDU())", "read all HDU's while 1: try: hduList.append(ffo._readHDU()) except EOFError: break # check in", "used to reconstruct another kind of header. \"\"\" try: # have both SIMPLE", "object from a list of Cards. cards: A list of Cards, default=[]. 
\"\"\"", "in [None, '']: commentStr = '' else: commentStr = ' / ' +", "None: _val = re.sub(\"''\", \"'\", valu.group('strg')) elif valu.group('numr') != None: # Check for", "ND yet if isinstance(hdu, GroupsHDU): tmp._recformats[-1] = `hdu._dimShape()[:-1]` + tmp._dat_format if hdu._ffile.memmap: _mmap", "check if both value and _cardimage attributes are missing, # to avoid infinite", "if the header represents a Primary header, it will be written to the", "it unchanged else: self.header = header else: # construct a list of cards", "to preserve the one-to-one correspondence when updating the list(s). # Use lists, instead", "'bscale', 'bzero', 'disp', 'start', 'dim'] _keyNames = ['TTYPE', 'TFORM', 'TUNIT', 'TNULL', 'TSCAL', 'TZERO',", "unit) classes.\"\"\" pass class _CorruptedHDU(_AllHDU): \"\"\"A Corrupted HDU class.\"\"\" \"\"\" This class is", "size * self.header['NAXIS'+`j+1`] bitpix = self.header['BITPIX'] gcount = self.header.get('GCOUNT', 1) pcount = self.header.get('PCOUNT',", "return f def _clone(self, shape): \"\"\"Overload this to make mask array indexing work", "hdu.data._gap if _pcount > 0: hdu.header['PCOUNT'] = _pcount # update TFORM for variable", "results = \"Filename: %s\\nNo. Name Type\"\\ \" Cards Dimensions Format\\n\" % _name for", "hdu.data._gap _size = _size + _shift # pad the FITS data block if", "pad the string if it is not the length of a card image", "be a sequence with %d arrays/numbers.\" % len(indx) def _getitem(self, offset): row =", "fixable: fix = \"self.header['%s'] = %s\" % (keywd, `fix_value`) _err.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix,", "_shift = self.__file.tell() - _where hdu.data._heapsize = _shift - hdu.data._gap _size = _size", "')') # FSC commentary card string which must contain printable ASCII characters. _ASCII_text", "list(self.data.getshape()) axes.reverse() elif self.data is None: axes = [] else: raise ValueError, \"incorrect", "elif isinstance(self.value, Undefined): valStr = '' # conserve space for HIERARCH cards if", "if it does not exist for j in range(len(axes)): try: self.header['NAXIS'+`j+1`] = axes[j]", "wrong byteorder if coldata2._byteorder != 'big': coldata2.byteswap() coldata2._byteorder = 'big' # In case", "[] if not isinstance(key, tuple): key = (key,) naxis = self.hdu.header['NAXIS'] if naxis", "legit FITS format? convert to record format (e.g. '3J'->'3i4') recfmt = _convert_format(format) except:", "err_text=\"\", fix_text=\"Fixed.\", fix = \"pass\", fixable=1): \"\"\"Execute the verification with selected option.\"\"\" _text", "= data elif attr == 'columns': class_name = str(self.__class__) class_name = class_name[class_name.rfind('.')+1:] self.__dict__[attr]", "num.bitwise_and(input[...,i], pow2[j-i*8], output[...,j]) def _wrapx(input, output, nx): \"\"\"Wrap the X format column Boolean", "= (_fmt+',') * npars data_fmt = '%s%s' % (`input.shape[1:]`, _fmt) _formats += data_fmt", "_list # populate the new table definition keywords for i in range(len(_cols)): for", "raise ValueError, valu size = eval(width)+1 strfmt = strfmt + 's'+str(size) + ','", "!= ' '*Card.length: self._blanks = i - 1 break def append(self, card, useblanks=1,", "= ascii2rec[dtype] if width == '': width = None else: width = eval(width)", "The final offset will be calculated when the file is written. 
input: input", "0, option, _err) naxis = self.header.get('NAXIS', 0) if naxis < 1000: for j", "len(self.data), after='PCOUNT') npars = len(self.data.parnames) (_scale, _zero) = self.data._get_scale_factors(npars)[3:5] if _scale: self.header.update('BSCALE', self.data._coldefs.bscales[npars])", "group image, NAXIS1 should be 0, so we skip NAXIS1. if naxis >", "None fixable = fix_value is not None # if pos is a string,", "bzero !=0): _scale = bscale _zero = bzero else: if option == 'old':", "not in dir(hdu): continue if hdu.data is None: continue _bytes = hdu.data._itemsize*hdu.data.nelements() _bytes", "object supplied. \"\"\" if not os.path.exists(filename): writeto(filename, data, header) else: hdu=_makehdu(data, header) if", "*[+-]? *\\d+)?' _numr_FSC = r'[+-]?' + _digits_FSC _numr_NFSC = r'[+-]? *' + _digits_NFSC", "__setitem__ (self, key, value): \"\"\"Set a header keyword value.\"\"\" self.ascard[key].value = value self._mod", "if '_valuestring' not in self.__dict__: self.__dict__['_valuestring'] = valu.group('valu') if '_valueModified' not in self.__dict__:", "self.header.ascard try: _index = cards.index_of(keywd) except: _index = None fixable = fix_value is", "'== 1', _isInt+\" and \"+isValid, 8, option, _err) self.req_cards('NAXIS', '== 2', _isInt+\" and", "key, default=None): \"\"\"Get a keyword value from the CardList. If no keyword is", "indx.offset # all elements after the first WholeLine must be WholeLine or #", "in _TableBaseHDU size = self.size() if size: self._file.seek(self._datLoc) data = GroupData(_get_tbdata(self)) data._coldefs =", "%d' % (_blockLen, len(blocks)) elif (blocks[:8] not in ['SIMPLE ', 'XTENSION']): raise IOError,", "a \"copy\" (not just a view) of the input header, since it #", "1): if _bitpix > 0: # scale integers to Float32 self.data = num.array(raw_data,", "'[' for j in range(_ncols): _format += self.header['TFORM'+`j+1`] + ', ' _format =", "> self._size: raise IOError, \"Supplied data will overflow the stream\" if _ImageBaseHDU.NumCode[self.header['BITPIX']] !=", "if isinstance(data, num.NumArray): hdu = ImageHDU(data) elif isinstance(data, FITS_rec): hdu = BinTableHDU(data) else:", "first card.' if not isinstance(_card.value, str): raise ValueError, 'Cards with CONTINUE must have", "the capability to stream data to a FITS file instead of requiring data", "if mode not in _python_mode.keys(): raise \"Mode '%s' not recognized\" % mode if", "to consider platform dependence of the format (e.g. E-009 vs. E-09) elif isinstance(self.value,", "determine if any of the HDU is resized for hdu in self: #", "provided header represents a Primary header, the header will be modified to an", "output def get_comment(self): \"\"\"Get all comments as a list of string texts.\"\"\" output", "= len(ext1) n_ext2 = len(ext2) keys = ext2.keys() # parse the extension spec", "calls because _File has ts own private attribute __file. \"\"\" if self.__file !=", "j in range(self.header['NAXIS']): if isinstance(self, GroupsHDU) and j == 0: continue _shape +=", "If string, (a) Field (column) names are case sensitive: you can have two", "zfile.namelist() if len(namelist) != 1: raise \"Zip files with multiple members are not", "if 'extname' in keys: if 'extver' in keys: ext = ext2['extname'], ext2['extver'] else:", "last non-blank card. 
\"\"\" if isinstance (card, Card): nc = len(self) - self._blanks", "file so we # must change the Primary header provided into an image", "= rec.array(hdu._file, formats=tmp._recformats, names=tmp.names, shape=tmp._shape) if isinstance(hdu._ffile, _File): _data._byteorder = 'big' # pass", "range(_tfields): del self[name+`i+1`] if issubclass(self._hdutype, BinTableHDU): for name in ['TDISP', 'TDIM', 'THEAP']: for", "GroupsHDU)): _list.append(Card('PCOUNT', 0, 'number of parameters')) _list.append(Card('GCOUNT', 1, 'number of groups')) if header", "is the index in the list. If string, (a) Field (column) names are", "file name and a header. :Parameters: name : string The name of the", "\"\"\"Format a list of cards into a string.\"\"\" block = '' for card", "names and keyword names, make # sure to preserve the one-to-one correspondence when", "and no word is cut into two pieces. But if there is one", "# read all HDU's while 1: try: hduList.append(ffo._readHDU()) except EOFError: break # check", "self.header['NAXIS'] = 1 self.header.update('NAXIS1', 0, after='NAXIS') def __getattr__(self, attr): \"\"\"Get the 'data' or", "the original array if bzero not in ['', None, 0] or bscale not", "this method will scale self.data and update the keywords of BSCALE and BZERO", "keyword. _keywd_FSC = r'[A-Z0-9_-]* *$' _keywd_FSC_RE = re.compile(_keywd_FSC) # A number sub-string, either", "GroupsHDU elif cards[0].value == True: self._hdutype = PrimaryHDU else: self._hdutype = _ValidHDU elif", "if filename already exists, it will overwrite the file. Default is False. \"\"\"", "= 0 # there is no boolean in ASCII table _number = not(_bool", "value is different from the old one if str(self[_key]) != str(value): super(CardList, self).__setitem__(_key,", "will be placed. The argument `before' takes precedence over `after' if both specified.", "+ _start for nc in range(1, _max+1): if _where+nc >= len(_keyList): break if", "'big' # In case the FITS_rec was created in a LittleEndian machine hdu.data._byteorder", "cannot be calculated or the 'END' card is not found. In the case", "arguments header = None if len(ext) > 0: if isinstance(ext[0], Header): header =", "EXTNAME keyword \"\"\" if header is not None: if not isinstance(header, Header): raise", "middle of the word. \"\"\" list = [] _nblanks = input.count(' ') nmax", "0, 'number of group parameters'), Card('GCOUNT', 1, 'number of groups'), Card('TFIELDS', 0, 'number", "r'(?P<comm_field>' r'(?P<sepr>/ *)' r'(?P<comm>.*)' r')?$') # keys of commentary cards _commentaryKeys = ['',", "name.\"\"\" if new_name != col_name and new_name in self.names: raise ValueError, 'New name", "HDU will be inserted at the beginning of the file and the provided", "= parbzeros[i])) _cols.append(Column(name='data', format = fits_fmt, bscale = bscale, bzero = bzero)) self._coldefs", "as in update()] after: [same as in update()] \"\"\" self._add_commentary(' ', value, before=before,", "= results[:-1] print results def open(name, mode=\"copyonwrite\", memmap=0): \"\"\"Factory function to open a", "= input_format (repeat, dtype, option) = _parse_tformat(fmt) if reverse == 0: if dtype", "\"%s:\" % att print ' ', getattr(self, att+'s') #def change_format(self, col_name, new_format): #new_format", "out to avoid circular reference of _pcount # pass the attributes for attr", "new data used for updating The rest of the arguments are flexible: the", "elif isinstance(self.value , (int, long)): valStr = '%20d' % self.value # XXX need", "else: _name = self.__file.name results = \"Filename: %s\\nNo. 
Name Type\"\\ \" Cards Dimensions", "is None: if isinstance(data, num.NumArray): hdu = ImageHDU(data) elif isinstance(data, FITS_rec): hdu =", "= 'Card image is not FITS standard (unparsable value string).' raise ValueError, self._err_text", "last block of the Header ends, but this task may be difficult when", "__repr__(self): tmp = rec.RecArray.__repr__(self) loc = tmp.rfind('\\nnames=') tmp = tmp[:loc+7] + `self._coldefs.names` +", "CONTINUE cards. This is a primitive implementation, it will put the value string", "hdu._datSpan: self._resize = 1 if verbose: print \"One or more data area is", "self._coldefs._Formats[indx].strip() _lead = self._coldefs.starts[indx] - _loc[indx] if _lead < 0: raise ValueError, \"column", "(a string) or the index (an integer). backward: search the index from the", "format, corresponding to TFORM keyword unit: column unit, corresponding to TUNIT keyword null:", "{8:'UInt8', 16:'Int16', 32:'Int32', 64:'Int64', -32:'Float32', -64:'Float64'} ImgCode = {'UInt8':8, 'Int16':16, 'Int32':32, 'Int64':64, 'Float32':-32,", "in self: # Header: # Add 1 to .ascard to include the END", "= 1 self._ffo.getfile().flush() return self.writeComplete def size(self): \"\"\" Return the size (in bytes)", "'array data type'), Card('NAXIS', 0, 'number of array dimensions'), ]) if isinstance(self, GroupsHDU):", "header is created @type filename: string @param filename: name of the file to", "minimal Header will be provided. name: The name of the HDU, will be", "== 'spans': # make sure to consider the case that the starting column", "prec = fmt.group('code', 'width', 'prec') else: raise ValueError, valu size = eval(width)+1 strfmt", "the index of the field. \"\"\" if self._coldefs._tbtype == 'BinTableHDU': _str = 'a'", "# for a valid value/comment string. # The valu group will return a", "there are blank cards in front of END. bottom: If =0 (default) the", "val <= 999\", 0, option, _err) tfields = self.header['TFIELDS'] for i in range(tfields):", "_formats = '' _itemsize = 0 for i in range(len(tmp)): _formats += 'a%d,'", "else: _shape = () _nrows = self.header['NAXIS2'] _ncols = self.header['TFIELDS'] _format = '['", "() for j in range(self.header['NAXIS']): if isinstance(self, GroupsHDU) and j == 0: continue", "+ 's'+str(size) + ',' strlen = strlen + size else: strfmt = '>'", "print \"reopen the newly renamed file\", oldName # reset the resize attributes after", "bzero not in ['', None, 0] # ensure bscale/bzero are numbers if not", "2004 Association of Universities for Research in Astronomy (AURA) Redistribution and use in", "'A3DTABLE'): self._hdutype = BinTableHDU else: self._hdutype = _ExtensionHDU else: self._hdutype = _ValidHDU except:", "continue def update_tbhdu(self): \"\"\"Update all table HDU's for scaled fields.\"\"\" for hdu in", "distribution. 3. The name of AURA and its representatives may not be used", "== 'TableHDU': _format = self._coldefs._Formats[indx].strip() _lead = self._coldefs.starts[indx] - _loc[indx] if _lead <", "(verbose): print \"append HDU\", hdu.name, _extver hdu._new = 0 elif self.__file.mode == 'update':", "elif indx > npts: indx = npts return indx _start = input.start if", "< 0: indx += npts elif indx > npts: indx = npts return", "value keyword = _keyNames[_commonNames.index(cname)] if isinstance(value, Card): setattr(self, cname, value.value) else: setattr(self, cname,", "self.header.get('BSCALE', 1) if (data is DELAYED): return self.data = data # update the", "you *refer* to a field (presumably with the field method), it will try", "array class. 
FITS record array is the data part of a table HDU's", "= 0 if singleThread: if keyboardInterruptSent: raise KeyboardInterrupt signal.signal(signal.SIGINT,signal.getsignal(signal.SIGINT)) def update_extend(self): \"\"\"Make sure", "to read HDU #%d.\\n There may be extra bytes after the last HDU", "_rec2fits = {} for key in _fits2rec.keys(): _rec2fits[_fits2rec[key]]=key class _FormatX(str): \"\"\"For X format", "input FITS file name \"\"\" f = open(filename) f.info() f.close() UNDEFINED = Undefined()", "header info. else: _shape = () for j in range(self.header['NAXIS']): if isinstance(self, GroupsHDU)", "/ 2. # throw away -2^N _scale = (max - min) / (2.**(8*_type.bytes)", "value/comment string. It returns a match object # for a valid value/comment string.", "j) self._resize = 1 def _verify (self, option='warn'): _text = '' _err =", "i in range(hdu.data._nfields): if isinstance(hdu.data._coldefs._recformats[i], _FormatP): for j in range(len(hdu.data.field(i))): coldata = hdu.data.field(i)[j]", "= None, None if header is not None: if not isinstance(header, Header): raise", "isinstance(other, Column): b = [other] elif isinstance(other, ColDefs): b = list(other.data) else: raise", "\"\"\" if isinstance(input, num.NumArray): _formats = '' _cols = [] if pardata is", "the first one must start with CONTINUE and the whole card must have", "longstring case (CONTINUE card) else: # try not to use CONTINUE if the", "overlaps to the next column\" % indx+1 if 'A' in _format: _pc =", "value must be a list (or tuple) containing arrays else: if isinstance(value, (list,", "else: _arr = tmp._arrays[i] if _scale: _arr *= bscale if _zero: _arr +=", "= self.header['BITPIX'] self._file.seek(self._datLoc) if isinstance(self, GroupsHDU): dims = self.size()*8/abs(_bitpix) else: dims = self._dimShape()", "'Float32' etc.). If is None, use the current data type. option: how to", "cards in front of END. bottom: If =0 (default) the card will be", "information on a FITS file. This includes the name, type, length of header,", "'' or value != '' or comment != '': self._setkey(key) self._setvalue(value) self._setcomment(comment) #", "ValueError, \"Illegal format %s\" % fmt return output_format def _convert_ASCII_format(input_format): \"\"\"Convert ASCII table", "view) of the input header, since it # may get modified. the data", "header: header to be used for the HDU name: the EXTNAME value \"\"\"", "= '' if self.__dict__.has_key('value'): valStr = str(self.value) # put all parts together output", "else: _result = self[i]._verify(option) if _result: _err.append(_result) return _err def append(self, hdu): \"\"\"Append", "_type: self.data = num.array(num.around(self.data), type=_type) #0.7.7.1 class PrimaryHDU(_ImageBaseHDU): \"\"\"FITS primary HDU class.\"\"\" def", "tmp = list(self.data) + b else: tmp = b + list(self.data) return ColDefs(tmp)", "(pcount + datasize) / 8 if simple and not groups: name = 'PRIMARY'", "'*80 self.ascard.append(new_card, useblanks=useblanks, bottom=1) else: try: _last = self.ascard.index_of(key, backward=1) self.ascard.insert(_last+1, new_card) except:", "or name.\"\"\" _key = self.index_of(key) if isinstance(hdu, (slice, list)): if isinstance(_key, int): raise", "new_unit) def info(self, attrib='all'): \"\"\"Get attribute(s) information of the column definition.\"\"\" \"\"\"The attrib", "\"\"\"Construct a Column by specifying attributes. 
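The FITS_rec fragments above describe the record array that sits behind a table HDU's .data attribute, with columns retrieved through field(). A minimal sketch of reading a table, assuming the legacy pyfits interface; 'table.fits' and the column name 'flux' are placeholders:

import pyfits

hdulist = pyfits.open('table.fits')
tbdata = hdulist[1].data             # a FITS_rec backing the first table extension

flux = tbdata.field('flux')          # column by name (exact match first, then case-insensitive)
first = tbdata.field(0)              # column by index
row0 = tbdata[0]                     # a single row (record) of the table

colnames = hdulist[1].columns.names  # ColDefs keeps per-column attribute lists (names, formats, ...)
hdulist.close()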
A Card represents one 80-character header card image, parsed into a keyword, a value, and an optional comment. Card(key='', value='', comment='') builds one from attributes, fromstring() builds one from a raw image string, and ascardimage(option='silentfix') regenerates the image from the current attributes, fixing minor standard violations according to the chosen option (the image is also rebuilt automatically whenever the value or comment attribute is changed). Commentary keywords ('', COMMENT, HISTORY) take only a string value and no comment. Value strings are checked against FITS-standard-compliant (FSC) and non-FSC (NFSC) regular expressions; the NFSC form tolerates lower-case d/e exponents and extra spaces inside numbers, and _fixValue() translates such values back to standard form, while an unparsable value string raises ValueError ('Card image is not FITS standard (unparsable value string).'). String values longer than one card are spread over CONTINUE cards, handled by the _Card_with_continue subclass as a deliberately primitive implementation in which every continued part must itself be a string value; HIERARCH keywords are supported, but CONTINUE is not supported for HIERARCH cards.
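As a minimal sketch of this interface (the keyword, value, and comment below are invented for illustration and are not taken from the source):

    import pyfits

    card = pyfits.Card('OBSERVER', 'E. Hubble', 'observer name')
    print str(card)            # the padded 80-character card image

    card.value = 'W. Baade'    # changing an attribute rebuilds the image
    card.ascardimage()         # or rebuild explicitly (option defaults to 'silentfix')
    print card.key, card.value, card.comment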
A Header wraps a CardList. Indexing by keyword or by position returns a card's value, assignment sets it, and del removes every card carrying that keyword; HIERARCH keywords are recognised by stripping the 'HIERARCH' prefix before lookup, and changes mark the header as modified. update(key, value, comment=None, before=None, after=None) replaces the value and comment of an existing card or appends a new one; when a location is given, before takes precedence over after, and blank cards sitting in front of END are reused before the header grows. get(key, default=None) returns a default instead of raising for a missing keyword, has_key() tests for presence, rename_key(oldkey, newkey, force=0) renames a card (regular and commentary keys cannot be renamed into each other, and renaming to CONTINUE is refused), and ascardlist() returns the underlying CardList, whose keys(), values(), items(), index_of(key, backward=0), append(card, useblanks=1, bottom=0), and insert() methods operate on Card objects directly.
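A short sketch of header editing through this interface; the keywords and values are placeholders:

    import pyfits

    hdr = pyfits.Header()                                       # start from an empty header
    hdr.update('EXPTIME', 1500.0, 'exposure time in seconds')   # appends a new card
    hdr.update('FILTER', 'F606W', before='EXPTIME')             # placement control
    hdr.update('EXPTIME', 1800.0)                               # existing card: value replaced

    print hdr['FILTER']                   # value lookup by keyword
    print hdr.get('AIRMASS', 0.0)         # default instead of KeyError
    print str(hdr.ascardlist())           # the underlying CardList, one card per line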
Files are opened with open(name, mode='copyonwrite', memmap=0), also exported as fitsopen, which wraps the name in an internal _File object and returns an HDUList. The recognised modes are readonly, copyonwrite, update, and append, mapped onto matching Python file and memmap modes; gzipped input is unpacked into a temporary file, and memory mapping of the data is optional. Each HDU is initially read as a lightweight _TempHDU holding only the raw header and the location and span of its data; the real HDU object is set up the first time it is indexed, which speeds up opening files with many extensions, and readall() forces every data section into memory. HDUList behaves like a list of HDUs: append(), insert(), and item assignment accept HDU objects (a default PrimaryHDU is inserted at the front when needed and update_extend() keeps the EXTEND keyword of the primary header consistent), index_of() and indexing accept an extension number, a name, or an (EXTNAME, EXTVER) tuple, and info() prints a 'Filename / No. Name Type Cards Dimensions Format' summary line per HDU. flush(output_verify='exception', verbose=0) writes changes back for the append and update modes, rewriting the whole file through a temporary file whenever an HDU has changed size; writeto(name, output_verify='exception', clobber=False) writes the list to a new file, and close() flushes where appropriate and closes the file and any memmap object.
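A sketch of the basic open/inspect/update cycle (file and extension names are hypothetical):

    import pyfits

    pyfits.info('in.fits')                     # one summary line per HDU

    hdulist = pyfits.open('in.fits')           # default mode is 'copyonwrite'
    print len(hdulist), hdulist[0].header['NAXIS']
    sci = hdulist['SCI'].data                  # index by number, name, or (name, version)
    hdulist.close()

    f = pyfits.open('in.fits', mode='update')
    f[1].header.update('BUNIT', 'counts', 'physical unit of the array')
    f.flush()                                  # rewrite in place, or via a temp file if resized
    f.close()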
On top of the classes sit one-call convenience functions. getheader(filename, *ext, **extkeys) returns the Header of the requested extension, and getdata(filename, *ext, **extkeys) returns its data, or a (data, header) tuple when the keyword header is set to True; when no extension is named and the primary HDU carries no data, getdata falls through to the first extension. Extensions may be specified by number, by EXTNAME, by an (EXTNAME, EXTVER) tuple, or with the extname and extver keywords, so getdata('in.fits', 'sci'), getdata('in.fits', extname='sci'), getdata('in.fits', ('sci', 2)), and getdata('in.fits', extname='sci', extver=2) are equivalent forms; EXTNAMEs are not case sensitive, and redundant or conflicting specifications raise KeyError. getval() retrieves a single keyword value (string, integer, or float) using the same extension specification. writeto(filename, data, header, clobber=...) creates a new file from a data/header pair, building a minimal header when none is supplied; append(filename, data, header=None) writes the pair to the end of an existing file, turning the header into an image extension header, or creates the file when it does not exist; update(filename, data, header, ext) replaces the specified extension, for example update(file, dat, hdr, 3) or update(file, dat, header=hdr, ext=5); and info(filename) simply opens the file, prints the HDUList summary, and closes it.
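A sketch of this workflow; the file names and extension names are placeholders, and the exact positional form of the getval() call is assumed rather than visible in the source:

    import pyfits

    sci = pyfits.getdata('in.fits', extname='sci', extver=2)
    img, hdr = pyfits.getdata('in.fits', 1, header=True)
    texp = pyfits.getval('in.fits', 'EXPTIME')          # single keyword value

    pyfits.writeto('out.fits', img, hdr, clobber=True)  # new file
    pyfits.append('out.fits', sci, hdr)                 # extra extension
    pyfits.update('out.fits', sci * 2, hdr, 1)          # replace extension 1
    pyfits.info('out.fits')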
_ImageBaseHDU and its subclasses PrimaryHDU and ImageHDU hold array data. BITPIX values map to numarray types through NumCode ({8: 'UInt8', 16: 'Int16', 32: 'Int32', 64: 'Int64', -32: 'Float32', -64: 'Float64'}) and back through ImgCode. Data are read lazily from the location recorded at open time, kept big-endian on disk, and scaled by BSCALE and BZERO on first access, after which those keywords are dropped from the in-memory header. scale(type=None, option='old', bscale=1, bzero=0) converts the data to a new storage type, taking the scaling either from explicit bscale/bzero arguments, from the old keywords, or from the data range (zero at the mid-point of min and max, scale spanning the integer range with the extreme value thrown away); the data hold storage values afterwards and are therefore not very usable until re-read. The section attribute of an image HDU gives access to contiguous subsets without loading the whole array, and stepped slices are rejected because subsection data must be contiguous. GroupsHDU and GroupData implement random-groups files: group parameters are tracked through a parnames list, parameters repeated under the same name are combined when fetched by name, and updating the header from the data maintains the GROUPS, PCOUNT, and GCOUNT keywords along with per-parameter PSCAL/PZERO scaling.
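A minimal sketch of creating an image HDU under these conventions; the array contents are arbitrary, and numarray is assumed since this vintage of the module is built on it:

    import numarray as num
    import pyfits

    data = num.zeros((10, 10), type='Float32')
    data[5, 5] = 1.0

    hdu = pyfits.PrimaryHDU(data)
    hdu.header.update('BUNIT', 'counts')
    hdu.scale('Int16')                      # change storage type; bscale/bzero may be given

    hdulist = pyfits.HDUList([hdu])
    hdulist.writeto('new.fits', clobber=True)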
Table extensions come in two flavours, TableHDU for ASCII tables and BinTableHDU (XTENSION values TABLE, BINTABLE, or A3DTABLE), both built on _TableBaseHDU. Their data attribute is a FITS_rec, a record-array subclass that converts fields lazily: field() returns physical values, applying TSCAL/TZERO scaling, converting ASCII-table text to numbers (with ASCIITNULL substituted for null cells), unpacking X (bit) columns into Boolean arrays, and resolving P (variable-length array) columns against the heap, whose size and gap are tracked so PCOUNT can be updated when the table is written. Column definitions are collected in a ColDefs, built either from a list of Column objects or from the TTYPE/TFORM and related header keywords; Column(name, format, unit, null, bscale, bzero, disp, start, dim, array) takes all attributes as optional except format, and TFORM codes are translated to record formats by _parse_tformat/_convert_format using the mapping L to i1 (Boolean), B to u1, I to i2, J to i4, K to i8, E to f4, D to f8, A to character, C to c8, M to c16. Field (column) names are case sensitive, so two columns may differ only in case, but lookup by name tries an exact match first and then a case-insensitive one. A factory function builds a new table HDU from a ColDefs; its docstring describes the nrows and fill options and a tbtype argument defaulting to 'BinTableHDU', with unfilled cells padded with zeros or blanks.
\"\"\" self.__file", "list.__init__(self, val) self.unit = unit def __str__(self, tab=0): \"\"\"Print out nested structure with", "\"\"\"Delete card(s) with the name 'key'.\"\"\" # delete ALL cards with the same", "EXTEND if header is None: dim = `self.header['NAXIS']` if dim == '0': dim", "'a': data_output[i] = chararray.array(input[i], itemsize=1) else: data_output[i] = num.array(input[i], type=dtype) desp_output[i,0] = len(data_output[i])", "array def __repr__(self): text = '' for cname in _commonNames: value = getattr(self,", "space to the input string to be multiple of 80.\"\"\" _len = len(input)", "hdu.data._parent.field(i)[:n], tmp._recformats[i]._dtype) else: if tbtype == 'TableHDU': # string no need to convert,", "in the header, the stream is padded to fill a complete FITS block", "key): \"\"\"Delete a Card from the CardList.\"\"\" _key = self.index_of(key) super(CardList, self).__delitem__(_key) del", "to a FITS file instead of requiring data to all be written at", "_format[:-2] + ']' _dims = \"%dR x %dC\" % (_nrows, _ncols) return \"%-10s", "with CONTINUE must have string value.' if name == 'value': _val = re.sub(\"''\",", "(newkey in Card._commentaryKeys and oldkey in Card._commentaryKeys): raise ValueError, 'Regular and commentary keys", "it may not look pretty. \"\"\" val_len = 67 comm_len = 64 output", "'TABLE': self._hdutype = TableHDU elif xtension == 'IMAGE': self._hdutype = ImageHDU elif xtension", "_checkText(self, val): \"\"\"Verify val to be printable ASCII text.\"\"\" if Card._comment_FSC_RE.match(val) is None:", "= abs(bitpix) * gcount * (pcount + datasize) / 8 if simple and", "by number or name.\"\"\" _key = self.index_of(key) if isinstance(hdu, (slice, list)): if isinstance(_key,", "be appended after the last non-commentary card. If =1, the card will be", "Is memmory mapping to be used? default=0. \"\"\" # instantiate a FITS file", "% keywd if fixable: # use repr to accomodate both string and non-string", "\"\"\"Convert FITS format spec to record format spec. Do the opposite if reverse", "value should occupies at least 8 columns, unless it is # a null", "def __init__(self, npts, offset): self.npts = npts self.offset = offset class _WholeLine(_KeyType): pass", "== 'TableHDU': dummy = self._convert[indx] else: continue # ASCII table, convert numbers to", "if the card does not exist if _index is None: err_text = \"'%s'", "is a list of Columns elif isinstance(input, (list, tuple)): for col in input:", "too long.\" % self.key if len(output) <= Card.length: output = \"%-80s\" % output", "else: setattr(self, cname, value) # if the column data is not NDarray, make", "= eval(real.group('sign')+_rdigt) imag = Card._number_NFSC_RE.match(valu.group('imag')) _idigt = imag.group('digt').translate(_fix_table2, ' ') if imag.group('sign') ==", "block.\"\"\" return (_blockLen - stringLen%_blockLen) % _blockLen def _tmpName(input): \"\"\"Create a temporary file", "# legit FITS format? convert to record format (e.g. '3J'->'3i4') recfmt = _convert_format(format)", "# Read the first header block. block = self.__file.read(_blockLen) if block == '':", "Column(format='I') # just use a throw-away format tmp.__dict__=self.__dict__.copy() return tmp class ColDefs(object): \"\"\"Column", "of header. Strip cards like SIMPLE, BITPIX, etc. 
so the rest of the", "key[1] else: _key = key _ver = None if not isinstance(_key, str): raise", "no \"before\" or \"after\" is specified, it will be appended at the end.", "the provided data would cause the stream to overflow, an IOError exception is", "column # format spec, i.e. A7 in ASCII table is the same as", "if _number or _str: if _number and (_scale or _zero): dummy = self._convert[indx].copy()", "_number and (_scale or _zero): # only do the scaling the first time", "in range(hdu.data._nfields): if isinstance(hdu.data._coldefs._recformats[i], _FormatP): for j in range(len(hdu.data.field(i))): coldata = hdu.data.field(i)[j] if", "%s' % ext2 elif n_ext1 == 2: if n_ext2 == 0: ext =", "to @type data: array, record array, or groups data object @param data: data", "table cell with value = TNULL # this can be reset by user.", "is False. \"\"\" if header is None: if 'header' in keys: header =", "index, always returns 0. \"\"\" try: key = key.strip().upper() if key[:8] == 'HIERARCH':", "hdu._datLoc = self.__file.tell() # beginning of the data area # data area size,", "'Option %s not recognized.' % option if (_option == \"ignore\"): return x =", "the # given header. # if not os.path.exists(name): if not self.header.has_key('SIMPLE'): hdulist =", "% value if \".\" not in valueStr and \"E\" not in valueStr: valueStr", "x if 'D' in _format: self._parent.field(indx).sub('E', 'D') # binary table else: if isinstance(self._parent.field(indx)._type,", "not None: _comment = comment else: _comment = self.ascard[j].comment self.ascard[j] = Card(key, value,", "for j in range(len(axes)): try: self.header['NAXIS'+`j+1`] = axes[j] except: if (j == 0):", "raise AttributeError(attr) def _summary(self): \"\"\"Summarize the HDU: name, dimensions, and formats.\"\"\" class_name =", "= keys.get('clobber', False) hdu.writeto(filename, clobber=clobber) def append(filename, data, header=None): \"\"\"Append the header/data to", "_File(name, mode=mode, memmap=memmap) hduList = HDUList(file=ffo) # read all HDU's while 1: try:", "the CardList. key: the keyword name (a string) or the index (an integer).", "num.NumArray): self.header['BITPIX'] = _ImageBaseHDU.ImgCode[self.data.type()] axes = list(self.data.getshape()) axes.reverse() elif self.data is None: axes", "object @param data: data to write to the new file @type header: L{Header}", "extension >>> update(file, dat, header=hdr, ext=5) # update the 5th extension \"\"\" #", "self.__dict__['value'] = val def _setcomment(self, val): \"\"\"Set the comment attribute.\"\"\" if isinstance(val,str): self._checkText(val)", "AttributeError(attr) def _dimShape(self): \"\"\"Returns a tuple of image dimensions, reverse the order of", "extension name if (name is None) and self.header.has_key('EXTNAME'): name = self.header['EXTNAME'] self.name =", "be inserted. useblanks: Use any *extra* blank cards? default=1. If useblanks != 0,", "width of each field for ASCII table if self._coldefs._tbtype == 'TableHDU': _loc =", "must be contiguous.' for j in range(i+1,naxis): _naxis = self.hdu.header['NAXIS'+`naxis-j`] indx = _iswholeline(key[j],", "_numr_NFSC = r'[+-]? 
*' + _digits_NFSC # This regex helps delete leading zeros", "_value_NFSC_RE = re.compile( r'(?P<valu_field> *' r'(?P<valu>' r'\\'(?P<strg>([ -~]+?|\\'\\'|)) *?\\'(?=$|/| )|' r'(?P<bool>[FT])|' r'(?P<numr>' +", "try: hduList.append(ffo._readHDU()) except EOFError: break # check in the case there is extra", "elif isinstance(self, PrimaryHDU): hdulist = HDUList([self]) hdulist.writeto(name, output_verify, clobber=clobber) def _verify(self, option='warn'): _err", "- 1 attr[i] = _end - last_end last_end = _end self._width = _end", "of the HDU. :Parameters: None :Returns: size : integer The number of bytes", "or comment != '': self._setkey(key) self._setvalue(value) self._setcomment(comment) # for commentary cards, value can", "self.__file = self.tfile.file self.__file.write(zfile.read(namelist[0])) zfile.close() else: self.__file = __builtin__.open(self.name, _python_mode[mode]) # For 'ab+'", "for hdu in self: hdu.header._mod = 0 hdu.header.ascard._mod = 0 hdu._new = 0", "getdata('in.fits', 'sci') >>> getdata('in.fits', extname='sci') # equivalent Note EXTNAMEs are not case sensitive", "to make sure the content is written self.__file.flush() # return both the location", "\"Data is inconsistent with the format `%s`.\" % format else: raise ValueError, \"Must", "_comm.rstrip() def _fixValue(self, input): \"\"\"Fix the card image for fixable non-standard compliance.\"\"\" _valStr", "be FITS standard.: %s' % self.key else: self.__dict__['_err_text'] = 'Card image is not", "the comment separator is found, though the # comment maybe an empty string.", "mapping. If there is a field named \"XYZ\" and no other field name", "= [] _keyList = [] blocks = self._raw if (len(blocks) % _blockLen) !=", "the end of the file. \"\"\" self.header = header.copy() # # Check if", "len(indx) == len(value): for i in range(len(indx)): self.field(indx[i])[:] = value[i] else: raise ValueError,", "cause an exception since there is no unique mapping. If there is a", "\"\"\"Summarize the HDU: name, dimensions, and formats.\"\"\" class_name = str(self.__class__) type = class_name[class_name.rfind('.')+1:]", "string no need to convert, if isinstance(tmp._arrays[i], chararray.CharArray): hdu.data._parent.field(i)[:n] = tmp._arrays[i][:n] else: hdu.data._convert[i]", "modified to an image extension header and appended to the end of the", "when the file is written. input: input object array desp_output: output \"descriptor\" array", "table.\"\"\" pass # TFORM regular expression _tformat_re = re.compile(r'(?P<repeat>^[0-9]*)(?P<dtype>[A-Za-z])(?P<option>[!-~]*)') # table definition keyword", "for j in range(self.header['NAXIS']): if isinstance(self, GroupsHDU) and j == 0: continue _shape", "= 'BINTABLE' hdr = self.header if hdr[0] != self._xtn: hdr[0] = self._xtn hdr.ascard[0].comment", "HDU base class.\"\"\" \"\"\"Attributes: header: image header data: image data _file: file associated", "range(len(self))].__iter__() def __getitem__(self, key): \"\"\"Get an HDU from the HDUList, indexed by number", "the data object supplied. 
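A sketch of the streaming pattern just described; the header values, chunk shape, and file name are illustrative:

    import numarray as num
    import pyfits

    hdr = pyfits.Header()
    hdr.update('SIMPLE', True)
    hdr.update('BITPIX', -32)        # Float32 data
    hdr.update('NAXIS', 2)
    hdr.update('NAXIS1', 100)
    hdr.update('NAXIS2', 1000)

    shdu = pyfits.StreamingHDU('stream.fits', hdr)
    for i in range(10):                               # 100 rows per write
        chunk = num.zeros((100, 100), type='Float32')
        done = shdu.write(chunk)                      # the writeComplete flag
    print done, shdu.size()                           # complete; 400000 bytes declared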
\"\"\" if not os.path.exists(filename): writeto(filename, data, header) else: hdu=_makehdu(data,", "'K':'i8'} # the reverse dictionary of the above _rec2fits = {} for key", "header keyword value.\"\"\" self.ascard[key].value = value self._mod = 1 def __delitem__(self, key): \"\"\"Delete", "multiple of %d: %d' % (_blockLen, len(blocks)) elif (blocks[:8] not in ['SIMPLE ',", "to be a list of null strings.\"\"\" for cname in _commonNames: setattr(self, cname+'s',", "it does not break at the blank space between words. So it may", "_offset = 0 data_output = _VLF([None]*len(input)) data_output._dtype = dtype if dtype == 'a':", "FITS standard. key: keyword name, default=''. value: keyword value, default=''. comment: comment, default=''.", "(raw) string. It will pad the string if it is not the length", "__str__(self): \"\"\"Format a list of cards into a printable string.\"\"\" output = ''", "def flush(self, output_verify='exception', verbose=0): \"\"\"Force a write of the HDUList back to the", "output_format._dtype = _fits2rec[option[0]] elif dtype == 'F': output_format = 'f8' else: raise ValueError,", "bitpix is None: bitpix = _ImageBaseHDU.ImgCode[input.type()] fits_fmt = GroupsHDU._dict[bitpix] # -32 -> 'E'", "to be FITS standard.: %s' % self.key else: self.__dict__['_err_text'] = 'Card image is", "%s is not an extension HDU.\" % `i` _text = self.run_option(option, err_text=err_text, fixable=0)", "_key: # if only specify extname, can only have one extension with #", "Data: if 'data' not in dir(hdu): continue if hdu.data is None: continue _bytes", "@return: keyword value @rtype: string, integer, or float \"\"\" _hdr = getheader(filename, *ext,", "_tbsize # comment out to avoid circular reference of _pcount # pass the", "header card.\"\"\" \"\"\" If the keyword already exists, it's value/comment will be updated.", "*ext, **extkeys): \"\"\"Update the specified extension with the input data/header. @type filename: string", "range(len(val_list)): if i == 0: headstr = \"%-8s= \" % self.key else: headstr", "is not a Card\" % str(value) def __delitem__(self, key): \"\"\"Delete a Card from", "# _convert is the scaled (physical) array. self._parent = input self._convert = [None]*self._nfields", "= input else: self._parent.field(npars)[:] = input else: self.__setstate__(input.__getstate__()) def __getattr__(self, attr): if attr", "handler keyboardInterruptSent = False def New_SIGINT(*args): print \"KeyboardInterrupt ignored until flush is complete!\"", "following conditions are met: 1. Redistributions of source code must retain the above", "the I{PyFITS User's Manual} available from U{http://www.stsci.edu/resources/software_hardware/pyfits/Users_Manual1.pdf} Epydoc markup used for all docstrings", "length = 80 # String for a FITS standard compliant (FSC) keyword. _keywd_FSC", "commentary cards _commentaryKeys = ['', 'COMMENT', 'HISTORY'] def __init__(self, key='', value='', comment=''): \"\"\"Construct", "== '&': _val = _val[:-1] longstring = longstring + _val elif name ==", "header data: image data _file: file associated with array (None) _datLoc: starting byte", "use header info. else: _shape = () for j in range(self.header['NAXIS']): if isinstance(self,", "by a URL cannot be accessed\"\"\" def http_error_default(self, url, fp, errcode, errmsg, headers):", "if nc > 0: _longstring = _cardList[_where-1]._cardimage for c in _cardList[_where:_where+nc]: _longstring +=", "%s, start must be integer.' 
% input _stop = input.stop if _stop is", "not in ['', None, 0] or bscale not in ['', None, 1]: array", "self.data._coldefs.bscales[i]) if _zero: self.header.update('PZERO'+`i+1`, self.data._coldefs.bzeros[i]) def __getattr__(self, attr): \"\"\"Get the data attribute.\"\"\" if", "input self._convert = [None]*self._nfields self.names = self._names def copy(self): r = rec.RecArray.copy(self) r.__class__", "# also copy the class tmp._hdutype = self._hdutype return tmp def _strip(self): \"\"\"Strip", "_zero) = self.data._get_scale_factors(npars)[3:5] if _scale: self.header.update('BSCALE', self.data._coldefs.bscales[npars]) if _zero: self.header.update('BZERO', self.data._coldefs.bzeros[npars]) for i", "shift the unused bits num.lshift(output[...,i], unused, output[...,i]) def _makep(input, desp_output, dtype): \"\"\"Construct the", "be calculated when the file is written. input: input object array desp_output: output", "/ 8 return size def _verify(self, option='warn'): _err = PrimaryHDU._verify(self, option=option) # Verify", "self._parent.field(indx) # ASCII table, convert strings to numbers if self._coldefs._tbtype == 'TableHDU': _dict", "CardList([ Card('XTENSION', '', ''), Card('BITPIX', 8, 'array data type'), Card('NAXIS', 2, 'number of", "0\", 0, option, _err) return _err class GroupsHDU(PrimaryHDU): \"\"\"FITS Random Groups HDU class.\"\"\"", "= self._locateEq() if eqLoc is None: eqLoc = 7 return self._cardimage[eqLoc+1:] def _check(self,", "isinstance(value, str): raise TypeError, 'bad value type' value = value.upper() if self.header.has_key('EXTNAME'): self.header['EXTNAME']", "r'(?P<cplx>\\( *' r'(?P<real>' + _numr_FSC + ') *, *(?P<imag>' + _numr_FSC + ')", "name to be written to. output_verify: output verification option, default='exception'. clobber: Overwrite the", "class. FITS record array is the data part of a table HDU's data", "bzero=None, disp=None, start=None, \\ dim=None, array=None): \"\"\"Construct a Column by specifying attributes. All", "\"\"\" if isinstance(key, (int, long)): return key elif isinstance(key, str): _key = key.strip().upper()", "flags and factors for one field. indx is the index of the field.", "FITS standard, see the NASA/Science Office of Standards and Technology publication, NOST 100-2.0.", "raise IndexError, 'Index %s out of range.' % indx elif isinstance(indx, slice): indx", "BSCALE of the data bzero: BZERO of the data parbscales: list of bscales", "isinstance(key, tuple): _key = key[0] _ver = key[1] else: _key = key _ver", "or # scientific notation. One for FSC and one for non-FSC (NFSC) format:", "def __init__(self, name, mode='copyonwrite', memmap=0): if mode not in _python_mode.keys(): raise \"Mode '%s'", "in ['append', 'update']: self.flush(output_verify=output_verify, verbose=verbose) self.__file.close() # close the memmap object, it is", "an IOError exception is raised and the data is not written. Once sufficient", "at the wrong place (card %d).\" % (keywd, _index) fix_text = \"Fixed by", "== False): hdr['extend'] = True else: if hdr['naxis'] == 0: hdr.update('extend', True, after='naxis')", "(keywd, `fix_value`) _err.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix, fixable=fixable)) return _err class _TempHDU(_ValidHDU): \"\"\"Temporary HDU,", "referenced If the optional keyword 'header' is set to True, this function will", "already exist. 
Use the directory of the input file and the base name", "_fits2rec[option[0]] elif dtype == 'F': output_format = 'f8' else: raise ValueError, \"Illegal format", "= 'big' # scale by TSCAL and TZERO if _scale or _zero: for", "elif isinstance(key, str): # try to find exact match first try: indx =", "the HDUList back to the file (for append and update modes only). output_verify:", "or 'columns' attribute. The data of random group FITS file will be like", "randomGroups = self.header.get('GROUPS','F') if simple == 'T' and randomGroups == 'T': groups =", "= _cardList[_where-1]._cardimage for c in _cardList[_where:_where+nc]: _longstring += c._cardimage _cardList[_where-1] = _Card_with_continue().fromstring(_longstring) del", "are for extension specification. They are flexible and are best illustrated by examples:", "of all keywords from the CardList.\"\"\" return map(lambda x: getattr(x,'key'), self) def index_of(self,", "npars = len(self.data.parnames) (_scale, _zero) = self.data._get_scale_factors(npars)[3:5] if _scale: self.header.update('BSCALE', self.data._coldefs.bscales[npars]) if _zero:", "the keyword is undefined. The comment field will # return a match if", "already exists in header.' % newkey _index = self.ascard.index_of(oldkey) _comment = self.ascard[_index].comment _value", "an integer or string. If integer, it is the index in the list.", "(an HDU object).\"\"\" tmp = hdu.columns # get the right shape for the", "field method), it will try to match the exact name first, so in", "a (raw) string. It will pad the string if it is not the", "hdr def getdata(filename, *ext, **extkeys): \"\"\"Get the data from an extension of a", "nx): \"\"\"Unwrap the X format column into a Boolean array. input: input Uint8", "self['GCOUNT'] if issubclass(self._hdutype, PrimaryHDU): del self['GROUPS'] if issubclass(self._hdutype, _ImageBaseHDU): del self['BSCALE'] del self['BZERO']", "with SIMPLE or XTENSION' for i in range(0, len(blocks), Card.length): _card = Card('').fromstring(blocks[i:i+Card.length])", "is None: continue _bytes = hdu.data._itemsize*hdu.data.nelements() _bytes = _bytes + _padLength(_bytes) if _bytes", "else: raise ValueError, \"parameter value must be a sequence with %d arrays/numbers.\" %", "else: for hdu in self: if (verbose): try: _extver = `hdu.header['extver']` except: _extver", "self._parent.field(npars)[:] = input else: self.__setstate__(input.__getstate__()) def __getattr__(self, attr): if attr == 'data': self.__dict__[attr]", "= _card.comment if isinstance(_comm, str) and _comm != '': longstring = longstring +", "if the column data is not NDarray, make it to be one, i.e.", "cname+'s') del attr[indx] del self._arrays[indx] self._nfields -= 1 def change_attrib(self, col_name, attrib, new_value):", "space already in memory else: self.data = raw_data if self._bscale != 1: num.multiply(self.data,", "dummy=_cards[%d]; del _cards[%d];_cards.insert(%d, dummy)\" % (_index, _index, insert_pos) _err.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix)) #", "== True: self._hdutype = PrimaryHDU else: self._hdutype = _ValidHDU elif cards[0].key == 'XTENSION':", "= Card('').fromstring(block[i:i+Card.length]) _key = _card.key cardList.append(_card) keyList.append(_key) if _key == 'END': break def", "header = extkeys['header'] del extkeys['header'] new_hdu=_makehdu(data, header) hdulist, _ext = _getext(filename, 'update', *ext,", "PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER", "keyword value or comment from the card 
image.\"\"\" # for commentary cards, no", "cards specific to a certain kind of header. Strip cards like SIMPLE, BITPIX,", "endorsed by the International Astronomical Union in 1999 and mandated by NASA as", "are used only for the first case. bitpix: data type as expressed in", "raw_data._byteorder = 'big' return raw_data class _ImageBaseHDU(_ValidHDU): \"\"\"FITS image HDU base class.\"\"\" \"\"\"Attributes:", "\"\"\" ascii_fmt = {'A':'A1', 'I':'I10', 'E':'E14.6', 'F':'F16.7', 'D':'D24.16'} self._tbtype = tbtype if isinstance(input,", "specified by before or after. The argument `before' takes precedence over `after' if", "format %s\" % fmt else: if dtype == 'a': output_format = option+_rec2fits[dtype] elif", "if isinstance(self, _ExtensionHDU): c0 = Card('XTENSION', 'IMAGE', 'Image extension') else: c0 = Card('SIMPLE',", "copy(self): \"\"\"Make a copy of the Header.\"\"\" tmp = Header(self.ascard.copy()) # also copy", "memmap object, it is designed to use an independent # attribute of mmobject", "when the extension is a TableHDU containing ASCII data. \"\"\" def __init__(self, data=None,", "None: dim = `self.header['NAXIS']` if dim == '0': dim = '' self.header.update('EXTEND', True,", "in the object array are consistent. if not isinstance(array, (num.NumArray, chararray.CharArray, Delayed)): try:", "all keywords from the CardList.\"\"\" return map(lambda x: getattr(x,'key'), self) def index_of(self, key,", "isinstance(self, GroupsHDU): _gcount = ' %d Groups %d Parameters' % (self.header['GCOUNT'], self.header['PCOUNT']) else:", "parsed from the card image.\"\"\" head = self._getKeyString() if isinstance(self, _Hierarch): self.__dict__['key'] =", "def __init__(self, name=None, format=None, unit=None, null=None, \\ bscale=None, bzero=None, disp=None, start=None, \\ dim=None,", "self.__dict__['_cardimage'] = _pad(input) if self._cardimage[:8].upper() == 'HIERARCH': self.__class__ = _Hierarch # for card", "HDU to a new file. This is a convenience method to provide a", "self._tbtype = tbtype if isinstance(input, ColDefs): self.data = [col.copy() for col in input.data]", "of NAXIS.\"\"\" naxis = self.header['NAXIS'] axes = naxis*[0] for j in range(naxis): axes[j]", "_ErrList([]) class _Card_with_continue(Card): \"\"\"Cards having more than one 80-char \"physical\" cards, the cards", "output_verify = 'exception' self.verify(option=output_verify) # check if the output file already exists if", "elif isinstance(_start, (int, long)): _start = _normalize(_start, naxis) else: raise IndexError, 'Illegal slice", "if the primary header needs the keyword EXTEND or if it has the", "-1 except: offset = len(input) # check for one word longer than strlen,", "The key can be an integer or string. 
If integer, it is the", "now) hcopy = header.copy() hcopy._strip() _list.extend(hcopy.ascardlist()) self.header = Header(_list) if (data is not", "_err.append(self.run_option(option, err_text=self._err_text, fix_text=self._fix_text, fixable=self._fixable)) return _err class _Hierarch(Card): \"\"\"Cards begins with HIERARCH which", "TableHDU): for i in range(_tfields): del self['TBCOL'+`i+1`] except: pass class CardList(list): \"\"\"FITS header", "self._convert[indx]) if _zero: self._convert[indx] += bzero elif _bool: self._convert[indx] = num.equal(dummy, ord('T')) else:", "val_list[i] output = output + '%-80s' % (headstr + valstr) # do the", "keys: ext = ext1[0], ext2['extver'] raise KeyError, 'Redundant/conflicting keyword argument(s): %s' % ext2", "self[j].name if isinstance(_name, str): _name = _name.strip().upper() if _name == _key: # if", "self.key not in Card._commentaryKeys and self._cardimage.find('=') != 8: if option in ['exception', 'warn']:", "eqStr = '' if self.__dict__.has_key('value'): valStr = str(self.value) # put all parts together", "Card._value_FSC_RE.match(self._getValueCommentString()) if result is not None or self.key in Card._commentaryKeys: return result else:", "else: return input + ' ' * (Card.length-strlen) # minimum length is 80", "+ self.writeHDUdata(hdu) def writeHDUheader(self, hdu): \"\"\"Write FITS HDU header part.\"\"\" blocks = repr(hdu.header.ascard)", "not the stored data hdu.data._parent.field(i)[n:] = -bzero/bscale else: hdu.data._parent.field(i)[n:] = '' hdu.update() return", "_extractKey(self): \"\"\"Returns the keyword name parsed from the card image.\"\"\" head = self._getKeyString()", "real.group('digt').translate(_fix_table2, ' ') if real.group('sign') == None: _val = eval(_rdigt) else: _val =", "self._ncards() for i in range(ncards): # take each 80-char card as a regular", "elif self._coldefs._tbtype == 'TableHDU': dummy = self._convert[indx] else: continue # ASCII table, convert", "_commonNames: value = eval(cname) # get the argument's value keyword = _keyNames[_commonNames.index(cname)] if", "= re.compile('END'+' '*77) _hdrLoc = self.__file.tell() # Read the first header block. block", "_max): num.bitwise_and(input[...,i], pow2[j-i*8], output[...,j]) def _wrapx(input, output, nx): \"\"\"Wrap the X format column", "False. \"\"\" if (len(self) == 0): print \"There is nothing to write.\" return", "UInt8 case _zero = min _scale = (max - min) / (2.**8 -", "+= block block = self.__file.read(_blockLen) if block == '': break else: break hdu._raw", "is the scaled (physical) array. 
self._parent = input self._convert = [None]*self._nfields self.names =", "in the case there is extra space after the last HDU or corrupted", "self['TFIELDS'] for name in ['TFORM', 'TSCAL', 'TZERO', 'TNULL', 'TTYPE', 'TUNIT']: for i in", "@param filename: input FITS file name @type key: string @param key: keyword name", "for the parameters \"\"\" if isinstance(input, num.NumArray): _formats = '' _cols = []", "HDU\" # make sure the EXTEND keyword is in primary HDU if there", "'copyonwrite':'c', 'update':'r+'} TRUE = True # deprecated FALSE = False # deprecated _INDENT", "a tmp file, # delete the original file, and rename the tmp to", "Classes: ColDefs, Column, FITS_rec, _FormatP, _FormatX, _VLF \"\"\" \"\"\" Do you mean: \"Profits\"?", "input.group('numr') != None: numr = Card._number_NFSC_RE.match(input.group('numr')) _valStr = numr.group('digt').translate(_fix_table, ' ') if numr.group('sign')", "type %s' % type(key) def copy(self): \"\"\"Make a (deep)copy of the CardList.\"\"\" cards", "_blockLen) != 0: raise IOError, 'Header size is not multiple of %d: %d'", "type if type is None: type = self.NumCode[self._bitpix] _type = getattr(num, type) #", "\"XYZ\" and no other field name is a case variant of \"XYZ\", then", "FITS BITPIX value (8, 16, 32, 64, -32, or -64) pardata: parameter data,", "PrimaryHDU): del self['EXTEND'] del self['PCOUNT'] del self['GCOUNT'] if issubclass(self._hdutype, PrimaryHDU): del self['GROUPS'] if", "as 0th HDU.' fix = \"self.insert(0, PrimaryHDU())\" _text = self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix)", "_zero: dummy -= bzero if _scale: dummy /= bscale elif self._coldefs._tbtype == 'TableHDU':", "= [i.lower() for i in parnames] tmp = FITS_rec(rec.array(None, formats=_formats, shape=gcount, names= self._coldefs.names))", "out the heap of variable length array columns # this has to be", "header self.data = data self._xtn = ' ' def __setattr__(self, attr, value): \"\"\"Set", "\"XYZ\", then field('xyz'), field('Xyz'), etc. will get this field. \"\"\" if isinstance(key, (int,", "= [] blocks = self._raw if (len(blocks) % _blockLen) != 0: raise IOError,", "0 and val <= 999\", 0, option, _err) naxis = self.header.get('NAXIS', 0) if", "1] nbytes = ((nx-1) / 8) + 1 for i in range(nbytes): _min", "this list of conditions and the following disclaimer. 2. Redistributions in binary form", "the data attribute.\"\"\" if attr == 'section': return Section(self) elif attr == 'data':", "in range(_nfields): dict[col]['array'] = Delayed(input, col) # now build the columns tmp =", "raise AttributeError(attr) # 0.6.5.5 def size(self): \"\"\"Returns the size (in bytes) of the", "be placed. The argument `before' takes precedence over `after' if both specified. default=None.", "tmp = Column(format='I') # just use a throw-away format tmp.__dict__=self.__dict__.copy() return tmp class", "can be the header associated with the data. 
If the 3rd argument is", "else: if option in ['fix', 'silentfix']: result = self._check('parse') self._fixValue(result) if option ==", "if _scale or _zero: _arr = tmp._arrays[i].copy() else: _arr = tmp._arrays[i] if _scale:", "'Illegal index %s' % indx def _normalize_slice(input, naxis): \"\"\"Set the slice's start/stop in", "a different table type' elif isinstance(input, FITS_rec): # input is a FITS_rec tmp", "> 2: raise ValueError, \"too many positional arguments\" elif n_ext1 == 1: if", "content is written self.__file.flush() return loc def writeHDUdata(self, hdu): \"\"\"Write FITS HDU data", "_text = \"Unfixable error: %s\" % _text else: exec(fix) #if option != 'silentfix':", "mode in ['update', 'append']: raise \"Writing to gzipped fits files is not supported\"", "is DELAYED): return self.data = data # update the header self.update_header() self._bitpix =", "j in range(len(axes)+1, old_naxis+1): try: del self.header.ascard['NAXIS'+`j`] except KeyError: pass if isinstance(self.data, GroupData):", "\"\"\"For X format in binary tables.\"\"\" pass class _FormatP(str): \"\"\"For P format in", "_width - 1 attr[i] = _end - last_end last_end = _end self._width =", "search from the end. \"\"\" if isinstance(key, (int, long)): return key elif isinstance(key,", "attributes after updating self._resize = 0 for hdu in self: hdu.header._mod = 0", "\"\"\" list = [] _nblanks = input.count(' ') nmax = max(_nblanks, len(input)/strlen+1) arr", "_nblanks = input.count(' ') nmax = max(_nblanks, len(input)/strlen+1) arr = chararray.array(input+' ', itemsize=1)", "IndexError: raise IndexError, 'No data in this HDU.' if _data is None: raise", "are best illustrated by examples: No extra arguments implies the primary header >>>", "ext = ext2['ext'] elif n_ext2 == 2 and 'extver' in keys: ext =", "\"\"\"Construct a Card object from a (raw) string. It will pad the string", "None: return # Determine the destination (numarray) data type if type is None:", "pos, card, useblanks=1): \"\"\"Insert a Card to the CardList. pos: The position (index,", "gzip.GzipFile(self.name) self.tfile = tempfile.NamedTemporaryFile('rb+',-1,'.fits') self.name = self.tfile.name self.__file = self.tfile.file self.__file.write(zfile.read()) zfile.close() elif", "to a tmp file, # delete the original file, and rename the tmp", "_index = self.ascard._keylist.index(key) return 1 except: return 0 def rename_key(self, oldkey, newkey, force=0):", "self.update() elif data is None: pass else: raise TypeError, \"table data has incorrect", "# instead of issuing an error. The FITS standard # appears vague on", "with or without modification, are permitted provided that the following conditions are met:", "writeHDU(self, hdu): \"\"\"Write *one* FITS HDU. Must seek to the correct location before", "update_extend(self): \"\"\"Make sure if the primary header needs the keyword EXTEND or if", "not None: _valStr = numr.group('sign')+_valStr elif input.group('cplx') != None: real = Card._number_NFSC_RE.match(input.group('real')) _realStr", "[128, 64, 32, 16, 8, 4, 2, 1] nbytes = ((nx-1) / 8)", "self.__dict__[attr] = value def _verify(self, option='warn'): _err = _ValidHDU._verify(self, option=option) # Verify location", "def _get_index(nameList, key): \"\"\" Get the index of the key in the name", "del self[-1] # it also delete the keylist item def keys(self): \"\"\"Return a", "other materials provided with the distribution. 3. 
The name of AURA and its", "(self, option='warn'): _text = '' _err = _ErrList([], unit='HDU') # the first (0th)", "_format += self.header['TFORM'+`j+1`] + ', ' _format = _format[:-2] + ']' _dims =", "setattr(self, cname, value.value) else: setattr(self, cname, value) # if the column data is", "\"Unfixable error: %s\" % _text else: exec(fix) #if option != 'silentfix': _text +=", "that a # string should not end with two single quotes, # whereas", "bscale, bzero = bzero)) self._coldefs = ColDefs(_cols) self.parnames = [i.lower() for i in", "\"input to ColDefs must be a table HDU or a list of Columns\"", "check to see # if we were provided with a Primary Header. If", "always fix silently the case where \"=\" is before column 9, # since", "eval(_rdigt) else: _val = eval(real.group('sign')+_rdigt) imag = Card._number_NFSC_RE.match(valu.group('imag')) _idigt = imag.group('digt').translate(_fix_table2, ' ')", "this field. \"\"\" if isinstance(key, (int, long)): indx = int(key) elif isinstance(key, str):", "are flexible: the 3rd argument can be the header associated with the data.", "_arr = tmp._arrays[i] if _scale: _arr *= bscale if _zero: _arr += bzero", ">>> getdata('in.fits', ('sci', 2)) # equivalent Ambiguous or conflicting specifications will raise an", "= self.header['EXTNAME'] self.name = name def _verify(self, option='warn'): \"\"\"ImageHDU verify method.\"\"\" _err =", "(int, long)): return x else: return ColDefs(x) def __len__(self): return len(self.data) def __repr__(self):", "definition of) one Column.\"\"\" indx = _get_index(self.names, col_name) for cname in _commonNames: attr", "of all HDU's into memory.\"\"\" for i in range(len(self)): if self[i].data is not", "self).append(hdu) hdu._new = 1 self._resize = 1 else: raise \"HDUList can only append", "8, 'array data type'), Card('NAXIS', 2, 'number of array dimensions'), Card('NAXIS1', 0, 'length", "# input is a list of Columns tmp = hdu.columns = ColDefs(input, tbtype)", "self.ascard.__str__() def ascardlist(self): \"\"\"Returns a CardList.\"\"\" return self.ascard def items(self): \"\"\"Return a list", "0 size = 1 for j in range(groups,naxis): size = size * self.header['NAXIS'+`j+1`]", "no equal sign, return the string after column 8. \"\"\" eqLoc = self._locateEq()", "\"\"\"Construct a primary HDU. data: the data in the HDU, default=None. header: the", "OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS", "dims[groups:]) size = abs(bitpix) * gcount * (pcount + datasize) / 8 if", "_fixValue(self, input): \"\"\"Fix the card image for fixable non-standard compliance.\"\"\" _valStr = None", "1 def __str__(self): return self.ascard.__str__() def ascardlist(self): \"\"\"Returns a CardList.\"\"\" return self.ascard def", "hdu._ffile._mm[hdu._datLoc:hdu._datLoc+hdu._datSpan] _data = rec.RecArray(_mmap, formats=tmp._recformats, names=tmp.names, shape=tmp._shape) else: _data = rec.array(hdu._file, formats=tmp._recformats, names=tmp.names,", "is a list of corresponding attribute values from all Columns. 
\"\"\" def __init__(self,", "both header and data are copied.\"\"\" # touch the data, so it's defined", "# used for lazy instantiation of data ASCIITNULL = 0 # value for", "_tbsize = hdu.header['NAXIS1']*hdu.header['NAXIS2'] _data._gap = hdu._theap - _tbsize # comment out to avoid", "None: found = j nfound += 1 else: # if the keyword EXTVER", "the 3rd argument is not a header, it (and other positional arguments) are", "= output[:Card.length] self.__dict__['_cardimage'] = output def _checkText(self, val): \"\"\"Verify val to be printable", "_FormatP, _FormatX, _VLF \"\"\" \"\"\" Do you mean: \"Profits\"? - Google Search, when", "for j in range(_min, _max): num.bitwise_and(input[...,i], pow2[j-i*8], output[...,j]) def _wrapx(input, output, nx): \"\"\"Wrap", "of the keyword, or index of the Card after which the new card", "HDUList.\"\"\" if isinstance(hdu, _AllHDU): super(HDUList, self).append(hdu) hdu._new = 1 self._resize = 1 else:", "is no exact name matched, it will try to match the name with", "(string, integer). \"\"\" if isinstance(key, (int, slice)): return key elif isinstance(key, tuple): _key", "+ ' ' * (Card.length-strlen) # minimum length is 80 else: strlen =", "leading zeros from numbers, otherwise # Python might evaluate them as octal values.", "for HDUList.\" for hdu in hdus: if not isinstance(hdu, _AllHDU): raise \"Element %d", "HDUList.\"\"\" if self.__file is None: _name = '(No file associated with this HDUList)'", "1: return _LineSlice(indx.stop-indx.start, indx.start) else: return _SteppedSlice((indx.stop-indx.start)/indx.step, indx.start) else: raise IndexError, 'Illegal index", "Card by indexing or by the keyword name.\"\"\" if isinstance (value, Card): _key", "mode=mode) n_ext1 = len(ext1) n_ext2 = len(ext2) keys = ext2.keys() # parse the", "_formats = (_fmt+',') * npars data_fmt = '%s%s' % (`input.shape[1:]`, _fmt) _formats +=", "_data._heapoffset = hdu._theap + hdu._datLoc _data._file = hdu._file _tbsize = hdu.header['NAXIS1']*hdu.header['NAXIS2'] _data._gap =", "'== 3', _isInt+\" and val == 0\", 0, option, _err) _after = self.header['NAXIS']", "_FormatP(str): \"\"\"For P format in variable length table.\"\"\" pass # TFORM regular expression", "or table data.' else: hdu=header._hdutype(data=data, header=header) return hdu def writeto(filename, data, header=None, **keys):", "0): _after = 'naxis' else : _after = 'naxis'+`j` self.header.update('naxis'+`j+1`, axes[j], after =", "opener # class to the urllibrary urllib._urlopener.tempcache = {} # Initialize tempcache with", "if not isinstance(array, (num.NumArray, chararray.CharArray, Delayed)): try: # try to convert to a", "file name @type: string @param ext: The rest of the arguments are for", "i in range(len(self)): if i > 0 and (not isinstance(self[i], _ExtensionHDU)): err_text =", "% input _stop = input.stop if _stop is None: _stop = naxis elif", "expected by the header, a TypeError exception is raised. \"\"\" if self.writeComplete: raise", "# beginning of the header area hdu._datLoc = self.__file.tell() # beginning of the", "the EXTNAME value \"\"\" _TableBaseHDU.__init__(self, data=data, header=header, name=name) self._xtn = 'BINTABLE' hdr =", "type.' if 'header' in extkeys: header = extkeys['header'] del extkeys['header'] new_hdu=_makehdu(data, header) hdulist,", "isinstance(_card.value, str): raise ValueError, 'Cards with CONTINUE must have string value.' 
if name", "self.__dict__[name] = attr return self.__dict__[name] \"\"\" # make sure to consider the case", "# pass these attributes hdu._file = self._file hdu._hdrLoc = self._hdrLoc hdu._datLoc = self._datLoc", "a COMMENT card. value: Comment text to be added. before: [same as in", "in range(1, _max+1): if _where+nc >= len(_keyList): break if _cardList[_where+nc]._cardimage[:10].upper() != 'CONTINUE ':", "= dat_format, bscale = _bscale, bzero = _bzero)) _coldefs = ColDefs(_cols) _coldefs._shape =", "nested list structure constructed by error messages generated by verifications at different class", "to be used? default=0. \"\"\" # instantiate a FITS file object (ffo) ffo", "for card in self.ascard: pairs.append((card.key, card.value)) return pairs def has_key(self, key): \"\"\"Check for", "result:' print x if _option == 'exception' and x: raise VerifyError def _pad(input):", "__str__ has only one argument. \"\"\" result = \"\" element = 0 #", "- _where hdu.data._heapsize = _shift - hdu.data._gap _size = _size + _shift #", "if _str is not None: self._checkText(_str) def fromstring(self, input): \"\"\"Construct a Card object", "pick out column definition keywords dict = [{} for i in range(_nfields)] #", "in _format: self._parent.field(indx).sub('E', 'D') # binary table else: if isinstance(self._parent.field(indx)._type, num.IntegralType): dummy =", "raise VerifyError, '\\n'+x if (_option != \"silentfix\") and x: print 'Output verification result:'", "in ['TDISP', 'TDIM', 'THEAP']: for i in range(_tfields): del self[name+`i+1`] if issubclass(self._hdutype ==", "0] or bscale not in ['', None, 1]: array = array.copy() if bzero", "1 _end = self.starts[i] + _width - 1 attr[i] = _end - last_end", "open(name, mode=\"copyonwrite\", memmap=0): \"\"\"Factory function to open a FITS file and return an", "input arrays can be just list or tuple, not required to be NDArray", "how_many): if self._blanks > 0: for i in range(min(self._blanks, how_many)): del self[-1] #", "tmp def _get_scale_factors(self, indx): \"\"\" Get the scaling flags and factors for one", "verbose=0): \"\"\"Close the associated FITS file and memmap object, if any. output_verify: output", "the HDUList, indexed by number or name.\"\"\" key = self.index_of(key) _item = super(HDUList,", "If useblanks != 0, and if there are blank cards directly before END,", "= self.header.get('NAXIS', 0) # for random group image, NAXIS1 should be 0, so", "data is supplied, a minimal header is created @type filename: string @param filename:", "is not a header, it (and other positional arguments) are assumed to be", "_gethdr = False hdulist, _ext = _getext(filename, 'readonly', *ext, **extkeys) hdu = hdulist[_ext]", "= open(filename, mode='update') f.append(hdu) f.close() def update(filename, data, *ext, **extkeys): \"\"\"Update the specified", "existing file '%s'.\" % name os.remove(name) else: raise IOError, \"File '%s' already exist.\"", "1: _repeat = `repeat` output_format = _repeat+_fits2rec[dtype] elif dtype == 'X': nbytes =", "name %s already exists.' % new_name else: self.change_attrib(col_name, 'name', new_name) def change_unit(self, col_name,", "TDIM keyword \"\"\" # any of the input argument (except array) can be", "_realStr = real.group('sign')+_realStr imag = Card._number_NFSC_RE.match(input.group('imag')) _imagStr = imag.group('digt').translate(_fix_table, ' ') if imag.group('sign')", "must be positive.' 
% input else: raise IndexError, 'Illegal slice %s, step must", "self._names def copy(self): r = rec.RecArray.copy(self) r.__class__ = rec.RecArray r._coldefs = self._coldefs f", "and len(indx) == len(value): for i in range(len(indx)): self.field(indx[i])[:] = value[i] else: raise", "not defined.' % name self.__dict__[name] = attr return self.__dict__[name] \"\"\" # make sure", "\"\"\"Returns the table's column definitions.\"\"\" return self.columns def update(self): \"\"\" Update header keywords", "need to deal with byte order if isinstance(hdu, _ImageBaseHDU): if hdu.data._byteorder != 'big':", "isinstance(hdu.data._coldefs.formats[i], _FormatP): key = hdu.header['TFORM'+`i+1`] hdu.header['TFORM'+`i+1`] = key[:key.find('(')+1] + `hdu.data.field(i)._max` + ')' def", "# Get the name of the current thread and determine if this is", "open(filename, mode='update') f.append(hdu) f.close() def update(filename, data, *ext, **extkeys): \"\"\"Update the specified extension", "results = results + \"%-3d %s\\n\"%(j, self[j]._summary()) results = results[:-1] print results def", "= [0]*naxis for i in range(naxis): mo = re_naxisn.search(block, pos) pos = mo.end(0)", "self.NumCode[self.header['BITPIX']] if isinstance(self, GroupsHDU): _gcount = ' %d Groups %d Parameters' % (self.header['GCOUNT'],", "#self.change_attrib(col_name, 'format', new_format) def _get_tbdata(hdu): \"\"\" Get the table data from input (an", "None: npars = 0 else: npars = len(pardata) if parbscales is None: parbscales", "FITS_rec and its ._parent def __getitem__(self, key): tmp = rec.RecArray.__getitem__(self, key) if isinstance(key,", "be initialized till the HDU is accessed. \"\"\" def _getname(self): \"\"\"Get the extname", "and update the keywords of BSCALE and BZERO in self.header. This method should", "1 else: # if the keyword EXTVER does not exist, default it to", "# this can be reset by user. _isInt = \"isinstance(val, (int, long))\" #", "range(naxis): size = size * self.header['NAXIS'+`j+1`] bitpix = self.header['BITPIX'] gcount = self.header.get('GCOUNT', 1)", "== 'key': self._extractKey() elif name in ['value', 'comment']: self._extractValueComment(name) else: raise AttributeError, name", "val = val.upper() if val == 'END': raise ValueError, \"keyword 'END' not allowed\"", "has invalid value '%s'.\" % (keywd, val) fix_text = \"Fixed by setting a", "_normalize(_start, naxis) else: raise IndexError, 'Illegal slice %s, start must be integer.' %", "num.getType(dtype).bytes for i in range(len(input)): if dtype == 'a': data_output[i] = chararray.array(input[i], itemsize=1)", "in range(_nfields)] # definition dictionaries for each field for _card in hdr.ascardlist(): _key", "valStr = '%-20s' % valStr # must be before int checking since bool", "fromstring(self, input): \"\"\"Construct a Card object from a (raw) string. 
It will pad", "array.\"\"\" _dict = {'A':'s', 'I':'d', 'F':'f', 'E':'E', 'D':'E'} # calculate the starting point", "result += _INDENT*tab+\"%s\\n\" % item # second time go through the next level", "= key + (slice(None),) * (naxis-len(key)) offset = 0 for i in range(naxis):", "= num.getType(dtype).bytes for i in range(len(input)): if dtype == 'a': data_output[i] = chararray.array(input[i],", "= [self[i] for i in indx] return ColDefs(tmp) def _setup(self): \"\"\" Initialize all", "the input column definitions.\"\"\" \"\"\" input: a list of Columns or a ColDefs", "key): \"\"\"Delete card(s) with the name 'key'.\"\"\" # delete ALL cards with the", "sign in the card image and return the string after the equal sign.", "image and return the string after the equal sign. If there is no", "out of bound or not found.' % key self._resize = 1 def __delitem__(self,", "raise ValueError, 'Option %s not recognized.' % option if (_option == \"ignore\"): return", "to tables \"\"\" def __init__(self, input=None, bitpix=None, pardata=None, parnames=[], bscale=None, bzero=None, parbscales=None, parbzeros=None):", "= input else: self.__setstate__(input.__getstate__()) def __getattr__(self, attr): if attr == 'data': self.__dict__[attr] =", "_bytes != (hdu._datLoc-hdu._hdrLoc): self._resize = 1 if verbose: print \"One or more header", "if reverse = 1. \"\"\" fmt = input_format (repeat, dtype, option) = _parse_tformat(fmt)", "the first field, and field('ABC') will get the second field. If there is", "num.array(input[i], type=dtype) desp_output[i,0] = len(data_output[i]) desp_output[i,1] = _offset _offset += len(data_output[i]) * _nbytes", "\"\"\" if isinstance (card, Card): super(CardList, self).insert(pos, card) self._keylist.insert(pos, card.key) # update the", "_ImageBaseHDU): \"\"\"FITS image extension HDU class.\"\"\" def __init__(self, data=None, header=None, name=None): \"\"\"Construct an", "for i in range(self._nfields): _loc.append(_loc[-1]+self._parent.field(i).itemsize()) _width.append(_convert_ASCII_format(self._coldefs._Formats[i])[1]) self._heapsize = 0 for indx in range(self._nfields):", "value: keyword value, default=''. comment: comment, default=''. \"\"\" if key != '' or", "option='warn'): _err = _ErrList([], unit='Card') isValid = \"val in [8, 16, 32, 64,", "name: output FITS file name to be written to. output_verify: output verification option,", "cell with value = TNULL # this can be reset by user. _isInt", "list of Cards, default=[]. \"\"\" list.__init__(self, cards) self._cards = cards # if the", "if self.__file.mode == 'append': for hdu in self: if (verbose): try: _extver =", "attributes hdu._file = self._file hdu._hdrLoc = self._hdrLoc hdu._datLoc = self._datLoc hdu._datSpan = self._datSpan", "since a greedy match will find a single-quote after # the comment separator", "ext = ext2['extname'], ext2['extver'] else: ext = ext2['extname'] else: raise KeyError, 'Insufficient keyword", "_keylist. self._checkKey(self.key) # verify the value, it may be fixable result = Card._value_FSC_RE.match(self._getValueCommentString())", "or self.__dict__.has_key('_cardimage')): valStr = '' # string value should occupies at least 8", "moving it to the right place (card %d).\" % insert_pos fix = \"_cards=self.header.ascard;", "format: # NFSC allows lower case of DE for exponent, allows space between", "# Handle gzip files if mode in ['update', 'append']: raise \"Writing to gzipped", "from an extension of a FITS file. 
@param filename: input FITS file name", "and x.find('Unfixable') != -1: raise VerifyError, '\\n'+x if (_option != \"silentfix\") and x:", "it has methods to change # the content of header without being able", "\"\"\" def __init__(self, name=None, format=None, unit=None, null=None, \\ bscale=None, bzero=None, disp=None, start=None, \\", "self.key in Card._commentaryKeys: if not isinstance(self.value, str): raise ValueError, 'Value in a commentary", "= '%-8s' % self.key else: keyStr = ' '*8 # value string #", "a TypeError exception is raised. \"\"\" if self.writeComplete: raise IOError, \"The stream is", "long)): if indx >= 0 and indx < naxis: if naxis > 1:", "allows lower case of DE for exponent, allows space between sign, # digits,", "= 0. \"\"\" # Get the name of the current thread and determine", "Card._commentaryKeys): raise ValueError, 'Regular and commentary keys can not be renamed to each", "0) and (newkey in self.ascard._keylist): raise ValueError, 'Intended keyword %s already exists in", "else: # construct a list of cards of minimal header _list = CardList([", "self._keylist.insert(i+1, card.key.upper()) if useblanks: self._use_blanks(card._ncards()) self.count_blanks() self._mod = 1 else: raise SyntaxError, \"%s", "_err) return _err # 0.8.8 def _iswholeline(indx, naxis): if isinstance(indx, (int, long)): if", "insensitivity. So, in the last example, field('Abc') will cause an exception since there", "else: raise ValueError, \"Data is inconsistent with the format `%s`.\" % format else:", "break # construct the Header object, using the cards. try: header = Header(CardList(_cardList,", "header will be added as the first extension. If the file does already", "tables _tbsize = hdu.header['NAXIS1']*hdu.header['NAXIS2'] _heapstart = hdu.header.get('THEAP', _tbsize) hdu.data._gap = _heapstart - _tbsize", "in \"update/append\" mode # CardList needs its own _mod attribute since it has", "helps delete leading zeros from numbers, otherwise # Python might evaluate them as", "= (threading.activeCount() == 1) and (threadName.getName() == 'MainThread') if singleThread: # Define new", "ValueError, 'keyword name %s is too long (> 8), use HIERARCH.' % val", "if option == 'fix': self.__dict__['_fix_text'] = 'Fixed card to be FITS standard.: %s'", "except: pass def info(self): \"\"\"Summarize the info of the HDU's in this HDUList.\"\"\"", "memory else: self.data = raw_data if self._bscale != 1: num.multiply(self.data, self._bscale, self.data) if", "def http_error_default(self, url, fp, errcode, errmsg, headers): raise IOError, (errcode, errmsg, url) urllib._urlopener", "the same as 7A in # binary table, so both will produce 'a7'.", "key): dims = [] if not isinstance(key, tuple): key = (key,) naxis =", "SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE", "self.__dict__['comment'] = '' return valu = self._check(option='parse') if name == 'value': if valu", "# update the 3rd extension >>> update(file, dat, hdr, 3) # update the", "\\ (self.name, type, len(self.header.ascard), _dims, _format) def get_coldefs(self): \"\"\"Returns the table's column definitions.\"\"\"", "maybe an empty string. _value_FSC_RE = re.compile( r'(?P<valu_field> *' r'(?P<valu>' # The <strg>", "if not resized, update in place else: for hdu in self: if (verbose):", "The default is \"all\" which will print out all attributes. 
It forgives plurals", "It appears to find the # end of a string rather well, but", "_fix_table2 = maketrans('dD', 'eE') class Card(_Verify): # string length of a card length", "input else: raise ValueError, 'column definitions have a different table type' elif isinstance(input,", "raise IndexError, 'No data in this HDU.' if _data is None: raise IndexError,", "__getitem__(self, key): tmp = rec.RecArray.__getitem__(self, key) if isinstance(key, slice): out = tmp out._parent", "comment if self.key in Card._commentaryKeys: if not isinstance(self.value, str): raise ValueError, 'Value in", "_data is None and isinstance(_ext, _Zero): try: hdu = hdulist[1] _data = hdu.data", "= value.upper() if self.header.has_key('EXTNAME'): self.header['EXTNAME'] = value else: self.header.ascard.append(Card('EXTNAME', value, 'extension name')) self.__dict__[attr]", "cardList.append(_card) keyList.append(_key) if _key == 'END': break def _readHDU(self): \"\"\"Read the skeleton structure", "convert strings to numbers if self._coldefs._tbtype == 'TableHDU': _dict = {'I':num.Int32, 'F':num.Float32, 'E':num.Float32,", "* gcount * (pcount + size) / 8 return size def _verify(self, option='warn'):", "def __radd__(self, other): return self.__add__(other, 'right') def __sub__(self, other): if not isinstance(other, (list,", "+= ' ' + fix_text return _text def verify (self, option='warn'): \"\"\"Wrapper for", "_isInt+\" and val == 0\", 0, option, _err) return _err class GroupsHDU(PrimaryHDU): \"\"\"FITS", "self.req_cards(firstkey, '== 0', '', firstval, option, _err) self.req_cards('BITPIX', '== 1', _isInt+\" and \"+isValid,", "= 0 elif name == 'comment': self.__dict__['comment'] = '' if valu is not", "content is written self.__file.flush() # return both the location and the size of", "it will be written to the beginning of the file. If the file", "'Illegal slice %s, stop must be integer.' % input if _stop < _start:", "\"\"\"TableHDU verify method.\"\"\" _err = _TableBaseHDU._verify(self, option=option) self.req_cards('PCOUNT', None, 'val == 0', 0,", "\"\"\"Check the existence, location, and value of a required Card.\"\"\" \"\"\"If pos =", "isinstance(self[i], _ExtensionHDU)): err_text = \"HDUList's element %s is not an extension HDU.\" %", "isinstance(self._coldefs._recformats[indx], _FormatX): _nx = self._coldefs._recformats[indx]._nx dummy = num.zeros(self._parent.shape+(_nx,), type=num.Bool) _unwrapx(self._parent.field(indx), dummy, _nx) self._convert[indx]", "_wrapx(input, output, nx): \"\"\"Wrap the X format column Boolean array into an UInt8", "All attributes except format can be optional. name: column name, corresponding to TTYPE", "FITS_rec): # input is a FITS_rec tmp = hdu.columns = input._coldefs else: #", "indx in range(self._nfields): if (self._convert[indx] is not None): if isinstance(self._coldefs._recformats[indx], _FormatX): _wrapx(self._convert[indx], self._parent.field(indx),", "to stream data to a FITS file instead of requiring data to all", "the first extension. If the file does already exist, but the provided header", "gcount * (pcount + datasize) / 8 if simple and not groups: name", "input is a list of Columns elif isinstance(input, (list, tuple)): for col in", "new_table (input, header=None, nrows=0, fill=0, tbtype='BinTableHDU'): \"\"\"Create a new table from the input", "comment field will # return a match if the comment separator is found,", "_tfields = self['TFIELDS'] del self['NAXIS'] for i in range(_naxis): del self['NAXIS'+`i+1`] if issubclass(self._hdutype,", "format spec, i.e. 
A7 in ASCII table is the same as 7A in", "== 0: if dtype in _fits2rec.keys(): # FITS format if dtype == 'A':", "8 characters. \"\"\" def _verify(self, option='warn'): \"\"\"No verification (for now).\"\"\" return _ErrList([]) class", "if _step <= 0: raise IndexError, 'Illegal slice %s, step must be positive.'", "value[i] else: raise ValueError, \"parameter value must be a sequence with %d arrays/numbers.\"", "*([deDE] *[+-]? *\\d+)?' _numr_FSC = r'[+-]?' + _digits_FSC _numr_NFSC = r'[+-]? *' +", "be written\" curDataSize = self._ffo.getfile().tell() - self._datLoc if curDataSize + data.itemsize()*data._size > self._size:", "%s\" % (x, _width[indx]) else: self._parent.field(indx)[i] = x if 'D' in _format: self._parent.field(indx).sub('E',", "None @param header: the header associated with 'data', if None, an appropriate header", "longstring = longstring + _comm.rstrip() + ' ' self.__dict__[name] = longstring.rstrip() def _breakup_strings(self):", "coldata = hdu.data.field(i)[j] if len(coldata) > 0: coldata.tofile(self.__file) _shift = self.__file.tell() - _where", "if isinstance(self, _Hierarch): self.__dict__['key'] = head.strip() else: self.__dict__['key'] = head.strip().upper() def _extractValueComment(self, name):", "to be raised when a file specified by a URL cannot be accessed\"\"\"", "= offset return list class Header: \"\"\"FITS header class.\"\"\" def __init__(self, cards=[]): \"\"\"Construct", "# if not memmap, use the space already in memory else: self.data =", "misalignment. \"\"\" if isinstance(value, num.NumArray) and value.type() == self._dtype: pass elif isinstance(value, chararray.CharArray)", "dtype+option in _rec2fits.keys(): # record format _repeat = '' if repeat != 1:", "the respective HDU classes, # so the checking is in order, in case", "class ColDefs(object): \"\"\"Column definitions class. It has attributes corresponding to the Column attributes", "the amount of data specified in the header provided to the class constructor", "header: the header to be used (as a template), default=None. If header=None, a", "of shape (s, nx) nx: number of bits \"\"\" pow2 = [128, 64,", "= hdu._file _tbsize = hdu.header['NAXIS1']*hdu.header['NAXIS2'] _data._gap = hdu._theap - _tbsize # comment out", "block = self.__file.read(_blockLen) if block == '': raise EOFError hdu = _TempHDU() hdu._raw", "card to be FITS standard.: %s' % self.key # verify the key, it", "print \"update header in place: Name =\", hdu.name, _extver if 'data' in dir(hdu):", "of EXTNAME and EXTVER, as separate arguments or as a tuple: >>> getdata('in.fits',", "is a case variant of \"XYZ\", then field('xyz'), field('Xyz'), etc. will get this", "IndexError, 'Illegal slice %s, start must be integer.' 
% input _stop = input.stop", "for hdu in hdus: if not isinstance(hdu, _AllHDU): raise \"Element %d in the", "indexed by number or name.\"\"\" key = self.index_of(key) super(HDUList, self).__delitem__(key) self._resize = 1", "HDU, both header and data are copied.\"\"\" if self.data is not None: _data", "if there is extension if len(self) > 1: self.update_extend() def index_of(self, key): \"\"\"Get", "Card._keywd_FSC_RE.match(val) is None: self.__dict__['_err_text'] = 'Illegal keyword name %s' % repr(val) self.__dict__['_fixable'] =", "self._xtn self.header.ascard[0].comment = 'ASCII table extension' ''' def format(self): strfmt, strlen = '',", "column right after the last field if self._tbtype == 'TableHDU': last_end = 0", "= 64 output = '' # do the value string valfmt = \"'%-s&'\"", "valStr + commentStr # need this in case card-with-continue's value is shortened if", "self.insert(loc+1, card, useblanks=useblanks) def insert(self, pos, card, useblanks=1): \"\"\"Insert a Card to the", "'== '+`j`, _isInt+\" and val>= 0\", 1, option, _err) # verify each card", "err_text=err_text, fix_text=fix_text, fix=fix) _err.append(_text) # each element calls their own verify for i", "by the International Astronomical Union in 1999 and mandated by NASA as the", "self.__file.flush() loc = self.__file.tell() _size = 0 if hdu.data is not None: #", "selected option.\"\"\" _text = err_text if not fixable: option = 'unfixable' if option", "try: return self[key] except: return default def update(self, key, value, comment=None, before=None, after=None):", "chararray.CharArray, Delayed)): try: # try to convert to a numarray first array =", "def copy(self): \"\"\"Make a copy of the table HDU, both header and data", "header and appended to the end of the file. \"\"\" self.header = header.copy()", "+ strlen # collect the pieces in a list tmp = input[xoffset:offset] list.append(tmp)", "n_ext2 == 0: ext = ext1[0] else: if isinstance(ext1[0], (int, tuple)): raise KeyError,", "if \".\" not in valueStr and \"E\" not in valueStr: valueStr += \".0\"", "array: %s\" % array array._dtype = recfmt._dtype else: raise ValueError, \"Data is inconsistent", "_ext = _getext(filename, 'update', *ext, **extkeys) hdulist[_ext] = new_hdu hdulist.close() def info(filename): \"\"\"Print", "# the original dummy = self.field(i) if self._convert[i] is not None: out._convert[i] =", "from string import maketrans import copy import signal import threading # Module variables", "_bscale = self.header.get('BSCALE', 1) _bzero = self.header.get('BZERO', 0) _cols.append(Column(name='data', format = dat_format, bscale", "the header provided to the class constructor may be written to the stream.", "__repr__(self): return self._cardimage def __getattr__(self, name): \"\"\" instanciate specified attribute object.\"\"\" if name", "_pad(input) if self._cardimage[:8].upper() == 'HIERARCH': self.__class__ = _Hierarch # for card image longer", "to FITS standard') _list = CardList([ c0, Card('BITPIX', 8, 'array data type'), Card('NAXIS',", "numarray shape if isinstance(self, GroupsHDU): _shape = list(self.data.data.getshape())[1:] _format = `self.data._parent.field(0).type()` else: _shape", "or -64) pardata: parameter data, as a list of (numeric) arrays. 
parnames: list", "= ColDefs(_cols) _coldefs._shape = self.header['GCOUNT'] _coldefs._dat_format = _fits2rec[_format] _coldefs._pnames = _pnames self.__dict__[attr] =", "self.ascard[key] self._mod = 1 def __str__(self): return self.ascard.__str__() def ascardlist(self): \"\"\"Returns a CardList.\"\"\"", "newkey def get(self, key, default=None): \"\"\"Get a keyword value from the CardList. If", "only), corresponding to TBCOL keyword dim: column dimension corresponding to TDIM keyword \"\"\"", "8).' raise ValueError, self._err_text, '\\n%s' % self._cardimage elif option in ['fix', 'silentfix']: result", "the 'BITPIX', 'NAXIS', or 'END' cards. A corrupted HDU usually means that the", "in [8, 16, 32, 64, -32, -64]\" # Verify location and value of", "explanations/examples. @rtype: L{Header} object @return: header \"\"\" hdulist, _ext = _getext(filename, 'readonly', *ext,", "it will try to match the exact name first, so in the example", "range(npars): _cols.append(Column(name='c'+`i+1`, format = fits_fmt, bscale = parbscales[i], bzero = parbzeros[i])) _cols.append(Column(name='data', format", "+ size else: strfmt = '>' + strfmt[:-1] return strfmt ''' def _verify(self,", "byteswap little endian arrays before writing # output = data.byteswapped() else: output =", "== 1) and (threadName.getName() == 'MainThread') if singleThread: # Define new signal interput", "written to a file. name: output FITS file name to be written to.", "input_format return (dtype, width) def _get_index(nameList, key): \"\"\" Get the index of the", "= '= ' if keyStr.strip() in Card._commentaryKeys: # not using self.key eqStr =", "col = eval(_key.group('num')) if col <= _nfields and col > 0: cname =", "dummy[i] = chararray.array(self._file, itemsize=self._parent.field(indx)[i,0], shape=1) else: dummy[i] = num.array(self._file, type=self._coldefs._recformats[indx]._dtype, shape=self._parent.field(indx)[i,0]) dummy[i]._byteorder =", "corresponding indentations. A tricky use of __str__, since normally __str__ has only one", "'\\n' return output[:-1] def _extractValueComment(self, name): \"\"\"Exatrct the keyword value or comment from", "multiply the length of all remaining axes else: offset *= _naxis if dims", "scale the array back to storage values if there is bscale/bzero if isinstance(array,", "keyword argument(s): %s' % ext2 if isinstance(ext1[0], str): if n_ext2 == 1 and", "% input if _stop < _start: raise IndexError, 'Illegal slice %s, stop <", "exist, but the provided header represents a Primary header, the header will be", "break if _cardList[_where+nc]._cardimage[:10].upper() != 'CONTINUE ': break # combine contiguous CONTINUE cards with", "in input.data] # if the input is a list of Columns elif isinstance(input,", "tbtype == 'TableHDU': for i in range(len(self)): (type, width) = _convert_ASCII_format(self.data[i].format) if width", "%d extensions of %s' % (nfound, `key`) else: return found def readall(self): \"\"\"Read", "self.__dict__.has_key('_cardimage'): self._check(option) self._ascardimage() return self.__dict__['_cardimage'] def _ascardimage(self): \"\"\"Generate a (new) card image from", "data(*). 
(*) In future it may be possible to decipher where the last", "self.header.get('PSCAL'+`i+1`, 1) _bzero = self.header.get('PZERO'+`i+1`, 0) _pnames.append(self.header['PTYPE'+`i+1`].lower()) _cols.append(Column(name='c'+`i+1`, format = _format, bscale =", "super(HDUList, self).__setitem__(_key, hdu) except IndexError: raise IndexError, 'Extension %s is out of bound", "_pos = '>= '+`_after` self.req_cards('GCOUNT', _pos, _isInt, 1, option, _err) self.req_cards('PCOUNT', _pos, _isInt,", "1) and (threadName.getName() == 'MainThread') if singleThread: # Define new signal interput handler", "80 # String for a FITS standard compliant (FSC) keyword. _keywd_FSC = r'[A-Z0-9_-]*", "find out how many blank cards are *directly* before the END card self._blanks", "= self.starts[i] + _width - 1 attr[i] = _end - last_end last_end =", "= self[j]._extver if _ver == _extver: found = j nfound += 1 if", "< len(key): raise IndexError, 'too many indices.' elif naxis > len(key): key =", "of range error for BZERO = +32768 self.header.update('BZERO', _zero) else: del self.header['BZERO'] if", "the whole card must have string value. \"\"\" def __str__(self): \"\"\"Format a list", "= _Hierarch return self._cardimage[_start:eqLoc] def _getValueCommentString(self): \"\"\"Locate the equal sign in the card", "if (verbose): print \"append HDU\", hdu.name, _extver hdu._new = 0 elif self.__file.mode ==", "type=self._coldefs._recformats[indx]._dtype, shape=self._parent.field(indx)[i,0]) dummy[i]._byteorder = 'big' # scale by TSCAL and TZERO if _scale", "the 5th extension \"\"\" # parse the arguments header = None if len(ext)", "a keyword name or index. \"\"\" if before != None: loc = self.index_of(before)", "0 mo = re_groups.search(block) if mo and simple: groups = 1 else: groups", "for updating The rest of the arguments are flexible: the 3rd argument can", "if header is None: raise ValueError, \"No header to setup HDU.\" # if", "is a string, it must be of the syntax of \"> n\", #", "_rec2fits.keys(): # record format _repeat = '' if repeat != 1: _repeat =", "= hdu._theap - _tbsize # comment out to avoid circular reference of _pcount", "not isinstance(ext[0], (int, long, str, tuple)): raise KeyError, 'Input argument has wrong data", "must be a Header object\" if data is DELAYED: # this should never", "file name to be written to. output_verify: output verification option, default = 'exception'.", "Name of the FITS file to be opened. mode: Open mode, 'readonly' (default),", "\"\"\" # parse the arguments header = None if len(ext) > 0: if", "consider platform dependence of the format (e.g. E-009 vs. E-09) elif isinstance(self.value, float):", "else: self.ascard.append(Card(key, value, comment)) self._mod = 1 def add_history(self, value, before=None, after=None): \"\"\"Add", "= _ImageBaseHDU.ImgCode[self.data.type()] else: self.data = raw_data try: return self.__dict__[attr] except KeyError: raise AttributeError(attr)", "blocks = blocks + _padLength(len(blocks))*' ' if len(blocks)%_blockLen != 0: raise IOError self.__file.flush()", "+ self.comment else: commentStr = '' # equal sign string eqStr = '=", "deprecated _INDENT = \" \" DELAYED = \"delayed\" # used for lazy instantiation", "for i in range(1, len(self)): if str(self[-i]) != ' '*Card.length: self._blanks = i", "value, corresponding to TZERO keyword disp: display format, corresponding to TDISP keyword start:", "size = self.size() if size: self._file.seek(self._datLoc) data = _get_tbdata(self) data._coldefs = self.columns else:", "module. 
@group Header-related Classes: Card, CardList, _Card_with_continue, Header, _Hierarch @group HDU Classes: _AllHDU,", "= TNULL # this can be reset by user. _isInt = \"isinstance(val, (int,", "if test: val = self.header[keywd] if not eval(test): err_text = \"'%s' card has", "'THEAP']: for i in range(_tfields): del self[name+`i+1`] if issubclass(self._hdutype == TableHDU): for i", "= hdulist[_ext] _data = hdu.data if _data is None and isinstance(_ext, _Zero): try:", "*\\))' r')? *)' r'(?P<comm_field>' r'(?P<sepr>/ *)' r'(?P<comm>.*)' r')?$') # keys of commentary cards", "find a single-quote after # the comment separator resulting in an incorrect #", "True, 'conforms to FITS standard') _list = CardList([ c0, Card('BITPIX', 8, 'array data", "bcol = self.header['TBCOL'+`j+1`] valu = self.header['TFORM'+`j+1`] fmt = self.__format_RE.match(valu) if fmt: code, width,", "comment string in another. Also, it does not break at the blank space", "the field method), it will try to match the exact name first, so", "r'(?P<real>' + _numr_NFSC + ') *, *(?P<imag>' + _numr_NFSC + ') *\\))' r')?", "to the new table for i in range(len(tmp)): if tmp._arrays[i] is None: size", "= _hduList.__file.writeHDU(hdu) _hduList.__file.close() self.__file.close() os.remove(self.__file.name) if (verbose): print \"delete the original file\", oldName", "memmory mapping to be used? default=0. \"\"\" # instantiate a FITS file object", "+= _INDENT*tab+\"%s\\n\" % item # second time go through the next level items,", "not None: raise ValueError, 'comment %s is not a string' % val self.__dict__['comment']", "deprecated FALSE = False # deprecated _INDENT = \" \" DELAYED = \"delayed\"", "\"\"\"FITS header card list class.\"\"\" def __init__(self, cards=[], keylist=None): \"\"\"Construct the CardList object", "isinstance(item, _ErrList): _dummy = item.__str__(tab=tab+1) # print out a message only if there", "Header(self.ascard.copy()) # also copy the class tmp._hdutype = self._hdutype return tmp def _strip(self):", "for i in range(0, len(blocks), Card.length): _card = Card('').fromstring(blocks[i:i+Card.length]) _key = _card.key if", "non-FSC (NFSC) format: # NFSC allows lower case of DE for exponent, allows", "else: _start = _where + 1 if _keyList[_start:].count('CONTINUE') == 0: break # construct", "after != None: _card = Card(key, value, comment) self.ascard._pos_insert(_card, before=before, after=after) else: self.ascard.append(Card(key,", "stream has been filled will raise an IOError exception. If the dtype of", "if val != '': keyword = _keyNames[_commonNames.index(cname)]+`i+1` if cname == 'format' and isinstance(self,", "'data', if None, an appropriate header will be created for the data object", "when trying to read HDU #%d.\\n There may be extra bytes after the", "it to a tmp file, # delete the original file, and rename the", "else: comm = self.comment commfmt = \"%-s\" if not comm == '': nlines", "elif isinstance(self.data, num.NumArray): self.header['BITPIX'] = _ImageBaseHDU.ImgCode[self.data.type()] axes = list(self.data.getshape()) axes.reverse() elif self.data is", "_err) tfields = self.header['TFIELDS'] for i in range(tfields): self.req_cards('TFORM'+`i+1`, None, None, None, option,", "before=before, after=after) def add_blank(self, value='', before=None, after=None): \"\"\"Add a blank card. value: Text", "_fmt = ' '*_lead + _pc + _format[1:] + _dict[_format[0]] + ' '*_trail", "end with an even number of # quotes to be precise. 
# #", "(a), field('abc') will get the first field, and field('ABC') will get the second", "populate the cardlist self.ascard = CardList(cards) def __getitem__ (self, key): \"\"\"Get a header", "attributes to be a list of null strings.\"\"\" for cname in _commonNames: setattr(self,", "header=header) clobber = keys.get('clobber', False) hdu.writeto(filename, clobber=clobber) def append(filename, data, header=None): \"\"\"Append the", "None: hdu._file.seek(hdu._datLoc) self.__file.writeHDUdata(hdu) if (verbose): print \"update data in place: Name =\", hdu.name,", "nlines = len(comm) / comm_len + 1 comm_list = self._words_group(comm, comm_len) for i", "os.path.exists(_name): return _name else: raise _name, \"exists\" class VerifyError(exceptions.Exception): \"\"\"Verify exception class.\"\"\" pass", "eqLoc = 7 return self._cardimage[eqLoc+1:] def _check(self, option='ignore'): \"\"\"Verify the card image with", "up the open. Any header will not be initialized till the HDU is", "User's Manual} available from U{http://www.stsci.edu/resources/software_hardware/pyfits/Users_Manual1.pdf} Epydoc markup used for all docstrings in this", "format `%s`.\" % format else: raise ValueError, \"Must specify format to construct Column\"", "f.append(hdu) f.close() def update(filename, data, *ext, **extkeys): \"\"\"Update the specified extension with the", "strings and there # is no comment if self.key in Card._commentaryKeys: if not", "if not (self.__dict__.has_key('value') or self.__dict__.has_key('_cardimage')): valStr = '' # string value should occupies", "of shape (s, nbytes) output: output Boolean array of shape (s, nx) nx:", "\"Element %d in the ColDefs input is not a Column.\" % input.index(col) self.data", "'TableHDU' (text table). \"\"\" ascii_fmt = {'A':'A1', 'I':'I10', 'E':'E14.6', 'F':'F16.7', 'D':'D24.16'} self._tbtype =", "_err = PrimaryHDU._verify(self, option=option) # Verify locations and values of mandatory keywords. self.req_cards('NAXIS',", "data to be written to the file. :Returns: None Notes ----- The file", "the data portion of the HDU. :Parameters: None :Returns: size : integer The", "0 return (_str, _bool, _number, _scale, _zero, bscale, bzero) def field(self, key): \"\"\"A", "by setting a new value '%s'.\" % fix_value if fixable: fix = \"self.header['%s']", "value # if more than one group parameter have the same name, the", "\"\"\" This class is used when one or more mandatory Cards are corrupted", "= _get_index(self.names, col_name) getattr(self, attrib+'s')[indx] = new_value def change_name(self, col_name, new_name): \"\"\"Change a", "3, header=hdr) # update the 3rd extension >>> update(file, dat, header=hdr, ext=5) #", "= None def size(self): \"\"\"Returns the size (in bytes) of the HDU's data", "hdu = eval(tbtype)(header=header) if isinstance(input, ColDefs): if input._tbtype == tbtype: tmp = hdu.columns", "name == '_arrays': attr = [col.array for col in self.data] elif name ==", "as # the original dummy = self.field(i) if self._convert[i] is not None: out._convert[i]", "specified data size. File may have been truncated.' 
hdu._ffile = self return hdu", "def getfile(self): return self.__file def _readheader(self, cardList, keyList, blocks): \"\"\"Read blocks of header,", "ColDefs(tmp) def __radd__(self, other): return self.__add__(other, 'right') def __sub__(self, other): if not isinstance(other,", "- last_end last_end = _end self._width = _end else: raise KeyError, 'Attribute %s", "directory of the input file and the base name of the mktemp() output.", "= chararray.array(value, itemsize=1) else: value = num.array(value, type=self._dtype) objects.ObjectArray.__setitem__(self, key, value) self._max =", "is not an HDU.\" % hdu try: super(HDUList, self).__setitem__(_key, hdu) except IndexError: raise", "the TFORM value into repeat, data type, and option.\"\"\" try: (repeat, dtype, option)", "extname='sci', extver=2) # equivalent >>> getdata('in.fits', ('sci', 2)) # equivalent Ambiguous or conflicting", "data/header. @type filename: string @param filename: name of the file to be updated", "raw_data = num.fromfile(self.hdu._file, type=code, shape=dims) raw_data._byteorder = 'big' return raw_data class _ImageBaseHDU(_ValidHDU): \"\"\"FITS", "(keywd, `fix_value`) fix = \"self.header.ascard.insert(%d, %s)\" % (insert_pos, _card) _err.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix,", "element += 1 return result class _Verify: \"\"\"Shared methods for verification.\"\"\" def run_option(self,", "= self._coldefs.formats[indx][0] == 'A' _bool = 0 # there is no boolean in", "return self.__dict__[attr] except KeyError: raise AttributeError(attr) def _dimShape(self): \"\"\"Returns a tuple of image", "= raw_data.copy() # if not memmap, use the space already in memory else:", "output \"data\" array of data type dtype. The descriptor location will have a", "written self.__file.flush() # return both the location and the size of the data", "__init__(self, hdu): self.hdu = hdu def __getitem__(self, key): dims = [] if not", "parameter data, as a list of (numeric) arrays. parnames: list of parameter names.", "'TDISP', 'TBCOL', 'TDIM'] # mapping from TFORM data type to numarray data type", "of data type 2Int32 dtype: data type of the variable array \"\"\" _offset", "valStr.strip() # comment string if keyStr.strip() in Card._commentaryKeys: # do NOT use self.key", "isinstance(self.data, num.NumArray): self.header['BITPIX'] = _ImageBaseHDU.ImgCode[self.data.type()] axes = list(self.data.getshape()) axes.reverse() elif self.data is None:", "in range(1, len(self)): if str(self[-i]) != ' '*Card.length: self._blanks = i - 1", "_get_index(self.names, col_name) for cname in _commonNames: attr = getattr(self, cname+'s') del attr[indx] del", "== 'columns': class_name = str(self.__class__) class_name = class_name[class_name.rfind('.')+1:] self.__dict__[attr] = ColDefs(self, tbtype=class_name) elif", "None: if self.__file.memmap == 1: self.mmobject = self.__file._mm if self.__file.mode in ['append', 'update']:", "but the beginning locations are computed. 
\"\"\" _cardList = [] _keyList = []", "input data, either the group data itself (a numarray) or a record array", "1): return _WholeLine(naxis, 0) else: if indx.step == 1: return _LineSlice(indx.stop-indx.start, indx.start) else:", "'header' in extkeys: header = extkeys['header'] del extkeys['header'] new_hdu=_makehdu(data, header) hdulist, _ext =", "nbytes = ((nx-1) / 8) + 1 for i in range(nbytes): _min =", "%d).\" % (keywd, _index) fix_text = \"Fixed by moving it to the right", "= dim if tbtype == 'TableHDU': _formats = '' _itemsize = 0 for", "They are flexible and are best illustrated by examples: No extra arguments implies", "the card does not exist, the new card will have the fix_value as", "hdu.data._gap = _heapstart - _tbsize _pcount = hdu.data._heapsize + hdu.data._gap if _pcount >", "----- The file will be opened and the header appended to the end", "'BITPIX', 'NAXIS', or 'END' cards. A corrupted HDU usually means that the data", "string if keyStr.strip() in Card._commentaryKeys: # do NOT use self.key commentStr = ''", "in the list. If string, (a) Field (column) names are case sensitive: you", "header): \"\"\" Construct a StreamingHDU object given a file name and a header.", "0 attr = [0] * len(self) for i in range(len(self)): (_format, _width) =", "attr == '_mm': self.__dict__[attr] = Memmap.open(self.name, mode=_memmap_mode[self.mode]) try: return self.__dict__[attr] except KeyError: raise", "number or name.\"\"\" key = self.index_of(key) _item = super(HDUList, self).__getitem__(key) if isinstance(_item, _TempHDU):", "the supplied data. This argument is optional. @keyword clobber: (optional) if True and", "before=None, after=None): \"\"\"Add a blank card. value: Text to be added. before: [same", "= [] _pcount = self.header['PCOUNT'] _format = GroupsHDU._dict[self.header['BITPIX']] for i in range(self.header['PCOUNT']): _bscale", "Redistributions in binary form must reproduce the above copyright notice, this list of", "tmp file, # delete the original file, and rename the tmp to the", "_end else: raise KeyError, 'Attribute %s not defined.' % name self.__dict__[name] = attr", "LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE", "mandatory Cards are corrupted (unparsable), such as the 'BITPIX', 'NAXIS', or 'END' cards.", "except: pass _err.append(self.run_option(option, err_text=self._err_text, fix_text=self._fix_text, fixable=self._fixable)) return _err class _Hierarch(Card): \"\"\"Cards begins with", "the P format column array, both the data descriptors and the data. It", "list = _commonNames else: list = attrib.split(',') for i in range(len(list)): list[i]=list[i].strip().lower() if", "def _checkText(self, val): \"\"\"Verify val to be printable ASCII text.\"\"\" if Card._comment_FSC_RE.match(val) is", "'data' not in dir(hdu): continue if hdu.data is None: continue _bytes = hdu.data._itemsize*hdu.data.nelements()", "value != '' or comment != '': self._setkey(key) self._setvalue(value) self._setcomment(comment) # for commentary", "used for lazy instantiation of data ASCIITNULL = 0 # value for ASCII", "== []: dims = [1] npt = 1 for n in dims: npt", "ValueError, \"keyword 'END' not allowed\" self._checkKey(val) else: if val[:8].upper() == 'HIERARCH': val =", "\"incorrect array type\" self.header['NAXIS'] = len(axes) # add NAXISi if it does not", "so the checking is in order, in case of required cards in wrong", "not be the column right after the last field elif tbtype == 'TableHDU':", "locations are computed. 
\"\"\" _cardList = [] _keyList = [] blocks = self._raw", "# the shape will be in the order of NAXIS's which is the", "= \"self.header['%s'] = %s\" % (keywd, `fix_value`) _err.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix, fixable=fixable)) return", "del self['NAXIS'] for i in range(_naxis): del self['NAXIS'+`i+1`] if issubclass(self._hdutype, PrimaryHDU): del self['EXTEND']", "objects import numarray.memmap as Memmap from string import maketrans import copy import signal", "max(self._max, len(value)) class Column: \"\"\"Column class which contains the definition of one column,", "rename to CONTINUE' if newkey in Card._commentaryKeys or oldkey in Card._commentaryKeys: if not", "out the old table definition keywords. Mark them first, # then delete from", "for variable length columns for i in range(hdu.data._nfields): if isinstance(hdu.data._coldefs.formats[i], _FormatP): key =", "def get_history(self): \"\"\"Get all histories as a list of string texts.\"\"\" output =", "bscale=1, bzero=0): \"\"\"Scale image data by using BSCALE/BZERO. Call to this method will", "reconstruct another kind of header. \"\"\" try: # have both SIMPLE and XTENSION", "self._parent.field(indx), self._coldefs._recformats[indx]._nx) continue (_str, _bool, _number, _scale, _zero, bscale, bzero) = self._get_scale_factors(indx) #", "ValueError, 'Regular and commentary keys can not be renamed to each other.' elif", "return block def __str__(self): \"\"\"Format a list of cards into a printable string.\"\"\"", "and the following disclaimer. 2. Redistributions in binary form must reproduce the above", "% new_name else: self.change_attrib(col_name, 'name', new_name) def change_unit(self, col_name, new_unit): \"\"\"Change a Column's", "valStr = '%20s' % _floatFormat(self.value) else: valStr = '%20s' % self._valuestring elif isinstance(self.value,", "_keyList[_where:_where+nc] _start = _where # if not the real CONTINUE card, skip to", "self.mmobject = self.__file._mm if self.__file.mode in ['append', 'update']: self.flush(output_verify=output_verify, verbose=verbose) self.__file.close() # close", "\"\"\"Read blocks of header, and put each card into a list of cards.", "if mo is not None: naxis = int(mo.group(1)) pos = mo.end(0) else: raise", "if loc == 0: offset = -1 except: offset = len(input) # check", "= self.__file.tell() - _where hdu.data._heapsize = _shift - hdu.data._gap _size = _size +", "file if exists, default = False. \"\"\" if isinstance(self, _ExtensionHDU): hdulist = HDUList([PrimaryHDU(),", "If it does not, check to see # if we were provided with", "% _blockLen) != 0: raise IOError, 'Header size is not multiple of %d:", "field for ASCII table if self._coldefs._tbtype == 'TableHDU': _loc = [1] _width =", "+ 1 for i in range(nbytes): _min = i*8 _max = min((i+1)*8, nx)", "in order, in case of required cards in wrong order. if isinstance(self, _ExtensionHDU):", "updating the list(s). # Use lists, instead of dictionaries so the names can", "num.array(_mmap, type=code, shape=dims) else: raw_data = num.fromfile(self._file, type=code, shape=dims) raw_data._byteorder = 'big' if", "# FITS format if dtype == 'A': output_format = _fits2rec[dtype]+`repeat` # to accomodate", "_err.append(_result) return _err def append(self, hdu): \"\"\"Append a new HDU to the HDUList.\"\"\"", "raise ValueError, 'Illegal value %s' % str(val) self.__dict__['value'] = val def _setcomment(self, val):", "CardList. 
pos: The position (index, keyword name will not be allowed) to insert.", "def __init__(self, data=None, header=None, name=None): \"\"\"Construct an image HDU. data: the data in", "del self.header['BSCALE'] del self.header['BZERO'] self.header['BITPIX'] = _ImageBaseHDU.ImgCode[self.data.type()] else: self.data = raw_data try: return", "for cname in _commonNames: value = eval(cname) # get the argument's value keyword", "using numarray.strings's num2char because the # result is not allowed to expand (as", "data part of the random group, # since binary table does not support", "files. This file format was endorsed by the International Astronomical Union in 1999", "if self._valueModified: valStr = '%20s' % _floatFormat(self.value) else: valStr = '%20s' % self._valuestring", "= 1 def write(self,data): \"\"\" Write the given data to the stream. :Parameters:", "\"\"\"Set an HDU to the HDUList, indexed by number or name.\"\"\" _key =", "!= 1: raise \"Zip files with multiple members are not supported.\" self.tfile =", "update()] \"\"\" self._add_commentary('comment', value, before=before, after=after) def add_blank(self, value='', before=None, after=None): \"\"\"Add a", "dtype of the input data does not match what is expected by the", "EOFError hdu = _TempHDU() hdu._raw = '' # continue reading header blocks until", "if recfmt == _booltype: _out = num.zeros(array.shape, type=recfmt) num.where(array==0, ord('F'), ord('T'), _out) array", "_out) array = _out # make a copy if scaled, so as not", "0, 'length of dimension 1'), Card('NAXIS2', 0, 'length of dimension 2'), Card('PCOUNT', 0,", "= 1, search from the end. \"\"\" if isinstance(key, (int, long)): return key", "\"\"\" if self.__file != None: if self.__file.memmap == 1: self.mmobject = self.__file._mm if", "Install new handler signal.signal(signal.SIGINT,New_SIGINT) if self.__file.mode not in ('append', 'update'): print \"flush for", "complex): if self._valueModified: _tmp = '(' + _floatFormat(self.value.real) + ', ' + _floatFormat(self.value.imag)", "= \"%-8s= \" % self.key else: headstr = \"CONTINUE \" valstr = valfmt", "format if dtype == 'A': output_format = _fits2rec[dtype]+`repeat` # to accomodate both the", "ASCII characters. _ASCII_text = r'[ -~]*$' _comment_FSC_RE = re.compile(_ASCII_text) # Checks for a", "argument. 
\"\"\" _err = errlist fix = '' cards = self.header.ascard try: _index", "None or after != None: _card = Card(key, value, comment) self.ascard._pos_insert(_card, before=before, after=after)", "cards # if a long string has CONTINUE cards, the \"Card\" is considered", "new_card._cardimage != ' '*80 self.ascard.append(new_card, useblanks=useblanks, bottom=1) else: try: _last = self.ascard.index_of(key, backward=1)", "0) # the primary header >>> getdata('in.fits', 2) # the second extension >>>", "a string rather well, but will accept # strings with an odd number", "header else: # construct a list of cards of minimal header if isinstance(self,", "_VLF(map(_func, array)) except: try: # this handles ['abc'] and [['a','b','c']] # equally, beautiful!", "re_groups.search(block) if mo and simple: groups = 1 else: groups = 0 mo", "= {'L':_booltype, 'B':'u1', 'I':'i2', 'E':'f4', 'D':'f8', 'J':'i4', 'A':'a', 'C':'c8', 'M':'c16', 'K':'i8'} # the", "_TableBaseHDU): _tfields = self['TFIELDS'] del self['NAXIS'] for i in range(_naxis): del self['NAXIS'+`i+1`] if", "the column right after the last field elif tbtype == 'TableHDU': (_format, _width)", "return self._cardimage[_start:eqLoc] def _getValueCommentString(self): \"\"\"Locate the equal sign in the card image and", "3. The name of AURA and its representatives may not be used to", "(j == 0): _after = 'naxis' else : _after = 'naxis'+`j` self.header.update('naxis'+`j+1`, axes[j],", "HDU's as input if isinstance(hdus, _ValidHDU): hdus = [hdus] elif not isinstance(hdus, (HDUList,", "elif after != None: loc = self.index_of(after) self.insert(loc+1, card, useblanks=useblanks) def insert(self, pos,", "supplied data. This argument is optional. @keyword clobber: (optional) if True and if", "= [None]*npars if bitpix is None: bitpix = _ImageBaseHDU.ImgCode[input.type()] fits_fmt = GroupsHDU._dict[bitpix] #", "file. @param filename: input FITS file name @type: string @param ext: The rest", "Default = None. \"\"\" self.__file = file if hdus is None: hdus =", "Card, CardList, _Card_with_continue, Header, _Hierarch @group HDU Classes: _AllHDU, BinTableHDU, _CorruptedHDU, _ExtensionHDU, GroupsHDU,", "<= 0: self.header['NAXIS'] = 1 self.header.update('NAXIS1', 0, after='NAXIS') def __getattr__(self, attr): \"\"\"Get the", "found, return the default value. key: keyword name or index default: if no", "is therefore not very usable after the call. type (string): destination data type,", "in range(tfields): self.req_cards('TFORM'+`i+1`, None, None, None, option, _err) return _err class TableHDU(_TableBaseHDU): \"\"\"FITS", "data, header) else: hdu=_makehdu(data, header) if isinstance(hdu, PrimaryHDU): hdu = ImageHDU(data, header) f", "File size is smaller than specified data size. File may have been truncated.'", "opened. mode: Open mode, 'readonly' (default), 'update', or 'append'. memmap: Is memmory mapping", "= self.header.get('BSCALE', 1) _bzero = self.header.get('BZERO', 0) _cols.append(Column(name='data', format = dat_format, bscale =", "hdu.name, _extver # reset the modification attributes after updating for hdu in self:", "at the beginning in Solaris. 
self.__file.seek(0, 2) self._size = self.__file.tell() self.__file.seek(0) def __getattr__(self,", "nbytes*8 - nx for i in range(nbytes): _min = i*8 _max = min((i+1)*8,", "in range(_max): _where = _keyList[_start:].index('CONTINUE') + _start for nc in range(1, _max+1): if", "= newkey def get(self, key, default=None): \"\"\"Get a keyword value from the CardList.", "if new_name != col_name and new_name in self.names: raise ValueError, 'New name %s", "__builtin__.open(self.name, _python_mode[mode]) # For 'ab+' mode, the pointer is at the end after", "if isinstance(ext[0], Header): header = ext[0] ext = ext[1:] elif not isinstance(ext[0], (int,", "extension. @type filename: string @param filename: input FITS file name \"\"\" f =", "'/' _name = dirName + os.path.basename(tempfile.mktemp()) if not os.path.exists(_name): return _name else: raise", "the original file\", oldName # reopen the renamed new file with \"update\" mode", "list of Columns tmp = hdu.columns = ColDefs(input, tbtype) # read the delayed", "= 'IMAGE' self.header._hdutype = ImageHDU # insert the require keywords PCOUNT and GCOUNT", "self.index_of(key) _item = super(HDUList, self).__getitem__(key) if isinstance(_item, _TempHDU): super(HDUList, self).__setitem__(key, _item.setupHDU()) return super(HDUList,", "string, integer, or float \"\"\" _hdr = getheader(filename, *ext, **extkeys) return _hdr[key] def", "if not isinstance(i, chararray.CharArray): if i._type.bytes > 1: if i._byteorder != 'big': i.byteswap()", "list[i][-1] == 's': list[i]=list[i][:-1] for att in list: if att not in _commonNames:", "verify for i in range(len(self)): if i > 0 and (not isinstance(self[i], _ExtensionHDU)):", "0.8.8 def _iswholeline(indx, naxis): if isinstance(indx, (int, long)): if indx >= 0 and", "xtension = cards[0].value.rstrip() if xtension == 'TABLE': self._hdutype = TableHDU elif xtension ==", "copy(self): r = rec.RecArray.copy(self) r.__class__ = rec.RecArray r._coldefs = self._coldefs f = FITS_rec(r)", "printable ASCII text.\"\"\" if Card._comment_FSC_RE.match(val) is None: self.__dict__['_err_text'] = 'Unprintable string %s' %", "% hdu try: super(HDUList, self).__setitem__(_key, hdu) except IndexError: raise IndexError, 'Extension %s is", "are flexible and are best illustrated by examples: No extra arguments implies the", "not %d: %d' % (_blockLen, len(block)) elif (blocks[:8] not in ['SIMPLE ', 'XTENSION']):", "\"\"\" list.__init__(self, cards) self._cards = cards # if the key list is not", "x: raise VerifyError def _pad(input): \"\"\"Pad balnk space to the input string to", "by using the \"test\" argument. \"\"\" _err = errlist fix = '' cards", "are computed. \"\"\" _cardList = [] _keyList = [] blocks = self._raw if", "NAXIS's which is the # reverse of the numarray shape if isinstance(self, GroupsHDU):", "_verify(self, option='warn'): \"\"\"ImageHDU verify method.\"\"\" _err = _ExtensionHDU._verify(self, option=option) self.req_cards('PCOUNT', None, _isInt+\" and", "not in ['', None, 1]: array /= bscale self.array = array def __repr__(self):", "attr): \"\"\"Get the 'data' or 'columns' attribute.\"\"\" if attr == 'data': size =", "TNULL # this can be reset by user. 
_isInt = \"isinstance(val, (int, long))\"", "ColDefs(input, tbtype) # read the delayed data for i in range(len(tmp)): _arr =", "if option == 'old': _scale = self._bscale _zero = self._bzero elif option ==", "extension \"\"\" # parse the arguments header = None if len(ext) > 0:", "and (_scale or _zero): # only do the scaling the first time and", "make sure the EXTEND keyword is there if there is extension if len(self)", "_type = _dict[self._coldefs._Formats[indx][0]] # if the string = TNULL, return ASCIITNULL nullval =", "object is returned. \"\"\" def __init__(self, hdus=[], file=None): \"\"\"Construct a HDUList object. hdus:", "try: slashLoc = _tmp.index(\"/\") self.__dict__['value'] = _tmp[:slashLoc].strip() self.__dict__['comment'] = _tmp[slashLoc+1:].strip() except: self.__dict__['value'] =", "not begin with SIMPLE or XTENSION' for i in range(0, len(blocks), Card.length): _card", "fits_fmt = GroupsHDU._dict[bitpix] # -32 -> 'E' _fmt = _fits2rec[fits_fmt] # 'E' ->", "to which the header and data will be streamed. header : Header The", "key = hdu.header['TFORM'+`i+1`] hdu.header['TFORM'+`i+1`] = key[:key.find('(')+1] + `hdu.data.field(i)._max` + ')' def flush(self, output_verify='exception',", "repeat != 1: _repeat = `repeat` output_format = _repeat+_fits2rec[dtype] elif dtype == 'X':", "_tdef_re.match(_card.key) try: keyword = _key.group('label') except: continue # skip if there is no", "return result def __setitem__(self, key, value): \"\"\"Set a Card by indexing or by", "'Illegal value %s' % str(val) self.__dict__['value'] = val def _setcomment(self, val): \"\"\"Set the", "scaling flags and factors for one field. indx is the index of the", "# Boolean (logical) column if self._coldefs._recformats[indx]._dtype is _booltype: for i in range(len(self._parent)): dummy[i]", "If the card does not exist, the new card will have the fix_value", "!= '': longstring = longstring + _comm.rstrip() + ' ' self.__dict__[name] = longstring.rstrip()", "def _get_tbdata(hdu): \"\"\" Get the table data from input (an HDU object).\"\"\" tmp", "*ext, **extkeys) return _hdr[key] def _makehdu(data, header): if header is None: if isinstance(data,", "offset = len(input) # check for one word longer than strlen, break in", "specified in the header provided to the class constructor may be written to", "binary table does not support ND yet if isinstance(hdu, GroupsHDU): tmp._recformats[-1] = `hdu._dimShape()[:-1]`", "self._keylist[_key] = value.key.upper() self.count_blanks() self._mod = 1 else: raise SyntaxError, \"%s is not", "fix_value, option, errlist): \"\"\"Check the existence, location, and value of a required Card.\"\"\"", "\"\"\" # mappings between FITS and numarray typecodes NumCode = {8:'UInt8', 16:'Int16', 32:'Int32',", "Parse the TFORM value into data type and width. try: (dtype, width) =", "= 2880 # the FITS block size _python_mode = {'readonly':'rb', 'copyonwrite':'rb', 'update':'rb+', 'append':'ab+'}", "self.field('data') elif attr == '_unique': _unique = {} for i in range(len(self.parnames)): _name", "dir(hdu): if hdu.data is not None: hdu._file.seek(hdu._datLoc) self.__file.writeHDUdata(hdu) if (verbose): print \"update data", "_isInt = \"isinstance(val, (int, long))\" # Functions def _padLength(stringLen): \"\"\"Bytes needed to pad", "code illustrates its use: header = pyfits.Header() for all the cards you need", "(pcount + size) / 8 return size def close(self): \"\"\" Close the 'physical'", "output_verify='exception', clobber=False): \"\"\"Write the HDU to a new file. 
This is a convenience", "CONTINUE' if newkey in Card._commentaryKeys or oldkey in Card._commentaryKeys: if not (newkey in", "table extension HDU class.\"\"\" __format_RE = re.compile( r'(?P<code>[ADEFI])(?P<width>\\d+)(?:\\.(?P<prec>\\d+))?') def __init__(self, data=None, header=None, name=None):", "does not begin with SIMPLE or XTENSION' for i in range(0, len(blocks), Card.length):", "(default) or 'TableHDU' (text table). \"\"\" ascii_fmt = {'A':'A1', 'I':'I10', 'E':'E14.6', 'F':'F16.7', 'D':'D24.16'}", "if naxis > 0: size = 1 for j in range(naxis): size =", "None: eqLoc = 7 return self._cardimage[eqLoc+1:] def _check(self, option='ignore'): \"\"\"Verify the card image", "== 'IMAGE': self._hdutype = ImageHDU elif xtension in ('BINTABLE', 'A3DTABLE'): self._hdutype = BinTableHDU", "in update()] \"\"\" self._add_commentary('history', value, before=before, after=after) def add_comment(self, value, before=None, after=None): \"\"\"Add", "data area size, including padding hdu._datSpan = _size + _padLength(_size) hdu._new = 0", "HDUList object is created from files # other than FITS, the close() call", "= ext2['extname'] else: raise KeyError, 'Insufficient keyword argument: %s' % ext2 return hdulist,", "= self.__file._mm if self.__file.mode in ['append', 'update']: self.flush(output_verify=output_verify, verbose=verbose) self.__file.close() # close the", "or _zero): # only do the scaling the first time and store it", "data of the table header: header to be used for the HDU name:", "desc[:,0].sum()*_dtype.bytes # conversion for both ASCII and binary tables if _number or _str:", "_Hierarch): keyStr = 'HIERARCH %s ' % self.key else: keyStr = '%-8s' %", "keyword \"\"\" # any of the input argument (except array) can be a", "the length of a card image (80 columns). If the card image is", "= name def __getattr__(self, attr): \"\"\"Get the 'data' or 'columns' attribute.\"\"\" if attr", "= None self.__dict__[attr] = data elif attr == 'columns': class_name = str(self.__class__) class_name", "calculated or the 'END' card is not found. In the case of a", "']' _dims = \"%dR x %dC\" % (_nrows, _ncols) return \"%-10s %-11s %5d", "verbose: print \"One or more header is resized.\" break # Data: if 'data'", "record array (i.e. table), or groups data object depending on the type of", ">= 0 and val <= 999\", 0, option, _err) naxis = self.header.get('NAXIS', 0)", "a table HDU or a list of Columns\" def __getattr__(self, name): \"\"\"Populate the", "raise ValueError, 'Intended keyword %s already exists in header.' % newkey _index =", "def __setitem__ (self, key, value): \"\"\"Set a header keyword value.\"\"\" self.ascard[key].value = value", "self.header['BITPIX'] self._file.seek(self._datLoc) if isinstance(self, GroupsHDU): dims = self.size()*8/abs(_bitpix) else: dims = self._dimShape() code", "try: return self.__dict__[attr] except KeyError: raise AttributeError(attr) def _dimShape(self): \"\"\"Returns a tuple of", "raise KeyError, 'Redundant/conflicting keyword argument(s): %s' % ext2 if isinstance(ext1[0], str): if n_ext2", "2: if n_ext2 == 0: ext = ext1 else: raise KeyError, 'Redundant/conflicting keyword", "is expected by the header, a TypeError exception is raised. \"\"\" if self.writeComplete:", "hdu._getname() elif hdu.name == 'PRIMARY': hdu._extver = 1 hdu._file = self.__file hdu._hdrLoc =", "else: hdu=_makehdu(data, header) if isinstance(hdu, PrimaryHDU): hdu = ImageHDU(data, header) f = open(filename,", "line. 
# Instead, just truncate the comment if isinstance(self.value, str) and len(valStr) >", "' ') if real.group('sign') == None: _val = eval(_rdigt) else: _val = eval(real.group('sign')+_rdigt)", "resize attributes after updating self._resize = 0 for hdu in self: hdu.header._mod =", "not recognized.' % tform if repeat == '': repeat = 1 else: repeat", "BZERO del self.header['BSCALE'] del self.header['BZERO'] def update_header(self): \"\"\"Update the header keywords to agree", "for random group image, NAXIS1 should be 0, so we skip NAXIS1. if", "= re_gcount.search(block) if mo is not None: gcount = int(mo.group(1)) else: gcount =", "reading will be delayed for col in range(_nfields): dict[col]['array'] = Delayed(input, col) #", "= None, None, None self.header = header self.data = data self._xtn = '", "the equal sign position if self.key not in Card._commentaryKeys and self._cardimage.find('=') != 8:", "isinstance(key, str): _key = key.strip().upper() if _key[:8] == 'HIERARCH': _key = _key[8:].strip() _keylist", "= self.header.get('NAXIS', 0) if naxis < 1000: for j in range(3, naxis+3): self.req_cards('NAXIS'+`j-2`,", "'format', 'unit', 'null', 'bscale', 'bzero', 'disp', 'start', 'dim'] _keyNames = ['TTYPE', 'TFORM', 'TUNIT',", "if self.__file.tell() > self._size: print 'Warning: File size is smaller than specified data", "'IMAGE': self._hdutype = ImageHDU elif xtension in ('BINTABLE', 'A3DTABLE'): self._hdutype = BinTableHDU else:", "may not exist for name in ['key', 'value', 'comment', '_valueModified']: if self.__dict__.has_key(name): delattr(self,", "= '' # only append HDU's which are \"new\" if hdu._new: self.__file.writeHDU(hdu) if", "del dummy return out # if not a slice, do this because Record", "1, 'number of groups')) if header is not None: hcopy = header.copy() hcopy._strip()", "by using BSCALE/BZERO. Call to this method will scale self.data and update the", "it will use this space first, instead of appending after these blank cards,", "self).insert(pos, card) self._keylist.insert(pos, card.key) # update the keylist self.count_blanks() if useblanks: self._use_blanks(card._ncards()) self.count_blanks()", "super(CardList, self).__delitem__(_key) del self._keylist[_key] # update the keylist self.count_blanks() self._mod = 1 def", "desp_output: output \"descriptor\" array of data type 2Int32 dtype: data type of the", "self: if (verbose): try: _extver = `hdu.header['extver']` except: _extver = '' if hdu.header._mod", "not None: hdu.data._scale_back() if isinstance(hdu, _TableBaseHDU) and hdu.data is not None: # check", "class _Card_with_continue(Card): \"\"\"Cards having more than one 80-char \"physical\" cards, the cards after", "= pyfits.Header() for all the cards you need in the header: header.update(key,value,comment) shdu", "= len(comm) / comm_len + 1 comm_list = self._words_group(comm, comm_len) for i in", "option, _err) self.req_cards('NAXIS', '== 2', _isInt+\" and val >= 0 and val <=", "_FormatP): key = hdu.header['TFORM'+`i+1`] hdu.header['TFORM'+`i+1`] = key[:key.find('(')+1] + `hdu.data.field(i)._max` + ')' def flush(self,", "it first. if self.__dict__.has_key('_cardimage'): self._check(option) self._ascardimage() return self.__dict__['_cardimage'] def _ascardimage(self): \"\"\"Generate a (new)", "primary HDU if len(self) > 0 and (not isinstance(self[0], PrimaryHDU)): err_text = \"HDUList's", "the next card to search # to avoid starting at the same CONTINUE", "the next level # must present, even it has nothing. 
for item in", "and keep it unchanged else: self.header = header else: # construct a list", "_zero) = self.data._get_scale_factors(i)[3:5] if _scale: self.header.update('PSCAL'+`i+1`, self.data._coldefs.bscales[i]) if _zero: self.header.update('PZERO'+`i+1`, self.data._coldefs.bzeros[i]) def __getattr__(self,", "writeto(self, name, output_verify='exception', clobber=False): \"\"\"Write the HDUList to a new file. name: output", "files if mode in ['update', 'append']: raise \"Writing to zipped fits files is", "# update the 2nd SCI extension >>> update(file, dat, 3, header=hdr) # update", "used when the file is first opened. This is to speed up the", "% indx+1 if 'A' in _format: _pc = '%-' else: _pc = '%'", "'Block does not begin with SIMPLE or XTENSION' for i in range(0, len(blocks),", "_cols = self.columns _update('naxis1', self.data._itemsize, after='naxis') _update('naxis2', self.data._shape[0], after='naxis1') _update('tfields', len(_cols), after='gcount') #", "if not isinstance(hdu, _AllHDU): raise ValueError, \"%s is not an HDU.\" % hdu", "type' value = value.upper() if self.header.has_key('EXTNAME'): self.header['EXTNAME'] = value else: self.header.ascard.append(Card('EXTNAME', value, 'extension", "= ' '*_lead + _pc + _format[1:] + _dict[_format[0]] + ' '*_trail #", "= 0 else: # flat the shape temporarily to save memory dims =", "= _unique try: return self.__dict__[attr] except KeyError: raise AttributeError(attr) def par(self, parName): \"\"\"Get", "image HDU. data: the data in the HDU, default=None. header: the header to", "= GroupData(_get_tbdata(self)) data._coldefs = self.columns data.parnames = self.columns._pnames else: data = None self.__dict__[attr]", "output_format = _repeat+_fits2rec[dtype] elif dtype == 'X': nbytes = ((repeat-1) / 8) +", "range(len(tmp)): _arr = tmp._arrays[i] if isinstance(_arr, Delayed): tmp._arrays[i] = _arr.hdu.data._parent.field(_arr.field) # use the", "remaining axes else: offset *= _naxis if dims == []: dims = [1]", "self.__dict__: self.__dict__['_valuestring'] = valu.group('valu') if '_valueModified' not in self.__dict__: self.__dict__['_valueModified'] = 0 elif", "cards of minimal header if isinstance(self, _ExtensionHDU): c0 = Card('XTENSION', 'IMAGE', 'Image extension')", "2: raise ValueError, \"too many positional arguments\" elif n_ext1 == 1: if n_ext2", "filename: name of the file to append to @type data: array, table, or", "'Long card image must have CONTINUE cards after the first card.' if not", "hdu.data._heapsize = _shift - hdu.data._gap _size = _size + _shift # pad the", "% input_format return (dtype, width) def _get_index(nameList, key): \"\"\" Get the index of", "== 'PRIMARY': hdu._extver = 1 hdu._file = self.__file hdu._hdrLoc = _hdrLoc # beginning", "_realStr = real.group('digt').translate(_fix_table, ' ') if real.group('sign') is not None: _realStr = real.group('sign')+_realStr", "try: eqLoc = self._cardimage[:_limit].index(\"=\") except: eqLoc = None return eqLoc def _getKeyString(self): \"\"\"Locate", "card into a list of cards. Will deal with CONTINUE cards in a", "# touch the data, so it's defined (in the case of reading from", "and value of mandatory keywords. 
naxis = self.header.get('NAXIS', 0) self.req_cards('PCOUNT', '== '+`naxis+3`, _isInt+\"", "= 'binary table extension' class StreamingHDU: \"\"\" A class that provides the capability", "text[:-1] def copy(self): tmp = Column(format='I') # just use a throw-away format tmp.__dict__=self.__dict__.copy()", "the HDU name: the EXTNAME value \"\"\" _TableBaseHDU.__init__(self, data=data, header=header, name=name) self._xtn =", "ValueError, valu size = eval(width)+1 strfmt = strfmt + 's'+str(size) + ',' strlen", "of BSCALE and BZERO in self.header. This method should only be used right", "Header ends, but this task may be difficult when the extension is a", "than 8 characters. \"\"\" def _verify(self, option='warn'): \"\"\"No verification (for now).\"\"\" return _ErrList([])", "formats=tmp._recformats, names=tmp.names, shape=tmp._shape) else: _data = rec.array(hdu._file, formats=tmp._recformats, names=tmp.names, shape=tmp._shape) if isinstance(hdu._ffile, _File):", "primary header >>> getdata('in.fits') By extension number: >>> getdata('in.fits', 0) # the primary", "num.lshift(output[...,i], unused, output[...,i]) def _makep(input, desp_output, dtype): \"\"\"Construct the P format column array,", "the file. \"\"\" self.header = header.copy() # # Check if the file already", "bottom=0): \"\"\"Append a Card to the CardList. card: The Card to be appended.", "start must be integer.' % input _stop = input.stop if _stop is None:", "sign for commentary cards (i.e. part of the string value) _key = self._cardimage[:8].strip().upper()", "except defaults, must be compliant to FITS standard. key: keyword name, default=''. value:", "but the provided header represents a Primary header, the header will be modified", "(column) names are case sensitive: you can have two different columns called 'abc'", "type=code, shape=dims) raw_data._byteorder = 'big' if (self._bzero != 0 or self._bscale != 1):", "data is DELAYED: # this should never happen if header is None: raise", "== 'HIERARCH': key = key[8:].strip() _index = self.ascard._keylist.index(key) return 1 except: return 0", "keys of commentary cards _commentaryKeys = ['', 'COMMENT', 'HISTORY'] def __init__(self, key='', value='',", "# skip if there is no match if (keyword in _keyNames): col =", "card to be FITS standard.: %s' % self.key else: self.__dict__['_err_text'] = 'Card image", "the data. If the 3rd argument is not a header, it (and other", "def setpar(self, parName, value): \"\"\"Set the group parameter values.\"\"\" if isinstance(parName, (int, long)):", "hduList.__file.writeHDU(hdu) hduList.close(output_verify=output_verify) def close(self, output_verify='exception', verbose=0): \"\"\"Close the associated FITS file and memmap", "i - 1 break def append(self, card, useblanks=1, bottom=0): \"\"\"Append a Card to", "of cards. Will deal with CONTINUE cards in a later stage as CONTINUE", "== 'COMMENT': output.append(_card.value) return output def _add_commentary(self, key, value, before=None, after=None): \"\"\"Add a", "object. header: header to be used to populate the non-required keywords nrows: number", "backward=0): \"\"\"Get the index of a keyword in the CardList. key: the keyword", "option, _err) self.req_cards('GROUPS', _pos, 'val == True', True, option, _err) return _err #", "self.header['NAXIS'] + 3 # if the card EXTEND exists, must be after it.", "table, so both will produce 'a7'. if fmt.lstrip()[0] == 'A' and option !=", "A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL AURA BE LIABLE FOR", "time print out all top level messages for item in self: if not", "\"\"\"Check for existence of a keyword. Returns 1 if found, otherwise, 0. key:", "+ ') *, *(?P<imag>' + _numr_NFSC + ') *\\))' r')? *)' r'(?P<comm_field>' r'(?P<sepr>/", "_min = i*8 _max = min((i+1)*8, nx) for j in range(_min, _max): if", "hdr['TFIELDS'] self._width = hdr['NAXIS1'] self._shape = hdr['NAXIS2'] # go through header keywords to", "even it has nothing. for item in self: if isinstance(item, _ErrList): _dummy =", "of bits \"\"\" output[...] = 0 # reset the output nbytes = ((nx-1)", "None else: if _key == 'HIERARCH': _limit = Card.length else: _limit = 10", "is used when one or more mandatory Cards are corrupted (unparsable), such as", "\"\"\"Delete an HDU from the HDUList, indexed by number or name.\"\"\" key =", "_zero, bscale, bzero) = self._get_scale_factors(indx) # for P format if isinstance(self._coldefs._recformats[indx], _FormatP): dummy", "different from the old one if str(self[_key]) != str(value): super(CardList, self).__setitem__(_key, value) self._keylist[_key]", "of all remaining axes else: offset *= _naxis if dims == []: dims", "dummy = self._parent.field(indx) # further conversion for both ASCII and binary tables if", "'X format' elif dtype+option in _rec2fits.keys(): # record format _repeat = '' if", "the above _rec2fits = {} for key in _fits2rec.keys(): _rec2fits[_fits2rec[key]]=key class _FormatX(str): \"\"\"For", "= _card.key cardList.append(_card) keyList.append(_key) if _key == 'END': break def _readHDU(self): \"\"\"Read the", "self.__file.mode not in ('append', 'update'): print \"flush for '%s' mode is not supported.\"", "_unique try: return self.__dict__[attr] except KeyError: raise AttributeError(attr) def par(self, parName): \"\"\"Get the", "IndexError, 'Illegal slice %s, step must be integer.' % input return slice(_start, _stop,", "self._convert[indx] else: continue # ASCII table, convert numbers to strings if self._coldefs._tbtype ==", "hduList._resize = 0 return hduList fitsopen = open # Convenience functions class _Zero(int):", "EXTEND or if it has the proper value. \"\"\" hdr = self[0].header if", "itemsize=1) else: value = num.array(value, type=self._dtype) objects.ObjectArray.__setitem__(self, key, value) self._max = max(self._max, len(value))", "self._add_commentary('history', value, before=before, after=after) def add_comment(self, value, before=None, after=None): \"\"\"Add a COMMENT card.", "numbers, otherwise # Python might evaluate them as octal values. 
_number_FSC_RE = re.compile(r'(?P<sign>[+-])?0*(?P<digt>'", "a Column's name.\"\"\" if new_name != col_name and new_name in self.names: raise ValueError,", "_repeat = '' if repeat != 1: _repeat = `repeat` output_format = _repeat+_rec2fits[dtype+option]", "_tbsize = hdu.header['NAXIS1']*hdu.header['NAXIS2'] _heapstart = hdu.header.get('THEAP', _tbsize) hdu.data._gap = _heapstart - _tbsize _pcount", "name == 'key': self._extractKey() elif name in ['value', 'comment']: self._extractValueComment(name) else: raise AttributeError,", "= self._get_scale_factors(indx) # add the location offset of the heap area for each", "= \"'%-s&'\" val = self.value.replace(\"'\", \"''\") val_list = self._words_group(val, val_len) for i in", "self.__file.write(_padLength(_size)*'\\0') # flush, to make sure the content is written self.__file.flush() # return", "it will be created and if the header represents a Primary header, it", "circular reference of _pcount # pass the attributes for attr in ['formats', 'names']:", "(_key.strip()).upper() nfound = 0 for j in range(len(self)): _name = self[j].name if isinstance(_name,", "fmt = self.__format_RE.match(valu) if fmt: code, width, prec = fmt.group('code', 'width', 'prec') else:", "# the comment separator resulting in an incorrect # match. r'\\'(?P<strg>([ -~]+?|\\'\\'|)) *?\\'(?=$|/|", "hdu.header['NAXIS2'] = hdu.data.shape[0] # calculate PCOUNT, for variable length tables _tbsize = hdu.header['NAXIS1']*hdu.header['NAXIS2']", "header) else: hdu=_makehdu(data, header) if isinstance(hdu, PrimaryHDU): hdu = ImageHDU(data, header) f =", "if value != None: text += cname + ' = ' + `value`", "list of Columns\" def __getattr__(self, name): \"\"\"Populate the attributes.\"\"\" cname = name[:-1] if", "option, _err) self.req_cards('BITPIX', None, 'val == 8', 8, option, _err) self.req_cards('TFIELDS', '== 7',", "range(_naxis): del self['NAXIS'+`i+1`] if issubclass(self._hdutype, PrimaryHDU): del self['EXTEND'] del self['PCOUNT'] del self['GCOUNT'] if", "self.header['EXTEND'] #_after += 1 except: pass _pos = '>= '+`_after` self.req_cards('GCOUNT', _pos, _isInt,", "extension header and appended to the end of the file. \"\"\" self.header =", "_heapstart - _tbsize _pcount = hdu.data._heapsize + hdu.data._gap if _pcount > 0: hdu.header['PCOUNT']", "manipulating their contents. A module for reading and writing Flexible Image Transport System", "_val = _val[:-1] longstring = longstring + _val elif name == 'comment': _comm", "keep it unchanged else: self.header = header else: # construct a list of", "just truncate the comment if isinstance(self.value, str) and len(valStr) > (Card.length-10): self.__class__ =", "location before calling this method. \"\"\" if isinstance(hdu, _ImageBaseHDU): hdu.update_header() return (self.writeHDUheader(hdu),) +", "not isinstance(hdu, _AllHDU): raise \"Element %d in the HDUList input is not an", "AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED", "speed up the open. Any header will not be initialized till the HDU", "%s)\" % (keywd, `fix_value`) fix = \"self.header.ascard.insert(%d, %s)\" % (insert_pos, _card) _err.append(self.run_option(option, err_text=err_text,", "= hdu.data except IndexError: raise IndexError, 'No data in this HDU.' 
if _data", "del self[name+`i+1`] if issubclass(self._hdutype == TableHDU): for i in range(_tfields): del self['TBCOL'+`i+1`] except:", "print \"open a temp file\", _name for hdu in self: (hdu._hdrLoc, hdu._datLoc, hdu._datSpan)", "c0, Card('BITPIX', 8, 'array data type'), Card('NAXIS', 0, 'number of array dimensions'), ])", "new card will be created and it will be placed before or after", "ColDefs(x) def __len__(self): return len(self.data) def __repr__(self): return 'ColDefs'+ `tuple(self.data)` def __coerce__(self, other):", "is a single treaded application threadName = threading.currentThread() singleThread = (threading.activeCount() == 1)", "!= None: numr = Card._number_NFSC_RE.match(input.group('numr')) _valStr = numr.group('digt').translate(_fix_table, ' ') if numr.group('sign') is", "extension >>> update(file, dat, 3, header=hdr) # update the 3rd extension >>> update(file,", "for i in range(self._nfields): # touch all fields to expand the original ._convert", "non-required keywords nrows: number of rows in the new table fill: if =", "str(self[_key]) != str(value): super(CardList, self).__setitem__(_key, value) self._keylist[_key] = value.key.upper() self.count_blanks() self._mod = 1", "silently the case where \"=\" is before column 9, # since there is", "not isinstance(hdu, PrimaryHDU): hdu = PrimaryHDU(data, header=header) clobber = keys.get('clobber', False) hdu.writeto(filename, clobber=clobber)", "= name if self.header['NAXIS'] <= 0: self.header['NAXIS'] = 1 self.header.update('NAXIS1', 0, after='NAXIS') def", "self.header.ascard[i] _key = _tdef_re.match(_card.key) try: keyword = _key.group('label') except: continue # skip if", "raise ValueError, 'Can not rename to CONTINUE' if newkey in Card._commentaryKeys or oldkey", "avoid infinite loops if not (self.__dict__.has_key('value') or self.__dict__.has_key('_cardimage')): valStr = '' # string", "found = j nfound += 1 if (nfound == 0): raise KeyError, 'extension", "WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN", "header, data shape and type for each extension. @type filename: string @param filename:", "(errcode, errmsg, url) urllib._urlopener = ErrorURLopener() # Assign the locally subclassed opener #", "= '' if hdu.header._mod or hdu.header.ascard._mod: hdu._file.seek(hdu._hdrLoc) self.__file.writeHDUheader(hdu) if (verbose): print \"update header", "after column 8. \"\"\" eqLoc = self._locateEq() if eqLoc is None: eqLoc =", "\"Fixed by inserting a new '%s' card.\" % keywd if fixable: # use", "not to confuse the indexing. _list = [] for i in range(len(self.header.ascard)-1,-1,-1): _card", "== 'warn': pass # fix the value elif option == 'unfixable': _text =", "insert the keywords EXTEND if header is None: dim = `self.header['NAXIS']` if dim", "is too long.\" % self.key if len(output) <= Card.length: output = \"%-80s\" %", "one header card.\"\"\" \"\"\" If the keyword already exists, it's value/comment will be", "& EXTVER=2 >>> getdata('in.fits', extname='sci', extver=2) # equivalent >>> getdata('in.fits', ('sci', 2)) #", "from raw string. option: verification option, default=silentfix. \"\"\" # Only if the card", "# construct a table HDU hdu = eval(tbtype)(header=header) if isinstance(input, ColDefs): if input._tbtype", "Must seek to the correct location before calling this method. 
\"\"\" if isinstance(hdu,", "self).__getitem__(_key) def __getslice__(self, start, end): _cards = super(CardList, self).__getslice__(start,end) result = CardList(_cards, self._keylist[start:end])", "listed in _commonNames. The default is \"all\" which will print out all attributes.", "\"\"\" import re, os, tempfile, exceptions import operator import __builtin__ import urllib import", "IndexError, 'Illegal slice %s, stop < start.' % input _step = input.step if", "the array. Does not support theap yet. \"\"\" def __init__(self, name=None, format=None, unit=None,", "self.data = num.array(raw_data, type=num.Float32) else: # floating point cases if self._ffile.memmap: self.data =", "to find exact match first try: indx = nameList.index(key.rstrip()) except ValueError: # try", "# if more than one group parameter have the same name, the #", "= self.header['TBCOL'+`j+1`] valu = self.header['TFORM'+`j+1`] fmt = self.__format_RE.match(valu) if fmt: code, width, prec", "== 0', 0, option, _err) tfields = self.header['TFIELDS'] for i in range(tfields): self.req_cards('TBCOL'+`i+1`,", "continue # skip if there is no match if (keyword in _keyNames): col", "raise ValueError, \"%s is not an HDU.\" % hdu try: super(HDUList, self).__setitem__(_key, hdu)", "input_format (repeat, dtype, option) = _parse_tformat(fmt) if reverse == 0: if dtype in", "nrows: if tbtype == 'BinTableHDU': if isinstance(hdu.data._parent.field(i), num.NumArray): # make the scaled data", "Undefined: \"\"\"Undefined value.\"\"\" pass class Delayed: \"\"\"Delayed file-reading data.\"\"\" def __init__(self, hdu=None, field=None):", "= (), '' _nrows = 0 else: _nrows = len(self.data) _ncols = len(self.columns.formats)", "file) self.data return new_table(self.columns, header=self.header, tbtype=self.columns._tbtype) def _verify(self, option='warn'): \"\"\"_TableBaseHDU verify method.\"\"\" _err", "the close() call can also close the mm object. try: self.mmobject.close() except: pass", "%d).\" % insert_pos fix = \"_cards=self.header.ascard; dummy=_cards[%d]; del _cards[%d];_cards.insert(%d, dummy)\" % (_index, _index,", "of dimension 1'), Card('NAXIS2', 0, 'length of dimension 2'), Card('PCOUNT', 0, 'number of", "a StreamingHDU object given a file name and a header. :Parameters: name :", "the 2nd SCI extension >>> update(file, dat, 3, header=hdr) # update the 3rd", "(list, tuple)): other = [other] _other = [_get_index(self.names, key) for key in other]", "_after = 'naxis' else : _after = 'naxis'+`j` self.header.update('naxis'+`j+1`, axes[j], after = _after)", "getheader(filename, *ext, **extkeys): \"\"\"Get the header from an extension of a FITS file.", "end after the open in # Linux, but is at the beginning in", "cname = name[:-1] if cname in _commonNames: attr = [''] * len(self) for", "\" DELAYED = \"delayed\" # used for lazy instantiation of data ASCIITNULL =", "it does not, check to see # if we were provided with a", "the International Astronomical Union in 1999 and mandated by NASA as the standard", "range(len(tmp)): tmp._arrays[i] = _data.field(i) return FITS_rec(_data) def new_table (input, header=None, nrows=0, fill=0, tbtype='BinTableHDU'):", "self.tfile.file self.__file.write(zfile.read()) zfile.close() elif os.path.splitext(self.name)[1] == '.zip': # Handle zip files if mode", "all columns after this call. The final offset will be calculated when the", "= self.__file.tell() # Read the first header block. 
[Fragmented excerpts from the PyFITS source code (pyfits.py, the numarray-based FITS I/O module): shuffled n-gram snippets of its docstrings and implementation. The fragments touch on the Card, CardList, and Header classes; the Column, ColDefs, and FITS_rec table machinery; the HDU classes (PrimaryHDU, ImageHDU, TableHDU, BinTableHDU, GroupsHDU) and HDUList; the low-level _File I/O class; and the convenience functions open, getdata, getheader, writeto, append, update, and info. No contiguous code or prose is recoverable from these fragments.]
This is a convenience method to", "= self.index_of(after) self.insert(loc+1, card, useblanks=useblanks) def insert(self, pos, card, useblanks=1): \"\"\"Insert a Card", "data._byteorder != 'big': # # byteswap little endian arrays before writing # output", "table parent data, just pass it hdu.data._parent.field(i)[:n] = tmp._arrays[i][:n] elif isinstance(tmp._recformats[i], _FormatP): hdu.data._convert[i]", "attr == 'columns': class_name = str(self.__class__) class_name = class_name[class_name.rfind('.')+1:] self.__dict__[attr] = ColDefs(self, tbtype=class_name)", "_ImageBaseHDU): hdu.update_header() return (self.writeHDUheader(hdu),) + self.writeHDUdata(hdu) def writeHDUheader(self, hdu): \"\"\"Write FITS HDU header", "Cards. cards: A list of Cards, default=[]. \"\"\" list.__init__(self, cards) self._cards = cards", "of in the respective HDU classes, # so the checking is in order,", "not be renamed to each other.' elif (force == 0) and (newkey in", "else: groups = 0 size = 1 for j in range(groups,naxis): size =", "appended to the end of the file. \"\"\" self.header = header.copy() # #", "not parsable (i.e. everything else) result = None return result else: # verify", "self.header.has_key('SIMPLE') and os.path.getsize(name) > 0: # # This will not be the first", "attr == 'data': size = self.size() if size: self._file.seek(self._datLoc) data = _get_tbdata(self) data._coldefs", "END, i.e. backward? default=0. If backward = 1, search from the end. \"\"\"", "this HDUList)' else: _name = self.__file.name results = \"Filename: %s\\nNo. Name Type\"\\ \"", "data of random group FITS file will be like a binary table's data.", "and (not isinstance(self[0], PrimaryHDU)): err_text = \"HDUList's 0th element is not a primary", "for each extension. @type filename: string @param filename: input FITS file name \"\"\"", "= 0 def __setitem__(self, key, value): \"\"\"To make sure the new item has", "extname='sci') # equivalent Note EXTNAMEs are not case sensitive By combination of EXTNAME", "extension with # that name if _ver == None: found = j nfound", "mo is not None: gcount = int(mo.group(1)) else: gcount = 1 mo =", "name %s' % repr(val) self.__dict__['_fixable'] = 0 raise ValueError, self._err_text def _extractKey(self): \"\"\"Returns", "convert to record format (e.g. '3J'->'3i4') recfmt = _convert_format(format) except: try: # legit", "----------------------------- HDU classes ------------------------------------ class _AllHDU: \"\"\"Base class for all HDU (header data", "del self['PCOUNT'] del self['GCOUNT'] if issubclass(self._hdutype, PrimaryHDU): del self['GROUPS'] if issubclass(self._hdutype, _ImageBaseHDU): del", "keyword value.\"\"\" self.ascard[key].value = value self._mod = 1 def __delitem__(self, key): \"\"\"Delete card(s)", "or by the keyword name.\"\"\" if isinstance (value, Card): _key = self.index_of(key) #", "< -npts: indx = 0 elif indx < 0: indx += npts elif", "the provided header represents a Primary header, the header will be modified to", "input, row) def par(self, fieldName): \"\"\"Get the group parameter value.\"\"\" return self.array.par(fieldName)[self.row] def", "_card = self.header.ascard[i] _key = _tdef_re.match(_card.key) try: keyword = _key.group('label') except: continue #", "num.NumArray): hdu = ImageHDU(data) elif isinstance(data, FITS_rec): hdu = BinTableHDU(data) else: raise KeyError,", "self.req_cards('NAXIS1', '== 3', _isInt+\" and val == 0\", 0, option, _err) _after =", "a new FITS file using the supplied data/header. 
@type filename: string @param filename:", "truncate the comment if isinstance(self.value, str) and len(valStr) > (Card.length-10): self.__class__ = _Card_with_continue", "< nrows: if tbtype == 'BinTableHDU': if isinstance(hdu.data._parent.field(i), num.NumArray): # make the scaled", "else: raise IndexError, 'Index %s out of range.' % indx elif isinstance(indx, slice):", "# throw away -2^N _scale = (max - min) / (2.**(8*_type.bytes) - 2)", "_update = self.header.update _append = self.header.ascard.append _cols = self.columns _update('naxis1', self.data._itemsize, after='naxis') _update('naxis2',", "curDataSize + data.itemsize()*data._size > self._size: raise IOError, \"Supplied data will overflow the stream\"", "# where n is an int if isinstance(pos, str): _parse = pos.split() if", "+= bzero hdu.data._convert[i][:n] = _arr else: hdu.data._parent.field(i)[:n] = tmp._arrays[i][:n] if n < nrows:", "Card\" % str(card) def _use_blanks(self, how_many): if self._blanks > 0: for i in", "(as a template), default=None. If header=None, a minimal Header will be provided. \"\"\"", "' '*80 def __repr__(self): return self._cardimage def __getattr__(self, name): \"\"\" instanciate specified attribute", "done for a string, # since a greedy match will find a single-quote", "val_len = 67 comm_len = 64 output = '' # do the value", "card: The Card to be appended. useblanks: Use any *extra* blank cards? default=1.", "_card) _err.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix, fixable=fixable)) else: # if the supposed location is", "value): \"\"\"To make sure the new item has consistent data type to avoid", "# have both SIMPLE and XTENSION to accomodate Extension # and Corrupted cases", "\"copy\" (not just a view) of the input header, since it # may", "def __setattr__(self, name, val): if name == 'key': raise SyntaxError, 'keyword name cannot", "if option in ['fix', 'silentfix']: result = self._check('parse') self._fixValue(result) if option == 'fix':", "9, # since there is no way to communicate back to the _keylist.", "scale. The option will be overwritten by any user specified bscale/bzero values. bscale/bzero:", "\"\"\"No verification (for now).\"\"\" return _ErrList([]) class _Card_with_continue(Card): \"\"\"Cards having more than one", "FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AURA BE", "ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF", "= list(self.data.getshape()) axes.reverse() elif self.data is None: axes = [] else: raise ValueError,", "longer than 8 characters. 
\"\"\" def _verify(self, option='warn'): \"\"\"No verification (for now).\"\"\" return", "not the real CONTINUE card, skip to the next card to search #", "+ size) / 8 return size def copy(self): \"\"\"Make a copy of the", "None: _val = eval(_rdigt) else: _val = eval(real.group('sign')+_rdigt) imag = Card._number_NFSC_RE.match(valu.group('imag')) _idigt =", "_scale = self._bscale _zero = self._bzero elif option == 'minmax': if isinstance(_type, num.FloatingType):", "not exist and the provided header is not a Primary header, a default", "= CardList(cards) def __getitem__ (self, key): \"\"\"Get a header keyword value.\"\"\" return self.ascard[key].value", "input file, return the HDUList and the extension.\"\"\" hdulist = open(filename, mode=mode) n_ext1", "hdr.has_key('extend'): if (hdr['extend'] == False): hdr['extend'] = True else: if hdr['naxis'] == 0:", "= ASCIITNULL self._convert[indx] = dummy for i in range(len(self._parent)): if self._parent.field(indx)[i].strip() != nullval:", "numr.group('sign') is not None: _valStr = numr.group('sign')+_valStr elif input.group('cplx') != None: real =", "be positive.' % input else: raise IndexError, 'Illegal slice %s, step must be", "= 1 else: repeat = eval(repeat) return (repeat, dtype, option) def _convert_format(input_format, reverse=0):", "# get the argument's value keyword = _keyNames[_commonNames.index(cname)] if isinstance(value, Card): setattr(self, cname,", "elif isinstance(data, FITS_rec): hdu = BinTableHDU(data) else: raise KeyError, 'data must be numarray", "None, None, None self.header = header self.data = data self.name = None def", "= 'P' + _convert_format(val._dtype, reverse=1) + '(%d)' % VLdata._max else: val = _convert_format(val,", "= max(map(len, VLdata)) val = 'P' + _convert_format(val._dtype, reverse=1) + '(%d)' % VLdata._max", "\"\"\"Set a Card by indexing or by the keyword name.\"\"\" if isinstance (value,", "0 return hduList fitsopen = open # Convenience functions class _Zero(int): def __init__(self):", "raise ValueError, \"The keyword %s with its value is too long.\" % self.key", "_tmp[slashLoc+1:].strip() except: self.__dict__['value'] = _tmp.strip() elif input.group('numr') != None: numr = Card._number_NFSC_RE.match(input.group('numr')) _valStr", "for ascardimage. \"\"\" # keyword string if self.__dict__.has_key('key') or self.__dict__.has_key('_cardimage'): if isinstance(self, _Hierarch):", "_TableBaseHDU(_ExtensionHDU): \"\"\"FITS table extension base HDU class.\"\"\" def __init__(self, data=None, header=None, name=None): \"\"\"", "instanciate specified attribute object.\"\"\" if name == '_cardimage': self.ascardimage() elif name == 'key':", "if _key in Card._commentaryKeys: eqLoc = None else: if _key == 'HIERARCH': _limit", "None: # check format try: # legit FITS format? convert to record format", "'': width = None else: width = eval(width) except: raise ValueError, 'Illegal format", "# Make a \"copy\" (not just a view) of the input header, since", "{'readonly':'rb', 'copyonwrite':'rb', 'update':'rb+', 'append':'ab+'} # open modes _memmap_mode = {'readonly':'r', 'copyonwrite':'c', 'update':'r+'} TRUE", "therefore not very usable after the call. 
type (string): destination data type, use", "__delslice__(self, i, j): \"\"\"Delete a slice of HDUs from the HDUList, indexed by", "# for X format if isinstance(self._coldefs._recformats[indx], _FormatX): _nx = self._coldefs._recformats[indx]._nx dummy = num.zeros(self._parent.shape+(_nx,),", "= self.columns _update('naxis1', self.data._itemsize, after='naxis') _update('naxis2', self.data._shape[0], after='naxis1') _update('tfields', len(_cols), after='gcount') # Wipe", "one word longer than strlen, break in the middle if offset <= xoffset:", "this handles ['abc'] and [['a','b','c']] # equally, beautiful! _func = lambda x: chararray.array(x,", "%s, stop must be integer.' % input if _stop < _start: raise IndexError,", "= hdu.data._parent.field(i) if not isinstance(coldata, chararray.CharArray): # only swap unswapped # deal with", "self['XTENSION'] del self['BITPIX'] _naxis = self['NAXIS'] if issubclass(self._hdutype, _TableBaseHDU): _tfields = self['TFIELDS'] del", "key, value) self._max = max(self._max, len(value)) class Column: \"\"\"Column class which contains the", "_keyList[_start:].count('CONTINUE') == 0: break # construct the Header object, using the cards. try:", "end. \"\"\" if isinstance(key, (int, long)): return key elif isinstance(key, str): _key =", "the beginning of the file and the provided header will be added as", "self).__getslice__(start,end) result = HDUList(_hdus) return result def __setitem__(self, key, hdu): \"\"\"Set an HDU", "output_format._nx = repeat elif dtype == 'P': output_format = _FormatP('2i4') output_format._dtype = _fits2rec[option[0]]", "= eval(_digt) else: _val = eval(numr.group('sign')+_digt) elif valu.group('cplx') != None: # Check for", "\"\"\"Populate the attributes.\"\"\" cname = name[:-1] if cname in _commonNames: attr = ['']", "isinstance(ext[0], (int, long, str, tuple)): raise KeyError, 'Input argument has wrong data type.'", "format spec. \"\"\" ascii2rec = {'A':'a', 'I':'i4', 'F':'f4', 'E':'f4', 'D':'f8'} _re = re.compile(r'(?P<dtype>[AIFED])(?P<width>[0-9]*)')", "the supplied data/header. @type filename: string @param filename: name of the new FITS", "fmt in self.formats] elif self._tbtype == 'TableHDU': self._Formats = self.formats if len(self) ==", "to a new file. name: output FITS file name to be written to.", "it contains CONTINUE card(s). elif len(self._cardimage) > Card.length: self.__class__ = _Card_with_continue # remove", "input[...,j], output[...,i]) # shift the unused bits num.lshift(output[...,i], unused, output[...,i]) def _makep(input, desp_output,", "used to populate the non-required keywords nrows: number of rows in the new", "self.__file.writeHDU(hdu) if (verbose): print \"append HDU\", hdu.name, _extver hdu._new = 0 elif self.__file.mode", "hdr = self[0].header if hdr.has_key('extend'): if (hdr['extend'] == False): hdr['extend'] = True else:", "default = 0. This simply calls the close method of the _File class.", "if issubclass(self._hdutype, BinTableHDU): for name in ['TDISP', 'TDIM', 'THEAP']: for i in range(_tfields):", "is written self.__file.flush() return loc def writeHDUdata(self, hdu): \"\"\"Write FITS HDU data part.\"\"\"", "The name of the HDU, will be the value of the keywod EXTNAME,", "needs to multiply the length of all remaining axes else: offset *= _naxis", "into an UInt8 array. 
input: input Boolean array of shape (s, nx) output:", "' + `value` + '\\n' return text[:-1] def copy(self): tmp = Column(format='I') #", "string @param filename: input FITS file name \"\"\" f = open(filename) f.info() f.close()", "the verification with selected option.\"\"\" _text = err_text if not fixable: option =", "_nbytes = num.getType(dtype).bytes for i in range(len(input)): if dtype == 'a': data_output[i] =", "['value', 'comment']: self._extractValueComment(name) else: raise AttributeError, name return getattr(self, name) def _setkey(self, val):", "self.ascard = CardList(cards) def __getitem__ (self, key): \"\"\"Get a header keyword value.\"\"\" return", "_ValidHDU._verify(self, option=option) # Verify location and value of mandatory keywords. naxis = self.header.get('NAXIS',", "header, since it # may get modified. the data is still a \"view\"", "> 1: return _SinglePoint(1, indx) elif naxis == 1: return _OnePointAxis(1, 0) else:", "a list of string texts.\"\"\" output = [] for _card in self.ascardlist(): if", "== 'parse': # check the value only, no need to check key and", "with its parent card if nc > 0: _longstring = _cardList[_where-1]._cardimage for c", "+= 1 if (nfound == 0): raise KeyError, 'extension %s not found' %", "attr): \"\"\"Get the _mm attribute.\"\"\" if attr == '_mm': self.__dict__[attr] = Memmap.open(self.name, mode=_memmap_mode[self.mode])", "len(_cols), after='gcount') # Wipe out the old table definition keywords. Mark them first,", "\"\"\"Bytes needed to pad the input stringLen to the next FITS block.\"\"\" return", "_cardList.append(_card) _keyList.append(_key) # Deal with CONTINUE cards # if a long string has", "of HDUs from the HDUList, indexed by number only.\"\"\" super(HDUList, self).__delslice__(i, j) self._resize", "newkey == 'CONTINUE': raise ValueError, 'Can not rename to CONTINUE' if newkey in", "is None: parbscales = [None]*npars if parbzeros is None: parbzeros = [None]*npars if", "range(npars): (_scale, _zero) = self._get_scale_factors(i)[3:5] if _scale or _zero: self._convert[i] = pardata[i] else:", "= hdu.data._nfields hdu.header['NAXIS2'] = hdu.data.shape[0] # calculate PCOUNT, for variable length tables _tbsize", "a layer over the RecArray, so we can deal with scaled columns. \"\"\"", "n > 0: if isinstance(tmp._recformats[i], _FormatX): if tmp._arrays[i][:n].shape[-1] == tmp._recformats[i]._nx: _wrapx(tmp._arrays[i][:n], hdu.data._parent.field(i)[:n], tmp._recformats[i]._nx)", "pass it to the header object hduList._resize = 0 return hduList fitsopen =", "file. :Returns: None Notes ----- The file will be opened and the header", "of the data portion of the HDU. :Parameters: None :Returns: size : integer", "data: image data _file: file associated with array (None) _datLoc: starting byte location", "array, record array, or groups data object @param data: data to write to", "or 'append'. memmap: Is memmory mapping to be used? default=0. \"\"\" # instantiate", "i in range(nbytes): _min = i*8 _max = min((i+1)*8, nx) for j in", "re.compile(r'(?P<dtype>[AIFED])(?P<width>[0-9]*)') # Parse the TFORM value into data type and width. 
try: (dtype,", "is None) and self.header.has_key('EXTNAME'): name = self.header['EXTNAME'] self.name = name def _verify(self, option='warn'):", "self.header['NAXIS'] <= 0: self.header['NAXIS'] = 1 self.header.update('NAXIS1', 0, after='NAXIS') def __getattr__(self, attr): \"\"\"Get", "if self.data is None: _shape, _format = (), '' _nrows = 0 else:", "import __builtin__ import urllib import tempfile import gzip import zipfile import numarray as", "range(len(self)): (_format, _width) = _convert_ASCII_format(self.formats[i]) if self.starts[i] is '': self.starts[i] = last_end +", "populated in EXTNAME keyword \"\"\" if header is not None: if not isinstance(header,", "(len(self) == 0): print \"There is nothing to write.\" return self.update_tbhdu() if output_verify", "# add NAXISi if it does not exist for j in range(len(axes)): try:", "chararray.CharArray): hdu.data._parent.field(i)[:n] = tmp._arrays[i][:n] else: hdu.data._convert[i] = num.zeros(nrows, type=tmp._arrays[i].type()) if _scale or _zero:", "\"val in [8, 16, 32, 64, -32, -64]\" # Verify location and value", "output_verify: output verification option, default = 'exception'. clobber: Overwrite the output file if", "%s' % self.key # verify the key, it is never fixable # always", "__init__(self, val, unit=\"Element\"): list.__init__(self, val) self.unit = unit def __str__(self, tab=0): \"\"\"Print out", "itemsize=1) array = _VLF(map(_func, array)) except: raise ValueError, \"Inconsistent input data array: %s\"", "# check if the output file already exists if os.path.exists(name): if clobber: print", "data type to avoid misalignment. \"\"\" if isinstance(value, num.NumArray) and value.type() == self._dtype:", "= (max - min) / (2.**8 - 1) else: _zero = (max +", "\".\" not in valueStr and \"E\" not in valueStr: valueStr += \".0\" return", "'*80 def __repr__(self): return self._cardimage def __getattr__(self, name): \"\"\" instanciate specified attribute object.\"\"\"", "= _FormatP('2i4') output_format._dtype = _fits2rec[option[0]] elif dtype == 'F': output_format = 'f8' else:", "must be a string. force: if new key name already exist, force to", "extension' class StreamingHDU: \"\"\" A class that provides the capability to stream data", "'A' and option != '': output_format = _fits2rec[dtype]+`int(option)` # make sure option is", "None :Returns: size : integer The number of bytes of data required to", "specification. They are flexible and are best illustrated by examples: No extra arguments", "value) self._keylist[_key] = value.key.upper() self.count_blanks() self._mod = 1 else: raise SyntaxError, \"%s is", "option=\"old\", bscale=1, bzero=0): \"\"\"Scale image data by using BSCALE/BZERO. 
Call to this method", "else: if indx.step == 1: return _LineSlice(indx.stop-indx.start, indx.start) else: return _SteppedSlice((indx.stop-indx.start)/indx.step, indx.start) else:", "'number of group parameters'), Card('GCOUNT', 1, 'number of groups'), Card('TFIELDS', 0, 'number of", "0 and (not isinstance(self[0], PrimaryHDU)): err_text = \"HDUList's 0th element is not a", "make the scaled data = 0, not the stored data hdu.data._parent.field(i)[n:] = -bzero/bscale", "Card('NAXIS', 2, 'number of array dimensions'), Card('NAXIS1', 0, 'length of dimension 1'), Card('NAXIS2',", "hdu.data._parent.field(i)[:n] = tmp._arrays[i][:n] else: hdu.data._convert[i] = num.zeros(nrows, type=tmp._arrays[i].type()) if _scale or _zero: _arr", "no need to convert, if isinstance(tmp._arrays[i], chararray.CharArray): hdu.data._parent.field(i)[:n] = tmp._arrays[i][:n] else: hdu.data._convert[i] =", "# if the key list is not supplied (as in reading in the", "_convert_ASCII_format(self.formats[i]) if self.starts[i] is '': self.starts[i] = last_end + 1 _end = self.starts[i]", "try: keyword = _key.group('label') except: continue # skip if there is no match", "operator.countOf(_list, _key) # occurrence of _key in _list if _count == 1: indx", "point overlaps to the next column\" % indx+1 if 'A' in _format: _pc", "HDU's or a single HDU. Default = None, i.e. an empty HDUList. file:", "self.req_cards('TFORM'+`i+1`, None, None, None, option, _err) return _err class TableHDU(_TableBaseHDU): \"\"\"FITS ASCII table", "randomGroups == 'T': groups = 1 else: groups = 0 size = 1", "dim = 0 if dim > nrows: nrows = dim if tbtype ==", "verification.\"\"\" def run_option(self, option=\"warn\", err_text=\"\", fix_text=\"Fixed.\", fix = \"pass\", fixable=1): \"\"\"Execute the verification", "class for the TableHDU, ImageHDU, and BinTableHDU classes. \"\"\" def __init__(self, data=None, header=None):", "if new key name already exist, force to have duplicate name. \"\"\" oldkey", "value: Text to be added. before: [same as in update()] after: [same as", "Name =\", hdu.name, _extver # reset the modification attributes after updating for hdu", "if valu.group('bool') != None: _val = valu.group('bool')=='T' elif valu.group('strg') != None: _val =", ">= strlen+offset)[0][0] offset = blank_loc[loc-1] + 1 if loc == 0: offset =", "info. else: _shape = () _nrows = self.header['NAXIS2'] _ncols = self.header['TFIELDS'] _format =", "num.multiply(self._convert[indx], bscale, self._convert[indx]) if _zero: self._convert[indx] += bzero elif _bool: self._convert[indx] = num.equal(dummy,", "_err) self.req_cards('BITPIX', None, 'val == 8', 8, option, _err) self.req_cards('TFIELDS', '== 7', _isInt+\"", "may be written to the stream. If the provided data would cause the", "the format (e.g. E-009 vs. E-09) elif isinstance(self.value, float): if self._valueModified: valStr =", "= self._getValueCommentString() try: slashLoc = _tmp.index(\"/\") self.__dict__['value'] = _tmp[:slashLoc].strip() self.__dict__['comment'] = _tmp[slashLoc+1:].strip() except:", "output[:Card.length] self.__dict__['_cardimage'] = output def _checkText(self, val): \"\"\"Verify val to be printable ASCII", "string value. 
\"\"\" def __str__(self): \"\"\"Format a list of cards into a printable", "+ ')' valStr = '%20s' % _tmp else: valStr = '%20s' % self._valuestring", "HDU class.\"\"\" \"\"\" This class is used when one or more mandatory Cards", "for i in range(hdu.data._nfields): if isinstance(hdu.data._coldefs.formats[i], _FormatP): key = hdu.header['TFORM'+`i+1`] hdu.header['TFORM'+`i+1`] = key[:key.find('(')+1]", "'0': dim = '' # set extension name if (name is None) and", "bscale/bzero for now XXX) _bitpix = self.hdu.header['BITPIX'] code = _ImageBaseHDU.NumCode[_bitpix] self.hdu._file.seek(self.hdu._datLoc+offset*abs(_bitpix)/8) raw_data =", "be a list (or tuple) containing arrays else: if isinstance(value, (list, tuple)) and", "Section: \"\"\"Image section.\"\"\" def __init__(self, hdu): self.hdu = hdu def __getitem__(self, key): dims", "created @type filename: string @param filename: name of the file to append to", "table fields') ]) if header is not None: # Make a \"copy\" (not", "output = self._breakup_strings() else: print 'card is too long, comment is truncated.' output", "_words_group(self, input, strlen): \"\"\"Split a long string into parts where each part is", "(self.header['NAXIS'+`j+1`],) _format = self.NumCode[self.header['BITPIX']] if isinstance(self, GroupsHDU): _gcount = ' %d Groups %d", "urllibrary urllib._urlopener.tempcache = {} # Initialize tempcache with an empty # dictionary to", "- _loc[indx] if _lead < 0: raise ValueError, \"column `%s` starting point overlaps", "else: _repeat = '' if repeat != 1: _repeat = `repeat` output_format =", "% commstr return output def _words_group(self, input, strlen): \"\"\"Split a long string into", "the file to which the header and data will be streamed. header :", "e.g., >>> getdata('in.fits', ext=('sci',1), extname='err', extver=2) @return: an array, record array (i.e. table),", "indx = 0 elif indx < 0: indx += npts elif indx >", "info. if 'data' in dir(self): if self.data is None: _shape, _format = (),", "list of bscales for the parameters parbzeros: list of bzeros for the parameters", "for j in range(_min, _max): if j != _min: num.lshift(output[...,i], 1, output[...,i]) num.add(output[...,i],", "\"\"\"Returns the keyword name parsed from the card image.\"\"\" head = self._getKeyString() if", "the first time and store it in _convert self._convert[indx] = num.array(dummy, type=num.Float64) if", "= 'Unprintable string %s' % repr(val) self.__dict__['_fixable'] = 0 raise ValueError, self._err_text def", "= mo.end(0) dims[int(mo.group(1))-1] = int(mo.group(2)) datasize = reduce(operator.mul, dims[groups:]) size = abs(bitpix) *", "the base class for the TableHDU, ImageHDU, and BinTableHDU classes. \"\"\" def __init__(self,", "of a string rather well, but will accept # strings with an odd", "ValueError, 'Long card image must have CONTINUE cards after the first card.' if", "def __getslice__(self, start, end): _hdus = super(HDUList, self).__getslice__(start,end) result = HDUList(_hdus) return result", "# mappings between FITS and numarray typecodes NumCode = {8:'UInt8', 16:'Int16', 32:'Int32', 64:'Int64',", "eqLoc is None: eqLoc = 7 return self._cardimage[eqLoc+1:] def _check(self, option='ignore'): \"\"\"Verify the", "== '_unique': _unique = {} for i in range(len(self.parnames)): _name = self.parnames[i] if", "(or tuple) containing arrays else: if isinstance(value, (list, tuple)) and len(indx) == len(value):", "a Primary header, it will be written to the beginning of the file.", "stop < start.' 
% input _step = input.step if _step is None: _step", "too if recfmt == _booltype: _out = num.zeros(array.shape, type=recfmt) num.where(array==0, ord('F'), ord('T'), _out)", "ValueError, 'keyword name %s is not a string' % val self.__dict__['key'] = val", "is before column 9, # since there is no way to communicate back", "if clobber: print \"Overwrite existing file '%s'.\" % name os.remove(name) else: raise IOError,", "return hdu def writeHDU(self, hdu): \"\"\"Write *one* FITS HDU. Must seek to the", "return _data, _hdr else: return _data def getval(filename, key, *ext, **extkeys): \"\"\"Get a", "string @param filename: input FITS file name @param ext: The rest of the", "and its representatives may not be used to endorse or promote products derived", "format if isinstance(self._coldefs._recformats[indx], _FormatP): dummy = _VLF([None]*len(self._parent)) dummy._dtype = self._coldefs._recformats[indx]._dtype for i in", "_bytes = (_nch80+1) * Card.length _bytes = _bytes + _padLength(_bytes) if _bytes !=", "_numr_FSC + ') *, *(?P<imag>' + _numr_FSC + ') *\\))' r')? *)' r'(?P<comm_field>'", "'BinTableHDU': attr = [_convert_format(fmt) for fmt in self.formats] elif self._tbtype == 'TableHDU': self._Formats", "then try variable length array except: if isinstance(recfmt, _FormatP): try: _func = lambda", "0 and (not isinstance(self[i], _ExtensionHDU)): err_text = \"HDUList's element %s is not an", "the next FITS block # self._ffo.getfile().write(_padLength(self._size)*'\\0') self.writeComplete = 1 self._ffo.getfile().flush() return self.writeComplete def", "and manipulating their contents. A module for reading and writing Flexible Image Transport", "be in the order of NAXIS's which is the # reverse of the", "files # other than FITS, the close() call can also close the mm", "of rows in the new table fill: if = 1, will fill all", "header associated with 'data', if None, an appropriate header will be created for", "header) hdulist, _ext = _getext(filename, 'update', *ext, **extkeys) hdulist[_ext] = new_hdu hdulist.close() def", "'HIERARCH': self.__class__ = _Hierarch # for card image longer than 80, assume it", "0: headstr = \"%-8s= \" % self.key else: headstr = \"CONTINUE \" valstr", "self._blanks i = nc - 1 if not bottom: for i in range(nc-1,", "not a Column.\" % input.index(col) self.data = [col.copy() for col in input] #", "shdu.write(data) shdu.close() \"\"\" def __init__(self, name, header): \"\"\" Construct a StreamingHDU object given", "arguments are for extension specification. See L{getdata} for explanations/examples. @return: keyword value @rtype:", "= self._get_scale_factors(indx) # for P format if isinstance(self._coldefs._recformats[indx], _FormatP): dummy = _VLF([None]*len(self._parent)) dummy._dtype", "_data def getval(filename, key, *ext, **extkeys): \"\"\"Get a keyword's value from a header", "to the stream. If the provided data would cause the stream to overflow,", "self.ascard.index_of(key, backward=1) self.ascard.insert(_last+1, new_card) except: self.ascard.append(new_card, bottom=1) self._mod = 1 def copy(self): \"\"\"Make", "# flush, to make sure the content is written self.__file.flush() return loc def", "be extra bytes after the last HDU or the file is corrupted.' 
%", "_setvalue(self, val): \"\"\"Set the value attribute.\"\"\" if isinstance(val, (str, int, long, float, complex,", "' ') if imag.group('sign') == None: _val += eval(_idigt)*1j else: _val += eval(imag.group('sign')", "self._convert[indx] = num.array(dummy, type=num.Float64) if _scale: num.multiply(self._convert[indx], bscale, self._convert[indx]) if _zero: self._convert[indx] +=", "only append HDU's which are \"new\" if hdu._new: self.__file.writeHDU(hdu) if (verbose): print \"append", "this HDU.' if _gethdr: _hdr = hdu.header hdulist.close() if _gethdr: return _data, _hdr", "not touched yet, use header info. else: _shape = () _nrows = self.header['NAXIS2']", "the array back to storage values if there is bscale/bzero if isinstance(array, num.NumArray):", "the (latest) scaled array.\"\"\" _dict = {'A':'s', 'I':'d', 'F':'f', 'E':'E', 'D':'E'} # calculate", "1) if (data is DELAYED): return self.data = data # update the header", "are met: 1. Redistributions of source code must retain the above copyright notice,", "= Card('XTENSION', 'IMAGE', 'Image extension') else: c0 = Card('SIMPLE', True, 'conforms to FITS", "if n_ext2 == 0: ext = ext1[0] else: if isinstance(ext1[0], (int, tuple)): raise", "be FITS standard.: %s' % self.key # verify the key, it is never", "by inserting a new '%s' card.\" % keywd if fixable: # use repr", "the data (does not include bscale/bzero for now XXX) _bitpix = self.hdu.header['BITPIX'] code", "_name = self[j].name if isinstance(_name, str): _name = _name.strip().upper() if _name == _key:", "# use repr (not str) in case of control character if Card._keywd_FSC_RE.match(val) is", "[k.upper() for k in self.keys()] else: self._keylist = keylist # find out how", "isinstance(self, GroupsHDU) and j == 0: continue _shape += (self.header['NAXIS'+`j+1`],) _format = self.NumCode[self.header['BITPIX']]", "range(_tfields): del self[name+`i+1`] if issubclass(self._hdutype == TableHDU): for i in range(_tfields): del self['TBCOL'+`i+1`]", "dummy for i in range(len(self._parent)): if self._parent.field(indx)[i].strip() != nullval: dummy[i] = float(self._parent.field(indx)[i].replace('D', 'E'))", "_FormatX): val = `val._nx` + 'X' elif isinstance(val, _FormatP): VLdata = self.data.field(i) VLdata._max", "type of the extension being referenced If the optional keyword 'header' is set", "self.header.get('NAXIS', 0) if naxis < 1000: for j in range(3, naxis+3): self.req_cards('NAXIS'+`j-2`, '==", "SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)", "ext def getheader(filename, *ext, **extkeys): \"\"\"Get the header from an extension of a", "= eval(numr.group('sign')+_digt) elif valu.group('cplx') != None: # Check for numbers with leading 0s.", "'Header size is not multiple of %d: %d' % (_blockLen, len(blocks)) elif (blocks[:8]", "_data = rec.RecArray(_mmap, formats=tmp._recformats, names=tmp.names, shape=tmp._shape) else: _data = rec.array(hdu._file, formats=tmp._recformats, names=tmp.names, shape=tmp._shape)", "after the first one must start with CONTINUE and the whole card must", "[Column(**attrs) for attrs in dict] self.data = tmp else: raise TypeError, \"input to", "raise KeyError, 'data must be numarray or table data.' 
else: hdu=header._hdutype(data=data, header=header) return", "with its value is too long.\" % self.key if len(output) <= Card.length: output", "False # deprecated _INDENT = \" \" DELAYED = \"delayed\" # used for", "after='naxis') _update('naxis2', self.data._shape[0], after='naxis1') _update('tfields', len(_cols), after='gcount') # Wipe out the old table", "raise \"Memory mapping is not implemented for mode `%s`.\" % mode else: if", "IndexError, 'Index %s out of range.' % indx elif isinstance(indx, slice): indx =", "if isinstance (card, Card): super(CardList, self).insert(pos, card) self._keylist.insert(pos, card.key) # update the keylist", "= getattr(num, type) # Determine how to scale the data # bscale and", "the last field elif tbtype == 'TableHDU': (_format, _width) = _convert_ASCII_format(self.formats[i]) if self.starts[i]", "numbers to strings if self._coldefs._tbtype == 'TableHDU': _format = self._coldefs._Formats[indx].strip() _lead = self._coldefs.starts[indx]", "memmap=0): \"\"\"Factory function to open a FITS file and return an HDUList object.", "of the HDU.\"\"\" size = 0 naxis = self.header.get('NAXIS', 0) if naxis >", "If no \"before\" or \"after\" is specified, it will be appended at the", "hdu.header['TFORM'+`i+1`] = key[:key.find('(')+1] + `hdu.data.field(i)._max` + ')' def flush(self, output_verify='exception', verbose=0): \"\"\"Force a", "in the header provided to the class constructor may be written to the", "2', 2, option, _err) self.req_cards('BITPIX', None, 'val == 8', 8, option, _err) self.req_cards('TFIELDS',", "head = self._getKeyString() if isinstance(self, _Hierarch): self.__dict__['key'] = head.strip() else: self.__dict__['key'] = head.strip().upper()", "= 0 elif self.__file.mode == 'update': if not self._resize: # determine if any", "== naxis) and (indx.step == 1): return _WholeLine(naxis, 0) else: if indx.step ==", "offset: break xoffset = offset return list class Header: \"\"\"FITS header class.\"\"\" def", "self.count_blanks() if useblanks: self._use_blanks(card._ncards()) self.count_blanks() self._mod = 1 else: raise SyntaxError, \"%s is", "corresponding to TZERO keyword disp: display format, corresponding to TDISP keyword start: column", "= {'UInt8':8, 'Int16':16, 'Int32':32, 'Int64':64, 'Float32':-32, 'Float64':-64} def __init__(self, data=None, header=None): self._file, self._datLoc", "'\\n%s' % self._cardimage elif option in ['fix', 'silentfix']: result = self._check('parse') self._fixValue(result) if", "only, no need to check key and comment for 'parse' result = Card._value_NFSC_RE.match(self._getValueCommentString())", "NameError, \"Ambiguous key name '%s'.\" % key else: raise NameError, \"Illegal key '%s'.\"", "information of the column definition.\"\"\" \"\"\"The attrib can be one or more of", "* (naxis-len(key)) offset = 0 for i in range(naxis): _naxis = self.hdu.header['NAXIS'+`naxis-i`] indx", "keys: ext = ext2['ext'], ext2['extver'] else: raise KeyError, 'Redundant/conflicting keyword argument(s): %s' %", "= '>= '+`_after` self.req_cards('GCOUNT', _pos, _isInt, 1, option, _err) self.req_cards('PCOUNT', _pos, _isInt, 0,", "self._hdutype = _CorruptedHDU # populate the cardlist self.ascard = CardList(cards) def __getitem__ (self,", "_val = valu.group('bool')=='T' elif valu.group('strg') != None: _val = re.sub(\"''\", \"'\", valu.group('strg')) elif", "class StreamingHDU: \"\"\" A class that provides the capability to stream data to", "dim = '' # set extension name if (name is None) and self.header.has_key('EXTNAME'):", "'A' in _format: _pc = '%-' 
else: _pc = '%' _fmt = '", "= 10 try: eqLoc = self._cardimage[:_limit].index(\"=\") except: eqLoc = None return eqLoc def", "the card image with the specified option. \"\"\" self.__dict__['_err_text'] = '' self.__dict__['_fix_text'] =", "class _ErrList(list): \"\"\"Verification errors list class. It has a nested list structure constructed", "not a header, it (and other positional arguments) are assumed to be the", "isinstance(coldata, _VLF): for i in coldata: if not isinstance(i, chararray.CharArray): if i._type.bytes >", "_pcount = self.header['PCOUNT'] _format = GroupsHDU._dict[self.header['BITPIX']] for i in range(self.header['PCOUNT']): _bscale = self.header.get('PSCAL'+`i+1`,", "unparsable case if input is None: _tmp = self._getValueCommentString() try: slashLoc = _tmp.index(\"/\")", "= '' # continue reading header blocks until END card is reached while", "0, option, _err) _after = self.header['NAXIS'] + 3 # if the card EXTEND", "self.header['NAXIS2'] _ncols = self.header['TFIELDS'] _format = '[' for j in range(_ncols): _format +=", "for each # variable length column if isinstance(self._coldefs._recformats[indx], _FormatP): desc = self._parent.field(indx) desc[:]", "data to be used name: name to be populated in EXTNAME keyword \"\"\"", "ValueError, 'Illegal value %s' % str(val) self.__dict__['value'] = val def _setcomment(self, val): \"\"\"Set", "/ (2.**8 - 1) else: _zero = (max + min) / 2. #", "# print out a message only if there is something if _dummy.strip(): if", "[0] + axes elif isinstance(self.data, num.NumArray): self.header['BITPIX'] = _ImageBaseHDU.ImgCode[self.data.type()] axes = list(self.data.getshape()) axes.reverse()", "#if option != 'silentfix': _text += ' ' + fix_text return _text def", "though the # comment maybe an empty string. _value_FSC_RE = re.compile( r'(?P<valu_field> *'", "= re.compile(_ASCII_text) # Checks for a valid value/comment string. It returns a match", "leading 0s. numr = Card._number_NFSC_RE.match(valu.group('numr')) _digt = numr.group('digt').translate(_fix_table2, ' ') if numr.group('sign') ==", "dtype if dtype == 'a': _nbytes = 1 else: _nbytes = num.getType(dtype).bytes for", "name.\"\"\" _key = self.index_of(key) if isinstance(hdu, (slice, list)): if isinstance(_key, int): raise ValueError,", "_Group(self, row) class _Group(rec.Record): \"\"\"One group of the random group data.\"\"\" def __init__(self,", "KeyError: raise AttributeError(attr) # 0.6.5.5 def size(self): \"\"\"Returns the size (in bytes) of", "one extension with # that name if _ver == None: found = j", "min(size, nrows) if fill: n = 0 (_scale, _zero, bscale, bzero) = hdu.data._get_scale_factors(i)[3:]", "= self.ascard.index_of(key) if comment is not None: _comment = comment else: _comment =", "to TDIM keyword \"\"\" # any of the input argument (except array) can", "_naxis) dims.append(indx.npts) if not isinstance(indx, _WholeLine): raise IndexError, 'Subsection data is not contiguous.'", "1: dummy = [] else: dummy = map(lambda x, y: x-y, self.starts[1:], [1]+self.starts[1:-1])", "of the HDU's in this HDUList.\"\"\" if self.__file is None: _name = '(No", "appropriate header will be created for the data object supplied. 
\"\"\" if not", "its own _mod attribute since it has methods to change # the content", "starting point overlaps to the previous column\" % indx+1 _trail = _loc[indx+1] -", "PCOUNT, for variable length tables _tbsize = hdu.header['NAXIS1']*hdu.header['NAXIS2'] _heapstart = hdu.header.get('THEAP', _tbsize) hdu.data._gap", "case there is extra space after the last HDU or corrupted HDU except", "not os.path.exists(name): self.name, fileheader = urllib.urlretrieve(name) else: self.name = name self.mode = mode", "2, option, _err) self.req_cards('BITPIX', None, 'val == 8', 8, option, _err) self.req_cards('TFIELDS', '==", "self._dtype == 'a': value = chararray.array(value, itemsize=1) else: value = num.array(value, type=self._dtype) objects.ObjectArray.__setitem__(self,", "import operator import __builtin__ import urllib import tempfile import gzip import zipfile import", "binary table column # format spec, i.e. A7 in ASCII table is the", "both header and data are copied.\"\"\" if self.data is not None: _data =", "in wrong order. if isinstance(self, _ExtensionHDU): firstkey = 'XTENSION' firstval = self._xtn else:", "the data was read/created. If \"minmax\", use the minimum and maximum of the", "= self._coldefs.nulls[indx].strip() dummy = num.zeros(len(self._parent), type=_type) dummy[:] = ASCIITNULL self._convert[indx] = dummy for", "def _verify(self, option='warn'): \"\"\"No verification (for now).\"\"\" return _ErrList([]) class _Card_with_continue(Card): \"\"\"Cards having", "existence of a keyword. Returns 1 if found, otherwise, 0. key: keyword name.", "raise ValueError(\"BITPIX not found where expected\") mo = re_gcount.search(block) if mo is not", "get modified. the data is still a \"view\" (for now) hcopy = header.copy()", "return _err class TableHDU(_TableBaseHDU): \"\"\"FITS ASCII table extension HDU class.\"\"\" __format_RE = re.compile(", "It has this two-tier calls because _File has ts own private attribute __file.", "pass class _SinglePoint(_KeyType): pass class _OnePointAxis(_KeyType): pass class _LineSlice(_KeyType): pass class _SteppedSlice(_KeyType): pass", "num import numarray.generic as ndarray import numarray.strings as chararray import numarray.records as rec", "1: ext = ext2['ext'] elif n_ext2 == 2 and 'extver' in keys: ext", "_name else: raise _name, \"exists\" class VerifyError(exceptions.Exception): \"\"\"Verify exception class.\"\"\" pass class _ErrList(list):", "file (and optionally the header). @type filename: string @param filename: input FITS file", "(self.header['GCOUNT'], self.header['PCOUNT']) else: _gcount = '' return \"%-10s %-11s %5d %-12s %s%s\" %", "original (storage) array, # _convert is the scaled (physical) array. self._parent = input", "output file if exists, default = False. \"\"\" if (len(self) == 0): print", "the stream per the header provided in the constructor. 
\"\"\" size = 0", "input: input Boolean array of shape (s, nx) output: output Uint8 array of", "a sequence with %d arrays/numbers.\" % len(indx) def _getitem(self, offset): row = (offset", "[None]*npars if parbzeros is None: parbzeros = [None]*npars if bitpix is None: bitpix", "use the space already in memory else: self.data = raw_data if self._bscale !=", "(repeat, dtype, option) = _parse_tformat(fmt) if reverse == 0: if dtype in _fits2rec.keys():", "_tmpName(oldName) _hduList = open(_name, mode=\"append\") if (verbose): print \"open a temp file\", _name", "import urllib import tempfile import gzip import zipfile import numarray as num import", "is not a Primary header, a default Primary HDU will be inserted at", "is None, use the current data type. option: how to scale the data:", "hdulist[_ext] = new_hdu hdulist.close() def info(filename): \"\"\"Print the summary information on a FITS", "to communicate back to the _keylist. self._checkKey(self.key) # verify the value, it may", "Get the scaling flags and factors for one field. indx is the index", "from the first block of the HDU.\"\"\" re_simple = re.compile(r'SIMPLE =\\s*') re_bitpix =", "cname in _commonNames: attr = getattr(self, cname+'s') del attr[indx] del self._arrays[indx] self._nfields -=", "# skip if there is no match if (keyword in _keyNames): _list.append(i) for", "area is resized.\" break # if the HDUList is resized, need to write", "_CorruptedHDU # populate the cardlist self.ascard = CardList(cards) def __getitem__ (self, key): \"\"\"Get", "keys: if n_ext2 == 1: ext = ext2['ext'] elif n_ext2 == 2 and", "FITS file object (ffo) ffo = _File(name, mode=mode, memmap=memmap) hduList = HDUList(file=ffo) #", "to record format spec. Do the opposite if reverse = 1. \"\"\" fmt", "self._resize = 1 def _verify (self, option='warn'): _text = '' _err = _ErrList([],", "sure to consider the case that the starting column of # a field", "return \"%-10s %-11s\" % (self.name, \"CorruptedHDU\") def verify(self): pass class _ValidHDU(_AllHDU, _Verify): \"\"\"Base", "loc = tmp.rfind('\\nnames=') tmp = tmp[:loc+7] + `self._coldefs.names` + ')' return tmp #", "else: raise KeyError, 'data must be numarray or table data.' else: hdu=header._hdutype(data=data, header=header)", "touched yet, use header info. 
else: _shape = () for j in range(self.header['NAXIS']):", "imag.group('sign') == None: _val += eval(_idigt)*1j else: _val += eval(imag.group('sign') + _idigt)*1j else:", "\"delete the original file\", oldName # reopen the renamed new file with \"update\"", "val>= 0\", 1, option, _err) # verify each card for _card in self.header.ascard:", "= naxis elif isinstance(_stop, (int, long)): _stop = _normalize(_stop, naxis) else: raise IndexError,", "after are None, add to the last occurrence of cards of the same", "range(len(self)): val = getattr(self[i], cname) if val != None: attr[i] = val elif", "hdr = hdu.header hdulist.close() return hdr def getdata(filename, *ext, **extkeys): \"\"\"Get the data", "_Hierarch(Card): \"\"\"Cards begins with HIERARCH which allows keyword name longer than 8 characters.", "range(self.header['PCOUNT']): _bscale = self.header.get('PSCAL'+`i+1`, 1) _bzero = self.header.get('PZERO'+`i+1`, 0) _pnames.append(self.header['PTYPE'+`i+1`].lower()) _cols.append(Column(name='c'+`i+1`, format =", "== 1: return _LineSlice(indx.stop-indx.start, indx.start) else: return _SteppedSlice((indx.stop-indx.start)/indx.step, indx.start) else: raise IndexError, 'Illegal", "['TTYPE', 'TFORM', 'TUNIT', 'TNULL', 'TSCAL', 'TZERO', 'TDISP', 'TBCOL', 'TDIM'] # mapping from TFORM", "name : string The name of the file to which the header and", "size(self): \"\"\"Size (in bytes) of the data portion of the HDU.\"\"\" size =", "the current data type. option: how to scale the data: if \"old\", use", "'_arrays': attr = [col.array for col in self.data] elif name == '_recformats': if", "= xoffset + strlen # collect the pieces in a list tmp =", "# lists of column/field definition common names and keyword names, make # sure", "key and comment for 'parse' result = Card._value_NFSC_RE.match(self._getValueCommentString()) # if not parsable (i.e.", "'' # do the value string valfmt = \"'%-s&'\" val = self.value.replace(\"'\", \"''\")", "exists, default = False. \"\"\" if isinstance(self, _ExtensionHDU): hdulist = HDUList([PrimaryHDU(), self]) elif", "exist.\" % keywd fix_text = \"Fixed by inserting a new '%s' card.\" %", "call. The final offset will be calculated when the file is written. input:", "in cards._keylist and cards['GROUPS'].value == True: self._hdutype = GroupsHDU elif cards[0].value == True:", "contents. A module for reading and writing Flexible Image Transport System (FITS) files.", "raised. \"\"\" if self.writeComplete: raise IOError, \"The stream is closed and can no", "= eval(tbtype)(header=header) if isinstance(input, ColDefs): if input._tbtype == tbtype: tmp = hdu.columns =", "row=0): rec.Record.__init__(self, input, row) def par(self, fieldName): \"\"\"Get the group parameter value.\"\"\" return", "EXTNAME value (if unique): >>> getdata('in.fits', 'sci') >>> getdata('in.fits', extname='sci') # equivalent Note", "ffo = _File(name, mode=mode, memmap=memmap) hduList = HDUList(file=ffo) # read all HDU's while", "\"\"\"Get the header from an extension of a FITS file. @param filename: input", "name @param ext: The rest of the arguments are for extension specification. They", "or _zero: self._convert[i] = pardata[i] else: self._parent.field(i)[:] = pardata[i] (_scale, _zero) = self._get_scale_factors(npars)[3:5]", "is extra space after the last HDU or corrupted HDU except ValueError: print", "case where \"=\" is before column 9, # since there is no way", "bzero takes priority if (bscale != 1 or bzero !=0): _scale = bscale", "be after it. 
[Shuffled n-gram shingles extracted from the PyFITS source (pyfits.py). The recoverable content spans: the module docstring (FITS as the NASA-mandated format for high energy astrophysics data) and the AURA license text; the Card, _Card_with_continue, _Hierarch, CardList, and Header classes, including card-image parsing, CONTINUE/HIERARCH handling, and verification (_Verify, VerifyError, _ErrList); the HDU class hierarchy (_AllHDU, _CorruptedHDU, _ValidHDU, _ExtensionHDU, _ImageBaseHDU, PrimaryHDU, ImageHDU, GroupsHDU, _TableBaseHDU, TableHDU, BinTableHDU, _TempHDU); table support (Column, ColDefs, FITS_rec, GroupData, _VLF, TFORM/TDISP parsing and format conversion); the HDUList container, the _File I/O layer with memory mapping and urllib retrieval, and a KeyboardInterrupt handler; and the convenience functions getdata, getheader, writeto, append, update, open, and info. The shingles are unordered, so the original source layout is not recoverable from this listing.]
if keylist is", "not self.header.has_key('PCOUNT'): dim = self.header['NAXIS'] if dim == 0: dim = '' else:", "extver if hdu.name == '': hdu.name, hdu._extver = hdu._getname() elif hdu.name == 'PRIMARY':", "# strings with an odd number of single quotes, # instead of issuing", "isinstance(tmp._arrays[i], chararray.CharArray): hdu.data._parent.field(i)[:n] = tmp._arrays[i][:n] else: hdu.data._convert[i] = num.zeros(nrows, type=tmp._arrays[i].type()) if _scale or", "first, so in the example in (a), field('abc') will get the first field,", "is one single word which is longer than strlen, then it will be", "%d in the ColDefs input is not a Column.\" % input.index(col) self.data =", "location and value of mandatory keywords. naxis = self.header.get('NAXIS', 0) self.req_cards('PCOUNT', '== '+`naxis+3`,", "\"\"\"Get all histories as a list of string texts.\"\"\" output = [] for", "key, value, and (optionally) comment. Any specifed arguments, except defaults, must be compliant", "j != _min: num.lshift(output[...,i], 1, output[...,i]) num.add(output[...,i], input[...,j], output[...,i]) # shift the unused", "WholeLine must be WholeLine or # OnePointAxis if isinstance(indx, (_WholeLine, _LineSlice)): dims.append(indx.npts) break", "its ._parent def __getitem__(self, key): tmp = rec.RecArray.__getitem__(self, key) if isinstance(key, slice): out", "[] for _card in self.ascardlist(): if _card.key == 'COMMENT': output.append(_card.value) return output def", "to be scaled too if recfmt == _booltype: _out = num.zeros(array.shape, type=recfmt) num.where(array==0,", "= num.array(dummy, type=num.Float64) if _scale: num.multiply(self._convert[indx], bscale, self._convert[indx]) if _zero: self._convert[indx] += bzero", "self._checkKey(self.key) # verify the value, it may be fixable result = Card._value_FSC_RE.match(self._getValueCommentString()) if", "if tbtype == 'TableHDU': for i in range(len(self)): (type, width) = _convert_ASCII_format(self.data[i].format) if", "in extkeys: _gethdr = extkeys['header'] del extkeys['header'] else: _gethdr = False hdulist, _ext", "= longstring + _comm.rstrip() + ' ' self.__dict__[name] = longstring.rstrip() def _breakup_strings(self): \"\"\"Break", "data area # data area size, including padding hdu._datSpan = _size + _padLength(_size)", "+= self._heapsize self._heapsize += desc[:,0].sum()*_dtype.bytes # conversion for both ASCII and binary tables", "# that name if _ver == None: found = j nfound += 1", "HDU class.\"\"\" def __init__(self, data=None, header=None, name=None): \"\"\" header: header to be used", "return self def _ncards(self): return len(self._cardimage) / Card.length def _verify(self, option='warn'): \"\"\"Card class", "else: del self.header['BZERO'] if _scale != 1: self.data /= _scale self.header.update('BSCALE', _scale) else:", "order. if isinstance(self, _ExtensionHDU): firstkey = 'XTENSION' firstval = self._xtn else: firstkey =", "function to open a FITS file and return an HDUList object. name: Name", "be the column right after the last field if self._tbtype == 'TableHDU': last_end", "when created. Also check the card's value by using the \"test\" argument. 
\"\"\"", "if self.__dict__.has_key('key') or self.__dict__.has_key('_cardimage'): if isinstance(self, _Hierarch): keyStr = 'HIERARCH %s ' %", "the next FITS block.\"\"\" return (_blockLen - stringLen%_blockLen) % _blockLen def _tmpName(input): \"\"\"Create", "_ExtensionHDU else: self._hdutype = _ValidHDU except: self._hdutype = _CorruptedHDU # populate the cardlist", "tmp.rfind('\\nnames=') tmp = tmp[:loc+7] + `self._coldefs.names` + ')' return tmp # synchronize the", "'': break else: break hdu._raw += block _size, hdu.name = hdu._getsize(hdu._raw) # get", "i._type.bytes > 1: if i._byteorder != 'big': i.byteswap() i._byteorder = 'big' else: if", "# In case the FITS_rec was created in a LittleEndian machine hdu.data._byteorder =", "value. \"\"\" hdr = self[0].header if hdr.has_key('extend'): if (hdr['extend'] == False): hdr['extend'] =", "if name == 'key': raise SyntaxError, 'keyword name cannot be reset.' elif name", "tmp._arrays[i][:n] if n < nrows: if tbtype == 'BinTableHDU': if isinstance(hdu.data._parent.field(i), num.NumArray): #", "Standards and Technology publication, NOST 100-2.0. License: http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE For detailed examples of usage,", "EXTNAMEs are not case sensitive By combination of EXTNAME and EXTVER, as separate", "def __getattr__(self, attr): if attr == 'data': self.__dict__[attr] = self.field('data') elif attr ==", "'': hdu.name, hdu._extver = hdu._getname() elif hdu.name == 'PRIMARY': hdu._extver = 1 hdu._file", "asked for \"PyFITS\" \"\"\" import re, os, tempfile, exceptions import operator import __builtin__", "hdulist = HDUList([PrimaryHDU(), self]) elif isinstance(self, PrimaryHDU): hdulist = HDUList([self]) hdulist.writeto(name, output_verify, clobber=clobber)", "list = attrib.split(',') for i in range(len(list)): list[i]=list[i].strip().lower() if list[i][-1] == 's': list[i]=list[i][:-1]", "end with two single quotes, # whereas it should not end with an", "for i in comm_list: commstr = \"CONTINUE '&' / \" + commfmt %", "the indexing. _list = [] for i in range(len(self.header.ascard)-1,-1,-1): _card = self.header.ascard[i] _key", "return list class Header: \"\"\"FITS header class.\"\"\" def __init__(self, cards=[]): \"\"\"Construct a Header", "_pc = '%' _fmt = ' '*_lead + _pc + _format[1:] + _dict[_format[0]]", "\"\"\" def __str__(self): \"\"\"Format a list of cards into a printable string.\"\"\" kard", "a list tmp = input[xoffset:offset] list.append(tmp) if len(input) == offset: break xoffset =", "gcount * (pcount + size) / 8 return size def copy(self): \"\"\"Make a", "_scale: self.header.update('PSCAL'+`i+1`, self.data._coldefs.bscales[i]) if _zero: self.header.update('PZERO'+`i+1`, self.data._coldefs.bzeros[i]) def __getattr__(self, attr): \"\"\"Get the data", "self.__dict__[attr] = data elif attr == 'columns': class_name = str(self.__class__) class_name = class_name[class_name.rfind('.')+1:]", "include Object array because there is no guarantee # the elements in the", "if isinstance(key, (int, slice)): return key elif isinstance(key, tuple): _key = key[0] _ver", "nothing to write.\" return self.update_tbhdu() if output_verify == 'warn': output_verify = 'exception' self.verify(option=output_verify)", "I{PyFITS User's Manual} available from U{http://www.stsci.edu/resources/software_hardware/pyfits/Users_Manual1.pdf} Epydoc markup used for all docstrings in", "the last example, field('Abc') will cause an exception since there is no unique", "append and update modes only). 
output_verify: output verification option, default = 'exception'. verbose:", "1 if option == 'ignore': return elif option == 'parse': # check the", "incorrect type\" # set extension name if not name and self.header.has_key('EXTNAME'): name =", "i in range(_tfields): del self[name+`i+1`] if issubclass(self._hdutype, BinTableHDU): for name in ['TDISP', 'TDIM',", "= cards.index_of(keywd) except: _index = None fixable = fix_value is not None #", "hdu def __getitem__(self, key): dims = [] if not isinstance(key, tuple): key =", "the Primary header provided into an image # extension header. # self.header.update('XTENSION','IMAGE','Image extension',", "elif dtype == 'P': output_format = _FormatP('2i4') output_format._dtype = _fits2rec[option[0]] elif dtype ==", "'extver' in keys: ext = ext1[0], ext2['extver'] raise KeyError, 'Redundant/conflicting keyword argument(s): %s'", "area hdu._datLoc = self.__file.tell() # beginning of the data area # data area", "else: raise SyntaxError, \"%s is not a Card\" % str(card) def _pos_insert(self, card,", "= '%-20s' % valStr # must be before int checking since bool is", "will have a zero offset for all columns after this call. The final", "IOError exceptions to be raised when a file specified by a URL cannot", "@type filename: string @param filename: input FITS file name @param ext: The rest", "case of control character if Card._keywd_FSC_RE.match(val) is None: self.__dict__['_err_text'] = 'Illegal keyword name", "self._check(option='parse') if name == 'value': if valu is None: raise ValueError, \"Unparsable card,", "\"\"\"Add a commentary card. If before and after are None, add to the", "and val >= 1 and val <= 999\", 1, option, _err) self.req_cards('NAXIS1', '==", "has been written to the stream. Notes ----- Only the amount of data", "can also be keyword arguments. 
For example: >>> update(file, dat, hdr, 'sci') #", "return self+column def del_col(self, col_name): \"\"\"Delete (the definition of) one Column.\"\"\" indx =", "in range(len(self._parent)): if self._parent.field(indx)[i].strip() != nullval: dummy[i] = float(self._parent.field(indx)[i].replace('D', 'E')) else: dummy =", "all table HDU's for scaled fields.\"\"\" for hdu in self: if 'data' in", "> 1: if i._byteorder != 'big': i.byteswap() i._byteorder = 'big' else: if coldata._type.bytes", "write of the HDUList back to the file (for append and update modes", "in range(_tfields): del self['TBCOL'+`i+1`] except: pass class CardList(list): \"\"\"FITS header card list class.\"\"\"", "'' self.__dict__['_fix_text'] = '' self.__dict__['_fixable'] = 1 if option == 'ignore': return elif", "keyList.append(_key) if _key == 'END': break def _readHDU(self): \"\"\"Read the skeleton structure of", "loc def writeHDUdata(self, hdu): \"\"\"Write FITS HDU data part.\"\"\" self.__file.flush() loc = self.__file.tell()", "usage, see the I{PyFITS User's Manual} available from U{http://www.stsci.edu/resources/software_hardware/pyfits/Users_Manual1.pdf} Epydoc markup used for", "self.header[keywd] if not eval(test): err_text = \"'%s' card has invalid value '%s'.\" %", "+= self._bzero # delete the keywords BSCALE and BZERO after scaling del self.header['BSCALE']", "= strlen + size else: strfmt = '>' + strfmt[:-1] return strfmt '''", "make sure option is integer else: _repeat = '' if repeat != 1:", "npts: indx = npts return indx _start = input.start if _start is None:", "self.update_header() self._bitpix = self.header['BITPIX'] # delete the keywords BSCALE and BZERO del self.header['BSCALE']", "+= -_zero # 0.9.6.3 to avoid out of range error for BZERO =", "\"\"\"Get the index of a keyword in the CardList. key: the keyword name", "else: raise NameError, \"Illegal key '%s'.\" % `key` return indx def _unwrapx(input, output,", "standard format for storing high energy astrophysics data. For details of the FITS", "output.append(_card.value) return output def _add_commentary(self, key, value, before=None, after=None): \"\"\"Add a commentary card.", "the data bzero: BZERO of the data parbscales: list of bscales for the", "cards['GROUPS'].value == True: self._hdutype = GroupsHDU elif cards[0].value == True: self._hdutype = PrimaryHDU", "0 # value for ASCII table cell with value = TNULL # this", "if _cardList[_where+nc]._cardimage[:10].upper() != 'CONTINUE ': break # combine contiguous CONTINUE cards with its", "TFORM value into data type and width. try: (dtype, width) = _re.match(input_format.strip()).groups() dtype", "header info. else: _shape = () _nrows = self.header['NAXIS2'] _ncols = self.header['TFIELDS'] _format", "# verify the key, it is never fixable # always fix silently the", "file, # delete the original file, and rename the tmp to the original", "\"PyFITS\" \"\"\" import re, os, tempfile, exceptions import operator import __builtin__ import urllib", "accepted. An attempt to write more data after the stream has been filled", "data : NumArray Data to stream to the file. :Returns: writeComplete : integer", "attributes, some of them may not exist for name in ['key', 'value', 'comment',", "def writeto(self, name, output_verify='exception', clobber=False): \"\"\"Write the HDU to a new file. 
This", "if input._tbtype == tbtype: tmp = hdu.columns = input else: raise ValueError, 'column", "The number of bytes of data required to fill the stream per the", "self.field(i) return result def setpar(self, parName, value): \"\"\"Set the group parameter values.\"\"\" if", "'_pcount': self.__dict__[attr] = self.header.get('PCOUNT', 0) try: return self.__dict__[attr] except KeyError: raise AttributeError(attr) def", "new file with \"update\" mode os.rename(_name, oldName) ffo = _File(oldName, mode=\"update\", memmap=oldMemmap) self.__file", "BinTableHDU(_TableBaseHDU): \"\"\"Binary table HDU class.\"\"\" def __init__(self, data=None, header=None, name=None): \"\"\"data: data of", "use a throw-away format tmp.__dict__=self.__dict__.copy() return tmp class ColDefs(object): \"\"\"Column definitions class. It", "# if the keyword EXTVER does not exist, default it to 1 _extver", "of NAXIS's which is the # reverse of the numarray shape if isinstance(self,", "repeat = 1 else: repeat = eval(repeat) return (repeat, dtype, option) def _convert_format(input_format,", "format format = _convert_format(recfmt, reverse=1) except: raise ValueError, \"Illegal format `%s`.\" % format", "C/Python does). for i in range(len(dummy)): x = _fmt % dummy[i] if len(x)", "(HDUList, list)): raise \"Invalid input for HDUList.\" for hdu in hdus: if not", "name = self.header['EXTNAME'] self.name = name def _verify(self, option='warn'): \"\"\"ImageHDU verify method.\"\"\" _err", "_key = _tdef_re.match(_card.key) try: keyword = _key.group('label') except: continue # skip if there", "_comm = _card.comment if isinstance(_comm, str) and _comm != '': longstring = longstring", "bzero=None, parbscales=None, parbzeros=None): \"\"\"input: input data, either the group data itself (a numarray)", "raised when a file specified by a URL cannot be accessed\"\"\" def http_error_default(self,", "A class that provides the capability to stream data to a FITS file", "their contents. A module for reading and writing Flexible Image Transport System (FITS)", "= re_naxis.search(block) if mo is not None: naxis = int(mo.group(1)) pos = mo.end(0)", "hdu.header.ascard._mod = 0 hdu._new = 0 hdu._file = ffo.getfile() # if not resized,", "= self._unique[parName] if len(indx) == 1: self.field(indx[0])[:] = value # if more than", "values of mandatory keywords. self.req_cards('NAXIS', '== 2', _isInt+\" and val >= 1 and", "else: _val += eval(imag.group('sign') + _idigt)*1j else: _val = UNDEFINED self.__dict__['value'] = _val", "is smaller than specified data size. File may have been truncated.' hdu._ffile =", "del self['NAXIS'+`i+1`] if issubclass(self._hdutype, PrimaryHDU): del self['EXTEND'] del self['PCOUNT'] del self['GCOUNT'] if issubclass(self._hdutype,", "If =1, the card will be appended after the last non-blank card. \"\"\"", "[None]*npars if bitpix is None: bitpix = _ImageBaseHDU.ImgCode[input.type()] fits_fmt = GroupsHDU._dict[bitpix] # -32", "last field elif tbtype == 'TableHDU': (_format, _width) = _convert_ASCII_format(self.formats[i]) if self.starts[i] is", "fix_text=fix_text, fix=fix) _err.append(_text) # each element calls their own verify for i in", "_text += ' ' + fix_text return _text def verify (self, option='warn'): \"\"\"Wrapper", "both ASCII and binary tables if _number and (_scale or _zero): # only", "% key self._resize = 1 def __delitem__(self, key): \"\"\"Delete an HDU from the", "%s is too long (> 8), use HIERARCH.' % val else: raise ValueError,", "= self.columns.formats # if data is not touched yet, use header info. 
else:", "'' for i in range(len(kard)/80): output += kard[i*80:(i+1)*80] + '\\n' return output[:-1] def", "*= bscale if _zero: _arr += bzero hdu.data._convert[i][:n] = _arr else: hdu.data._parent.field(i)[:n] =", "self.header._hdutype = GroupsHDU self.name = name if self.header['NAXIS'] <= 0: self.header['NAXIS'] = 1", "checking is specified if test: val = self.header[keywd] if not eval(test): err_text =", "length is 80 else: strlen = _len % Card.length return input + '", "there is no match if (keyword in _keyNames): col = eval(_key.group('num')) if col", "str(value) def __delitem__(self, key): \"\"\"Delete a Card from the CardList.\"\"\" _key = self.index_of(key)", "self.data = num.array(num.around(self.data), type=_type) #0.7.7.1 class PrimaryHDU(_ImageBaseHDU): \"\"\"FITS primary HDU class.\"\"\" def __init__(self,", "for reading and writing Flexible Image Transport System (FITS) files. This file format", "err_text = \"'%s' card has invalid value '%s'.\" % (keywd, val) fix_text =", "be returned. \"\"\" try: return self[key] except: return default def update(self, key, value,", "self._bzero # delete the keywords BSCALE and BZERO after scaling del self.header['BSCALE'] del", "there is no boolean in ASCII table _number = not(_bool or _str) bscale", "hdu=None, field=None): self.hdu = hdu self.field = field # translation table for floating", "HDU except ValueError: print 'Warning: Required keywords missing when trying to read HDU", "__init__(self, hdus=[], file=None): \"\"\"Construct a HDUList object. hdus: Input, can be a list", "%s\\nNo. Name Type\"\\ \" Cards Dimensions Format\\n\" % _name for j in range(len(self)):", "for hdu in self: hdu.header._mod = 0 hdu.header.ascard._mod = 0 if singleThread: if", "in update()] after: [same as in update()] \"\"\" self._add_commentary(' ', value, before=before, after=after)", "itemsize=1) else: data_output[i] = num.array(input[i], type=dtype) desp_output[i,0] = len(data_output[i]) desp_output[i,1] = _offset _offset", "x, y: x-y, self.starts[1:], [1]+self.starts[1:-1]) dummy.append(self._width-self.starts[-1]+1) attr = map(lambda y: 'a'+`y`, dummy) elif", "# TFORM regular expression _tformat_re = re.compile(r'(?P<repeat>^[0-9]*)(?P<dtype>[A-Za-z])(?P<option>[!-~]*)') # table definition keyword regular expression", "the attributes listed in _commonNames. The default is \"all\" which will print out", "== 8', 8, option, _err) self.req_cards('TFIELDS', '== 7', _isInt+\" and val >= 0", "value, comment=None, before=None, after=None): \"\"\"Update one header card.\"\"\" \"\"\" If the keyword already", "may span across blocks. 
\"\"\" if len(block) != _blockLen: raise IOError, 'Block length", "HDUList input is not an HDU.\" % hdus.index(hdu) list.__init__(self, hdus) def __iter__(self): return", "'Can not rename to CONTINUE' if newkey in Card._commentaryKeys or oldkey in Card._commentaryKeys:", "issubclass(self._hdutype, _TableBaseHDU): del self['TFIELDS'] for name in ['TFORM', 'TSCAL', 'TZERO', 'TNULL', 'TTYPE', 'TUNIT']:", "\"HDUList can only append an HDU\" # make sure the EXTEND keyword is", "HDU classes ------------------------------------ class _AllHDU: \"\"\"Base class for all HDU (header data unit)", "return self.data = data # update the header self.update_header() self._bitpix = self.header['BITPIX'] #", "multiple of 80.\"\"\" _len = len(input) if _len == Card.length: return input elif", "def _makehdu(data, header): if header is None: if isinstance(data, num.NumArray): hdu = ImageHDU(data)", "appended at the end, even if there are blank cards in front of", "# construct a list of cards of minimal header _list = CardList([ Card('XTENSION',", "value) _key = self._cardimage[:8].strip().upper() if _key in Card._commentaryKeys: eqLoc = None else: if", "in dict] self.data = tmp else: raise TypeError, \"input to ColDefs must be", "backward = 1, search from the end. \"\"\" if isinstance(key, (int, long)): return", "if isinstance(val, (str, int, long, float, complex, bool, Undefined)): if isinstance(val, str): self._checkText(val)", "None: # Check for numbers with leading 0s. numr = Card._number_NFSC_RE.match(valu.group('numr')) _digt =", "+32768 self.header.update('BZERO', _zero) else: del self.header['BZERO'] if _scale != 1: self.data /= _scale", "ValueError, self._err_text def _checkKey(self, val): \"\"\"Verify the keyword to be FITS standard.\"\"\" #", "in the HDUList must be an HDU.\" for item in hdu: if not", "'silentfix'] and x.find('Unfixable') != -1: raise VerifyError, '\\n'+x if (_option != \"silentfix\") and", "(_index, _index, insert_pos) _err.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix)) # if value checking is specified", "def __init__(self, data=None, header=None, name=None): \"\"\"data: data of the table header: header to", "input): \"\"\"Construct a Card object from a (raw) string. It will pad the", "interput handler keyboardInterruptSent = False def New_SIGINT(*args): print \"KeyboardInterrupt ignored until flush is", "_digits_FSC = r'(\\.\\d+|\\d+(\\.\\d*)?)([DE][+-]?\\d+)?' _digits_NFSC = r'(\\.\\d+|\\d+(\\.\\d*)?) *([deDE] *[+-]? *\\d+)?' _numr_FSC = r'[+-]?' +", "are blank cards in front of END. 
bottom: If =0 (default) the card", "as num import numarray.generic as ndarray import numarray.strings as chararray import numarray.records as", "# 0.9.6.3 to avoid out of range error for BZERO = +32768 self.header.update('BZERO',", "cards after the first one must start with CONTINUE and the whole card", "output = hdu.data output.tofile(self.__file) _size = output.nelements() * output._itemsize # write out the", "ValueError, \"column `%s` ending point overlaps to the next column\" % indx+1 if", "Card.length return input + ' ' * (Card.length-strlen) def _floatFormat(value): \"\"\"Format the floating", "NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS", "hdu = hdulist[1] _data = hdu.data except IndexError: raise IndexError, 'No data in", "extension.\"\"\" hdulist = open(filename, mode=mode) n_ext1 = len(ext1) n_ext2 = len(ext2) keys =", "isinstance(self.value, str): if self.value == '': valStr = \"''\" else: _expValStr = self.value.replace(\"'\",\"''\")", "_update('tfields', len(_cols), after='gcount') # Wipe out the old table definition keywords. Mark them", "new_value def change_name(self, col_name, new_name): \"\"\"Change a Column's name.\"\"\" if new_name != col_name", "the ASCII table and binary table column # format spec, i.e. A7 in", "def __init__(self, key='', value='', comment=''): \"\"\"Construct a card from key, value, and (optionally)", "\" Cards Dimensions Format\\n\" % _name for j in range(len(self)): results = results", "if val != None: attr[i] = val elif name == '_arrays': attr =", "Uint8 array of shape (s, nbytes) nx: number of bits \"\"\" output[...] =", "= `hdu._dimShape()[:-1]` + tmp._dat_format if hdu._ffile.memmap: _mmap = hdu._ffile._mm[hdu._datLoc:hdu._datLoc+hdu._datSpan] _data = rec.RecArray(_mmap, formats=tmp._recformats,", "NOST 100-2.0. License: http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE For detailed examples of usage, see the I{PyFITS User's", "array of shape (s, nx) output: output Uint8 array of shape (s, nbytes)", "the file does already exist, but the provided header represents a Primary header,", "BZERO values. \"\"\" if self.data is None: return # Determine the destination (numarray)", "= self.header.ascard[i] _key = _tdef_re.match(_card.key) try: keyword = _key.group('label') except: continue # skip", "= '' for cname in _commonNames: value = getattr(self, cname) if value !=", "comment=None, before=None, after=None): \"\"\"Update one header card.\"\"\" \"\"\" If the keyword already exists,", "hdulist = HDUList([PrimaryHDU()]) hdulist.writeto(name, 'exception') else: if self.header.has_key('SIMPLE') and os.path.getsize(name) > 0: #", "card. If before and after are None, add to the last occurrence of", "= GroupsHDU._dict[bitpix] # -32 -> 'E' _fmt = _fits2rec[fits_fmt] # 'E' -> 'f4'", "table data.' else: hdu=header._hdutype(data=data, header=header) return hdu def writeto(filename, data, header=None, **keys): \"\"\"Create", "key else: # multiple match raise NameError, \"Ambiguous key name '%s'.\" % key", "only swap unswapped # deal with var length table if isinstance(coldata, _VLF): for", "# this handles ['abc'] and [['a','b','c']] # equally, beautiful! _func = lambda x:", "abs(bitpix) * gcount * (pcount + size) / 8 return size def close(self):", "be written at once. 
The following psudo code illustrates its use: header =", "try: return self.__dict__[attr] except KeyError: raise AttributeError(attr) def par(self, parName): \"\"\"Get the group", "FITS file name @type: string @param ext: The rest of the arguments are", "CONTINUE if the string value can fit in one line. # Instead, just", "front of END. \"\"\" if isinstance (card, Card): super(CardList, self).insert(pos, card) self._keylist.insert(pos, card.key)", "is not None or self.key in Card._commentaryKeys: return result else: if option in", "self.key eqStr = '' if self.__dict__.has_key('value'): valStr = str(self.value) # put all parts", "val <= 999\", 0, option, _err) naxis = self.header.get('NAXIS', 0) if naxis <", "ASCII column has no width, add one if tbtype == 'TableHDU': for i", "\"\"\"Get an HDU from the HDUList, indexed by number or name.\"\"\" key =", "use repr (not str) in case of control character if Card._keywd_FSC_RE.match(val) is None:", "== '': nlines = len(comm) / comm_len + 1 comm_list = self._words_group(comm, comm_len)", "list of (numeric) arrays. parnames: list of parameter names. bscale: BSCALE of the", "reduce(operator.add, map(Card._ncards, hdu.header.ascard)) _bytes = (_nch80+1) * Card.length _bytes = _bytes + _padLength(_bytes)", "len(block)) elif (blocks[:8] not in ['SIMPLE ', 'XTENSION']): raise IOError, 'Block does not", "self.__dict__['_valuestring'] = _valStr self._ascardimage() def _locateEq(self): \"\"\"Locate the equal sign in the card", "never fixable if result is not None: _str = result.group('comm') if _str is", "(not isinstance(self[i], _ExtensionHDU)): err_text = \"HDUList's element %s is not an extension HDU.\"", "and field('ABC') will get the second field. If there is no exact name", "else: _limit = 10 try: eqLoc = self._cardimage[:_limit].index(\"=\") except: eqLoc = None return", "8 return size def copy(self): \"\"\"Make a copy of the HDU, both header", "the format of an ASCII column has no width, add one if tbtype", "for the unparsable case if input is None: _tmp = self._getValueCommentString() try: slashLoc", "singleThread = (threading.activeCount() == 1) and (threadName.getName() == 'MainThread') if singleThread: # Define", "a FITS file. @param filename: input FITS file name @type: string @param ext:", "range(len(self._parent)): if self._parent.field(indx)[i].strip() != nullval: dummy[i] = float(self._parent.field(indx)[i].replace('D', 'E')) else: dummy = self._parent.field(indx)", "and can no longer be written\" curDataSize = self._ffo.getfile().tell() - self._datLoc if curDataSize", "self.__file.writeHDUdata(hdu) if (verbose): print \"update data in place: Name =\", hdu.name, _extver #", "header, and put each card into a list of cards. 
Will deal with", "getdata('in.fits', 'sci', 2) # EXTNAME='SCI' & EXTVER=2 >>> getdata('in.fits', extname='sci', extver=2) # equivalent", "string @param filename: input FITS file name @type key: string @param key: keyword", "= self.__file.tell() if isinstance(hdu, BinTableHDU): self.__file.write(hdu.data._gap*'\\0') for i in range(hdu.data._nfields): if isinstance(hdu.data._coldefs._recformats[i], _FormatP):", "bscale, bzero) = self._get_scale_factors(indx) # for P format if isinstance(self._coldefs._recformats[indx], _FormatP): dummy =", "= Card(key, value, _comment) elif before != None or after != None: _card", "data elif attr == 'columns': _cols = [] _pnames = [] _pcount =", "_Zero() elif 'ext' in keys: if n_ext2 == 1: ext = ext2['ext'] elif", "is created from files # other than FITS, the close() call can also", "== 1 and 'extver' in keys: ext = ext1[0], ext2['extver'] raise KeyError, 'Redundant/conflicting", "output FITS file name to be written to. output_verify: output verification option, default", "self.__dict__['_fixable'] = 0 raise ValueError, self._err_text def _checkKey(self, val): \"\"\"Verify the keyword to", "dtype, option) = _tformat_re.match(tform.strip()).groups() except: print 'Format \"%s\" is not recognized.' % tform", "_keyNames): _list.append(i) for i in _list: del self.header.ascard[i] del _list # populate the", ":Parameters: None :Returns: None \"\"\" self._ffo.close() class ErrorURLopener(urllib.FancyURLopener): \"\"\"A class to use with", "the arguments are for extension specification. See L{getdata} for explanations/examples. @rtype: L{Header} object", "[same as in update()] after: [same as in update()] \"\"\" self._add_commentary(' ', value,", "'ext' in keys: if n_ext2 == 1: ext = ext2['ext'] elif n_ext2 ==", "verifications at different class levels. \"\"\" def __init__(self, val, unit=\"Element\"): list.__init__(self, val) self.unit", "\"ignore\"): return x = str(self._verify(_option)).rstrip() if _option in ['fix', 'silentfix'] and x.find('Unfixable') !=", "1 elif isinstance(_step, (int, long)): if _step <= 0: raise IndexError, 'Illegal slice", "if self.data._type != _type: self.data = num.array(num.around(self.data), type=_type) #0.7.7.1 class PrimaryHDU(_ImageBaseHDU): \"\"\"FITS primary", "= _loc[indx+1] - _width[indx] - self._coldefs.starts[indx] if _trail < 0: raise ValueError, \"column", "strings if self._coldefs._tbtype == 'TableHDU': _format = self._coldefs._Formats[indx].strip() _lead = self._coldefs.starts[indx] - _loc[indx]", "CardList(cards) def __repr__(self): \"\"\"Format a list of cards into a string.\"\"\" block =", "one single word which is longer than strlen, then it will be split", "None self.__dict__[attr] = data elif attr == 'columns': _cols = [] _pnames =", "0 try: return self.__dict__[attr] except KeyError: raise AttributeError(attr) # 0.6.5.5 def size(self): \"\"\"Returns", "touch the data, so it's defined (in the case of reading from a", "use data info. if 'data' in dir(self): if self.data is None: _shape, _format", "(equal sign not at column 8).' raise ValueError, self._err_text, '\\n%s' % self._cardimage elif", "instead of in the respective HDU classes, # so the checking is in", "doing anything. 
_ImageBaseHDU.__init__(self, data=data, header=header) self._xtn = 'IMAGE' self.header._hdutype = ImageHDU # insert", "classes, # so the checking is in order, in case of required cards", "self._size = self.__file.tell() self.__file.seek(0) def __getattr__(self, attr): \"\"\"Get the _mm attribute.\"\"\" if attr", "commentary card must be a string' else: self.__dict__['_cardimage'] = ' '*80 def __repr__(self):", "= self._words_group(comm, comm_len) for i in comm_list: commstr = \"CONTINUE '&' / \"", "after='NAXIS'+`len(axes)`) self.header.update('PCOUNT', len(self.data.parnames), after='GROUPS') self.header.update('GCOUNT', len(self.data), after='PCOUNT') npars = len(self.data.parnames) (_scale, _zero) =", "[other] elif isinstance(other, ColDefs): b = list(other.data) else: raise TypeError, 'Wrong type of", "None: _comment = comment else: _comment = self.ascard[j].comment self.ascard[j] = Card(key, value, _comment)", "the _File class. It has this two-tier calls because _File has ts own", "'*8 # value string # check if both value and _cardimage attributes are", "_list = CardList([ c0, Card('BITPIX', 8, 'array data type'), Card('NAXIS', 0, 'number of", "attributes corresponding to the Column attributes (e.g. ColDefs has the attribute .names while", "to be added. before: [same as in update()] after: [same as in update()]", "if naxis > 1: return _SinglePoint(1, indx) elif naxis == 1: return _OnePointAxis(1,", "starting position (ASCII table only), corresponding to TBCOL keyword dim: column dimension corresponding", "for all HDU (header data unit) classes.\"\"\" pass class _CorruptedHDU(_AllHDU): \"\"\"A Corrupted HDU", "data portion of the HDU.\"\"\" size = 0 naxis = self.header.get('NAXIS', 0) if", "len(self) for i in range(len(self)): val = getattr(self[i], cname) if val != None:", "index. newkey: new keyword, must be a string. force: if new key name", "% _text else: exec(fix) #if option != 'silentfix': _text += ' ' +", "_pos_insert(self, card, before, after, useblanks=1): \"\"\"Insert a Card to the location specified by", "[col.copy() for col in input.data] # if the input is a list of", "have the same name else: result = self.field(indx[0]).astype('f8') for i in indx[1:]: result", "list of bzeros for the parameters \"\"\" if isinstance(input, num.NumArray): _formats = ''", "for arr in tmp._arrays: if arr is not None: dim = arr._shape[0] else:", "hdulist.close() if _gethdr: return _data, _hdr else: return _data def getval(filename, key, *ext,", "_mmap = hdu._ffile._mm[hdu._datLoc:hdu._datLoc+hdu._datSpan] _data = rec.RecArray(_mmap, formats=tmp._recformats, names=tmp.names, shape=tmp._shape) else: _data = rec.array(hdu._file,", "value when created. Also check the card's value by using the \"test\" argument.", "# try to convert to a numarray first array = num.array(array) except: try:", "chararray import numarray.records as rec import numarray.objects as objects import numarray.memmap as Memmap", "_digits_NFSC + ')') # FSC commentary card string which must contain printable ASCII", "pos, test, fix_value, option, errlist): \"\"\"Check the existence, location, and value of a", "to use an independent # attribute of mmobject so if the HDUList object", "OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE", "portion of the HDU. 
:Parameters: None :Returns: size : integer The number of", "from the card image.\"\"\" longstring = '' ncards = self._ncards() for i in", "self.header.update('PSCAL'+`i+1`, self.data._coldefs.bscales[i]) if _zero: self.header.update('PZERO'+`i+1`, self.data._coldefs.bzeros[i]) def __getattr__(self, attr): \"\"\"Get the data attribute.\"\"\"", "raise ValueError, \"number `%s` does not fit into the output's itemsize of %s\"", "_convert_format(val._dtype, reverse=1) + '(%d)' % VLdata._max else: val = _convert_format(val, reverse=1) #_update(keyword, val)", "= repeat elif dtype == 'P': output_format = _FormatP('2i4') output_format._dtype = _fits2rec[option[0]] elif", "block = self.__file.read(_blockLen) if block == '': break else: break hdu._raw += block", "as octal values. _number_FSC_RE = re.compile(r'(?P<sign>[+-])?0*(?P<digt>' + _digits_FSC+')') _number_NFSC_RE = re.compile(r'(?P<sign>[+-])? *0*(?P<digt>' +", "# instantiate a FITS file object (ffo) ffo = _File(name, mode=mode, memmap=memmap) hduList", "data=data, header=header, name=name) self._xtn = 'TABLE' if self.header[0].rstrip() != self._xtn: self.header[0] = self._xtn", "\"\"\" def __getitem__(self, key): x = self.data[key] if isinstance(key, (int, long)): return x", "_arr.hdu.data._parent.field(_arr.field) # use the largest column shape as the shape of the record", "continue # skip if there is no match if (keyword in _keyNames): _list.append(i)", "the value string in one block and the comment string in another. Also,", "if backward: _keylist = self._keylist[:] # make a copy _keylist.reverse() try: _indx =", "copy of the HDU, both header and data are copied.\"\"\" if self.data is", "data. The rest of the arguments are used only for the first case.", "if indx.step == 1: return _LineSlice(indx.stop-indx.start, indx.start) else: return _SteppedSlice((indx.stop-indx.start)/indx.step, indx.start) else: raise", "val): \"\"\"Verify the keyword to be FITS standard.\"\"\" # use repr (not str)", "# Do the first card here, instead of in the respective HDU classes,", "copy of the table HDU, both header and data are copied.\"\"\" # touch", "argument. \"\"\" result = \"\" element = 0 # go through the list", "BSCALE/BZERO. Call to this method will scale self.data and update the keywords of", "output_format = _fits2rec[dtype]+`repeat` # to accomodate both the ASCII table and binary table", "def _getext(filename, mode, *ext1, **ext2): \"\"\"Open the input file, return the HDUList and", "if not memmap, use the space already in memory else: self.data = raw_data", "definition keyword regular expression _tdef_re = re.compile(r'(?P<label>^T[A-Z]*)(?P<num>[1-9][0-9 ]*$)') def _parse_tformat(tform): \"\"\"Parse the TFORM", "keys['header'] hdu=_makehdu(data, header) if not isinstance(hdu, PrimaryHDU): hdu = PrimaryHDU(data, header=header) clobber =", "key): \"\"\" Get the index of the key in the name list. 
The", "else: raw_data = num.fromfile(self._file, type=code, shape=dims) raw_data._byteorder = 'big' if (self._bzero != 0", "= open(_name, mode=\"append\") if (verbose): print \"open a temp file\", _name for hdu", "None return self.__class__(data=_data, header=self.header.copy()) def writeto(self, name, output_verify='exception', clobber=False): \"\"\"Write the HDU to", "num.array(value, type=self._dtype) objects.ObjectArray.__setitem__(self, key, value) self._max = max(self._max, len(value)) class Column: \"\"\"Column class", "real.group('digt').translate(_fix_table, ' ') if real.group('sign') is not None: _realStr = real.group('sign')+_realStr imag =", "in front of END. \"\"\" if isinstance (card, Card): super(CardList, self).insert(pos, card) self._keylist.insert(pos,", "self._cards = cards # if the key list is not supplied (as in", "url) urllib._urlopener = ErrorURLopener() # Assign the locally subclassed opener # class to", "_data = hdu.data if _data is None and isinstance(_ext, _Zero): try: hdu =", "> 0: if isinstance(ext[0], Header): header = ext[0] ext = ext[1:] elif not", "valStr = '%20s' % _tmp else: valStr = '%20s' % self._valuestring elif isinstance(self.value,", "re.sub(\"''\", \"'\", valu.group('strg')) elif valu.group('numr') != None: # Check for numbers with leading", "\"\"\"Get the 'data' or 'columns' attribute. The data of random group FITS file", "digits, exponent sign, and exponents _digits_FSC = r'(\\.\\d+|\\d+(\\.\\d*)?)([DE][+-]?\\d+)?' _digits_NFSC = r'(\\.\\d+|\\d+(\\.\\d*)?) *([deDE] *[+-]?", "of the HDU's data part.\"\"\" self._file.seek(0, 2) return self._file.tell() - self._datLoc def _summary(self):", "= hdu.header['NAXIS1']*hdu.header['NAXIS2'] _data._gap = hdu._theap - _tbsize # comment out to avoid circular", "key = (key,) naxis = self.hdu.header['NAXIS'] if naxis < len(key): raise IndexError, 'too", "for explanations/examples. @rtype: L{Header} object @return: header \"\"\" hdulist, _ext = _getext(filename, 'readonly',", "_LineSlice(_KeyType): pass class _SteppedSlice(_KeyType): pass class Section: \"\"\"Image section.\"\"\" def __init__(self, hdu): self.hdu", "self._tbtype == 'BinTableHDU': attr = [_convert_format(fmt) for fmt in self.formats] elif self._tbtype ==", "card will be appended after the last non-blank card. \"\"\" if isinstance (card,", "not os.path.exists(_name): return _name else: raise _name, \"exists\" class VerifyError(exceptions.Exception): \"\"\"Verify exception class.\"\"\"", "''), Card('BITPIX', 8, 'array data type'), Card('NAXIS', 2, 'number of array dimensions'), Card('NAXIS1',", "commonName list) of a Column.\"\"\" indx = _get_index(self.names, col_name) getattr(self, attrib+'s')[indx] = new_value", "if isinstance(self._coldefs._recformats[indx], _FormatX): _nx = self._coldefs._recformats[indx]._nx dummy = num.zeros(self._parent.shape+(_nx,), type=num.Bool) _unwrapx(self._parent.field(indx), dummy, _nx)", "a keyword's value from a header in a FITS file. 
@type filename: string", "FITS file to write to @type data: array, record array, or groups data", "['', 'COMMENT', 'HISTORY'] def __init__(self, key='', value='', comment=''): \"\"\"Construct a card from key,", "== 'section': return Section(self) elif attr == 'data': self.__dict__[attr] = None if self.header['NAXIS']", "size from the first block of the HDU.\"\"\" re_simple = re.compile(r'SIMPLE =\\s*') re_bitpix", "0 and _card.key != 'CONTINUE': raise ValueError, 'Long card image must have CONTINUE", "except: offset = len(input) # check for one word longer than strlen, break", "reverse=0): \"\"\"Convert FITS format spec to record format spec. Do the opposite if", "cannot be accessed\"\"\" def http_error_default(self, url, fp, errcode, errmsg, headers): raise IOError, (errcode,", "self.format = format # does not include Object array because there is no", "repr(hdu.header.ascard) + _pad('END') blocks = blocks + _padLength(len(blocks))*' ' if len(blocks)%_blockLen != 0:", "r._coldefs = self._coldefs f = FITS_rec(r) f._convert = copy.deepcopy(self._convert) return f def _clone(self,", "Only if the card image already exist (to avoid infinite loop), # fix", "== 'HIERARCH': _limit = Card.length else: _limit = 10 try: eqLoc = self._cardimage[:_limit].index(\"=\")", "repr(val) self.__dict__['_fixable'] = 0 raise ValueError, self._err_text def _extractKey(self): \"\"\"Returns the keyword name", "!= ' '*80 self.ascard.append(new_card, useblanks=useblanks, bottom=1) else: try: _last = self.ascard.index_of(key, backward=1) self.ascard.insert(_last+1,", "'I':'I10', 'E':'E14.6', 'F':'F16.7', 'D':'D24.16'} self._tbtype = tbtype if isinstance(input, ColDefs): self.data = [col.copy()", "to be created (BinTableHDU or TableHDU) \"\"\" # construct a table HDU hdu", "HDU if there is extension if len(self) > 1: self.update_extend() def index_of(self, key):", "the right shape for the data part of the random group, # since", "CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;", "self.keys()] else: self._keylist = keylist # find out how many blank cards are", "[] for card in self.ascard: pairs.append((card.key, card.value)) return pairs def has_key(self, key): \"\"\"Check", "'%20s' % `self.value`[0] elif isinstance(self.value , (int, long)): valStr = '%20d' % self.value", "mo.end(0) else: raise ValueError(\"NAXIS not found where expected\") if naxis == 0: datasize", "input.stop if _stop is None: _stop = naxis elif isinstance(_stop, (int, long)): _stop", "self._width = hdr['NAXIS1'] self._shape = hdr['NAXIS2'] # go through header keywords to pick", "/ self._strides[0] return _Group(self, row) class _Group(rec.Record): \"\"\"One group of the random group", "hdu._new = 0 self.__file.seek(hdu._datSpan, 1) if self.__file.tell() > self._size: print 'Warning: File size", "hdu._new: self.__file.writeHDU(hdu) if (verbose): print \"append HDU\", hdu.name, _extver hdu._new = 0 elif", "of image dimensions, reverse the order of NAXIS.\"\"\" naxis = self.header['NAXIS'] axes =", "if bzero not in ['', None, 0] or bscale not in ['', None,", "raise ValueError, \"keyword 'END' not allowed\" self._checkKey(val) else: if val[:8].upper() == 'HIERARCH': val", "!= str(value): super(CardList, self).__setitem__(_key, value) self._keylist[_key] = value.key.upper() self.count_blanks() self._mod = 1 else:", "self.__dict__['key'] = val def _setvalue(self, val): \"\"\"Set the value attribute.\"\"\" if isinstance(val, (str,", "error: %s\" % _text else: exec(fix) #if option != 'silentfix': _text += 
'", "_WholeLine(_KeyType): pass class _SinglePoint(_KeyType): pass class _OnePointAxis(_KeyType): pass class _LineSlice(_KeyType): pass class _SteppedSlice(_KeyType):", "_unique[_name].append(i) else: _unique[_name] = [i] self.__dict__[attr] = _unique try: return self.__dict__[attr] except KeyError:", "= self[0].header if hdr.has_key('extend'): if (hdr['extend'] == False): hdr['extend'] = True else: if", "self.req_cards('TFIELDS', '== 7', _isInt+\" and val >= 0 and val <= 999\", 0,", "hduList = open(name, mode=\"append\") for hdu in self: hduList.__file.writeHDU(hdu) hduList.close(output_verify=output_verify) def close(self, output_verify='exception',", "If there is a field named \"XYZ\" and no other field name is", "# synchronize the sliced FITS_rec and its ._parent def __getitem__(self, key): tmp =", "has methods to change # the content of header without being able to", ">>> getdata('in.fits', 2) # the second extension >>> getdata('in.fits', ext=2) # the second", "\"\"\"Base class for all HDU (header data unit) classes.\"\"\" pass class _CorruptedHDU(_AllHDU): \"\"\"A", "opened and the header appended to the end of the file. If the", "npts, offset): self.npts = npts self.offset = offset class _WholeLine(_KeyType): pass class _SinglePoint(_KeyType):", "\"One or more header is resized.\" break # Data: if 'data' not in", "the one-to-one correspondence when updating the list(s). # Use lists, instead of dictionaries", "return name, extver def _getsize(self, block): \"\"\"Get the size from the first block", "None, 'val == 2', 2, option, _err) self.req_cards('BITPIX', None, 'val == 8', 8,", "bitpix=None, pardata=None, parnames=[], bscale=None, bzero=None, parbscales=None, parbzeros=None): \"\"\"input: input data, either the group", "supplied. \"\"\" if not os.path.exists(filename): writeto(filename, data, header) else: hdu=_makehdu(data, header) if isinstance(hdu,", "if fmt.lstrip()[0] == 'A' and option != '': output_format = _fits2rec[dtype]+`int(option)` # make", "(presumably with the field method), it will try to match the exact name", "be integer.' % input if _stop < _start: raise IndexError, 'Illegal slice %s,", "val.strip() if len(val) <= 8: val = val.upper() if val == 'END': raise", "be written to a file. name: output FITS file name to be written", "Column.\"\"\" indx = _get_index(self.names, col_name) for cname in _commonNames: attr = getattr(self, cname+'s')", "object).\"\"\" tmp = hdu.columns # get the right shape for the data part", "= ColDefs(_cols) self.parnames = [i.lower() for i in parnames] tmp = FITS_rec(rec.array(None, formats=_formats,", "HDU data part.\"\"\" self.__file.flush() loc = self.__file.tell() _size = 0 if hdu.data is", "+ _shift # pad the FITS data block if _size > 0: self.__file.write(_padLength(_size)*'\\0')", "self._setvalue(val) elif name == 'comment': self._setcomment(val) else: raise AttributeError, name # When an", "argument is optional. @keyword clobber: (optional) if True and if filename already exists,", "AttributeError(attr) def _summary(self): \"\"\"Summarize the HDU: name, dimensions, and formats.\"\"\" class_name = str(self.__class__)", "if (self._convert[indx] is None): # for X format if isinstance(self._coldefs._recformats[indx], _FormatX): _nx =", "than strlen, then it will be split in the middle of the word.", "pass return hdu class _ExtensionHDU(_ValidHDU): \"\"\"An extension HDU class. 
This class is the", "'prec') else: raise ValueError, valu size = eval(width)+1 strfmt = strfmt + 's'+str(size)", "self.hdu._file.seek(self.hdu._datLoc+offset*abs(_bitpix)/8) raw_data = num.fromfile(self.hdu._file, type=code, shape=dims) raw_data._byteorder = 'big' return raw_data class _ImageBaseHDU(_ValidHDU):", "and return the string before the equal sign. If there is no equal", "keyword \"\"\" if header is not None: if not isinstance(header, Header): raise ValueError,", "with leading 0s. real = Card._number_NFSC_RE.match(valu.group('real')) _rdigt = real.group('digt').translate(_fix_table2, ' ') if real.group('sign')", "fix=fix)) # if value checking is specified if test: val = self.header[keywd] if", "printable string.\"\"\" output = '' for card in self: output += str(card) +", "def __setitem__(self, key, hdu): \"\"\"Set an HDU to the HDUList, indexed by number", "%s' % type(key) def copy(self): \"\"\"Make a (deep)copy of the CardList.\"\"\" cards =", "'Wrong type of input' if option == 'left': tmp = list(self.data) + b", "input.count(' ') nmax = max(_nblanks, len(input)/strlen+1) arr = chararray.array(input+' ', itemsize=1) # locations", "keyword string if self.__dict__.has_key('key') or self.__dict__.has_key('_cardimage'): if isinstance(self, _Hierarch): keyStr = 'HIERARCH %s", "_ErrList([], unit='Card') isValid = \"val in [8, 16, 32, 64, -32, -64]\" #", "array indexing work properly.\"\"\" hdu = new_table(self._coldefs, nrows=shape[0]) return hdu.data def __repr__(self): tmp", "0. \"\"\" try: key = key.strip().upper() if key[:8] == 'HIERARCH': key = key[8:].strip()", "and values of mandatory keywords. self.req_cards('NAXIS', '== 2', _isInt+\" and val >= 1", "if len(self) == 1: dummy = [] else: dummy = map(lambda x, y:", "fmt = input_format (repeat, dtype, option) = _parse_tformat(fmt) if reverse == 0: if", "scaled (physical) array. self._parent = input self._convert = [None]*self._nfields self.names = self._names def", "yet. \"\"\" def __init__(self, name=None, format=None, unit=None, null=None, \\ bscale=None, bzero=None, disp=None, start=None,", "@type header: L{Header} object or None @param header: the header associated with 'data',", "return size def close(self): \"\"\" Close the 'physical' FITS file. :Parameters: None :Returns:", "isinstance(hdu, BinTableHDU): for i in range(hdu.data._nfields): coldata = hdu.data.field(i) coldata2 = hdu.data._parent.field(i) if", "after='naxis1') _update('tfields', len(_cols), after='gcount') # Wipe out the old table definition keywords. Mark", "= valStr.strip() # comment string if keyStr.strip() in Card._commentaryKeys: # do NOT use", "will be split in the middle of the word. 
\"\"\" list = []", "exponent, allows space between sign, # digits, exponent sign, and exponents _digits_FSC =", "self._coldefs.names)) self.__setstate__(tmp.__getstate__()) for i in range(npars): (_scale, _zero) = self._get_scale_factors(i)[3:5] if _scale or", "= self.__format_RE.match(valu) if fmt: code, width, prec = fmt.group('code', 'width', 'prec') else: raise", "fill all cells with zeros or blanks if = 0, copy the data", "ValueError, 'Can not rename to CONTINUE' if newkey in Card._commentaryKeys or oldkey in", "\"column `%s` starting point overlaps to the previous column\" % indx+1 _trail =", "record array self.__setstate__(input.__getstate__()) # _parent is the original (storage) array, # _convert is", "== 0) and (newkey in self.ascard._keylist): raise ValueError, 'Intended keyword %s already exists", "else: self.name = name self.mode = mode self.memmap = memmap if memmap and", "== 'TableHDU': for i in range(len(self)): (type, width) = _convert_ASCII_format(self.data[i].format) if width is", "keyword value from the CardList. If no keyword is found, return the default", "ValueError, 'Cards with CONTINUE must have string value.' if name == 'value': _val", "integer or string. If integer, it is the index in the list. If", "the end, even if there are blank cards in front of END. \"\"\"", "as in _TableBaseHDU size = self.size() if size: self._file.seek(self._datLoc) data = GroupData(_get_tbdata(self)) data._coldefs", "= _convert_format(format) except: try: # legit RecArray format? recfmt = format format =", "FITS record array from a RecArray.\"\"\" # input should be a record array", "\"\"\" def __init__(self, name, header): \"\"\" Construct a StreamingHDU object given a file", "= 0 elif isinstance(_start, (int, long)): _start = _normalize(_start, naxis) else: raise IndexError,", "(indx.start == 0) and (indx.stop == naxis) and (indx.step == 1): return _WholeLine(naxis,", "expressed in FITS BITPIX value (8, 16, 32, 64, -32, or -64) pardata:", "%s already exists.' % new_name else: self.change_attrib(col_name, 'name', new_name) def change_unit(self, col_name, new_unit):", "will get the first field, and field('ABC') will get the second field. If", "with corresponding indentations. A tricky use of __str__, since normally __str__ has only", "bzero = bzero)) self._coldefs = ColDefs(_cols) self.parnames = [i.lower() for i in parnames]", "dtype == 'a': data_output[i] = chararray.array(input[i], itemsize=1) else: data_output[i] = num.array(input[i], type=dtype) desp_output[i,0]", "IOError exception. 
If the dtype of the input data does not match what", "the file to append to @type data: array, table, or group data object", "this has to be done after the \"regular\" data is written (above) _where", "issubclass(self._hdutype, PrimaryHDU): del self['EXTEND'] del self['PCOUNT'] del self['GCOUNT'] if issubclass(self._hdutype, PrimaryHDU): del self['GROUPS']", "simple: groups = 1 else: groups = 0 mo = re_naxis.search(block) if mo", "self._mod = 1 def count_blanks(self): \"\"\"Find out how many blank cards are *directly*", "self._offset, self._datLoc = None, None, None self.header = header self.data = data self._xtn", "80-char \"physical\" cards, the cards after the first one must start with CONTINUE", "not isinstance(col, Column): raise \"Element %d in the ColDefs input is not a", "\"'%s' card has invalid value '%s'.\" % (keywd, val) fix_text = \"Fixed by", "# now build the columns tmp = [Column(**attrs) for attrs in dict] self.data", "Redistributions of source code must retain the above copyright notice, this list of", "data parbscales: list of bscales for the parameters parbzeros: list of bzeros for", "isinstance(coldata, chararray.CharArray): # only swap unswapped # deal with var length table if", "a case variant of \"XYZ\", then field('xyz'), field('Xyz'), etc. will get this field.", "cards, the cards after the first one must start with CONTINUE and the", "index in the list. If string, (a) Field (column) names are case sensitive:", "'*_trail # not using numarray.strings's num2char because the # result is not allowed", "CardList(cards) def __getitem__ (self, key): \"\"\"Get a header keyword value.\"\"\" return self.ascard[key].value def", "raise \"Zip files with multiple members are not supported.\" self.tfile = tempfile.NamedTemporaryFile('rb+',-1,'.fits') self.name", "def info(self): \"\"\"Summarize the info of the HDU's in this HDUList.\"\"\" if self.__file", "format to construct Column\" # scale the array back to storage values if", "or _zero: _arr = tmp._arrays[i].copy() else: _arr = tmp._arrays[i] if _scale: _arr *=", "self._bscale = self.header.get('BSCALE', 1) if (data is DELAYED): return self.data = data #", "if 'header' in extkeys: _gethdr = extkeys['header'] del extkeys['header'] else: _gethdr = False", ">>> update(file, dat, header=hdr, ext=5) # update the 5th extension \"\"\" # parse", "will be appended after the last non-commentary card. If =1, the card will", "if not fixable: option = 'unfixable' if option in ['warn', 'exception']: #raise VerifyError,", "naxis = self.header.get('NAXIS', 0) self.req_cards('PCOUNT', '== '+`naxis+3`, _isInt+\" and val >= 0\", 0,", "if isinstance(hdu, PrimaryHDU): hdu = ImageHDU(data, header) f = open(filename, mode='update') f.append(hdu) f.close()", "actually read here, but the beginning locations are computed. \"\"\" _cardList = []", "'END' cards. A corrupted HDU usually means that the data size cannot be", "% (_blockLen, len(block)) elif (blocks[:8] not in ['SIMPLE ', 'XTENSION']): raise IOError, 'Block", "IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,", "new_unit): \"\"\"Change a Column's unit.\"\"\" self.change_attrib(col_name, 'unit', new_unit) def info(self, attrib='all'): \"\"\"Get attribute(s)", "fill: n = 0 (_scale, _zero, bscale, bzero) = hdu.data._get_scale_factors(i)[3:] if n >", "'TableHDU': last_end = 0 attr = [0] * len(self) for i in range(len(self)):", "tmp._arrays[i] is None: size = 0 else: size = len(tmp._arrays[i]) n = min(size,", "arguments. 
For example: >>> update(file, dat, hdr, 'sci') # update the 'sci' extension", "the input string to be multiple of 80.\"\"\" _len = len(input) if _len", "'comment', '_valueModified']: if self.__dict__.has_key(name): delattr(self, name) return self def _ncards(self): return len(self._cardimage) /", "to TDISP keyword start: column starting position (ASCII table only), corresponding to TBCOL", "appears to find the # end of a string rather well, but will", "index. \"\"\" if before != None: loc = self.index_of(before) self.insert(loc, card, useblanks=useblanks) elif", "data: array, table, or group data object @param data: the new data used", "keyword EXTEND or if it has the proper value. \"\"\" hdr = self[0].header", "when one or more mandatory Cards are corrupted (unparsable), such as the 'BITPIX',", "USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY", "surrogate for the __setattr__ key case.\"\"\" if isinstance(val, str): val = val.strip() if", "else: strlen = _len % Card.length return input + ' ' * (Card.length-strlen)", "verification option, default=silentfix. \"\"\" # Only if the card image already exist (to", "hdu in hdus: if not isinstance(hdu, _AllHDU): raise \"Element %d in the HDUList", "= None else: if _key == 'HIERARCH': _limit = Card.length else: _limit =", "format for storing high energy astrophysics data. For details of the FITS standard,", "not support theap yet. \"\"\" def __init__(self, name=None, format=None, unit=None, null=None, \\ bscale=None,", "(_fmt+',') * npars data_fmt = '%s%s' % (`input.shape[1:]`, _fmt) _formats += data_fmt gcount", "arguments are for extension specification. See L{getdata} for explanations/examples. @rtype: L{Header} object @return:", "= '' ncards = self._ncards() for i in range(ncards): # take each 80-char", "than one group parameter have the same name, the # value must be", "chararray.CharArray) and value.itemsize() == 1: pass elif self._dtype == 'a': value = chararray.array(value,", "or more attribute names, they must be separated by comma(s). \"\"\" if attrib.strip().lower()", "else: raise SyntaxError, \"%s is not a Card\" % str(card) def _use_blanks(self, how_many):", "val >= 0 and val <= 999\", 0, option, _err) tfields = self.header['TFIELDS']", "\"\"\"Temporary HDU, used when the file is first opened. This is to speed", "has incorrect type\" # set extension name if not name and self.header.has_key('EXTNAME'): name", "value \"\"\" _TableBaseHDU.__init__(self, data=data, header=header, name=name) self._xtn = 'TABLE' if self.header[0].rstrip() != self._xtn:", "else: # the shape will be in the order of NAXIS's which is", "a list of HDU's or a single HDU. Default = None, i.e. an", "cname, value) # if the column data is not NDarray, make it to", "#elif option == 'warn': pass # fix the value elif option == 'unfixable':", "is not correct for all cases, but # it comes pretty darn close.", "we were provided with a Primary Header. If not we will need #", "size (in bytes) of the data portion of the HDU. 
:Parameters: None :Returns:", "isinstance(hdu, (slice, list)): if isinstance(_key, int): raise ValueError, \"An element in the HDUList", "if there are blank cards directly before END, it will use this space", "(bscale != 1 or bzero !=0): _scale = bscale _zero = bzero else:", "self.data = raw_data.copy() # if not memmap, use the space already in memory", "= num.fromfile(self._file, type=code, shape=dims) raw_data._byteorder = 'big' if (self._bzero != 0 or self._bscale", "name in ['TDISP', 'TDIM', 'THEAP']: for i in range(_tfields): del self[name+`i+1`] if issubclass(self._hdutype", "RecArray, so we can deal with scaled columns. \"\"\" def __init__(self, input): \"\"\"Construct", "hdu = hdulist[_ext] _data = hdu.data if _data is None and isinstance(_ext, _Zero):", "P format column array, both the data descriptors and the data. It returns", "loops if not (self.__dict__.has_key('value') or self.__dict__.has_key('_cardimage')): valStr = '' # string value should", "make sure it gets the decimal point.\"\"\" valueStr = \"%.16G\" % value if", "two different columns called 'abc' and 'ABC' respectively. (b) When you *refer* to", "header of the appropriate type is created for the supplied data. This argument", "_offset += len(data_output[i]) * _nbytes return data_output class _VLF(objects.ObjectArray): \"\"\"variable length field object.\"\"\"", "name, corresponding to TTYPE keyword format: column format, corresponding to TFORM keyword unit:", "just list or tuple, not required to be NDArray if format is not", "in the header. oldkey: old keyword, can be a name or index. newkey:", "self._xtn: hdr[0] = self._xtn hdr.ascard[0].comment = 'binary table extension' class StreamingHDU: \"\"\" A", "(header data unit) classes.\"\"\" pass class _CorruptedHDU(_AllHDU): \"\"\"A Corrupted HDU class.\"\"\" \"\"\" This", "contains the definition of one column, e.g. ttype, tform, etc. and the array.", "= hdu.data if _data is None and isinstance(_ext, _Zero): try: hdu = hdulist[1]", "'PRIMARY' # insert the keywords EXTEND if header is None: dim = `self.header['NAXIS']`", "self.__dict__.has_key('_cardimage')): valStr = '' # string value should occupies at least 8 columns,", "element %s is not an extension HDU.\" % `i` _text = self.run_option(option, err_text=err_text,", "HDU's data part.\"\"\" size = 0 naxis = self.header.get('NAXIS', 0) # for random", "None :Returns: None \"\"\" self._ffo.close() class ErrorURLopener(urllib.FancyURLopener): \"\"\"A class to use with urlretrieve", "== 'TableHDU': (_format, _width) = _convert_ASCII_format(self.formats[i]) if self.starts[i] is '': self.starts[i] = last_end", "+= (self.header['NAXIS'+`j+1`],) _format = self.NumCode[self.header['BITPIX']] if isinstance(self, GroupsHDU): _gcount = ' %d Groups", "location is specified if pos is not None: test_pos = '_index '+ pos", "_ImageBaseHDU.ImgCode[self.data.type()] else: self.data = raw_data try: return self.__dict__[attr] except KeyError: raise AttributeError(attr) def", "return self.__dict__[attr] except KeyError: raise AttributeError(attr) def _summary(self): \"\"\"Summarize the HDU: name, dimensions,", "in range(len(self)): _name = self[j].name if isinstance(_name, str): _name = _name.strip().upper() if _name", "hdu = BinTableHDU(data) else: raise KeyError, 'data must be numarray or table data.'", "hdu = ImageHDU(data) elif isinstance(data, FITS_rec): hdu = BinTableHDU(data) else: raise KeyError, 'data", "\"\"\"Random groups data object. 
Allows structured access to FITS Group data in a", "'Cards with CONTINUE must have string value.' if name == 'value': _val =", "table data from input (an HDU object).\"\"\" tmp = hdu.columns # get the", "FSC and one for non-FSC (NFSC) format: # NFSC allows lower case of", "Column.\" % input.index(col) self.data = [col.copy() for col in input] # if the", "_getext(filename, 'update', *ext, **extkeys) hdulist[_ext] = new_hdu hdulist.close() def info(filename): \"\"\"Print the summary", "_text = '' _err = _ErrList([], unit='HDU') # the first (0th) element must", "stream per the header provided in the constructor. \"\"\" size = 0 naxis", "KeyError, 'extension %s not found' % `key` elif (nfound > 1): raise KeyError,", "(string), it is never fixable if result is not None: _str = result.group('comm')", "into a string.\"\"\" block = '' for card in self: block = block", "itemsize=self._parent.field(indx)[i,0], shape=1) else: dummy[i] = num.array(self._file, type=self._coldefs._recformats[indx]._dtype, shape=self._parent.field(indx)[i,0]) dummy[i]._byteorder = 'big' # scale", "the HDU. :Parameters: None :Returns: size : integer The number of bytes of", "after the last field if self._tbtype == 'TableHDU': last_end = 0 attr =", "else: headstr = \"CONTINUE \" valstr = valfmt % val_list[i] output = output", "'Subsection data is not contiguous.' # the offset needs to multiply the length", "result is not None or self.key in Card._commentaryKeys: return result else: if option", "except: if isinstance(recfmt, _FormatP): try: _func = lambda x: num.array(x, type=recfmt._dtype) array =", "may not be the column right after the last field elif tbtype ==", "**extkeys) hdu = hdulist[_ext] _data = hdu.data if _data is None and isinstance(_ext,", "desp_output[i,1] = _offset _offset += len(data_output[i]) * _nbytes return data_output class _VLF(objects.ObjectArray): \"\"\"variable", "hdu.data._get_scale_factors(i)[3:] if n > 0: if isinstance(tmp._recformats[i], _FormatX): if tmp._arrays[i][:n].shape[-1] == tmp._recformats[i]._nx: _wrapx(tmp._arrays[i][:n],", "= re_pcount.search(block) if mo is not None: pcount = int(mo.group(1)) else: pcount =", "table if isinstance(coldata, _VLF): for i in coldata: if not isinstance(i, chararray.CharArray): if", "self._coldefs = ColDefs(_cols) self.parnames = [i.lower() for i in parnames] tmp = FITS_rec(rec.array(None,", "list.__init__(self, hdus) def __iter__(self): return [self[i] for i in range(len(self))].__iter__() def __getitem__(self, key):", "\"\"\"Print out nested structure with corresponding indentations. A tricky use of __str__, since", "bzero not in ['', None, 0] or bscale not in ['', None, 1]:", "_itemsize = 0 for i in range(len(tmp)): _formats += 'a%d,' % tmp.spans[i] _itemsize", "-64) pardata: parameter data, as a list of (numeric) arrays. parnames: list of", "if hdr[0] != self._xtn: hdr[0] = self._xtn hdr.ascard[0].comment = 'binary table extension' class", "in the middle of the word. \"\"\" list = [] _nblanks = input.count('", "raw_data._byteorder = 'big' if (self._bzero != 0 or self._bscale != 1): if _bitpix", "= 1, will fill all cells with zeros or blanks if = 0,", "of data type dtype. 
The descriptor location will have a zero offset for", "return hdu def writeto(filename, data, header=None, **keys): \"\"\"Create a new FITS file using", "hdr = input.header _nfields = hdr['TFIELDS'] self._width = hdr['NAXIS1'] self._shape = hdr['NAXIS2'] #", "in range(len(tmp)): tmp._arrays[i] = _data.field(i) return FITS_rec(_data) def new_table (input, header=None, nrows=0, fill=0,", "# the second extension By name, i.e., EXTNAME value (if unique): >>> getdata('in.fits',", "not written. Once sufficient data has been written to the stream to satisfy", "+ 1 if loc == 0: offset = -1 except: offset = len(input)", "Parameters' % (self.header['GCOUNT'], self.header['PCOUNT']) else: _gcount = '' return \"%-10s %-11s %5d %-12s", "little endian arrays before writing # output = data.byteswapped() else: output = data", "0: ext = _Zero() elif 'ext' in keys: if n_ext2 == 1: ext", "go through the next level items, each of the next level # must", "not exist for name in ['key', 'value', 'comment', '_valueModified']: if self.__dict__.has_key(name): delattr(self, name)", "before=None, after=None): \"\"\"Add a COMMENT card. value: Comment text to be added. before:", "raise SyntaxError, \"%s is not a Card\" % str(value) def __delitem__(self, key): \"\"\"Delete", "parameters')) _list.append(Card('GCOUNT', 1, 'number of groups')) if header is not None: hcopy =", "at the blank space between words. So it may not look pretty. \"\"\"", "tmp._recformats[-1] = `hdu._dimShape()[:-1]` + tmp._dat_format if hdu._ffile.memmap: _mmap = hdu._ffile._mm[hdu._datLoc:hdu._datLoc+hdu._datSpan] _data = rec.RecArray(_mmap,", "output = data output.tofile(self._ffo.getfile()) if self._ffo.getfile().tell() - self._datLoc == self._size: # # the", "self.data is None: axes = [] else: raise ValueError, \"incorrect array type\" self.header['NAXIS']", "k in self.keys()] else: self._keylist = keylist # find out how many blank", "not None: _data = self.data.copy() else: _data = None return self.__class__(data=_data, header=self.header.copy()) def", "= b + list(self.data) return ColDefs(tmp) def __radd__(self, other): return self.__add__(other, 'right') def", "with zeros or blanks if = 0, copy the data from input, undefined", "_val += eval(_idigt)*1j else: _val += eval(imag.group('sign') + _idigt)*1j else: _val = UNDEFINED", "if not comm == '': nlines = len(comm) / comm_len + 1 comm_list", "i.e. backward? default=0. If backward = 1, search from the end. \"\"\" if", "a later stage as CONTINUE cards may span across blocks. 
\"\"\" if len(block)", "+ eqStr + valStr + commentStr # need this in case card-with-continue's value", "update(file, dat, hdr, 3) # update the 3rd extension >>> update(file, dat, 'sci',", "i in range(len(tmp)): tmp._arrays[i] = _data.field(i) return FITS_rec(_data) def new_table (input, header=None, nrows=0,", "area size, including padding hdu._datSpan = _size + _padLength(_size) hdu._new = 0 self.__file.seek(hdu._datSpan,", "(int, long)): valStr = '%20d' % self.value # XXX need to consider platform", "= self.field(indx[0]).astype('f8') for i in indx[1:]: result += self.field(i) return result def setpar(self,", "and val >= 0\", 0, option, _err) self.req_cards('GCOUNT', '== '+`naxis+4`, _isInt+\" and val", "option='left'): if isinstance(other, Column): b = [other] elif isinstance(other, ColDefs): b = list(other.data)", "expected\") if naxis == 0: datasize = 0 else: dims = [0]*naxis for", "be an HDU.\" for item in hdu: if not isinstance(item, _AllHDU): raise ValueError,", "_heapstart = hdu.header.get('THEAP', _tbsize) hdu.data._gap = _heapstart - _tbsize _pcount = hdu.data._heapsize +", "_tmpName(input): \"\"\"Create a temporary file name which should not already exist. Use the", "len(self.data.parnames), after='GROUPS') self.header.update('GCOUNT', len(self.data), after='PCOUNT') npars = len(self.data.parnames) (_scale, _zero) = self.data._get_scale_factors(npars)[3:5] if", "(_scale, _zero, bscale, bzero) = hdu.data._get_scale_factors(i)[3:] if n > 0: if isinstance(tmp._recformats[i], _FormatX):", "is written self.__file.flush() # return both the location and the size of the", "valStr = '%20s' % `self.value`[0] elif isinstance(self.value , (int, long)): valStr = '%20d'", "agree with the data.\"\"\" old_naxis = self.header.get('NAXIS', 0) if isinstance(self.data, GroupData): self.header['BITPIX'] =", "-1): # locate last non-commentary card if self[i].key not in Card._commentaryKeys: break super(CardList,", "data._coldefs = self.columns else: data = None self.__dict__[attr] = data elif attr ==", "\"test\" argument. \"\"\" _err = errlist fix = '' cards = self.header.ascard try:", "for _card in hdr.ascardlist(): _key = _tdef_re.match(_card.key) try: keyword = _key.group('label') except: continue", "it's value/comment will be updated. If it does not exist, a new card", "self.name = name def _verify(self, option='warn'): \"\"\"ImageHDU verify method.\"\"\" _err = _ExtensionHDU._verify(self, option=option)", "# if not the real CONTINUE card, skip to the next card to", "FITS file to be opened. mode: Open mode, 'readonly' (default), 'update', or 'append'.", "input, undefined cells will still be filled with zeros/blanks. tbtype: table type to", "mktemp() output. \"\"\" dirName = os.path.dirname(input) if dirName != '': dirName += '/'", "= 8 self.__class__ = _Hierarch return self._cardimage[_start:eqLoc] def _getValueCommentString(self): \"\"\"Locate the equal sign", "= re.compile(r'NAXIS(\\d) =\\s*(\\d+)') re_gcount = re.compile(r'GCOUNT =\\s*(-?\\d+)') re_pcount = re.compile(r'PCOUNT =\\s*(-?\\d+)') re_groups =", "and data will be streamed. 
header : Header The header object associated with", "oldkey.strip().upper() newkey = newkey.strip().upper() if newkey == 'CONTINUE': raise ValueError, 'Can not rename", "OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY", "THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH", "name = mo.group(1).rstrip() else: name = '' mo = re_extver.search(self._raw) if mo: extver", "default def update(self, key, value, comment=None, before=None, after=None): \"\"\"Update one header card.\"\"\" \"\"\"", "not in valueStr and \"E\" not in valueStr: valueStr += \".0\" return valueStr", "_FormatX(str): \"\"\"For X format in binary tables.\"\"\" pass class _FormatP(str): \"\"\"For P format", "last HDU or corrupted HDU except ValueError: print 'Warning: Required keywords missing when", "the arguments are for extension specification. They are flexible and are best illustrated", "= self._convert[indx].copy() if _zero: dummy -= bzero if _scale: dummy /= bscale elif", "be used data: data to be used name: name to be populated in", "name of the file to be updated data: the new data used for", "self.key in Card._commentaryKeys: self.__dict__['value'] = self._cardimage[8:].rstrip() self.__dict__['comment'] = '' return valu = self._check(option='parse')", "def new_table (input, header=None, nrows=0, fill=0, tbtype='BinTableHDU'): \"\"\"Create a new table from the", "and the provided header will be added as the first extension. If the", "self.starts[i] + _width - 1 attr[i] = _end - last_end last_end = _end", "= _commonNames else: list = attrib.split(',') for i in range(len(list)): list[i]=list[i].strip().lower() if list[i][-1]", "raise SyntaxError, \"%s is not a Card\" % str(card) def _pos_insert(self, card, before,", "offset *= _naxis if dims == []: dims = [1] npt = 1", "naxis: if naxis > 1: return _SinglePoint(1, indx) elif naxis == 1: return", "'number of groups')) if header is not None: hcopy = header.copy() hcopy._strip() _list.extend(hcopy.ascardlist())", "_coldefs._shape = self.header['GCOUNT'] _coldefs._dat_format = _fits2rec[_format] _coldefs._pnames = _pnames self.__dict__[attr] = _coldefs elif", "!= 0 or self._bscale != 1): if _bitpix > 0: # scale integers", "bzero = 0 return (_str, _bool, _number, _scale, _zero, bscale, bzero) def field(self,", "0 if hdu.data is not None: # if image, need to deal with", "after=after) def add_blank(self, value='', before=None, after=None): \"\"\"Add a blank card. value: Text to", "= 0 mo = re_groups.search(block) if mo and simple: groups = 1 else:", "is not None: raise ValueError, 'comment %s is not a string' % val", "CardList. If no keyword is found, return the default value. key: keyword name", "_format[1:] + _dict[_format[0]] + ' '*_trail # not using numarray.strings's num2char because the", "0: size = 1 for j in range(naxis): size = size * self.header['NAXIS'+`j+1`]", "the new card will be placed. default=None. \"\"\" if self.has_key(key): j = self.ascard.index_of(key)", "return the string before column 9. \"\"\" eqLoc = self._locateEq() if eqLoc is", "range(nc-1, -1, -1): # locate last non-commentary card if self[i].key not in Card._commentaryKeys:", "= 1 self.header.update('NAXIS1', 0, after='NAXIS') def __getattr__(self, attr): \"\"\"Get the 'data' or 'columns'", "space between sign, # digits, exponent sign, and exponents _digits_FSC = r'(\\.\\d+|\\d+(\\.\\d*)?)([DE][+-]?\\d+)?' 
_digits_NFSC", "# since binary table does not support ND yet if isinstance(hdu, GroupsHDU): tmp._recformats[-1]", "key) out._convert = [None]*self._nfields for i in range(self._nfields): # touch all fields to", "if not _zero: bzero = 0 return (_str, _bool, _number, _scale, _zero, bscale,", "option in ['fix', 'silentfix']: result = self._check('parse') self._fixValue(result) if option == 'fix': self.__dict__['_fix_text']", "module for reading and writing Flexible Image Transport System (FITS) files. This file", "file. \"\"\" self.header = header.copy() # # Check if the file already exists.", "data info. if 'data' in dir(self): if self.data is None: _shape, _format =", "does not exist.\" % keywd fix_text = \"Fixed by inserting a new '%s'", "'TSCAL', 'TZERO', 'TDISP', 'TBCOL', 'TDIM'] # mapping from TFORM data type to numarray", "of \"> n\", # where n is an int if isinstance(pos, str): _parse", "array)) except: try: # this handles ['abc'] and [['a','b','c']] # equally, beautiful! _func", "size def copy(self): \"\"\"Make a copy of the HDU, both header and data", "backward? default=0. If backward = 1, search from the end. \"\"\" if isinstance(key,", "Any header will not be initialized till the HDU is accessed. \"\"\" def", "hdu.data else: output = hdu.data output.tofile(self.__file) _size = output.nelements() * output._itemsize # write", "a Column.\"\"\" indx = _get_index(self.names, col_name) getattr(self, attrib+'s')[indx] = new_value def change_name(self, col_name,", "None: if isinstance(data, num.NumArray): hdu = ImageHDU(data) elif isinstance(data, FITS_rec): hdu = BinTableHDU(data)", "shape will be in the order of NAXIS's which is the # reverse", "= ' %d Groups %d Parameters' % (self.header['GCOUNT'], self.header['PCOUNT']) else: _gcount = ''", "and 'ABC' respectively. (b) When you *refer* to a field (presumably with the", "self.columns._pnames else: data = None self.__dict__[attr] = data elif attr == 'columns': _cols", "'E')) else: dummy = self._parent.field(indx) # further conversion for both ASCII and binary", "self._bscale != 1: num.multiply(self.data, self._bscale, self.data) if self._bzero != 0: self.data += self._bzero", "val_len) for i in range(len(val_list)): if i == 0: headstr = \"%-8s= \"", "This regex helps delete leading zeros from numbers, otherwise # Python might evaluate", "self.ascard def items(self): \"\"\"Return a list of all keyword-value pairs from the CardList.\"\"\"", "item has consistent data type to avoid misalignment. \"\"\" if isinstance(value, num.NumArray) and", "the data area # data area size, including padding hdu._datSpan = _size +", "\"\"\" def __init__(self, hdus=[], file=None): \"\"\"Construct a HDUList object. hdus: Input, can be", "'data' in dir(hdu): if hdu.data is not None: hdu._file.seek(hdu._datLoc) self.__file.writeHDUdata(hdu) if (verbose): print", "file. This is a convenience method to provide a user easier output interface", "'%-80s' % (headstr + valstr) # do the comment string if self.comment is", "getattr(_cols, cname+'s')[i] if val != '': keyword = _keyNames[_commonNames.index(cname)]+`i+1` if cname == 'format'", "_option in ['fix', 'silentfix'] and x.find('Unfixable') != -1: raise VerifyError, '\\n'+x if (_option", "'', firstval, option, _err) self.req_cards('BITPIX', '== 1', _isInt+\" and \"+isValid, 8, option, _err)", "anywhere. 
If the card does not exist, the new card will have the", "axes = [0] + axes elif isinstance(self.data, num.NumArray): self.header['BITPIX'] = _ImageBaseHDU.ImgCode[self.data.type()] axes =", "attribute in ColDefs is a list of corresponding attribute values from all Columns.", "coldata: if not isinstance(i, chararray.CharArray): if i._type.bytes > 1: if i._byteorder != 'big':", "name (a string) or the index (an integer). backward: search the index from", "filename exists, create if not. If only data is supplied, a minimal header", "== 'A' and option != '': output_format = _fits2rec[dtype]+`int(option)` # make sure option", "loc, _size+_padLength(_size) def close(self): \"\"\"Close the 'physical' FITS file.\"\"\" self.__file.close() class HDUList(list, _Verify):", "if n_ext1 > 2: raise ValueError, \"too many positional arguments\" elif n_ext1 ==", "@rtype: L{Header} object @return: header \"\"\" hdulist, _ext = _getext(filename, 'readonly', *ext, **extkeys)", "HDU.\" % item else: if not isinstance(hdu, _AllHDU): raise ValueError, \"%s is not", "1: self.field(indx[0])[:] = value # if more than one group parameter have the", "argument's value keyword = _keyNames[_commonNames.index(cname)] if isinstance(value, Card): setattr(self, cname, value.value) else: setattr(self,", "4, 2, 1] nbytes = ((nx-1) / 8) + 1 for i in", "num.nonzero(arr == ' ')[0] offset = 0 xoffset = 0 for i in", "0 else: self.writeComplete = 1 def write(self,data): \"\"\" Write the given data to", "multiple members are not supported.\" self.tfile = tempfile.NamedTemporaryFile('rb+',-1,'.fits') self.name = self.tfile.name self.__file =", "RecArray format? recfmt = format format = _convert_format(recfmt, reverse=1) except: raise ValueError, \"Illegal", "as an array.\"\"\" indx = _get_index(self._coldefs.names, key) if (self._convert[indx] is None): # for", "not None: test_pos = '_index '+ pos if not eval(test_pos): err_text = \"'%s'", "', 'XTENSION']): raise IOError, 'Block does not begin with SIMPLE or XTENSION' for", "_FormatP): dummy = _VLF([None]*len(self._parent)) dummy._dtype = self._coldefs._recformats[indx]._dtype for i in range(len(self._parent)): _offset =", "value string valfmt = \"'%-s&'\" val = self.value.replace(\"'\", \"''\") val_list = self._words_group(val, val_len)", "= Card.length else: _limit = 10 try: eqLoc = self._cardimage[:_limit].index(\"=\") except: eqLoc =", "return 1 except: return 0 def rename_key(self, oldkey, newkey, force=0): \"\"\"Rename a card's", "we can deal with scaled columns. \"\"\" def __init__(self, input): \"\"\"Construct a FITS", "self._cardimage[:8].strip().upper() if _key in Card._commentaryKeys: eqLoc = None else: if _key == 'HIERARCH':", "= array.copy() if bzero not in ['', None, 0]: array += -bzero if", "blank cards in front of END. 
bottom: If =0 (default) the card will", "hdu.data._parent.field(i)[n:] = -bzero/bscale else: hdu.data._parent.field(i)[n:] = '' hdu.update() return hdu class FITS_rec(rec.RecArray): \"\"\"FITS", "match if (keyword in _keyNames): _list.append(i) for i in _list: del self.header.ascard[i] del", "expected\") mo = re_gcount.search(block) if mo is not None: gcount = int(mo.group(1)) else:", "hdu = PrimaryHDU(data, header=header) clobber = keys.get('clobber', False) hdu.writeto(filename, clobber=clobber) def append(filename, data,", "re, os, tempfile, exceptions import operator import __builtin__ import urllib import tempfile import", "8 return size def _verify(self, option='warn'): _err = PrimaryHDU._verify(self, option=option) # Verify locations", "_hdus = super(HDUList, self).__getslice__(start,end) result = HDUList(_hdus) return result def __setitem__(self, key, hdu):", "HDUList back to the file (for append and update modes only). output_verify: output", "locations and values of mandatory keywords. self.req_cards('NAXIS', '== 2', _isInt+\" and val >=", "output: output Uint8 array of shape (s, nbytes) nx: number of bits \"\"\"", "getdata('in.fits') By extension number: >>> getdata('in.fits', 0) # the primary header >>> getdata('in.fits',", "in range(self._nfields): if (self._convert[indx] is not None): if isinstance(self._coldefs._recformats[indx], _FormatX): _wrapx(self._convert[indx], self._parent.field(indx), self._coldefs._recformats[indx]._nx)", "name, output_verify='exception', clobber=False): \"\"\"Write the HDU to a new file. This is a", "'width', 'prec') else: raise ValueError, valu size = eval(width)+1 strfmt = strfmt +", "if there is no match if (keyword in _keyNames): col = eval(_key.group('num')) if", "required to fill the stream per the header provided in the constructor. \"\"\"", "!= None: _card = Card(key, value, comment) self.ascard._pos_insert(_card, before=before, after=after) else: self.ascard.append(Card(key, value,", "8) + 1 unused = nbytes*8 - nx for i in range(nbytes): _min", "for verification.\"\"\" def run_option(self, option=\"warn\", err_text=\"\", fix_text=\"Fixed.\", fix = \"pass\", fixable=1): \"\"\"Execute the", "val == 0\", 0, option, _err) _after = self.header['NAXIS'] + 3 # if", "of an HDU from the HDUList. The key can be an integer, a", "def __str__(self): \"\"\"Format a list of cards into a printable string.\"\"\" output =", "def _add_commentary(self, key, value, before=None, after=None): \"\"\"Add a commentary card. If before and", "last_end = _end self._width = _end else: raise KeyError, 'Attribute %s not defined.'", "or a tuple of (string, integer). \"\"\" if isinstance(key, (int, slice)): return key", "# Header: # Add 1 to .ascard to include the END card _nch80", "but # it comes pretty darn close. It appears to find the #", "longstring + _comm.rstrip() + ' ' self.__dict__[name] = longstring.rstrip() def _breakup_strings(self): \"\"\"Break up", "key.strip().upper() if _key[:8] == 'HIERARCH': _key = _key[8:].strip() _keylist = self._keylist if backward:", "'ignore': return elif option == 'parse': # check the value only, no need", "or a ColDefs object. 
header: header to be used to populate the non-required", "1 or bzero !=0): _scale = bscale _zero = bzero else: if option", "= len(self.columns.formats) _format = self.columns.formats # if data is not touched yet, use", "_gcount = '' return \"%-10s %-11s %5d %-12s %s%s\" % \\ (self.name, type,", "= self.header['GCOUNT'] _coldefs._dat_format = _fits2rec[_format] _coldefs._pnames = _pnames self.__dict__[attr] = _coldefs elif attr", "re_extver = re.compile(r\"EXTVER\\s*=\\s*(\\d+)\") mo = re_extname.search(self._raw) if mo: name = mo.group(1).rstrip() else: name", "data_output = _VLF([None]*len(input)) data_output._dtype = dtype if dtype == 'a': _nbytes = 1", "output = \"%-80s\" % output # longstring case (CONTINUE card) else: # try", "delete the keylist item def keys(self): \"\"\"Return a list of all keywords from", "the FITS file to be opened. mode: Open mode, 'readonly' (default), 'update', or", "to insert. The new card will be inserted before it. card: The Card", "_key in _list if _count == 1: indx = _list.index(_key) elif _count ==", "= rec.RecArray.__getitem__(self._parent, key) out._convert = [None]*self._nfields for i in range(self._nfields): # touch all", "so pad the data to the next FITS block # self._ffo.getfile().write(_padLength(self._size)*'\\0') self.writeComplete =", "ValueError, self._err_text def _extractKey(self): \"\"\"Returns the keyword name parsed from the card image.\"\"\"", "_extver if 'data' in dir(hdu): if hdu.data is not None: hdu._file.seek(hdu._datLoc) self.__file.writeHDUdata(hdu) if", "table), or groups data object depending on the type of the extension being", "- 1 self.spans[i] = _end - last_end last_end = _end self._Formats = self.formats", "super(CardList, self).__getslice__(start,end) result = CardList(_cards, self._keylist[start:end]) return result def __setitem__(self, key, value): \"\"\"Set", "default=1. If useblanks != 0, and if there are blank cards directly before", "such as the 'BITPIX', 'NAXIS', or 'END' cards. A corrupted HDU usually means", "name if isinstance(key, str): while 1: try: del self.ascard[key] self._mod = 1 except:", "else: tmp = b + list(self.data) return ColDefs(tmp) def __radd__(self, other): return self.__add__(other,", "\"pass\", fixable=1): \"\"\"Execute the verification with selected option.\"\"\" _text = err_text if not", "the equal sign. If there is no equal sign, return the string after", "other positional arguments) are assumed to be the extension specification(s). Header and extension", "keywords. naxis = self.header.get('NAXIS', 0) self.req_cards('PCOUNT', '== '+`naxis+3`, _isInt+\" and val >= 0\",", "variable length table.\"\"\" pass # TFORM regular expression _tformat_re = re.compile(r'(?P<repeat>^[0-9]*)(?P<dtype>[A-Za-z])(?P<option>[!-~]*)') # table", "\"\"\" Update header keywords to reflect recent changes of columns.\"\"\" _update = self.header.update", "see the I{PyFITS User's Manual} available from U{http://www.stsci.edu/resources/software_hardware/pyfits/Users_Manual1.pdf} Epydoc markup used for all", "string' else: self.__dict__['_cardimage'] = ' '*80 def __repr__(self): return self._cardimage def __getattr__(self, name):", "primary HDU. data: the data in the HDU, default=None. 
header: the header to", "j in range(_min, _max): if j != _min: num.lshift(output[...,i], 1, output[...,i]) num.add(output[...,i], input[...,j],", "is a convenience method to provide a user easier output interface if only", "def __init__(self, val, unit=\"Element\"): list.__init__(self, val) self.unit = unit def __str__(self, tab=0): \"\"\"Print", "option == 'left': tmp = list(self.data) + b else: tmp = b +", "\"\"\"Factory function to open a FITS file and return an HDUList object. name:", "you mean: \"Profits\"? - Google Search, when asked for \"PyFITS\" \"\"\" import re,", "boolean, # number, or complex value is found, otherwise it will return #", "to write it to a tmp file, # delete the original file, and", "file specified by a URL cannot be accessed\"\"\" def http_error_default(self, url, fp, errcode,", "'UInt8', 'Int16', 'Float32' etc.). If is None, use the current data type. option:", "card.\" % keywd if fixable: # use repr to accomodate both string and", "+ ' '*_trail # not using numarray.strings's num2char because the # result is", "value of mandatory keywords. # Do the first card here, instead of in", "definition of one column, e.g. ttype, tform, etc. and the array. Does not", "!= 0: raise IOError self.__file.flush() loc = self.__file.tell() self.__file.write(blocks) # flush, to make", "def __repr__(self): return self._cardimage def __getattr__(self, name): \"\"\" instanciate specified attribute object.\"\"\" if", "extension >>> update(file, dat, 'sci', 2) # update the 2nd SCI extension >>>", "to try: if cards[0].key == 'SIMPLE': if 'GROUPS' in cards._keylist and cards['GROUPS'].value ==", "will return a match if a FITS string, boolean, # number, or complex", "need to copy, and keep it unchanged else: self.header = header else: #", "for i in range(nbytes): _min = i*8 _max = min((i+1)*8, nx) for j", "num.array(raw_data, type=num.Float32) else: # floating point cases if self._ffile.memmap: self.data = raw_data.copy() #", "construct a list of cards of minimal header _list = CardList([ Card('XTENSION', '',", "try: return self.__dict__[attr] except KeyError: raise AttributeError(attr) def getfile(self): return self.__file def _readheader(self,", "option, _err) return _err # --------------------------Table related code---------------------------------- # lists of column/field definition", "* Card.length _bytes = _bytes + _padLength(_bytes) if _bytes != (hdu._datLoc-hdu._hdrLoc): self._resize =", "else: self.header.ascard.append(Card('EXTNAME', value, 'extension name')) self.__dict__[attr] = value def _verify(self, option='warn'): _err =", "(e.g. ColDefs has the attribute .names while Column has .name), Each attribute in", "the comment string in another. Also, it does not break at the blank", "def __setitem__(self, key, value): \"\"\"To make sure the new item has consistent data", "useblanks = new_card._cardimage != ' '*80 self.ascard.append(new_card, useblanks=useblanks, bottom=1) else: try: _last =", "NameError, \"Key '%s' does not exist.\" % key else: # multiple match raise", "the format `%s`.\" % format else: raise ValueError, \"Must specify format to construct", "loc == 0: offset = -1 except: offset = len(input) # check for", "one group parameter have the same name, the # value must be a", "return len(self._cardimage) / Card.length def _verify(self, option='warn'): \"\"\"Card class verification method.\"\"\" _err =", "default=None. 
after: name of the keyword, or index of the Card after which", "shape=dims) else: raw_data = num.fromfile(self._file, type=code, shape=dims) raw_data._byteorder = 'big' if (self._bzero !=", "all histories as a list of string texts.\"\"\" output = [] for _card", "\"\"\" def __init__(self, data=None, header=None): self._file, self._offset, self._datLoc = None, None, None self.header", "regular card and use its methods. _card = Card().fromstring(self._cardimage[i*80:(i+1)*80]) if i > 0", "_err # --------------------------Table related code---------------------------------- # lists of column/field definition common names and", "self.req_cards('PCOUNT', _pos, _isInt, 0, option, _err) self.req_cards('GROUPS', _pos, 'val == True', True, option,", "object @param data: the new data used for appending @type header: L{Header} object", "the file. If the file does not already exist, it will be created", "descriptor location will have a zero offset for all columns after this call.", "if tbtype == 'TableHDU': # string no need to convert, if isinstance(tmp._arrays[i], chararray.CharArray):", "an exception, e.g., >>> getdata('in.fits', ext=('sci',1), extname='err', extver=2) @return: an array, record array", "use an array, even if it is only ONE u1 (i.e. use tuple", "table format spec to record format spec. \"\"\" ascii2rec = {'A':'a', 'I':'i4', 'F':'f4',", "# extension header. # self.header.update('XTENSION','IMAGE','Image extension', after='SIMPLE') del self.header['SIMPLE'] if not self.header.has_key('PCOUNT'): dim", "and its ._parent def __getitem__(self, key): tmp = rec.RecArray.__getitem__(self, key) if isinstance(key, slice):", "None if equal sign is not present, or it is a commentary card.", "\"Illegal format %s\" % fmt return output_format def _convert_ASCII_format(input_format): \"\"\"Convert ASCII table format", "to overflow, an IOError exception is raised and the data is not written.", "!= None or after != None: _card = Card(key, value, comment) self.ascard._pos_insert(_card, before=before,", "def __getattr__(self, attr): \"\"\"Get the _mm attribute.\"\"\" if attr == '_mm': self.__dict__[attr] =", "Card().fromstring(self._cardimage[i*80:(i+1)*80]) if i > 0 and _card.key != 'CONTINUE': raise ValueError, 'Long card", "value = value.upper() if self.header.has_key('EXTNAME'): self.header['EXTNAME'] = value else: self.header.ascard.append(Card('EXTNAME', value, 'extension name'))", "to be updated data: the new data used for updating The rest of", "False. 
\"\"\" if header is None: if 'header' in keys: header = keys['header']", "in range(len(axes)+1, old_naxis+1): try: del self.header.ascard['NAXIS'+`j`] except KeyError: pass if isinstance(self.data, GroupData): self.header.update('GROUPS',", "Delayed(input, col) # now build the columns tmp = [Column(**attrs) for attrs in", "change # the content of header without being able to pass it to", "definition keywords for i in range(len(_cols)): for cname in _commonNames: val = getattr(_cols,", "self._dimShape()[:-1] dat_format = `int(num.array(data_shape).sum())` + _format _bscale = self.header.get('BSCALE', 1) _bzero = self.header.get('BZERO',", "tuple always) output_format = _FormatX(`(nbytes,)`+'u1') output_format._nx = repeat elif dtype == 'P': output_format", "def _setcomment(self, val): \"\"\"Set the comment attribute.\"\"\" if isinstance(val,str): self._checkText(val) else: if val", "> Card.length: self.__class__ = _Card_with_continue # remove the key/value/comment attributes, some of them", "0: coldata.tofile(self.__file) _shift = self.__file.tell() - _where hdu.data._heapsize = _shift - hdu.data._gap _size", "# does not support CONTINUE for HIERARCH if len(keyStr + eqStr + valStr)", "['', None, 1]: array = array.copy() if bzero not in ['', None, 0]:", "else: dummy = map(lambda x, y: x-y, self.starts[1:], [1]+self.starts[1:-1]) dummy.append(self._width-self.starts[-1]+1) attr = map(lambda", "indx def _normalize_slice(input, naxis): \"\"\"Set the slice's start/stop in the regular range.\"\"\" def", "self.ascard[_index] = Card(newkey, _value, _comment) # self.ascard[_index].__dict__['key']=newkey # self.ascard[_index].ascardimage() # self.ascard._keylist[_index] = newkey", "exist, a new card will be created and it will be placed before", "== 'TableHDU': last_end = 0 attr = [0] * len(self) for i in", "_start = input.start if _start is None: _start = 0 elif isinstance(_start, (int,", "# boolean needs to be scaled too if recfmt == _booltype: _out =", "of bytes of data required to fill the stream per the header provided", "in a commentary card must be a string' else: self.__dict__['_cardimage'] = ' '*80", "__repr__(self): text = '' for cname in _commonNames: value = getattr(self, cname) if", "ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE", "attributes except format can be optional. name: column name, corresponding to TTYPE keyword", "in _unique: _unique[_name].append(i) else: _unique[_name] = [i] self.__dict__[attr] = _unique try: return self.__dict__[attr]", "header provided into an image # extension header. # self.header.update('XTENSION','IMAGE','Image extension', after='SIMPLE') del", "self.__file.tell() - _where hdu.data._heapsize = _shift - hdu.data._gap _size = _size + _shift", "tmp out._parent = rec.RecArray.__getitem__(self._parent, key) out._convert = [None]*self._nfields for i in range(self._nfields): #", "be done after the \"regular\" data is written (above) _where = self.__file.tell() if", "the name list. The key can be an integer or string. If integer,", "len(x) > (_loc[indx+1]-_loc[indx]): raise ValueError, \"number `%s` does not fit into the output's", "if _key == 'HIERARCH': _limit = Card.length else: _limit = 10 try: eqLoc", "self.__dict__[name] = longstring.rstrip() def _breakup_strings(self): \"\"\"Break up long string value/comment into CONTINUE cards.", "before the END card self._blanks = 0 self.count_blanks() def __getitem__(self, key): \"\"\"Get a", "FITS_rec, _FormatP, _FormatX, _VLF \"\"\" \"\"\" Do you mean: \"Profits\"? 
- Google Search,", "for mode `%s`.\" % mode else: if os.path.splitext(self.name)[1] == '.gz': # Handle gzip", "def update_header(self): \"\"\"Update the header keywords to agree with the data.\"\"\" old_naxis =", "\"\"\" # Only if the card image already exist (to avoid infinite loop),", "data is not touched yet, use header info. else: _shape = () for", "\"\"\"Format the floating number to make sure it gets the decimal point.\"\"\" valueStr", "this in case card-with-continue's value is shortened if not isinstance(self, _Hierarch): self.__class__ =", "== 0: offset = -1 except: offset = len(input) # check for one", "The FITS standard # appears vague on this issue and only states that", "object.\"\"\" def __init__(self, input): \"\"\" input: a sequence of variable-sized elements. \"\"\" objects.ObjectArray.__init__(self,", "conversion for both ASCII and binary tables if _number and (_scale or _zero):", "key in _fits2rec.keys(): _rec2fits[_fits2rec[key]]=key class _FormatX(str): \"\"\"For X format in binary tables.\"\"\" pass", "the memmap object, it is designed to use an independent # attribute of", "<= _nfields and col > 0: cname = _commonNames[_keyNames.index(keyword)] dict[col-1][cname] = _card.value #", "the data from input, undefined cells will still be filled with zeros/blanks. tbtype:", "the given data to the stream. :Parameters: data : NumArray Data to stream", "= dummy return self._convert[indx] if _str: return self._parent.field(indx) # ASCII table, convert strings", "dummy, _nx) self._convert[indx] = dummy return self._convert[indx] (_str, _bool, _number, _scale, _zero, bscale,", "= True self.req_cards(firstkey, '== 0', '', firstval, option, _err) self.req_cards('BITPIX', '== 1', _isInt+\"", "no guarantee # the elements in the object array are consistent. if not", "self.__file.mode == 'update': if not self._resize: # determine if any of the HDU", "be created (BinTableHDU or TableHDU) \"\"\" # construct a table HDU hdu =", "%-12s %s%s\" % \\ (self.name, type, len(self.header.ascard), _shape, _format, _gcount) def scale(self, type=None,", "# for random group image, NAXIS1 should be 0, so we skip NAXIS1.", "None: dim = arr._shape[0] else: dim = 0 if dim > nrows: nrows", "dummy[i] if len(x) > (_loc[indx+1]-_loc[indx]): raise ValueError, \"number `%s` does not fit into", "value (8, 16, 32, 64, -32, or -64) pardata: parameter data, as a", "'Fixed by inserting one as 0th HDU.' fix = \"self.insert(0, PrimaryHDU())\" _text =", "self.hdu.header['NAXIS'] if naxis < len(key): raise IndexError, 'too many indices.' elif naxis >", "bytes) of the data portion of the HDU. :Parameters: None :Returns: size :", "HDU needs to be written to a file. name: output FITS file name", "return self.ascard.__str__() def ascardlist(self): \"\"\"Returns a CardList.\"\"\" return self.ascard def items(self): \"\"\"Return a", "1) if self.__file.tell() > self._size: print 'Warning: File size is smaller than specified", "dir(self): if self.data is None: _shape, _format = (), '' else: # the", "= '%20s' % self._valuestring elif isinstance(self.value, complex): if self._valueModified: _tmp = '(' +", "BITPIX, etc. so the rest of the header can be used to reconstruct", "than strlen, break in the middle if offset <= xoffset: offset = xoffset", "list.append(tmp) if len(input) == offset: break xoffset = offset return list class Header:", "list of cards into a printable string.\"\"\" output = '' for card in", "the index in the list. 
If string, (a) Field (column) names are case", "must be a sequence with %d arrays/numbers.\" % len(indx) def _getitem(self, offset): row", "variables _blockLen = 2880 # the FITS block size _python_mode = {'readonly':'rb', 'copyonwrite':'rb',", "= fits_fmt, bscale = bscale, bzero = bzero)) self._coldefs = ColDefs(_cols) self.parnames =", "header=None, name=None): \"\"\"data: data of the table header: header to be used for", "here, but the beginning locations are computed. \"\"\" _cardList = [] _keyList =", "not already exist, it will be created and if the header represents a", "= '%20s' % _floatFormat(self.value) else: valStr = '%20s' % self._valuestring elif isinstance(self.value, complex):", "specified if test: val = self.header[keywd] if not eval(test): err_text = \"'%s' card", "tuple): key = (key,) naxis = self.hdu.header['NAXIS'] if naxis < len(key): raise IndexError,", "and BZERO values. \"\"\" if self.data is None: return # Determine the destination", "`after' if both specified. They can be either a keyword name or index.", "# Verify location and value of mandatory keywords. # Do the first card", "P format _data._heapoffset = hdu._theap + hdu._datLoc _data._file = hdu._file _tbsize = hdu.header['NAXIS1']*hdu.header['NAXIS2']", "keyword already exists, it's value/comment will be updated. If it does not exist,", "_max = _keyList.count('CONTINUE') _start = 0 for i in range(_max): _where = _keyList[_start:].index('CONTINUE')", "def __init__(self, hdu=None, field=None): self.hdu = hdu self.field = field # translation table", "1, 'number of groups'), Card('TFIELDS', 0, 'number of table fields') ]) if header", "self.ascard.index_of(oldkey) _comment = self.ascard[_index].comment _value = self.ascard[_index].value self.ascard[_index] = Card(newkey, _value, _comment) #", "cards if isinstance(self, _Hierarch): valStr = valStr.strip() # comment string if keyStr.strip() in", "all comments as a list of string texts.\"\"\" output = [] for _card", "__getitem__(self, key): \"\"\"Get a Card by indexing or by the keyword name.\"\"\" _key", "attribute.\"\"\" if attr == 'name' and value: if not isinstance(value, str): raise TypeError,", "deal with CONTINUE cards in a later stage as CONTINUE cards may span", "not break at the blank space between words. So it may not look", "even number of # quotes to be precise. # # Note that a", "option, errlist): \"\"\"Check the existence, location, and value of a required Card.\"\"\" \"\"\"If", "+ _padLength(_bytes) if _bytes != (hdu._datLoc-hdu._hdrLoc): self._resize = 1 if verbose: print \"One", "elements after the first WholeLine must be WholeLine or # OnePointAxis if isinstance(indx,", "it may be possible to decipher where the last block of the Header", "_fits2rec[dtype]+`repeat` # to accomodate both the ASCII table and binary table column #", "TFORM value into repeat, data type, and option.\"\"\" try: (repeat, dtype, option) =", "self._coldefs._recformats[indx]._dtype is _booltype: for i in range(len(self._parent)): dummy[i] = num.equal(dummy[i], ord('T')) self._convert[indx] =", "del_col(self, col_name): \"\"\"Delete (the definition of) one Column.\"\"\" indx = _get_index(self.names, col_name) for", "\"\"\" A class that provides the capability to stream data to a FITS", "if any of the HDU is resized for hdu in self: # Header:", "# 0.6.5.5 def size(self): \"\"\"Returns the size (in bytes) of the HDU's data", "output file, as the data will be scaled and is therefore not very", "force=0): \"\"\"Rename a card's keyword in the header. 
oldkey: old keyword, can be", "if (_option != \"silentfix\") and x: print 'Output verification result:' print x if", "starting byte location of data block in file (None) \"\"\" # mappings between", "resized.\" break # if the HDUList is resized, need to write it to", "\"\"\" eqLoc = self._locateEq() if eqLoc is None: eqLoc = 7 return self._cardimage[eqLoc+1:]", "hdu.header._mod = 0 hdu.header.ascard._mod = 0 if singleThread: if keyboardInterruptSent: raise KeyboardInterrupt signal.signal(signal.SIGINT,signal.getsignal(signal.SIGINT))", "IOError, 'Block length is not %d: %d' % (_blockLen, len(block)) elif (blocks[:8] not", "block = block + repr(card) return block def __str__(self): \"\"\"Format a list of", "indx] return ColDefs(tmp) def _setup(self): \"\"\" Initialize all attributes to be a list", "object associated with the data to be written to the file. :Returns: None", "_ImageBaseHDU(_ValidHDU): \"\"\"FITS image HDU base class.\"\"\" \"\"\"Attributes: header: image header data: image data", "HDU. Must seek to the correct location before calling this method. \"\"\" if", "HDU from the HDUList, indexed by number or name.\"\"\" key = self.index_of(key) super(HDUList,", "= _shift - hdu.data._gap _size = _size + _shift # pad the FITS", "_err = _TableBaseHDU._verify(self, option=option) self.req_cards('PCOUNT', None, 'val == 0', 0, option, _err) tfields", "we # must change the Primary header provided into an image # extension", "else: _nrows = len(self.data) _ncols = len(self.columns.formats) _format = self.columns.formats # if data", "if _scale: self.header.update('PSCAL'+`i+1`, self.data._coldefs.bscales[i]) if _zero: self.header.update('PZERO'+`i+1`, self.data._coldefs.bzeros[i]) def __getattr__(self, attr): \"\"\"Get the", "a TableHDU containing ASCII data. \"\"\" def __init__(self, data=None, header=None): self._file, self._offset, self._datLoc", "== '_arrays': attr = [col.array for col in self.data] elif name == '_recformats':", "data has incorrect type\" # set extension name if not name and self.header.has_key('EXTNAME'):", "IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY", "_last = self.ascard.index_of(key, backward=1) self.ascard.insert(_last+1, new_card) except: self.ascard.append(new_card, bottom=1) self._mod = 1 def", "in ['fix', 'silentfix'] and x.find('Unfixable') != -1: raise VerifyError, '\\n'+x if (_option !=", "string) or the index (an integer). backward: search the index from the END,", "_isInt+\" and val >= 0 and val <= 999\", 0, option, _err) naxis", "__format_RE = re.compile( r'(?P<code>[ADEFI])(?P<width>\\d+)(?:\\.(?P<prec>\\d+))?') def __init__(self, data=None, header=None, name=None): \"\"\"data: data of the", "base class.\"\"\" \"\"\"Attributes: header: image header data: image data _file: file associated with", "equal sign position if self.key not in Card._commentaryKeys and self._cardimage.find('=') != 8: if", "it is # a null string elif isinstance(self.value, str): if self.value == '':", "to enable file cacheing class _File: \"\"\"A file I/O class\"\"\" def __init__(self, name,", "if _str: return self._parent.field(indx) # ASCII table, convert strings to numbers if self._coldefs._tbtype", "at different class levels. 
\"\"\" def __init__(self, val, unit=\"Element\"): list.__init__(self, val) self.unit =", "table extension' class StreamingHDU: \"\"\" A class that provides the capability to stream", "memory dims = self.data.getshape() self.data.setshape(self.data.nelements()) min = num.minimum.reduce(self.data) max = num.maximum.reduce(self.data) self.data.setshape(dims) if", "the HDUList, indexed by number or name.\"\"\" _key = self.index_of(key) if isinstance(hdu, (slice,", "self.parnames[i] if _name in _unique: _unique[_name].append(i) else: _unique[_name] = [i] self.__dict__[attr] = _unique", "odd number of single quotes, # instead of issuing an error. The FITS", "shape=1) else: dummy[i] = num.array(self._file, type=self._coldefs._recformats[indx]._dtype, shape=self._parent.field(indx)[i,0]) dummy[i]._byteorder = 'big' # scale by", "If the file does not exist and the provided header is not a", "\"\"\"Set the group parameter value.\"\"\" self.array[self.row:self.row+1].setpar(fieldName, value) class _TableBaseHDU(_ExtensionHDU): \"\"\"FITS table extension base", "exists if os.path.exists(name): if clobber: print \"Overwrite existing file '%s'.\" % name os.remove(name)", "in this HDU.' if _data is None: raise IndexError, 'No data in this", "newkey, force=0): \"\"\"Rename a card's keyword in the header. oldkey: old keyword, can", "to convert to a numarray first array = num.array(array) except: try: # then", "_FormatP): hdu.data._convert[i] = _makep(tmp._arrays[i][:n], hdu.data._parent.field(i)[:n], tmp._recformats[i]._dtype) else: if tbtype == 'TableHDU': # string", "of the field. \"\"\" if self._coldefs._tbtype == 'BinTableHDU': _str = 'a' in self._coldefs.formats[indx]", "null value, corresponding to TNULL keyword bscale: bscale value, corresponding to TSCAL keyword", "after the last non-blank card. \"\"\" if isinstance (card, Card): nc = len(self)", "self.key else: self.__dict__['_err_text'] = 'Card image is not FITS standard (unparsable value string).'", "order, in case of required cards in wrong order. if isinstance(self, _ExtensionHDU): firstkey", "= _key.group('label') except: continue # skip if there is no match if (keyword", "indx.remove(x) tmp = [self[i] for i in indx] return ColDefs(tmp) def _setup(self): \"\"\"", "or not found.' % key self._resize = 1 def __delitem__(self, key): \"\"\"Delete an", "the data.\"\"\" old_naxis = self.header.get('NAXIS', 0) if isinstance(self.data, GroupData): self.header['BITPIX'] = _ImageBaseHDU.ImgCode[self.data.data.type()] axes", "output + '%-80s' % (headstr + valstr) # do the comment string if", "the index of the key in the name list. The key can be", "% self._valuestring elif isinstance(self.value, Undefined): valStr = '' # conserve space for HIERARCH", "header and data are copied.\"\"\" # touch the data, so it's defined (in", "= {} for i in range(len(self.parnames)): _name = self.parnames[i] if _name in _unique:", "def __str__(self, tab=0): \"\"\"Print out nested structure with corresponding indentations. A tricky use", "blocks until END card is reached while 1: # find the END card", "if more than one group parameter have the same name else: result =", "cards with its parent card if nc > 0: _longstring = _cardList[_where-1]._cardimage for", "card. value: Text to be added. 
before: [same as in update()] after: [same", "all top level messages for item in self: if not isinstance(item, _ErrList): result", "r'(?P<valu_field> *' r'(?P<valu>' # The <strg> regex is not correct for all cases,", "use this space first, instead of appending after these blank cards, so the", "slice): out = tmp out._parent = rec.RecArray.__getitem__(self._parent, key) out._convert = [None]*self._nfields for i", "commentStr = '' # equal sign string eqStr = '= ' if keyStr.strip()", "\"\"\"Change an attribute (in the commonName list) of a Column.\"\"\" indx = _get_index(self.names,", "self.header.has_key('EXTNAME'): self.header['EXTNAME'] = value else: self.header.ascard.append(Card('EXTNAME', value, 'extension name')) self.__dict__[attr] = value def", "if att not in _commonNames: print \"'%s' is not an attribute of the", "['SIMPLE ', 'XTENSION']): raise IOError, 'Block does not begin with SIMPLE or XTENSION'", "there is no card (or blank card), append at the end. \"\"\" new_card", "= _keyList[_start:].index('CONTINUE') + _start for nc in range(1, _max+1): if _where+nc >= len(_keyList):", "has been filled will raise an IOError exception. If the dtype of the", "_getext(filename, mode, *ext1, **ext2): \"\"\"Open the input file, return the HDUList and the", "method.\"\"\" _err = _ExtensionHDU._verify(self, option=option) self.req_cards('PCOUNT', None, _isInt+\" and val == 0\", 0,", "if self._blanks > 0: for i in range(min(self._blanks, how_many)): del self[-1] # it", "after the specified location. If no \"before\" or \"after\" is specified, it will", "contiguous CONTINUE cards with its parent card if nc > 0: _longstring =", "= False def New_SIGINT(*args): print \"KeyboardInterrupt ignored until flush is complete!\" keyboardInterruptSent =", "= '' if valu is not None: _comm = valu.group('comm') if isinstance(_comm, str):", "value string in one block and the comment string in another. Also, it", "for numbers with leading 0s. real = Card._number_NFSC_RE.match(valu.group('real')) _rdigt = real.group('digt').translate(_fix_table2, ' ')", "_strip(self): \"\"\"Strip cards specific to a certain kind of header. Strip cards like", "self._heapoffset self._file.seek(_offset) if self._coldefs._recformats[indx]._dtype is 'a': dummy[i] = chararray.array(self._file, itemsize=self._parent.field(indx)[i,0], shape=1) else: dummy[i]", "to the end of the file. \"\"\" self.header = header.copy() # # Check", "_end = self.starts[i] + _width - 1 self.spans[i] = _end - last_end last_end", "in memory else: self.data = raw_data if self._bscale != 1: num.multiply(self.data, self._bscale, self.data)", "if len(self) > 1: self.update_extend() hduList = open(name, mode=\"append\") for hdu in self:", "dummy = num.around(dummy) self._parent.field(indx)[:] = dummy del dummy # ASCII table does not", "> 0: # scale integers to Float32 self.data = num.array(raw_data, type=num.Float32) else: #", "# close the memmap object, it is designed to use an independent #", "HDUList([self]) hdulist.writeto(name, output_verify, clobber=clobber) def _verify(self, option='warn'): _err = _ErrList([], unit='Card') isValid =", "strfmt, strlen = '', 0 for j in range(self.header['TFIELDS']): bcol = self.header['TBCOL'+`j+1`] valu", "its location. It returns None if equal sign is not present, or it", "_convert is the scaled (physical) array. self._parent = input self._convert = [None]*self._nfields self.names", "comment: comment, default=''. 
\"\"\" if key != '' or value != '' or", "= self.data.getshape() self.data.setshape(self.data.nelements()) min = num.minimum.reduce(self.data) max = num.maximum.reduce(self.data) self.data.setshape(dims) if `_type` ==", "tempfile, exceptions import operator import __builtin__ import urllib import tempfile import gzip import", "if there are blank cards in front of END. bottom: If =0 (default)", "= GroupsHDU self.name = name if self.header['NAXIS'] <= 0: self.header['NAXIS'] = 1 self.header.update('NAXIS1',", "there is no equal sign, return the string after column 8. \"\"\" eqLoc", "-~]*)?' r')?$') _value_NFSC_RE = re.compile( r'(?P<valu_field> *' r'(?P<valu>' r'\\'(?P<strg>([ -~]+?|\\'\\'|)) *?\\'(?=$|/| )|' r'(?P<bool>[FT])|'", "# a field may not be the column right after the last field", "try: _indx = _keylist.index(_key) if backward: _indx = len(_keylist) - _indx - 1", "loc = self.index_of(before) self.insert(loc, card, useblanks=useblanks) elif after != None: loc = self.index_of(after)", "(_scale, _zero) = self._get_scale_factors(npars)[3:5] if _scale or _zero: self._convert[npars] = input else: self._parent.field(npars)[:]", "verification option, default = 'exception'. clobber: Overwrite the output file if exists, default", "also contain the binary data(*). (*) In future it may be possible to", "non-commentary card. If =1, the card will be appended after the last non-blank", "= j nfound += 1 if (nfound == 0): raise KeyError, 'extension %s", "header) tuple. \"\"\" if 'header' in extkeys: _gethdr = extkeys['header'] del extkeys['header'] else:", "if issubclass(self._hdutype, _TableBaseHDU): del self['TFIELDS'] for name in ['TFORM', 'TSCAL', 'TZERO', 'TNULL', 'TTYPE',", "= 'i1' _fits2rec = {'L':_booltype, 'B':'u1', 'I':'i2', 'E':'f4', 'D':'f8', 'J':'i4', 'A':'a', 'C':'c8', 'M':'c16',", "naxis == 0: datasize = 0 else: dims = [0]*naxis for i in", "can be an integer or string. If integer, it is the index in", "= urllib.urlretrieve(name) else: self.name = name self.mode = mode self.memmap = memmap if", "\"\"\"Exatrct the keyword value or comment from the card image.\"\"\" # for commentary", "complex, bool, Undefined)): if isinstance(val, str): self._checkText(val) self.__dict__['_valueModified'] = 1 else: raise ValueError,", "ValueError: # try to match case-insentively, _key = key.lower().rstrip() _list = map(lambda x:", "sign, and exponents _digits_FSC = r'(\\.\\d+|\\d+(\\.\\d*)?)([DE][+-]?\\d+)?' _digits_NFSC = r'(\\.\\d+|\\d+(\\.\\d*)?) *([deDE] *[+-]? *\\d+)?' _numr_FSC", "else: raise ValueError, 'Illegal value %s' % str(val) self.__dict__['value'] = val def _setcomment(self,", "mask array indexing work properly.\"\"\" hdu = new_table(self._coldefs, nrows=shape[0]) return hdu.data def __repr__(self):", "header is not None: # Make a \"copy\" (not just a view) of", "== True', True, option, _err) return _err # --------------------------Table related code---------------------------------- # lists", "docstrings in this module. @group Header-related Classes: Card, CardList, _Card_with_continue, Header, _Hierarch @group", "vague on this issue and only states that a # string should not", "confuse the indexing. 
_list = [] for i in range(len(self.header.ascard)-1,-1,-1): _card = self.header.ascard[i]", "keyword value (to be used for updating) comment: keyword comment (to be used", "re_groups = re.compile(r'GROUPS =\\s*(T)') simple = re_simple.search(block[:80]) mo = re_bitpix.search(block) if mo is", "= _VLF([None]*len(input)) data_output._dtype = dtype if dtype == 'a': _nbytes = 1 else:", "map(lambda x, y: x-y, self.starts[1:], [1]+self.starts[1:-1]) dummy.append(self._width-self.starts[-1]+1) attr = map(lambda y: 'a'+`y`, dummy)", "of mandatory keywords. # Do the first card here, instead of in the", "_parent is the original (storage) array, # _convert is the scaled (physical) array.", "self.data = raw_data if self._bscale != 1: num.multiply(self.data, self._bscale, self.data) if self._bzero !=", "else: npars = len(pardata) if parbscales is None: parbscales = [None]*npars if parbzeros", "getdata('in.fits', ext=('sci',1), extname='err', extver=2) @return: an array, record array (i.e. table), or groups", "`repeat` output_format = _repeat+_rec2fits[dtype+option] else: raise ValueError, \"Illegal format %s\" % fmt return", "print \"Overwrite existing file '%s'.\" % name os.remove(name) else: raise IOError, \"File '%s'", "+ _idigt)*1j else: _val = UNDEFINED self.__dict__['value'] = _val if '_valuestring' not in", "# Note that a non-greedy match is done for a string, # since", "**keys): \"\"\"Create a new FITS file using the supplied data/header. @type filename: string", "else: raise AttributeError, name # When an attribute (value or comment) is changed,", "isinstance(self.value, float): if self._valueModified: valStr = '%20s' % _floatFormat(self.value) else: valStr = '%20s'", "'value': self._setvalue(val) elif name == 'comment': self._setcomment(val) else: raise AttributeError, name # When", "try: # try to convert to a numarray first array = num.array(array) except:", "0) self.req_cards('PCOUNT', '== '+`naxis+3`, _isInt+\" and val >= 0\", 0, option, _err) self.req_cards('GCOUNT',", "self._cardimage[8:].rstrip() self.__dict__['comment'] = '' return valu = self._check(option='parse') if name == 'value': if", "occurrence of cards of the same name (except blank card). If there is", "table type to be created (BinTableHDU or TableHDU) \"\"\" # construct a table", "isinstance(value, num.NumArray) and value.type() == self._dtype: pass elif isinstance(value, chararray.CharArray) and value.itemsize() ==", "def _ascardimage(self): \"\"\"Generate a (new) card image from the attributes: key, value, and", "self.__file._mm if self.__file.mode in ['append', 'update']: self.flush(output_verify=output_verify, verbose=verbose) self.__file.close() # close the memmap", "data part. 
This is a layer over the RecArray, so we can deal", "writing to the output file, as the data will be scaled and is", "\"Key '%s' does not exist.\" % key else: # multiple match raise NameError,", "dummy = [] else: dummy = map(lambda x, y: x-y, self.starts[1:], [1]+self.starts[1:-1]) dummy.append(self._width-self.starts[-1]+1)", "self.header.update('EXTEND', True, after='NAXIS'+dim) class ImageHDU(_ExtensionHDU, _ImageBaseHDU): \"\"\"FITS image extension HDU class.\"\"\" def __init__(self,", "_ExtensionHDU, GroupsHDU, ImageHDU, _ImageBaseHDU, PrimaryHDU, TableHDU, _TableBaseHDU, _TempHDU, _ValidHDU @group Table-related Classes: ColDefs,", "comments as a list of string texts.\"\"\" output = [] for _card in", "-= 1 def change_attrib(self, col_name, attrib, new_value): \"\"\"Change an attribute (in the commonName", "EXTNAME value \"\"\" _TableBaseHDU.__init__(self, data=data, header=header, name=name) self._xtn = 'BINTABLE' hdr = self.header", "= '' if repeat != 1: _repeat = `repeat` output_format = _repeat+_rec2fits[dtype+option] else:", "then it will be split in the middle of the word. \"\"\" list", "a write of the HDUList back to the file (for append and update", "elif self.__file.mode == 'update': if not self._resize: # determine if any of the", "if it has the proper value. \"\"\" hdr = self[0].header if hdr.has_key('extend'): if", "else: print 'card is too long, comment is truncated.' output = output[:Card.length] self.__dict__['_cardimage']", "data must be contiguous.' for j in range(i+1,naxis): _naxis = self.hdu.header['NAXIS'+`naxis-j`] indx =", "field elif tbtype == 'TableHDU': (_format, _width) = _convert_ASCII_format(self.formats[i]) if self.starts[i] is '':", "comma(s). \"\"\" if attrib.strip().lower() in ['all', '']: list = _commonNames else: list =", "VLdata = self.data.field(i) VLdata._max = max(map(len, VLdata)) val = 'P' + _convert_format(val._dtype, reverse=1)", "\"\"\" if self._coldefs._tbtype == 'BinTableHDU': _str = 'a' in self._coldefs.formats[indx] _bool = self._coldefs._recformats[indx][-2:]", "7 return self._cardimage[eqLoc+1:] def _check(self, option='ignore'): \"\"\"Verify the card image with the specified", "_unique = {} for i in range(len(self.parnames)): _name = self.parnames[i] if _name in", "If there is no equal sign, return the string after column 8. \"\"\"", "ord('T')) else: return dummy return self._convert[indx] def _scale_back(self): \"\"\"Update the parent array, using", "writeHDUdata(self, hdu): \"\"\"Write FITS HDU data part.\"\"\" self.__file.flush() loc = self.__file.tell() _size =", "data size. File may have been truncated.' hdu._ffile = self return hdu def", "it (and other positional arguments) are assumed to be the extension specification(s). Header", "else : _after = 'naxis'+`j` self.header.update('naxis'+`j+1`, axes[j], after = _after) # delete extra", "mapping from TFORM data type to numarray data type (code) _booltype = 'i1'", "disclaimer in the documentation and/or other materials provided with the distribution. 3. The", "header appended to the end of the file. 
If the file does not", "__init__(self, input, row=0): rec.Record.__init__(self, input, row) def par(self, fieldName): \"\"\"Get the group parameter", "if reverse == 0: if dtype in _fits2rec.keys(): # FITS format if dtype", "in ['key', 'value', 'comment', '_valueModified']: if self.__dict__.has_key(name): delattr(self, name) return self def _ncards(self):", "shape if isinstance(self, GroupsHDU): _shape = list(self.data.data.getshape())[1:] _format = `self.data._parent.field(0).type()` else: _shape =", "take one HDU, as well as a list of HDU's as input if", "HDU tbtype: which table HDU, 'BinTableHDU' (default) or 'TableHDU' (text table). \"\"\" ascii_fmt", "= 0 naxis = self.header.get('NAXIS', 0) # for random group image, NAXIS1 should", "_append(Card(keyword, val)) def copy(self): \"\"\"Make a copy of the table HDU, both header", "copy the data from input, undefined cells will still be filled with zeros/blanks.", "This will not be the first extension in the file so we #", "more data after the stream has been filled will raise an IOError exception.", "not begin with SIMPLE or XTENSION' for i in range(0, len(_blockLen), Card.length): _card", "hdu.data._byteorder = 'big' hdu.data._parent._byteorder = 'big' output = hdu.data else: output = hdu.data", "append HDU's which are \"new\" if hdu._new: self.__file.writeHDU(hdu) if (verbose): print \"append HDU\",", "%s' % ext2 return hdulist, ext def getheader(filename, *ext, **extkeys): \"\"\"Get the header", "nx) for j in range(_min, _max): num.bitwise_and(input[...,i], pow2[j-i*8], output[...,j]) def _wrapx(input, output, nx):", "card will have the fix_value as its value when created. Also check the", "80-char \"physical\" cards. _max = _keyList.count('CONTINUE') _start = 0 for i in range(_max):", "def __repr__(self): \"\"\"Format a list of cards into a string.\"\"\" block = ''", "= \"%.16G\" % value if \".\" not in valueStr and \"E\" not in", "_zero: self.header.update('PZERO'+`i+1`, self.data._coldefs.bzeros[i]) def __getattr__(self, attr): \"\"\"Get the data attribute.\"\"\" if attr ==", "size = 0 naxis = self.header.get('NAXIS', 0) if naxis > 0: simple =", "name def setupHDU(self): \"\"\"Read one FITS HDU, data portions are not actually read", "i > 0 and (not isinstance(self[i], _ExtensionHDU)): err_text = \"HDUList's element %s is", "isinstance(hdu._ffile, _File): _data._byteorder = 'big' # pass datLoc, for P format _data._heapoffset =", "attribute of mmobject so if the HDUList object is created from files #", "return _ErrList([]) class _Card_with_continue(Card): \"\"\"Cards having more than one 80-char \"physical\" cards, the", "% name self.__dict__[name] = attr return self.__dict__[name] \"\"\" # make sure to consider", "len(blocks)%_blockLen != 0: raise IOError self.__file.flush() loc = self.__file.tell() self.__file.write(blocks) # flush, to", "indx = _list.index(_key) elif _count == 0: raise NameError, \"Key '%s' does not", "n # Now, get the data (does not include bscale/bzero for now XXX)", "len(self) for i in range(len(self)): (_format, _width) = _convert_ASCII_format(self.formats[i]) if self.starts[i] is '':", "# ensure bscale/bzero are numbers if not _scale: bscale = 1 if not", "def _locateEq(self): \"\"\"Locate the equal sign in the card image before column 10", "name == 'spans': # make sure to consider the case that the starting", "in dir(self): if self.data is None: _shape, _format = (), '' _nrows =", "_format = `self.data.type()` _shape.reverse() _shape = tuple(_shape) _format = _format[_format.rfind('.')+1:] # if data", "hduList = 
HDUList(file=ffo) # read all HDU's while 1: try: hduList.append(ffo._readHDU()) except EOFError:", "useblanks: self._use_blanks(card._ncards()) self.count_blanks() self._mod = 1 else: raise SyntaxError, \"%s is not a", "_cardList[_where+nc]._cardimage[:10].upper() != 'CONTINUE ': break # combine contiguous CONTINUE cards with its parent", "the HDU, both header and data are copied.\"\"\" if self.data is not None:", "= 'big' return raw_data class _ImageBaseHDU(_ValidHDU): \"\"\"FITS image HDU base class.\"\"\" \"\"\"Attributes: header:", "str(dim) self.header.update('PCOUNT', 0, 'number of parameters', after='NAXIS'+dim) if not self.header.has_key('GCOUNT'): self.header.update('GCOUNT', 1, 'number", "indexed by number or name.\"\"\" _key = self.index_of(key) if isinstance(hdu, (slice, list)): if", "code---------------------------------- # lists of column/field definition common names and keyword names, make #", "= self['NAXIS'] if issubclass(self._hdutype, _TableBaseHDU): _tfields = self['TFIELDS'] del self['NAXIS'] for i in", "L{Header} object @return: header \"\"\" hdulist, _ext = _getext(filename, 'readonly', *ext, **extkeys) hdu", "card.key) # update the keylist self.count_blanks() if useblanks: self._use_blanks(card._ncards()) self.count_blanks() self._mod = 1", "!= '' or value != '' or comment != '': self._setkey(key) self._setvalue(value) self._setcomment(comment)", "of data block in file (None) \"\"\" # mappings between FITS and numarray", "= _fits2rec[fits_fmt] # 'E' -> 'f4' _formats = (_fmt+',') * npars data_fmt =", "% tform if repeat == '': repeat = 1 else: repeat = eval(repeat)", "ext1 else: raise KeyError, 'Redundant/conflicting keyword argument(s): %s' % ext2 elif n_ext1 ==", "> 1): raise KeyError, 'there are %d extensions of %s' % (nfound, `key`)", "data.' else: hdu=header._hdutype(data=data, header=header) return hdu def writeto(filename, data, header=None, **keys): \"\"\"Create a", "updated data: the new data used for updating The rest of the arguments", "self._coldefs.bscales[indx] bzero = self._coldefs.bzeros[indx] _scale = bscale not in ['', None, 1] _zero", "b = [other] elif isinstance(other, ColDefs): b = list(other.data) else: raise TypeError, 'Wrong", "as expressed in FITS BITPIX value (8, 16, 32, 64, -32, or -64)", "isinstance(self.value, Undefined): valStr = '' # conserve space for HIERARCH cards if isinstance(self,", "of single quotes, # instead of issuing an error. The FITS standard #", "else: data = None self.__dict__[attr] = data elif attr == 'columns': _cols =", "= num.equal(dummy, ord('T')) else: return dummy return self._convert[indx] def _scale_back(self): \"\"\"Update the parent", "= map(lambda y: 'a'+`y`, dummy) elif name == 'spans': # make sure to", "= header self.data = data self._xtn = ' ' def __setattr__(self, attr, value):", "0 elif self.__file.mode == 'update': if not self._resize: # determine if any of", "proper value. \"\"\" hdr = self[0].header if hdr.has_key('extend'): if (hdr['extend'] == False): hdr['extend']", "return self._convert[indx] def _scale_back(self): \"\"\"Update the parent array, using the (latest) scaled array.\"\"\"", "in range(nc-1, -1, -1): # locate last non-commentary card if self[i].key not in", "scaling if _zero != 0: self.data += -_zero # 0.9.6.3 to avoid out", "return ColDefs(x) def __len__(self): return len(self.data) def __repr__(self): return 'ColDefs'+ `tuple(self.data)` def __coerce__(self,", "infinite loop), # fix it first. 
if self.__dict__.has_key('_cardimage'): self._check(option) self._ascardimage() return self.__dict__['_cardimage'] def", "not None: _imagStr = imag.group('sign') + _imagStr _valStr = '(' + _realStr +", "= int(mo.group(1)) pos = mo.end(0) else: raise ValueError(\"NAXIS not found where expected\") if", "if hdu.header._mod or hdu.header.ascard._mod: hdu._file.seek(hdu._hdrLoc) self.__file.writeHDUheader(hdu) if (verbose): print \"update header in place:", "the ColDefs input is not a Column.\" % input.index(col) self.data = [col.copy() for", "result = self.field(indx[0]) # if more than one group parameter have the same", "name %s is too long (> 8), use HIERARCH.' % val else: raise", "from a CardList. cards: A list of Cards, default=[]. \"\"\" # decide which", "file. @type filename: string @param filename: input FITS file name @type key: string", "self._convert[indx] = dummy for i in range(len(self._parent)): if self._parent.field(indx)[i].strip() != nullval: dummy[i] =", "from TFORM data type to numarray data type (code) _booltype = 'i1' _fits2rec", "type elif _bool: self._parent.field(indx)[:] = num.choose(self._convert[indx], (ord('F'),ord('T'))) class GroupData(FITS_rec): \"\"\"Random groups data object.", "self._cardimage[:8].upper() == 'HIERARCH': self.__class__ = _Hierarch # for card image longer than 80,", "0 mo = re_naxis.search(block) if mo is not None: naxis = int(mo.group(1)) pos", "self.__file != None: if self.__file.memmap == 1: self.mmobject = self.__file._mm if self.__file.mode in", "to provide a user easier output interface if only one HDU needs to", "else: raise KeyError, 'Attribute %s not defined.' % name self.__dict__[name] = attr return", "one HDU needs to be written to a file. name: output FITS file", "block == '': raise EOFError hdu = _TempHDU() hdu._raw = '' # continue", "mode != 'append' and not os.path.exists(name): self.name, fileheader = urllib.urlretrieve(name) else: self.name =", "self._cardimage[:_limit].index(\"=\") except: eqLoc = None return eqLoc def _getKeyString(self): \"\"\"Locate the equal sign", "== Card.length: return input elif _len > Card.length: strlen = _len % Card.length", "to be used data: data to be used name: name to be populated", "key[8:].strip() _index = self.ascard._keylist.index(key) return 1 except: return 0 def rename_key(self, oldkey, newkey,", "and if the header represents a Primary header, it will be written to", "format if isinstance(self._coldefs._recformats[indx], _FormatX): _nx = self._coldefs._recformats[indx]._nx dummy = num.zeros(self._parent.shape+(_nx,), type=num.Bool) _unwrapx(self._parent.field(indx), dummy,", "attributes for attr in ['formats', 'names']: setattr(_data, attr, getattr(tmp, attr)) for i in", "data output.tofile(self._ffo.getfile()) if self._ffo.getfile().tell() - self._datLoc == self._size: # # the stream is", "None): # for X format if isinstance(self._coldefs._recformats[indx], _FormatX): _nx = self._coldefs._recformats[indx]._nx dummy =", "element in the HDUList must be an HDU.\" for item in hdu: if", "minimal Header will be provided. \"\"\" _ImageBaseHDU.__init__(self, data=data, header=header) self.name = 'PRIMARY' #", "SyntaxError, 'keyword name cannot be reset.' elif name == 'value': self._setvalue(val) elif name", "pairs.append((card.key, card.value)) return pairs def has_key(self, key): \"\"\"Check for existence of a keyword.", "fill the stream per the header provided in the constructor. 
\"\"\" size =", "dimension corresponding to TDIM keyword \"\"\" # any of the input argument (except", "\"%-10s %-11s\" % (self.name, \"CorruptedHDU\") def verify(self): pass class _ValidHDU(_AllHDU, _Verify): \"\"\"Base class", "\"%s is not a Card\" % str(value) def __delitem__(self, key): \"\"\"Delete a Card", "or None @param header: the header associated with 'data', if None, an appropriate", "real = Card._number_NFSC_RE.match(input.group('real')) _realStr = real.group('digt').translate(_fix_table, ' ') if real.group('sign') is not None:", "None: _val = valu.group('bool')=='T' elif valu.group('strg') != None: _val = re.sub(\"''\", \"'\", valu.group('strg'))", "else: # input is a list of Columns tmp = hdu.columns = ColDefs(input,", "\"Element %d in the HDUList input is not an HDU.\" % hdus.index(hdu) list.__init__(self,", "tfields = self.header['TFIELDS'] for i in range(tfields): self.req_cards('TBCOL'+`i+1`, None, _isInt, None, option, _err)", "else: _gethdr = False hdulist, _ext = _getext(filename, 'readonly', *ext, **extkeys) hdu =", "Cards, default=[]. \"\"\" # decide which kind of header it belongs to try:", "8 if simple and not groups: name = 'PRIMARY' else: name = ''", "hdu.header.ascard)) _bytes = (_nch80+1) * Card.length _bytes = _bytes + _padLength(_bytes) if _bytes", "not supplied (as in reading in the FITS file), # it will be", "for i in range(len(self)): if i > 0 and (not isinstance(self[i], _ExtensionHDU)): err_text", "like a binary table's data. \"\"\" if attr == 'data': # same code", "or the index (an integer). backward: search the index from the END, i.e.", "array dimensions'), Card('NAXIS1', 0, 'length of dimension 1'), Card('NAXIS2', 0, 'length of dimension", "input + ' ' * (Card.length-strlen) # minimum length is 80 else: strlen", "__init__(self, data=None, header=None): \"\"\"Construct a primary HDU. data: the data in the HDU,", "the previous column\" % indx+1 _trail = _loc[indx+1] - _width[indx] - self._coldefs.starts[indx] if", "column 9. \"\"\" eqLoc = self._locateEq() if eqLoc is None: eqLoc = 8", "delete the keywords BSCALE and BZERO del self.header['BSCALE'] del self.header['BZERO'] def update_header(self): \"\"\"Update", "the HDU: name, dimensions, and formats.\"\"\" class_name = str(self.__class__) type = class_name[class_name.rfind('.')+1:] #", "float): if self._valueModified: valStr = '%20s' % _floatFormat(self.value) else: valStr = '%20s' %", "_ExtensionHDU.__init__ since it is not doing anything. _ImageBaseHDU.__init__(self, data=data, header=header) self._xtn = 'IMAGE'", "the keylist self.count_blanks() if useblanks: self._use_blanks(card._ncards()) self.count_blanks() self._mod = 1 else: raise SyntaxError,", "input, tbtype='BinTableHDU'): \"\"\"input: a list of Columns, an (table) HDU tbtype: which table", "else: if os.path.splitext(self.name)[1] == '.gz': # Handle gzip files if mode in ['update',", "self._keylist = keylist # find out how many blank cards are *directly* before", "Card('NAXIS1', 0, 'length of dimension 1'), Card('NAXIS2', 0, 'length of dimension 2'), Card('PCOUNT',", "old_naxis+1): try: del self.header.ascard['NAXIS'+`j`] except KeyError: pass if isinstance(self.data, GroupData): self.header.update('GROUPS', True, after='NAXIS'+`len(axes)`)", "table for i in range(len(tmp)): if tmp._arrays[i] is None: size = 0 else:", "an UInt8 array. 
input: input Boolean array of shape (s, nx) output: output", "'SIMPLE': if 'GROUPS' in cards._keylist and cards['GROUPS'].value == True: self._hdutype = GroupsHDU elif", "after='gcount') # Wipe out the old table definition keywords. Mark them first, #", "Card before which the new card will be placed. The argument `before' takes", "10 try: eqLoc = self._cardimage[:_limit].index(\"=\") except: eqLoc = None return eqLoc def _getKeyString(self):", "the card image and return the string after the equal sign. If there", "last HDU or the file is corrupted.' % (len(hduList)+1) break # initialize/reset attributes", "= [''] * len(self) for i in range(len(self)): val = getattr(self[i], cname) if", "name, type, length of header, data shape and type for each extension. @type", "able to pass it to the header object hduList._resize = 0 return hduList", "-~]+?|\\'\\'|)) *?\\'(?=$|/| )|' r'(?P<bool>[FT])|' r'(?P<numr>' + _numr_FSC + ')|' r'(?P<cplx>\\( *' r'(?P<real>' +", "this to make mask array indexing work properly.\"\"\" hdu = new_table(self._coldefs, nrows=shape[0]) return", "r'(?P<valu_field> *' r'(?P<valu>' r'\\'(?P<strg>([ -~]+?|\\'\\'|)) *?\\'(?=$|/| )|' r'(?P<bool>[FT])|' r'(?P<numr>' + _numr_NFSC + ')|'", "coldata.byteswap() coldata._byteorder = 'big' if coldata2._type.bytes > 1: # do the _parent too,", "start.' % input _step = input.step if _step is None: _step = 1", "to be used in \"update/append\" mode # CardList needs its own _mod attribute", "common names and keyword names, make # sure to preserve the one-to-one correspondence", "= _end self._Formats = self.formats self._arrays[i] = input[i].array \"\"\" def __getitem__(self, key): x", "class_name = str(self.__class__) class_name = class_name[class_name.rfind('.')+1:] self.__dict__[attr] = ColDefs(self, tbtype=class_name) elif attr ==", "is created for the supplied data. This argument is optional. @keyword clobber: (optional)", "coldata2 = hdu.data._parent.field(i) if not isinstance(coldata, chararray.CharArray): # only swap unswapped # deal", "(int, long))\" # Functions def _padLength(stringLen): \"\"\"Bytes needed to pad the input stringLen", "array (FITS_rec) which will contain both group parameter info and the data. The", "= self._file hdu._hdrLoc = self._hdrLoc hdu._datLoc = self._datLoc hdu._datSpan = self._datSpan hdu._ffile =", "sequence with %d arrays/numbers.\" % len(indx) def _getitem(self, offset): row = (offset -", "Will deal with CONTINUE cards in a later stage as CONTINUE cards may", "the HDUList, indexed by number or name.\"\"\" key = self.index_of(key) super(HDUList, self).__delitem__(key) self._resize", "= hdu.header hdulist.close() if _gethdr: return _data, _hdr else: return _data def getval(filename,", "value @rtype: string, integer, or float \"\"\" _hdr = getheader(filename, *ext, **extkeys) return", "parameter values.\"\"\" if isinstance(parName, (int, long)): result = self.field(parName) else: indx = self._unique[parName.lower()]", "class for all HDUs which are not corrupted.\"\"\" # 0.6.5.5 def size(self): \"\"\"Size", "image HDU base class.\"\"\" \"\"\"Attributes: header: image header data: image data _file: file", "if exists, default = False. 
\"\"\" if isinstance(self, _ExtensionHDU): hdulist = HDUList([PrimaryHDU(), self])", "column definition.\"\"\" return self+column def del_col(self, col_name): \"\"\"Delete (the definition of) one Column.\"\"\"", "hdu.data.byteswapped() else: output = hdu.data # Binary table byteswap elif isinstance(hdu, BinTableHDU): for", "If the keyword already exists, it's value/comment will be updated. If it does", "isinstance(dtype, _FormatX): print 'X format' elif dtype+option in _rec2fits.keys(): # record format _repeat", "* self.header['NAXIS'+`j+1`] bitpix = self.header['BITPIX'] gcount = self.header.get('GCOUNT', 1) pcount = self.header.get('PCOUNT', 0)", "a \"view\" (for now) hcopy = header.copy() hcopy._strip() _list.extend(hcopy.ascardlist()) self.header = Header(_list) if", "hdu._datSpan) = _hduList.__file.writeHDU(hdu) _hduList.__file.close() self.__file.close() os.remove(self.__file.name) if (verbose): print \"delete the original file\",", "option == 'warn': pass # fix the value elif option == 'unfixable': _text", "= self.NumCode[self._bitpix] _type = getattr(num, type) # Determine how to scale the data", "hdr, 3) # update the 3rd extension >>> update(file, dat, 'sci', 2) #", "1 def copy(self): \"\"\"Make a copy of the Header.\"\"\" tmp = Header(self.ascard.copy()) #", "of the record if nrows == 0: for arr in tmp._arrays: if arr", "for k in self.keys()] else: self._keylist = keylist # find out how many", "'': raise EOFError hdu = _TempHDU() hdu._raw = '' # continue reading header", "list class. This is the top-level FITS object. When a FITS file is", "of each field for ASCII table if self._coldefs._tbtype == 'TableHDU': _loc = [1]", "keyword arguments. For example: >>> update(file, dat, hdr, 'sci') # update the 'sci'", "if fmt: code, width, prec = fmt.group('code', 'width', 'prec') else: raise ValueError, valu", "will be provided. \"\"\" _ImageBaseHDU.__init__(self, data=data, header=header) self.name = 'PRIMARY' # insert the", "field('Abc') will cause an exception since there is no unique mapping. If there", "self.__file.memmap == 1: self.mmobject = self.__file._mm if self.__file.mode in ['append', 'update']: self.flush(output_verify=output_verify, verbose=verbose)", "= map(lambda x, y: x-y, self.starts[1:], [1]+self.starts[1:-1]) dummy.append(self._width-self.starts[-1]+1) attr = map(lambda y: 'a'+`y`,", "to be FITS standard.: %s' % self.key # verify the key, it is", "_name = self.__file.name results = \"Filename: %s\\nNo. Name Type\"\\ \" Cards Dimensions Format\\n\"", "random group, # since binary table does not support ND yet if isinstance(hdu,", "print 'Warning: File size is smaller than specified data size. File may have", "self[j]._extver if _ver == _extver: found = j nfound += 1 if (nfound", "_booltype: _out = num.zeros(array.shape, type=recfmt) num.where(array==0, ord('F'), ord('T'), _out) array = _out #", "= len(self.data) _ncols = len(self.columns.formats) _format = self.columns.formats # if data is not", "file name which should not already exist. 
Use the directory of the input", "if keyStr.strip() in Card._commentaryKeys: # do NOT use self.key commentStr = '' elif", "elif attr == '_theap': self.__dict__[attr] = self.header.get('THEAP', self.header['NAXIS1']*self.header['NAXIS2']) elif attr == '_pcount': self.__dict__[attr]", "area return loc, _size+_padLength(_size) def close(self): \"\"\"Close the 'physical' FITS file.\"\"\" self.__file.close() class", "default it to 1 _extver = self[j]._extver if _ver == _extver: found =", "> 0: for i in range(min(self._blanks, how_many)): del self[-1] # it also delete", "of issuing an error. The FITS standard # appears vague on this issue", "= data # update the header self.update_header() self._bitpix = self.header['BITPIX'] # delete the", "to check key and comment for 'parse' result = Card._value_NFSC_RE.match(self._getValueCommentString()) # if not", "_name.strip().upper() if _name == _key: # if only specify extname, can only have", "attr == 'data': # same code as in _TableBaseHDU size = self.size() if", "Each attribute in ColDefs is a list of corresponding attribute values from all", "def del_col(self, col_name): \"\"\"Delete (the definition of) one Column.\"\"\" indx = _get_index(self.names, col_name)", "BZERO values when the data was read/created. If \"minmax\", use the minimum and", "Module variables _blockLen = 2880 # the FITS block size _python_mode = {'readonly':'rb',", "the output file, as the data will be scaled and is therefore not", "is None: _tmp = self._getValueCommentString() try: slashLoc = _tmp.index(\"/\") self.__dict__['value'] = _tmp[:slashLoc].strip() self.__dict__['comment']", "_zero) else: del self.header['BZERO'] if _scale != 1: self.data /= _scale self.header.update('BSCALE', _scale)", "'number of array dimensions'), ]) if isinstance(self, GroupsHDU): _list.append(Card('GROUPS', True, 'has groups')) if", "into a list of cards. Will deal with CONTINUE cards in a later", "\"\"\"Get the group parameter value.\"\"\" return self.array.par(fieldName)[self.row] def setpar(self, fieldName, value): \"\"\"Set the", "'==']: insert_pos = eval(_parse[1]) # if the card does not exist if _index", "with a Primary Header. If not we will need # to prepend a", "cards into a printable string.\"\"\" output = '' for card in self: output", "def _unwrapx(input, output, nx): \"\"\"Unwrap the X format column into a Boolean array.", "card, the Header may also contain the binary data(*). (*) In future it", "'a7'. if fmt.lstrip()[0] == 'A' and option != '': output_format = _fits2rec[dtype]+`int(option)` #", "result class _Verify: \"\"\"Shared methods for verification.\"\"\" def run_option(self, option=\"warn\", err_text=\"\", fix_text=\"Fixed.\", fix", "verification (for now).\"\"\" return _ErrList([]) class _Card_with_continue(Card): \"\"\"Cards having more than one 80-char", "cards[0].value.rstrip() if xtension == 'TABLE': self._hdutype = TableHDU elif xtension == 'IMAGE': self._hdutype", "pardata[i] else: self._parent.field(i)[:] = pardata[i] (_scale, _zero) = self._get_scale_factors(npars)[3:5] if _scale or _zero:", "'HIERARCH': key = key[8:].strip() _index = self.ascard._keylist.index(key) return 1 except: return 0 def", "attributes (e.g. 
ColDefs has the attribute .names while Column has .name), Each attribute", "num.add.accumulate(desc[:-1,0])*_dtype.bytes desc[:,1][:] += self._heapsize self._heapsize += desc[:,0].sum()*_dtype.bytes # conversion for both ASCII and", "self._blanks > 0: for i in range(min(self._blanks, how_many)): del self[-1] # it also", "key = self.index_of(key) super(HDUList, self).__delitem__(key) self._resize = 1 def __delslice__(self, i, j): \"\"\"Delete", "if verbose: print \"One or more data area is resized.\" break # if", "for hdu in self: (hdu._hdrLoc, hdu._datLoc, hdu._datSpan) = _hduList.__file.writeHDU(hdu) _hduList.__file.close() self.__file.close() os.remove(self.__file.name) if", "hdu.data._parent.field(i)[:n] = tmp._arrays[i][:n] if n < nrows: if tbtype == 'BinTableHDU': if isinstance(hdu.data._parent.field(i),", "nothing. for item in self: if isinstance(item, _ErrList): _dummy = item.__str__(tab=tab+1) # print", "the following conditions are met: 1. Redistributions of source code must retain the", "not a string' % val self.__dict__['comment'] = val def __setattr__(self, name, val): if", "i in range(_naxis): del self['NAXIS'+`i+1`] if issubclass(self._hdutype, PrimaryHDU): del self['EXTEND'] del self['PCOUNT'] del", "raise ValueError, \"Illegal format %s\" % fmt else: if dtype == 'a': output_format", "'TBCOL', 'TDIM'] # mapping from TFORM data type to numarray data type (code)", "(table) HDU tbtype: which table HDU, 'BinTableHDU' (default) or 'TableHDU' (text table). \"\"\"", "Boolean array into an UInt8 array. input: input Boolean array of shape (s,", "if isinstance(self._parent.field(indx)._type, num.IntegralType): dummy = num.around(dummy) self._parent.field(indx)[:] = dummy del dummy # ASCII", "n_ext1 > 2: raise ValueError, \"too many positional arguments\" elif n_ext1 == 1:", "keyword argument(s): %s' % ext2 else: if 'extname' in keys: if 'extver' in", "not None: self._checkText(_str) def fromstring(self, input): \"\"\"Construct a Card object from a (raw)", "range.' % indx elif isinstance(indx, slice): indx = _normalize_slice(indx, naxis) if (indx.start ==", "type and width. try: (dtype, width) = _re.match(input_format.strip()).groups() dtype = ascii2rec[dtype] if width", "'append') self._ffo.getfile().seek(0,2) self._hdrLoc = self._ffo.writeHDUheader(self) self._datLoc = self._ffo.getfile().tell() self._size = self.size() if self._size", "when updating the list(s). # Use lists, instead of dictionaries so the names", "= 1 _zero = 0 else: # flat the shape temporarily to save", "range(1, _max+1): if _where+nc >= len(_keyList): break if _cardList[_where+nc]._cardimage[:10].upper() != 'CONTINUE ': break", "hdu.data except IndexError: raise IndexError, 'No data in this HDU.' if _data is", "_len % Card.length return input + ' ' * (Card.length-strlen) def _floatFormat(value): \"\"\"Format", "index of a keyword in the CardList. key: the keyword name (a string)", "\"header must be a Header object\" if data is DELAYED: # this should", "a copy if scaled, so as not to corrupt the original array if", "from a RecArray.\"\"\" # input should be a record array self.__setstate__(input.__getstate__()) # _parent", "of mmobject so if the HDUList object is created from files # other", "class PrimaryHDU(_ImageBaseHDU): \"\"\"FITS primary HDU class.\"\"\" def __init__(self, data=None, header=None): \"\"\"Construct a primary", "\"The keyword %s with its value is too long.\" % self.key if len(output)", "with zeros/blanks. 
tbtype: table type to be created (BinTableHDU or TableHDU) \"\"\" #", "self._nfields -= 1 def change_attrib(self, col_name, attrib, new_value): \"\"\"Change an attribute (in the", "out a message only if there is something if _dummy.strip(): if self.unit: result", "option, default = 'exception'. clobber: Overwrite the output file if exists, default =", "isinstance(key, str): while 1: try: del self.ascard[key] self._mod = 1 except: return #", "= '' # set extension name if (name is None) and self.header.has_key('EXTNAME'): name", "1: if i._byteorder != 'big': i.byteswap() i._byteorder = 'big' else: if coldata._type.bytes >", "not isinstance(value, str): raise TypeError, 'bad value type' value = value.upper() if self.header.has_key('EXTNAME'):", "pairs def has_key(self, key): \"\"\"Check for existence of a keyword. Returns 1 if", "hdu.data is not None: # check TFIELDS and NAXIS2 hdu.header['TFIELDS'] = hdu.data._nfields hdu.header['NAXIS2']", "keylist self.count_blanks() self._mod = 1 def count_blanks(self): \"\"\"Find out how many blank cards", "self._coldefs.bzeros[indx] _scale = bscale not in ['', None, 1] _zero = bzero not", "is None: pass else: raise TypeError, \"table data has incorrect type\" # set", "_zero): # only do the scaling the first time and store it in", "0): print \"There is nothing to write.\" return self.update_tbhdu() if output_verify == 'warn':", "hdu=_makehdu(data, header) if not isinstance(hdu, PrimaryHDU): hdu = PrimaryHDU(data, header=header) clobber = keys.get('clobber',", "fix_text=fix_text, fix=fix, fixable=fixable)) return _err class _TempHDU(_ValidHDU): \"\"\"Temporary HDU, used when the file", "> len(key): key = key + (slice(None),) * (naxis-len(key)) offset = 0 for", "# table definition keyword regular expression _tdef_re = re.compile(r'(?P<label>^T[A-Z]*)(?P<num>[1-9][0-9 ]*$)') def _parse_tformat(tform): \"\"\"Parse", "variable-sized elements. \"\"\" objects.ObjectArray.__init__(self, input) self._max = 0 def __setitem__(self, key, value): \"\"\"To", "self.ascard._pos_insert(_card, before=before, after=after) else: self.ascard.append(Card(key, value, comment)) self._mod = 1 def add_history(self, value,", "loc = self.__file.tell() self.__file.write(blocks) # flush, to make sure the content is written", "filename: input FITS file name @type: string @param ext: The rest of the", "in a FITS file. @type filename: string @param filename: input FITS file name", "groups = 1 else: groups = 0 size = 1 for j in", "byteorder if coldata2._byteorder != 'big': coldata2.byteswap() coldata2._byteorder = 'big' # In case the", "i, j): \"\"\"Delete a slice of HDUs from the HDUList, indexed by number", "0: raise NameError, \"Key '%s' does not exist.\" % key else: # multiple", "fix_text return _text def verify (self, option='warn'): \"\"\"Wrapper for _verify.\"\"\" _option = option.lower()", "input is not a Column.\" % input.index(col) self.data = [col.copy() for col in", "columns). If the card image is longer than 80, assume it contains CONTINUE", "len(ext) > 0: if isinstance(ext[0], Header): header = ext[0] ext = ext[1:] elif", "does not have Boolean type elif _bool: self._parent.field(indx)[:] = num.choose(self._convert[indx], (ord('F'),ord('T'))) class GroupData(FITS_rec):", "# digits, exponent sign, and exponents _digits_FSC = r'(\\.\\d+|\\d+(\\.\\d*)?)([DE][+-]?\\d+)?' _digits_NFSC = r'(\\.\\d+|\\d+(\\.\\d*)?) 
*([deDE]", "= data.byteswapped() else: output = data output.tofile(self._ffo.getfile()) if self._ffo.getfile().tell() - self._datLoc == self._size:", "def __init__(self, cards=[], keylist=None): \"\"\"Construct the CardList object from a list of Cards.", "= self.ascard._keylist.index(key) return 1 except: return 0 def rename_key(self, oldkey, newkey, force=0): \"\"\"Rename", "if verbose: print \"One or more header is resized.\" break # Data: if", "_format = GroupsHDU._dict[self.header['BITPIX']] for i in range(self.header['PCOUNT']): _bscale = self.header.get('PSCAL'+`i+1`, 1) _bzero =", "string texts.\"\"\" output = [] for _card in self.ascardlist(): if _card.key == 'COMMENT':", "int checking since bool is also int elif isinstance(self.value , bool): valStr =", "error messages generated by verifications at different class levels. \"\"\" def __init__(self, val,", "repeat, data type, and option.\"\"\" try: (repeat, dtype, option) = _tformat_re.match(tform.strip()).groups() except: print", "32:'Int32', 64:'Int64', -32:'Float32', -64:'Float64'} ImgCode = {'UInt8':8, 'Int16':16, 'Int32':32, 'Int64':64, 'Float32':-32, 'Float64':-64} def", "be one or more of the attributes listed in _commonNames. The default is", "is not supported\" zfile = gzip.GzipFile(self.name) self.tfile = tempfile.NamedTemporaryFile('rb+',-1,'.fits') self.name = self.tfile.name self.__file", "KeyError, key _key = (_key.strip()).upper() nfound = 0 for j in range(len(self)): _name", "extension HDU class. This class is the base class for the TableHDU, ImageHDU,", "the value to be returned. \"\"\" try: return self[key] except: return default def", "_shape, _format, _gcount) def scale(self, type=None, option=\"old\", bscale=1, bzero=0): \"\"\"Scale image data by", "not recognized.' % option if (_option == \"ignore\"): return x = str(self._verify(_option)).rstrip() if", "= `repeat` output_format = _repeat+_fits2rec[dtype] elif dtype == 'X': nbytes = ((repeat-1) /", "return out # if not a slice, do this because Record has no", "it is the index in the list. If string, (a) Field (column) names", "in the regular range.\"\"\" def _normalize(indx, npts): if indx < -npts: indx =", "elif name == 'comment': self.__dict__['comment'] = '' if valu is not None: _comm", "both string and non-string types # Boolean is also OK in this constructor", "well, but will accept # strings with an odd number of single quotes,", "the END, i.e. backward? default=0. If backward = 1, search from the end.", "value, and comment. Core code for ascardimage. \"\"\" # keyword string if self.__dict__.has_key('key')", "else: offset *= _naxis if dims == []: dims = [1] npt =", "'J':'i4', 'A':'a', 'C':'c8', 'M':'c16', 'K':'i8'} # the reverse dictionary of the above _rec2fits", "default=[]. \"\"\" list.__init__(self, cards) self._cards = cards # if the key list is", "naxis elif isinstance(_stop, (int, long)): _stop = _normalize(_stop, naxis) else: raise IndexError, 'Illegal", "f = open(filename, mode='update') f.append(hdu) f.close() def update(filename, data, *ext, **extkeys): \"\"\"Update the", "but this task may be difficult when the extension is a TableHDU containing", "type=num.Bool) _unwrapx(self._parent.field(indx), dummy, _nx) self._convert[indx] = dummy return self._convert[indx] (_str, _bool, _number, _scale,", "Card('NAXIS2', 0, 'length of dimension 2'), Card('PCOUNT', 0, 'number of group parameters'), Card('GCOUNT',", "= '' return \"%-10s %-11s %5d %-12s %s%s\" % \\ (self.name, type, len(self.header.ascard),", "the index from the END, i.e. backward? 
default=0. If backward = 1, search", "indx = npts return indx _start = input.start if _start is None: _start", "in the file so we # must change the Primary header provided into", "= _bytes + _padLength(_bytes) if _bytes != hdu._datSpan: self._resize = 1 if verbose:", "new table fill: if = 1, will fill all cells with zeros or", "this issue and only states that a # string should not end with", "\"\"\"Force a write of the HDUList back to the file (for append and", "self._keylist = [k.upper() for k in self.keys()] else: self._keylist = keylist # find", "min((i+1)*8, nx) for j in range(_min, _max): if j != _min: num.lshift(output[...,i], 1,", "del _cards[%d];_cards.insert(%d, dummy)\" % (_index, _index, insert_pos) _err.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix)) # if", "_bool = 0 # there is no boolean in ASCII table _number =", "is no boolean in ASCII table _number = not(_bool or _str) bscale =", "object or None @param header: the header associated with 'data', if None, an", "or without modification, are permitted provided that the following conditions are met: 1.", "LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR", "field object.\"\"\" def __init__(self, input): \"\"\" input: a sequence of variable-sized elements. \"\"\"", "corresponding to TBCOL keyword dim: column dimension corresponding to TDIM keyword \"\"\" #", "= self.value.replace(\"'\",\"''\") valStr = \"'%-8s'\" % _expValStr valStr = '%-20s' % valStr #", "TFORM regular expression _tformat_re = re.compile(r'(?P<repeat>^[0-9]*)(?P<dtype>[A-Za-z])(?P<option>[!-~]*)') # table definition keyword regular expression _tdef_re", "binary data(*). (*) In future it may be possible to decipher where the", "None and isinstance(_ext, _Zero): try: hdu = hdulist[1] _data = hdu.data except IndexError:", "if self.comment in [None, '']: commentStr = '' else: commentStr = ' /", "# record format _repeat = '' if repeat != 1: _repeat = `repeat`", "so as not to corrupt the original array if bzero not in ['',", ": NumArray Data to stream to the file. 
:Returns: writeComplete : integer Flag", "dim = '' self.header.update('EXTEND', True, after='NAXIS'+dim) class ImageHDU(_ExtensionHDU, _ImageBaseHDU): \"\"\"FITS image extension HDU", "dummy return self._convert[indx] (_str, _bool, _number, _scale, _zero, bscale, bzero) = self._get_scale_factors(indx) #", "dtype = ascii2rec[dtype] if width == '': width = None else: width =", "_scale or _zero: for i in range(len(self._parent)): dummy[i][:] = dummy[i]*bscale+bzero # Boolean (logical)", "not isinstance(key, tuple): key = (key,) naxis = self.hdu.header['NAXIS'] if naxis < len(key):", "cname in _commonNames: val = getattr(_cols, cname+'s')[i] if val != '': keyword =", "the equal sign in the card image and return the string before the", "'' return size, name def setupHDU(self): \"\"\"Read one FITS HDU, data portions are", "== 'comment': _comm = _card.comment if isinstance(_comm, str) and _comm != '': longstring", "= header._hdutype(data=DELAYED, header=header) # pass these attributes hdu._file = self._file hdu._hdrLoc = self._hdrLoc", "the parameters parbzeros: list of bzeros for the parameters \"\"\" if isinstance(input, num.NumArray):", "_dict[_format[0]] + ' '*_trail # not using numarray.strings's num2char because the # result", "'disp', 'start', 'dim'] _keyNames = ['TTYPE', 'TFORM', 'TUNIT', 'TNULL', 'TSCAL', 'TZERO', 'TDISP', 'TBCOL',", "and NAXIS2 hdu.header['TFIELDS'] = hdu.data._nfields hdu.header['NAXIS2'] = hdu.data.shape[0] # calculate PCOUNT, for variable", "for hdu in self: # Header: # Add 1 to .ascard to include", "the end. key: keyword name value: keyword value (to be used for updating)", "'>= '+`_after` self.req_cards('GCOUNT', _pos, _isInt, 1, option, _err) self.req_cards('PCOUNT', _pos, _isInt, 0, option,", "ext2['extver'] raise KeyError, 'Redundant/conflicting keyword argument(s): %s' % ext2 elif n_ext1 == 2:", "if value checking is specified if test: val = self.header[keywd] if not eval(test):", "1 def add_history(self, value, before=None, after=None): \"\"\"Add a HISTORY card. value: History text", "has to be done after the \"regular\" data is written (above) _where =", "a list of cards into a string.\"\"\" block = '' for card in", "eval(test_pos): err_text = \"'%s' card at the wrong place (card %d).\" % (keywd,", "- 1 return _indx except: raise KeyError, 'Keyword %s not found.' % `key`", "if self.header.has_key('SIMPLE') and os.path.getsize(name) > 0: # # This will not be the", "append to @type data: array, table, or group data object @param data: the", "represents a Primary header, it will be written to the beginning of the", "= _ImageBaseHDU.ImgCode[self.data.type()] axes = list(self.data.getshape()) axes.reverse() elif self.data is None: axes = []", "fix_text = \"Fixed by moving it to the right place (card %d).\" %", "+ _digits_FSC+')') _number_NFSC_RE = re.compile(r'(?P<sign>[+-])? *0*(?P<digt>' + _digits_NFSC + ')') # FSC commentary", "platform dependence of the format (e.g. E-009 vs. E-09) elif isinstance(self.value, float): if", "states that a # string should not end with two single quotes, #", "# to accomodate both the ASCII table and binary table column # format", "data._itemsize self.header['NAXIS2'] = data._shape[0] self.header['TFIELDS'] = data._nfields self.data = data self.columns = data._coldefs", "skip if there is no match if (keyword in _keyNames): _list.append(i) for i", "the comment separator resulting in an incorrect # match. 
r'\\'(?P<strg>([ -~]+?|\\'\\'|)) *?\\'(?=$|/| )|'", "FITS block # self._ffo.getfile().write(_padLength(self._size)*'\\0') self.writeComplete = 1 self._ffo.getfile().flush() return self.writeComplete def size(self): \"\"\"", "class.\"\"\" def __init__(self, data=None, header=None, name=None): \"\"\" header: header to be used data:", "this constructor _card = \"Card('%s', %s)\" % (keywd, `fix_value`) fix = \"self.header.ascard.insert(%d, %s)\"", "delayed for col in range(_nfields): dict[col]['array'] = Delayed(input, col) # now build the", "attr[i] = _end - last_end last_end = _end self._width = _end else: raise", "Deal with CONTINUE cards # if a long string has CONTINUE cards, the", "the header, the stream is padded to fill a complete FITS block and", "method.\"\"\" _err = _TableBaseHDU._verify(self, option=option) self.req_cards('PCOUNT', None, 'val == 0', 0, option, _err)", "separator resulting in an incorrect # match. r'\\'(?P<strg>([ -~]+?|\\'\\'|)) *?\\'(?=$|/| )|' r'(?P<bool>[FT])|' r'(?P<numr>'", "cname, value.value) else: setattr(self, cname, value) # if the column data is not", "the stream is full so pad the data to the next FITS block", "%d' % (_blockLen, len(block)) elif (blocks[:8] not in ['SIMPLE ', 'XTENSION']): raise IOError,", "to expand (as C/Python does). for i in range(len(dummy)): x = _fmt %", "value, and (optionally) comment. Any specifed arguments, except defaults, must be compliant to", "\"'%s' card at the wrong place (card %d).\" % (keywd, _index) fix_text =", "(_str, _bool, _number, _scale, _zero, bscale, bzero) = self._get_scale_factors(indx) # for P format", "string before the equal sign. If there is no equal sign, return the", "The comment field will # return a match if the comment separator is", "_keylist.reverse() try: _indx = _keylist.index(_key) if backward: _indx = len(_keylist) - _indx -", "in ['', None, 0] # ensure bscale/bzero are numbers if not _scale: bscale", "_TempHDU, _ValidHDU @group Table-related Classes: ColDefs, Column, FITS_rec, _FormatP, _FormatX, _VLF \"\"\" \"\"\"", "== 0): raise KeyError, 'extension %s not found' % `key` elif (nfound >", "raise TypeError, \"Supplied data is not the correct type.\" if data._byteorder != 'big':", "with .verify('fix').\" if valu.group('bool') != None: _val = valu.group('bool')=='T' elif valu.group('strg') != None:", "= None return self.__class__(data=_data, header=self.header.copy()) def writeto(self, name, output_verify='exception', clobber=False): \"\"\"Write the HDU", "\"\"\" fmt = input_format (repeat, dtype, option) = _parse_tformat(fmt) if reverse == 0:", "destination data type, use numarray attribute format, (e.g. 'UInt8', 'Int16', 'Float32' etc.). 
If", "not None: out._convert[i] = ndarray.NDArray.__getitem__(self._convert[i], key) del dummy return out # if not", "= _ErrList([]) try: self._check(option) except: pass _err.append(self.run_option(option, err_text=self._err_text, fix_text=self._fix_text, fixable=self._fixable)) return _err class", "\" valstr = valfmt % val_list[i] output = output + '%-80s' % (headstr", "is resized.\" break # if the HDUList is resized, need to write it", "n in dims: npt *= n # Now, get the data (does not", "elif indx < 0: indx += npts elif indx > npts: indx =", "ValueError, 'column definitions have a different table type' elif isinstance(input, FITS_rec): # input", "raise SyntaxError, \"%s is not a Card\" % str(card) def _use_blanks(self, how_many): if", "self._mod = 1 else: raise SyntaxError, \"%s is not a Card\" % str(value)", "string = TNULL, return ASCIITNULL nullval = self._coldefs.nulls[indx].strip() dummy = num.zeros(len(self._parent), type=_type) dummy[:]", "bscale=None, bzero=None, disp=None, start=None, \\ dim=None, array=None): \"\"\"Construct a Column by specifying attributes.", "there is no equal sign, return the string before column 9. \"\"\" eqLoc", "if mo is not None: pcount = int(mo.group(1)) else: pcount = 0 mo", "to ColDefs must be a table HDU or a list of Columns\" def", "spec, i.e. A7 in ASCII table is the same as 7A in #", "it to a strings array array = chararray.array(array, itemsize=eval(recfmt[1:])) # then try variable", "and val <= 999\", 1, option, _err) self.req_cards('NAXIS1', '== 3', _isInt+\" and val", "single quotes, # instead of issuing an error. The FITS standard # appears", "+ _numr_FSC + ') *, *(?P<imag>' + _numr_FSC + ') *\\))' r')? *)'", "for c in _cardList[_where:_where+nc]: _longstring += c._cardimage _cardList[_where-1] = _Card_with_continue().fromstring(_longstring) del _cardList[_where:_where+nc] del", "== 'minmax': if isinstance(_type, num.FloatingType): _scale = 1 _zero = 0 else: #", "= ((repeat-1) / 8) + 1 # use an array, even if it", "True: self._hdutype = PrimaryHDU else: self._hdutype = _ValidHDU elif cards[0].key == 'XTENSION': xtension", "a list of Columns elif isinstance(input, (list, tuple)): for col in input: if", "== tmp._recformats[i]._nx: _wrapx(tmp._arrays[i][:n], hdu.data._parent.field(i)[:n], tmp._recformats[i]._nx) else: # from a table parent data, just", "key else: raise NameError, \"Illegal key '%s'.\" % `key` return indx def _unwrapx(input,", "% (nfound, `key`) else: return found def readall(self): \"\"\"Read data of all HDU's", "keywod EXTNAME, default=None. \"\"\" # no need to run _ExtensionHDU.__init__ since it is", "shape=nrows)) else: hdu.data = FITS_rec(rec.array(None, formats=tmp._recformats, names=tmp.names, shape=nrows)) hdu.data._coldefs = hdu.columns # populate", "class CardList(list): \"\"\"FITS header card list class.\"\"\" def __init__(self, cards=[], keylist=None): \"\"\"Construct the", "if self.comment is None: comm = '' else: comm = self.comment commfmt =", "== 'append': for hdu in self: if (verbose): try: _extver = `hdu.header['extver']` except:", "of the column definitions.\"%att continue print \"%s:\" % att print ' ', getattr(self,", "else: if self.header.has_key('SIMPLE') and os.path.getsize(name) > 0: # # This will not be", "# always fix silently the case where \"=\" is before column 9, #", "provided. 
# ---------------------------------------------------------------------------
# [Recovered outline: this section of the source arrived as shuffled
#  fragments; only the module structure summarized below could be
#  reconstructed.]
# ---------------------------------------------------------------------------
# External/astrometry.net/astrometry/python/pyfits/NA_pyfits.py
# $Id: NA_pyfits.py 329 2007-07-06 13:11:54Z jtaylor2 $
#
# Module docstring (recovered): a module for reading and writing Flexible
# Image Transport System (FITS) files.  The format was endorsed by the
# International Astronomical Union; for details see the NASA/Science Office
# of Standards and Technology publication NOST 100-2.0.
# License: http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE
# '"What do you mean: Profits?" - Google Search, when asked for "PyFITS"'
#
# Recovered imports: re, os, tempfile, exceptions, operator, copy, signal,
# threading, gzip, zipfile, __builtin__, numarray (as num), numarray.generic
# (as ndarray), numarray.strings (as chararray), numarray.records (as rec),
# numarray.objects (as objects), numarray.memmap (as Memmap), and
# string.maketrans.
#
# Recovered module constants:
#   _blockLen = 2880                     # FITS block size in bytes
#   Card.length = 80                     # fixed width of a header card
#   _memmap_mode = {'readonly': 'r', 'copyonwrite': 'c', 'update': 'r+'}
#   TRUE = True; FALSE = False           # deprecated aliases
#   _fits2rec / _rec2fits                # TFORM <-> record-format tables
#   _fix_table = maketrans('de', 'DE')   # float-string fix-up tables
#   _fix_table2 = maketrans('dD', 'eE')
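# The recovered _pad and _padLength helpers enforce the two fixed sizes of the
# FITS on-disk layout: 80-character header cards and 2880-byte blocks.  The
# functions below are a minimal stand-alone sketch of that logic (hypothetical
# names; they mirror what the fragments describe rather than the original
# implementations).

CARD_LEN = 80      # width of one header card (Card.length in the module)
BLOCK_LEN = 2880   # size of one FITS block (_blockLen in the module)

def pad_card(image):
    """Blank-pad a card image so its length is a multiple of 80 characters."""
    rem = len(image) % CARD_LEN
    return image if rem == 0 else image + ' ' * (CARD_LEN - rem)

def pad_length(nbytes):
    """Number of padding bytes needed to complete the current 2880-byte block."""
    return (BLOCK_LEN - nbytes % BLOCK_LEN) % BLOCK_LEN

# Example: a header of 37 cards occupies 37*80 = 2960 bytes and therefore
# needs pad_length(2960) == 2800 bytes of padding to finish its second block.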
# Recovered class hierarchy:
#   Header-related classes: Card, _Card_with_continue, _Hierarch (keywords
#     longer than 8 characters), CardList, Header.
#   HDU classes: _AllHDU, _ValidHDU, _TempHDU, _CorruptedHDU, _ImageBaseHDU,
#     PrimaryHDU, ImageHDU, GroupsHDU, _TableBaseHDU, TableHDU, BinTableHDU,
#     StreamingHDU (incremental writing of a data stream), and Section
#     (on-demand access to image sections).
#   Table/data classes: Column, ColDefs, FITS_rec, GroupData, _VLF
#     (variable-length fields), Delayed.
#   Top-level container: HDUList, a list of HDUs backed by a low-level file
#     object (the "ffo"); supports readonly/copyonwrite/update/append modes,
#     gzip and zip inputs, and optional memory mapping.
#   Verification helpers: _Verify, _ErrList, VerifyError; verify() accepts the
#     options 'fix', 'silentfix', 'ignore', 'warn', and 'exception'.
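# The fragments also describe _wrapx/_unwrapx helpers that pack a Boolean
# column of nx flags per row into ((nx-1)//8)+1 bytes for the binary-table
# 'X' (bit) format and unpack it again.  Below is a minimal NumPy re-sketch
# of that packing (the original uses numarray); the function names are
# illustrative, not the module's own.

import numpy as np

def wrapx(flags, nx):
    """Pack a Boolean array of shape (s, nx) into a uint8 array of shape
    (s, nbytes), one bit per flag, most-significant bit first."""
    nbytes = (nx - 1) // 8 + 1
    out = np.zeros(flags.shape[:-1] + (nbytes,), dtype=np.uint8)
    for i in range(nbytes):
        lo, hi = i * 8, min((i + 1) * 8, nx)
        for j in range(lo, hi):
            if j != lo:
                out[..., i] <<= 1                        # make room for the next bit
            out[..., i] |= flags[..., j].astype(np.uint8)
        out[..., i] <<= (i + 1) * 8 - hi                 # left-justify a partial last byte
    return out

def unwrapx(packed, nx):
    """Inverse of wrapx: expand packed bytes back into a Boolean array."""
    flags = np.zeros(packed.shape[:-1] + (nx,), dtype=bool)
    for j in range(nx):
        flags[..., j] = (packed[..., j // 8] >> (7 - j % 8)) & 1
    return flags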
\"\"\" if header is", "else: if val[:8].upper() == 'HIERARCH': val = val[8:].strip() self.__class__ = _Hierarch else: raise", "_commonNames: attr = [''] * len(self) for i in range(len(self)): val = getattr(self[i],", "hdu=_makehdu(data, header) if isinstance(hdu, PrimaryHDU): hdu = ImageHDU(data, header) f = open(filename, mode='update')", "class GroupsHDU(PrimaryHDU): \"\"\"FITS Random Groups HDU class.\"\"\" _dict = {8:'B', 16:'I', 32:'J', 64:'K',", "r'[A-Z0-9_-]* *$' _keywd_FSC_RE = re.compile(_keywd_FSC) # A number sub-string, either an integer or", "_scale != 1: self.data /= _scale self.header.update('BSCALE', _scale) else: del self.header['BSCALE'] if self.data._type", "for cname in _commonNames: attr = getattr(self, cname+'s') del attr[indx] del self._arrays[indx] self._nfields", "def _get_scale_factors(self, indx): \"\"\" Get the scaling flags and factors for one field.", "given header. # if not os.path.exists(name): if not self.header.has_key('SIMPLE'): hdulist = HDUList([PrimaryHDU()]) hdulist.writeto(name,", "see the NASA/Science Office of Standards and Technology publication, NOST 100-2.0. License: http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE", "being able to pass it to the header object hduList._resize = 0 return", "= '>' + strfmt[:-1] return strfmt ''' def _verify(self, option='warn'): \"\"\"TableHDU verify method.\"\"\"", "strfmt = strfmt + 's'+str(size) + ',' strlen = strlen + size else:", "= naxis*[0] for j in range(naxis): axes[j] = self.header['NAXIS'+`j+1`] axes.reverse() return tuple(axes) def", "in EXTNAME keyword \"\"\" if header is not None: if not isinstance(header, Header):", "(default), 'update', or 'append'. memmap: Is memmory mapping to be used? default=0. \"\"\"", "verify the key, it is never fixable # always fix silently the case", "0\", 0, option, _err) self.req_cards('GCOUNT', '== '+`naxis+4`, _isInt+\" and val == 1\", 1,", "is None and isinstance(_ext, _Zero): try: hdu = hdulist[1] _data = hdu.data except", "TRUE = True # deprecated FALSE = False # deprecated _INDENT = \"", "pardata=None, parnames=[], bscale=None, bzero=None, parbscales=None, parbzeros=None): \"\"\"input: input data, either the group data", "verbose messages? default = 0. This simply calls the close method of the", "# to avoid starting at the same CONTINUE card else: _start = _where", "bscale, bzero) = hdu.data._get_scale_factors(i)[3:] if n > 0: if isinstance(tmp._recformats[i], _FormatX): if tmp._arrays[i][:n].shape[-1]", "'a': value = chararray.array(value, itemsize=1) else: value = num.array(value, type=self._dtype) objects.ObjectArray.__setitem__(self, key, value)", "matched, it will try to match the name with case insensitivity. So, in", "if isinstance(val, str): self._checkText(val) self.__dict__['_valueModified'] = 1 else: raise ValueError, 'Illegal value %s'", "card string which must contain printable ASCII characters. _ASCII_text = r'[ -~]*$' _comment_FSC_RE", "list class.\"\"\" def __init__(self, cards=[], keylist=None): \"\"\"Construct the CardList object from a list", "OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,", "standard. key: keyword name, default=''. value: keyword value, default=''. comment: comment, default=''. 
\"\"\"", "hdu.header._mod or hdu.header.ascard._mod: hdu._file.seek(hdu._hdrLoc) self.__file.writeHDUheader(hdu) if (verbose): print \"update header in place: Name", "self.data = [col.copy() for col in input] # if the format of an", "on the type of the extension being referenced If the optional keyword 'header'", "self._mod = 1 except: return # for integer key only delete once else:", "to 1 _extver = self[j]._extver if _ver == _extver: found = j nfound", "== 1: self.mmobject = self.__file._mm if self.__file.mode in ['append', 'update']: self.flush(output_verify=output_verify, verbose=verbose) self.__file.close()", "self._xtn = ' ' def __setattr__(self, attr, value): \"\"\"Set an HDU attribute.\"\"\" if", "_key.group('label') except: continue # skip if there is no match if (keyword in", "_nbytes = 1 else: _nbytes = num.getType(dtype).bytes for i in range(len(input)): if dtype", "can have two different columns called 'abc' and 'ABC' respectively. (b) When you", "bottom: If =0 (default) the card will be appended after the last non-commentary", "'CONTINUE': raise ValueError, 'Long card image must have CONTINUE cards after the first", "2', _isInt+\" and val >= 1 and val <= 999\", 1, option, _err)", "def _verify(self, option='warn'): _err = PrimaryHDU._verify(self, option=option) # Verify locations and values of", "_err) _after = self.header['NAXIS'] + 3 # if the card EXTEND exists, must", "self]) elif isinstance(self, PrimaryHDU): hdulist = HDUList([self]) hdulist.writeto(name, output_verify, clobber=clobber) def _verify(self, option='warn'):", "\"\"\"Update one header card.\"\"\" \"\"\" If the keyword already exists, it's value/comment will", "num.array(num.around(self.data), type=_type) #0.7.7.1 class PrimaryHDU(_ImageBaseHDU): \"\"\"FITS primary HDU class.\"\"\" def __init__(self, data=None, header=None):", "'big': i.byteswap() i._byteorder = 'big' else: if coldata._type.bytes > 1: if coldata._byteorder !=", "\"\"\" if self.data is None: return # Determine the destination (numarray) data type", "% indx elif isinstance(indx, slice): indx = _normalize_slice(indx, naxis) if (indx.start == 0)", "PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT,", "else: hdu.data._parent.field(i)[n:] = '' hdu.update() return hdu class FITS_rec(rec.RecArray): \"\"\"FITS record array class.", "self.header.has_key('SIMPLE'): hdulist = HDUList([PrimaryHDU()]) hdulist.writeto(name, 'exception') else: if self.header.has_key('SIMPLE') and os.path.getsize(name) > 0:", "\"One or more data area is resized.\" break # if the HDUList is", "hdulist, ext def getheader(filename, *ext, **extkeys): \"\"\"Get the header from an extension of", "input[i].array \"\"\" def __getitem__(self, key): x = self.data[key] if isinstance(key, (int, long)): return", "\"\"\" Close the 'physical' FITS file. :Parameters: None :Returns: None \"\"\" self._ffo.close() class", "in Card._commentaryKeys or oldkey in Card._commentaryKeys: if not (newkey in Card._commentaryKeys and oldkey", "fix = \"self.insert(0, PrimaryHDU())\" _text = self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix) _err.append(_text) # each", "header). 
@type filename: string @param filename: input FITS file name @param ext: The", "\"\"\"Get the size from the first block of the HDU.\"\"\" re_simple = re.compile(r'SIMPLE", "and extver if hdu.name == '': hdu.name, hdu._extver = hdu._getname() elif hdu.name ==", "ext2['extname'] else: raise KeyError, 'Insufficient keyword argument: %s' % ext2 return hdulist, ext", "Card._number_NFSC_RE.match(input.group('imag')) _imagStr = imag.group('digt').translate(_fix_table, ' ') if imag.group('sign') is not None: _imagStr =", "zeros or blanks if = 0, copy the data from input, undefined cells", "and BinTableHDU classes. \"\"\" def __init__(self, data=None, header=None): self._file, self._offset, self._datLoc = None,", "_arr *= bscale if _zero: _arr += bzero hdu.data._convert[i][:n] = _arr else: hdu.data._parent.field(i)[:n]", "type(key) def copy(self): \"\"\"Make a (deep)copy of the CardList.\"\"\" cards = [None]*len(self) for", "['formats', 'names']: setattr(_data, attr, getattr(tmp, attr)) for i in range(len(tmp)): tmp._arrays[i] = _data.field(i)", "fix_text=\"Fixed.\", fix = \"pass\", fixable=1): \"\"\"Execute the verification with selected option.\"\"\" _text =", "there # is no comment if self.key in Card._commentaryKeys: if not isinstance(self.value, str):", "\"\"\" self.__dict__['_cardimage'] = _pad(input) if self._cardimage[:8].upper() == 'HIERARCH': self.__class__ = _Hierarch # for", "elif isinstance(indx, slice): indx = _normalize_slice(indx, naxis) if (indx.start == 0) and (indx.stop", "# if not os.path.exists(name): if not self.header.has_key('SIMPLE'): hdulist = HDUList([PrimaryHDU()]) hdulist.writeto(name, 'exception') else:", "elif hdu.name == 'PRIMARY': hdu._extver = 1 hdu._file = self.__file hdu._hdrLoc = _hdrLoc", "= input[xoffset:offset] list.append(tmp) if len(input) == offset: break xoffset = offset return list", "in range(_min, _max): if j != _min: num.lshift(output[...,i], 1, output[...,i]) num.add(output[...,i], input[...,j], output[...,i])", "a keyword in the CardList. key: the keyword name (a string) or the", "n_ext1 == 2: if n_ext2 == 0: ext = ext1 else: raise KeyError,", "'a' in self._coldefs.formats[indx] _bool = self._coldefs._recformats[indx][-2:] == _booltype else: _str = self._coldefs.formats[indx][0] ==", "return self._cardimage def __getattr__(self, name): \"\"\" instanciate specified attribute object.\"\"\" if name ==", "field for _card in hdr.ascardlist(): _key = _tdef_re.match(_card.key) try: keyword = _key.group('label') except:", "the type of the extension being referenced If the optional keyword 'header' is", "*ext, **extkeys) hdulist[_ext] = new_hdu hdulist.close() def info(filename): \"\"\"Print the summary information on", "is no longer than strlen and no word is cut into two pieces.", "tuple: >>> getdata('in.fits', 'sci', 2) # EXTNAME='SCI' & EXTVER=2 >>> getdata('in.fits', extname='sci', extver=2)", "item def keys(self): \"\"\"Return a list of all keywords from the CardList.\"\"\" return", "dimensions, reverse the order of NAXIS.\"\"\" naxis = self.header['NAXIS'] axes = naxis*[0] for", "dim if tbtype == 'TableHDU': _formats = '' _itemsize = 0 for i", "+ _numr_NFSC + ') *\\))' r')? 
*)' r'(?P<comm_field>' r'(?P<sepr>/ *)' r'(?P<comm>.*)' r')?$') #", "of dimension 2'), Card('PCOUNT', 0, 'number of group parameters'), Card('GCOUNT', 1, 'number of", "the syntax of \"> n\", # where n is an int if isinstance(pos,", "cards into a printable string.\"\"\" kard = self._cardimage output = '' for i", "TableHDU elif xtension == 'IMAGE': self._hdutype = ImageHDU elif xtension in ('BINTABLE', 'A3DTABLE'):", "if it is only ONE u1 (i.e. use tuple always) output_format = _FormatX(`(nbytes,)`+'u1')", "be possible to decipher where the last block of the Header ends, but", "return self.__dict__[attr] except KeyError: raise AttributeError(attr) def par(self, parName): \"\"\"Get the group parameter", "\"%dR x %dC\" % (_nrows, _ncols) return \"%-10s %-11s %5d %-12s %s\" %", "except KeyError: pass if isinstance(self.data, GroupData): self.header.update('GROUPS', True, after='NAXIS'+`len(axes)`) self.header.update('PCOUNT', len(self.data.parnames), after='GROUPS') self.header.update('GCOUNT',", "updating), default=None. before: name of the keyword, or index of the Card before", "else: _pc = '%' _fmt = ' '*_lead + _pc + _format[1:] +", "FITS file will be like a binary table's data. \"\"\" if attr ==", "cases, but # it comes pretty darn close. It appears to find the", "== 'value': self._setvalue(val) elif name == 'comment': self._setcomment(val) else: raise AttributeError, name #", "keyStr = 'HIERARCH %s ' % self.key else: keyStr = '%-8s' % self.key", "BZERO = +32768 self.header.update('BZERO', _zero) else: del self.header['BZERO'] if _scale != 1: self.data", "option: how to scale the data: if \"old\", use the original BSCALE and", "be inserted at the beginning of the file and the provided header will", "hdu._extver = hdu._getname() elif hdu.name == 'PRIMARY': hdu._extver = 1 hdu._file = self.__file", "dictionaries for each field for _card in hdr.ascardlist(): _key = _tdef_re.match(_card.key) try: keyword", "if self.header[0].rstrip() != self._xtn: self.header[0] = self._xtn self.header.ascard[0].comment = 'ASCII table extension' '''", "elif self.__dict__.has_key('comment') or self.__dict__.has_key('_cardimage'): if self.comment in [None, '']: commentStr = '' else:", "second extension >>> getdata('in.fits', ext=2) # the second extension By name, i.e., EXTNAME", "without modification, are permitted provided that the following conditions are met: 1. Redistributions", "are permitted provided that the following conditions are met: 1. Redistributions of source", "conditions are met: 1. Redistributions of source code must retain the above copyright", "the card will be appended at the end, even if there are blank", "for both ASCII and binary tables if _number and (_scale or _zero): #", "be the extension specification(s). Header and extension specs can also be keyword arguments.", "the open. Any header will not be initialized till the HDU is accessed.", "not in ['readonly', 'copyonwrite', 'update']: raise \"Memory mapping is not implemented for mode", "keyStr = '%-8s' % self.key else: keyStr = ' '*8 # value string", "output = '' for i in range(len(kard)/80): output += kard[i*80:(i+1)*80] + '\\n' return", "def _words_group(self, input, strlen): \"\"\"Split a long string into parts where each part", "repeat elif dtype == 'P': output_format = _FormatP('2i4') output_format._dtype = _fits2rec[option[0]] elif dtype", "header.copy() hcopy._strip() _list.extend(hcopy.ascardlist()) self.header = Header(_list) if (data is not DELAYED): if isinstance(data,", "*, *(?P<imag>' + _numr_NFSC + ') *\\))' r')? 
*)' r'(?P<comm_field>' r'(?P<sepr>/ *)' r'(?P<comm>.*)'", "if header is not None: hcopy = header.copy() hcopy._strip() _list.extend(hcopy.ascardlist()) self.header = Header(_list)", "'_arrays', [None]*self._nfields) def add_col(self, column): \"\"\"Append one Column to the column definition.\"\"\" return", "if _scale: num.multiply(self._convert[indx], bscale, self._convert[indx]) if _zero: self._convert[indx] += bzero elif _bool: self._convert[indx]", "Dimensions Format\\n\" % _name for j in range(len(self)): results = results + \"%-3d", "AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT", "'append']: raise \"Writing to gzipped fits files is not supported\" zfile = gzip.GzipFile(self.name)", "end_RE.search(block) if mo is None: hdu._raw += block block = self.__file.read(_blockLen) if block", "def add_history(self, value, before=None, after=None): \"\"\"Add a HISTORY card. value: History text to", "f = FITS_rec(r) f._convert = copy.deepcopy(self._convert) return f def _clone(self, shape): \"\"\"Overload this", "to the end of the file. If the file does not already exist,", "the header to be used (as a template), default=None. If header=None, a minimal", "correct location before calling this method. \"\"\" if isinstance(hdu, _ImageBaseHDU): hdu.update_header() return (self.writeHDUheader(hdu),)", "arrays can be just list or tuple, not required to be NDArray if", "_name == _key: # if only specify extname, can only have one extension", "type = self.NumCode[self._bitpix] _type = getattr(num, type) # Determine how to scale the", "some of them may not exist for name in ['key', 'value', 'comment', '_valueModified']:", "if isinstance (value, Card): _key = self.index_of(key) # only set if the value", "[self[i] for i in range(len(self))].__iter__() def __getitem__(self, key): \"\"\"Get an HDU from the", "to parse further if self.key in Card._commentaryKeys: self.__dict__['value'] = self._cardimage[8:].rstrip() self.__dict__['comment'] = ''", "header=None, name=None): PrimaryHDU.__init__(self, data=data, header=header) self.header._hdutype = GroupsHDU self.name = name if self.header['NAXIS']", "raise ValueError, \"too many positional arguments\" elif n_ext1 == 1: if n_ext2 ==", "_hdr[key] def _makehdu(data, header): if header is None: if isinstance(data, num.NumArray): hdu =", "commfmt % i output = output + '%-80s' % commstr return output def", "cards=[]): \"\"\"Construct a Header from a CardList. cards: A list of Cards, default=[].", "= (max + min) / 2. # throw away -2^N _scale = (max", "'Image extension') else: c0 = Card('SIMPLE', True, 'conforms to FITS standard') _list =", "# # the stream is full so pad the data to the next", "Card to be inserted. useblanks: Use any *extra* blank cards? default=1. If useblanks", "# the primary header >>> getdata('in.fits', 2) # the second extension >>> getdata('in.fits',", "avoid starting at the same CONTINUE card else: _start = _where + 1", "self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix) _err.append(_text) # each element calls their own verify for", "hdu def writeHDU(self, hdu): \"\"\"Write *one* FITS HDU. Must seek to the correct", "CONTINUE cards # if a long string has CONTINUE cards, the \"Card\" is", "1 if found, otherwise, 0. key: keyword name. 
If given an index, always", "self.data[key] if isinstance(key, (int, long)): return x else: return ColDefs(x) def __len__(self): return", "(0th) element must be a primary HDU if len(self) > 0 and (not", "self._bzero = self.header.get('BZERO', 0) self._bscale = self.header.get('BSCALE', 1) if (data is DELAYED): return", "ending \"&\" if _val[-1] == '&': _val = _val[:-1] longstring = longstring +", "'' else: comm = self.comment commfmt = \"%-s\" if not comm == '':", "new handler signal.signal(signal.SIGINT,New_SIGINT) if self.__file.mode not in ('append', 'update'): print \"flush for '%s'", "*, *(?P<imag>' + _numr_FSC + ') *\\))' r')? *)' r'(?P<comm_field>' r'(?P<sepr>/ *)' r'(?P<comm>[!-~][", "raise KeyError, 'Insufficient keyword argument: %s' % ext2 return hdulist, ext def getheader(filename,", "type (string): destination data type, use numarray attribute format, (e.g. 'UInt8', 'Int16', 'Float32'", "extver from the header.\"\"\" re_extname = re.compile(r\"EXTNAME\\s*=\\s*'([ -&(-~]*)'\") re_extver = re.compile(r\"EXTVER\\s*=\\s*(\\d+)\") mo =", "need to convert, if isinstance(tmp._arrays[i], chararray.CharArray): hdu.data._parent.field(i)[:n] = tmp._arrays[i][:n] else: hdu.data._convert[i] = num.zeros(nrows,", "rather well, but will accept # strings with an odd number of single", "together output = keyStr + eqStr + valStr + commentStr # need this", "case of reading from a # FITS file) self.data return new_table(self.columns, header=self.header, tbtype=self.columns._tbtype)", "errmsg, headers): raise IOError, (errcode, errmsg, url) urllib._urlopener = ErrorURLopener() # Assign the", "field('ABC') will get the second field. If there is no exact name matched,", "range(self._nfields): # touch all fields to expand the original ._convert list # so", "memmap object, if any. output_verify: output verification option, default = 'exception'. verbose: print", "err_text = \"'%s' card does not exist.\" % keywd fix_text = \"Fixed by", "0 naxis = self.header.get('NAXIS', 0) # for random group image, NAXIS1 should be", "provided that the following conditions are met: 1. Redistributions of source code must", "== offset: break xoffset = offset return list class Header: \"\"\"FITS header class.\"\"\"", "not (newkey in Card._commentaryKeys and oldkey in Card._commentaryKeys): raise ValueError, 'Regular and commentary", "= 0 # reset _npts = map(len, self._convert[indx]) desc[:len(_npts),0] = _npts _dtype =", "of parameter names. bscale: BSCALE of the data bzero: BZERO of the data", "class _VLF(objects.ObjectArray): \"\"\"variable length field object.\"\"\" def __init__(self, input): \"\"\" input: a sequence", "for n in dims: npt *= n # Now, get the data (does", "(type, width) = _convert_ASCII_format(self.data[i].format) if width is None: self.data[i].format = ascii_fmt[self.data[i].format[0]] elif isinstance(input,", "None: type = self.NumCode[self._bitpix] _type = getattr(num, type) # Determine how to scale", "None, None if header is not None: if not isinstance(header, Header): raise ValueError,", "_number, _scale, _zero, bscale, bzero) = self._get_scale_factors(indx) # for P format if isinstance(self._coldefs._recformats[indx],", "3 # if the card EXTEND exists, must be after it. try: _dum", "does not exist, default it to 1 _extver = self[j]._extver if _ver ==", "the binary data(*). 
(*) In future it may be possible to decipher where", "real = Card._number_NFSC_RE.match(valu.group('real')) _rdigt = real.group('digt').translate(_fix_table2, ' ') if real.group('sign') == None: _val", "memmap=0): if mode not in _python_mode.keys(): raise \"Mode '%s' not recognized\" % mode", "_scale: bscale = 1 if not _zero: bzero = 0 return (_str, _bool,", "re.compile(r'(?P<sign>[+-])?0*(?P<digt>' + _digits_FSC+')') _number_NFSC_RE = re.compile(r'(?P<sign>[+-])? *0*(?P<digt>' + _digits_NFSC + ')') # FSC", "= 0 hdu._file = ffo.getfile() # if not resized, update in place else:", "r'[ -~]*$' _comment_FSC_RE = re.compile(_ASCII_text) # Checks for a valid value/comment string. It", "primary HDU class.\"\"\" def __init__(self, data=None, header=None): \"\"\"Construct a primary HDU. data: the", "a # FITS file) self.data return new_table(self.columns, header=self.header, tbtype=self.columns._tbtype) def _verify(self, option='warn'): \"\"\"_TableBaseHDU", "run _ExtensionHDU.__init__ since it is not doing anything. _ImageBaseHDU.__init__(self, data=data, header=header) self._xtn =", "valstr) # do the comment string if self.comment is None: comm = ''", "\"\"\" new_card = Card(key, value) if before != None or after != None:", "the case of a missing 'END' card, the Header may also contain the", "for i in range(naxis): _naxis = self.hdu.header['NAXIS'+`naxis-i`] indx = _iswholeline(key[i], _naxis) offset =", "@param filename: name of the file to append to @type data: array, table,", "dat_format = `int(num.array(data_shape).sum())` + _format _bscale = self.header.get('BSCALE', 1) _bzero = self.header.get('BZERO', 0)", "keylist self.count_blanks() if useblanks: self._use_blanks(card._ncards()) self.count_blanks() self._mod = 1 else: raise SyntaxError, \"%s", "elif _bool: self._parent.field(indx)[:] = num.choose(self._convert[indx], (ord('F'),ord('T'))) class GroupData(FITS_rec): \"\"\"Random groups data object. Allows", "self.__dict__.has_key('key') or self.__dict__.has_key('_cardimage'): if isinstance(self, _Hierarch): keyStr = 'HIERARCH %s ' % self.key", "HDU.\" for item in hdu: if not isinstance(item, _AllHDU): raise ValueError, \"%s is", "cards you need in the header: header.update(key,value,comment) shdu = pyfits.StreamingHDU('filename.fits',header) for each piece", "This is to speed up the open. Any header will not be initialized", "must have string value.' if name == 'value': _val = re.sub(\"''\", \"'\", _card.value).rstrip()", "if mode in ['update', 'append']: raise \"Writing to gzipped fits files is not", "def __add__(self, other, option='left'): if isinstance(other, Column): b = [other] elif isinstance(other, ColDefs):", "' '*8 # value string # check if both value and _cardimage attributes", "takes priority if (bscale != 1 or bzero !=0): _scale = bscale _zero", "raise KeyError, 'Keyword %s not found.' % `key` else: raise KeyError, 'Illegal key", "card.' if not isinstance(_card.value, str): raise ValueError, 'Cards with CONTINUE must have string", "match what is expected by the header, a TypeError exception is raised. 
\"\"\"", "_zero: for i in range(len(self._parent)): dummy[i][:] = dummy[i]*bscale+bzero # Boolean (logical) column if", "'_valuestring' not in self.__dict__: self.__dict__['_valuestring'] = valu.group('valu') if '_valueModified' not in self.__dict__: self.__dict__['_valueModified']", "= '', 0 for j in range(self.header['TFIELDS']): bcol = self.header['TBCOL'+`j+1`] valu = self.header['TFORM'+`j+1`]", "re.compile(r\"EXTNAME\\s*=\\s*'([ -&(-~]*)'\") re_extver = re.compile(r\"EXTVER\\s*=\\s*(\\d+)\") mo = re_extname.search(self._raw) if mo: name = mo.group(1).rstrip()", "_tmp.index(\"/\") self.__dict__['value'] = _tmp[:slashLoc].strip() self.__dict__['comment'] = _tmp[slashLoc+1:].strip() except: self.__dict__['value'] = _tmp.strip() elif input.group('numr')", "axes = [] else: raise ValueError, \"incorrect array type\" self.header['NAXIS'] = len(axes) #", "_card in self.ascardlist(): if _card.key == 'HISTORY': output.append(_card.value) return output def get_comment(self): \"\"\"Get", "try to match the name with case insensitivity. So, in the last example,", "HDU.' fix = \"self.insert(0, PrimaryHDU())\" _text = self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix) _err.append(_text) #", "fix=fix) _err.append(_text) # each element calls their own verify for i in range(len(self)):", "comment out to avoid circular reference of _pcount # pass the attributes for", "1 if verbose: print \"One or more header is resized.\" break # Data:", "naxis) if (indx.start == 0) and (indx.stop == naxis) and (indx.step == 1):", "_ImageBaseHDU.__init__(self, data=data, header=header) self._xtn = 'IMAGE' self.header._hdutype = ImageHDU # insert the require", "if the supposed location is specified if pos is not None: test_pos =", "_File class. It has this two-tier calls because _File has ts own private", "card image must have CONTINUE cards after the first card.' if not isinstance(_card.value,", "+ 1 # use an array, even if it is only ONE u1", "do NOT use self.key commentStr = '' elif self.__dict__.has_key('comment') or self.__dict__.has_key('_cardimage'): if self.comment", "\"'\", _card.value).rstrip() # drop the ending \"&\" if _val[-1] == '&': _val =", "of the keywod EXTNAME, default=None. \"\"\" # no need to run _ExtensionHDU.__init__ since", "Header and extension specs can also be keyword arguments. For example: >>> update(file,", "input + ' ' * (Card.length-strlen) def _floatFormat(value): \"\"\"Format the floating number to", "but will accept # strings with an odd number of single quotes, #", "SyntaxError, \"%s is not a Card\" % str(card) def _use_blanks(self, how_many): if self._blanks", "notation. One for FSC and one for non-FSC (NFSC) format: # NFSC allows", "0: cname = _commonNames[_keyNames.index(keyword)] dict[col-1][cname] = _card.value # data reading will be delayed", "nx: number of bits \"\"\" pow2 = [128, 64, 32, 16, 8, 4,", "FITS_rec(rec.array(None, formats=tmp._recformats, names=tmp.names, shape=nrows)) hdu.data._coldefs = hdu.columns # populate data to the new", "zero offset for all columns after this call. The final offset will be", "it is only ONE u1 (i.e. use tuple always) output_format = _FormatX(`(nbytes,)`+'u1') output_format._nx", "% type(key) def copy(self): \"\"\"Make a (deep)copy of the CardList.\"\"\" cards = [None]*len(self)", "too long (> 8), use HIERARCH.' % val else: raise ValueError, 'keyword name", "def __getattr__(self, name): \"\"\"Populate the attributes.\"\"\" cname = name[:-1] if cname in _commonNames:", "definition keywords. 
Mark them first, # then delete from the end so as", "may have wrong byteorder if coldata2._byteorder != 'big': coldata2.byteswap() coldata2._byteorder = 'big' #", "_key = key _ver = None if not isinstance(_key, str): raise KeyError, key", "_extver # reset the modification attributes after updating for hdu in self: hdu.header._mod", "= self._bzero elif option == 'minmax': if isinstance(_type, num.FloatingType): _scale = 1 _zero", "has only one argument. \"\"\" result = \"\" element = 0 # go", "and (_scale or _zero): dummy = self._convert[indx].copy() if _zero: dummy -= bzero if", "option='warn'): \"\"\"TableHDU verify method.\"\"\" _err = _TableBaseHDU._verify(self, option=option) self.req_cards('PCOUNT', None, 'val == 0',", "self.__file.write(blocks) # flush, to make sure the content is written self.__file.flush() return loc", "of a required Card.\"\"\" \"\"\"If pos = None, it can be anywhere. If", "or 'TableHDU' (text table). \"\"\" ascii_fmt = {'A':'A1', 'I':'I10', 'E':'E14.6', 'F':'F16.7', 'D':'D24.16'} self._tbtype", "_err) # verify each card for _card in self.header.ascard: _err.append(_card._verify(option)) return _err def", "comm_list = self._words_group(comm, comm_len) for i in comm_list: commstr = \"CONTINUE '&' /", "return result else: # verify the equal sign position if self.key not in", "__file. \"\"\" if self.__file != None: if self.__file.memmap == 1: self.mmobject = self.__file._mm", "return input else: return input + ' ' * (Card.length-strlen) # minimum length", "_number_FSC_RE = re.compile(r'(?P<sign>[+-])?0*(?P<digt>' + _digits_FSC+')') _number_NFSC_RE = re.compile(r'(?P<sign>[+-])? *0*(?P<digt>' + _digits_NFSC + ')')", "of the HDU is resized for hdu in self: # Header: # Add", "in self._coldefs.formats[indx] _bool = self._coldefs._recformats[indx][-2:] == _booltype else: _str = self._coldefs.formats[indx][0] == 'A'", "table HDU hdu = eval(tbtype)(header=header) if isinstance(input, ColDefs): if input._tbtype == tbtype: tmp", "(x, _width[indx]) else: self._parent.field(indx)[i] = x if 'D' in _format: self._parent.field(indx).sub('E', 'D') #", "a card from key, value, and (optionally) comment. Any specifed arguments, except defaults,", "is different from the old one if str(self[_key]) != str(value): super(CardList, self).__setitem__(_key, value)", "=0 (default) the card will be appended after the last non-commentary card. If", "can be either a keyword name or index. \"\"\" if before != None:", "header self.data = data self.name = None def size(self): \"\"\"Returns the size (in", "first card here, instead of in the respective HDU classes, # so the", "= eval(_key.group('num')) if col <= _nfields and col > 0: cname = _commonNames[_keyNames.index(keyword)]", "keyword is found, return the default value. key: keyword name or index default:", "mo = re_gcount.search(block) if mo is not None: gcount = int(mo.group(1)) else: gcount", "cards, the \"Card\" is considered # to be more than one 80-char \"physical\"", "parbscales is None: parbscales = [None]*npars if parbzeros is None: parbzeros = [None]*npars", "= open(filename, mode=mode) n_ext1 = len(ext1) n_ext2 = len(ext2) keys = ext2.keys() #", "for Research in Astronomy (AURA) Redistribution and use in source and binary forms,", "a binary table's data. 
\"\"\" if attr == 'data': # same code as", "indx < 0: indx += npts elif indx > npts: indx = npts", "the FITS_rec was created in a LittleEndian machine hdu.data._byteorder = 'big' hdu.data._parent._byteorder =", "fixable result = Card._value_FSC_RE.match(self._getValueCommentString()) if result is not None or self.key in Card._commentaryKeys:", "self.ascard.index_of(key) if comment is not None: _comment = comment else: _comment = self.ascard[j].comment", "data size cannot be calculated or the 'END' card is not found. In", "if only one HDU needs to be written to a file. name: output", "to avoid misalignment. \"\"\" if isinstance(value, num.NumArray) and value.type() == self._dtype: pass elif", "\"\"\" Initialize all attributes to be a list of null strings.\"\"\" for cname", "definition.\"\"\" return self+column def del_col(self, col_name): \"\"\"Delete (the definition of) one Column.\"\"\" indx", "pass class Delayed: \"\"\"Delayed file-reading data.\"\"\" def __init__(self, hdu=None, field=None): self.hdu = hdu", "in range(3, naxis+3): self.req_cards('NAXIS'+`j-2`, '== '+`j`, _isInt+\" and val>= 0\", 1, option, _err)", "_bool: self._convert[indx] = num.equal(dummy, ord('T')) else: return dummy return self._convert[indx] def _scale_back(self): \"\"\"Update", "self._convert[i] = pardata[i] else: self._parent.field(i)[:] = pardata[i] (_scale, _zero) = self._get_scale_factors(npars)[3:5] if _scale", "from the HDUList, indexed by number or name.\"\"\" key = self.index_of(key) _item =", "object @return: header \"\"\" hdulist, _ext = _getext(filename, 'readonly', *ext, **extkeys) hdu =", "ASCII text.\"\"\" if Card._comment_FSC_RE.match(val) is None: self.__dict__['_err_text'] = 'Unprintable string %s' % repr(val)", "num.maximum.reduce(self.data) self.data.setshape(dims) if `_type` == 'UInt8': # UInt8 case _zero = min _scale", "numr.group('sign') == None: _val = eval(_digt) else: _val = eval(numr.group('sign')+_digt) elif valu.group('cplx') !=", "mandatory keywords. naxis = self.header.get('NAXIS', 0) self.req_cards('PCOUNT', '== '+`naxis+3`, _isInt+\" and val >=", "*' r'(?P<real>' + _numr_FSC + ') *, *(?P<imag>' + _numr_FSC + ') *\\))'", "in range(1, naxis): size = size * self.header['NAXIS'+`j+1`] bitpix = self.header['BITPIX'] gcount =", "curDataSize = self._ffo.getfile().tell() - self._datLoc if curDataSize + data.itemsize()*data._size > self._size: raise IOError,", "64, 32, 16, 8, 4, 2, 1] nbytes = ((nx-1) / 8) +", "0 raise ValueError, self._err_text def _checkKey(self, val): \"\"\"Verify the keyword to be FITS", "if _scale: _arr *= bscale if _zero: _arr += bzero hdu.data._convert[i][:n] = _arr", "not None: bitpix = int(mo.group(1)) else: raise ValueError(\"BITPIX not found where expected\") mo", "be accepted. An attempt to write more data after the stream has been", "1, search from the end. \"\"\" if isinstance(key, (int, long)): return key elif", "self._resize = 1 def __delitem__(self, key): \"\"\"Delete an HDU from the HDUList, indexed", "strlen = _len % Card.length if strlen == 0: return input else: return", "_datLoc: starting byte location of data block in file (None) \"\"\" # mappings", "renamed new file with \"update\" mode os.rename(_name, oldName) ffo = _File(oldName, mode=\"update\", memmap=oldMemmap)", "'No data in this HDU.' 
if _data is None: raise IndexError, 'No data", "_list.index(_key) elif _count == 0: raise NameError, \"Key '%s' does not exist.\" %", "== 1\", 1, option, _err) return _err # 0.8.8 def _iswholeline(indx, naxis): if", "required to be NDArray if format is not None: # check format try:", "itemsize=1) # locations of the blanks blank_loc = num.nonzero(arr == ' ')[0] offset", "if val == 'END': raise ValueError, \"keyword 'END' not allowed\" self._checkKey(val) else: if", "be reset.' elif name == 'value': self._setvalue(val) elif name == 'comment': self._setcomment(val) else:", "Mark them first, # then delete from the end so as not to", "= self.header.get('NAXIS', 0) if isinstance(self.data, GroupData): self.header['BITPIX'] = _ImageBaseHDU.ImgCode[self.data.data.type()] axes = list(self.data.data.getshape())[1:] axes.reverse()", "- nx for i in range(nbytes): _min = i*8 _max = min((i+1)*8, nx)", "= name self.mode = mode self.memmap = memmap if memmap and mode not", "value self._mod = 1 def __delitem__(self, key): \"\"\"Delete card(s) with the name 'key'.\"\"\"", "= [] for i in range(self._nfields): _loc.append(_loc[-1]+self._parent.field(i).itemsize()) _width.append(_convert_ASCII_format(self._coldefs._Formats[i])[1]) self._heapsize = 0 for indx", "(2.**(8*_type.bytes) - 2) # Do the scaling if _zero != 0: self.data +=", "the output's itemsize of %s\" % (x, _width[indx]) else: self._parent.field(indx)[i] = x if", "used in \"update/append\" mode # CardList needs its own _mod attribute since it", "data in the HDU, default=None. header: the header to be used (as a", "None: _card = Card(key, value, comment) self.ascard._pos_insert(_card, before=before, after=after) else: self.ascard.append(Card(key, value, comment))", "in ['readonly', 'copyonwrite', 'update']: raise \"Memory mapping is not implemented for mode `%s`.\"", "%s\" % \\ (self.name, type, len(self.header.ascard), _dims, _format) def get_coldefs(self): \"\"\"Returns the table's", "'format' and isinstance(self, BinTableHDU): val = _cols._recformats[i] if isinstance(val, _FormatX): val = `val._nx`", "= map(len, self._convert[indx]) desc[:len(_npts),0] = _npts _dtype = num.getType(self._coldefs._recformats[indx]._dtype) desc[1:,1] = num.add.accumulate(desc[:-1,0])*_dtype.bytes desc[:,1][:]", "output.tofile(self._ffo.getfile()) if self._ffo.getfile().tell() - self._datLoc == self._size: # # the stream is full", "data is not contiguous.' # the offset needs to multiply the length of", "in reading in the FITS file), # it will be constructed from the", "Groups HDU class.\"\"\" _dict = {8:'B', 16:'I', 32:'J', 64:'K', -32:'E', -64:'D'} def __init__(self,", "if mode in ['update', 'append']: raise \"Writing to zipped fits files is not", "Header(_list) self._bzero = self.header.get('BZERO', 0) self._bscale = self.header.get('BSCALE', 1) if (data is DELAYED):", "string in another. 
Also, it does not break at the blank space between", "for j in range(_ncols): _format += self.header['TFORM'+`j+1`] + ', ' _format = _format[:-2]", "if self.__file.mode not in ('append', 'update'): print \"flush for '%s' mode is not", "= Card._number_NFSC_RE.match(valu.group('numr')) _digt = numr.group('digt').translate(_fix_table2, ' ') if numr.group('sign') == None: _val =", "not in self.__dict__: self.__dict__['_valueModified'] = 0 elif name == 'comment': self.__dict__['comment'] = ''", "data.parnames = self.columns._pnames else: data = None self.__dict__[attr] = data elif attr ==", "file will be opened and the header appended to the end of the", "EXTVER=2 >>> getdata('in.fits', extname='sci', extver=2) # equivalent >>> getdata('in.fits', ('sci', 2)) # equivalent", "attributes: key, value, and comment, or from raw string. option: verification option, default=silentfix.", "_tbsize _pcount = hdu.data._heapsize + hdu.data._gap if _pcount > 0: hdu.header['PCOUNT'] = _pcount", "[] else: dummy = map(lambda x, y: x-y, self.starts[1:], [1]+self.starts[1:-1]) dummy.append(self._width-self.starts[-1]+1) attr =", "\"\"\"A view of a Column's data as an array.\"\"\" indx = _get_index(self._coldefs.names, key)", "of bscales for the parameters parbzeros: list of bzeros for the parameters \"\"\"", "can be used to reconstruct another kind of header. \"\"\" try: # have", "= array def __repr__(self): text = '' for cname in _commonNames: value =", "threading # Module variables _blockLen = 2880 # the FITS block size _python_mode", "use: header = pyfits.Header() for all the cards you need in the header:", "'Int64':64, 'Float32':-32, 'Float64':-64} def __init__(self, data=None, header=None): self._file, self._datLoc = None, None if", "mode os.rename(_name, oldName) ffo = _File(oldName, mode=\"update\", memmap=oldMemmap) self.__file = ffo if (verbose):", "= _format, bscale = _bscale, bzero = _bzero)) data_shape = self._dimShape()[:-1] dat_format =", "size : integer The number of bytes of data required to fill the", "return hdr def getdata(filename, *ext, **extkeys): \"\"\"Get the data from an extension of", "to write more data after the stream has been filled will raise an", "flush(self, output_verify='exception', verbose=0): \"\"\"Force a write of the HDUList back to the file", "no longer than strlen and no word is cut into two pieces. But", "for updating), default=None. before: name of the keyword, or index of the Card", "not an attribute of the column definitions.\"%att continue print \"%s:\" % att print", "image from the attributes: key, value, and comment, or from raw string. option:", "commentStr = '' else: commentStr = ' / ' + self.comment else: commentStr", "in keys: if n_ext2 == 1: ext = ext2['ext'] elif n_ext2 == 2", "self._fixValue(result) if option == 'fix': self.__dict__['_fix_text'] = 'Fixed card to be FITS standard.:", "re.compile(r'GCOUNT =\\s*(-?\\d+)') re_pcount = re.compile(r'PCOUNT =\\s*(-?\\d+)') re_groups = re.compile(r'GROUPS =\\s*(T)') simple = re_simple.search(block[:80])", "if isinstance(self.data, GroupData): self.header['BITPIX'] = _ImageBaseHDU.ImgCode[self.data.data.type()] axes = list(self.data.data.getshape())[1:] axes.reverse() axes = [0]", "1 _zero = 0 else: # flat the shape temporarily to save memory", "may have been truncated.' hdu._ffile = self return hdu def writeHDU(self, hdu): \"\"\"Write", "_getext(filename, 'readonly', *ext, **extkeys) hdu = hdulist[_ext] hdr = hdu.header hdulist.close() return hdr", "be written to. 
output_verify: output verification option, default = 'exception'. clobber: Overwrite the", "output file already exists if os.path.exists(name): if clobber: print \"Overwrite existing file '%s'.\"", "block def __str__(self): \"\"\"Format a list of cards into a printable string.\"\"\" output", "= {'A':'A1', 'I':'I10', 'E':'E14.6', 'F':'F16.7', 'D':'D24.16'} self._tbtype = tbtype if isinstance(input, ColDefs): self.data", "fix=fix, fixable=fixable)) else: # if the supposed location is specified if pos is", "(if unique): >>> getdata('in.fits', 'sci') >>> getdata('in.fits', extname='sci') # equivalent Note EXTNAMEs are", "defaults, must be compliant to FITS standard. key: keyword name, default=''. value: keyword", "val is not None: raise ValueError, 'comment %s is not a string' %", "def __getslice__(self, start, end): _cards = super(CardList, self).__getslice__(start,end) result = CardList(_cards, self._keylist[start:end]) return", "to a strings array array = chararray.array(array, itemsize=eval(recfmt[1:])) # then try variable length", "__getattr__(self, name): \"\"\" instanciate specified attribute object.\"\"\" if name == '_cardimage': self.ascardimage() elif", "not end with two single quotes, # whereas it should not end with", "%s\\n\"%(j, self[j]._summary()) results = results[:-1] print results def open(name, mode=\"copyonwrite\", memmap=0): \"\"\"Factory function", "bscale, bzero) = self._get_scale_factors(indx) # add the location offset of the heap area", "\"\"\"Delete a Card from the CardList.\"\"\" _key = self.index_of(key) super(CardList, self).__delitem__(_key) del self._keylist[_key]", "== '': raise EOFError hdu = _TempHDU() hdu._raw = '' # continue reading", "use an independent # attribute of mmobject so if the HDUList object is", "\"File '%s' already exist.\" % name # make sure the EXTEND keyword is", "list of HDU's as input if isinstance(hdus, _ValidHDU): hdus = [hdus] elif not", "<= 0: raise IndexError, 'Illegal slice %s, step must be positive.' % input", "if _key[:8] == 'HIERARCH': _key = _key[8:].strip() _keylist = self._keylist if backward: _keylist", "copyright notice, this list of conditions and the following disclaimer. 2. 
Redistributions in", "\"\"\" if self.has_key(key): j = self.ascard.index_of(key) if comment is not None: _comment =", "= \"self.header.ascard.insert(%d, %s)\" % (insert_pos, _card) _err.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix, fixable=fixable)) else: #", "decide which kind of header it belongs to try: if cards[0].key == 'SIMPLE':", "(_option == \"ignore\"): return x = str(self._verify(_option)).rstrip() if _option in ['fix', 'silentfix'] and", "an (table) HDU tbtype: which table HDU, 'BinTableHDU' (default) or 'TableHDU' (text table).", "This method should only be used right before writing to the output file,", "tmp._arrays[i][:n] elif isinstance(tmp._recformats[i], _FormatP): hdu.data._convert[i] = _makep(tmp._arrays[i][:n], hdu.data._parent.field(i)[:n], tmp._recformats[i]._dtype) else: if tbtype ==", "in range(hdu.data._nfields): if isinstance(hdu.data._coldefs.formats[i], _FormatP): key = hdu.header['TFORM'+`i+1`] hdu.header['TFORM'+`i+1`] = key[:key.find('(')+1] + `hdu.data.field(i)._max`", "the list twice, first time print out all top level messages for item", "0 for hdu in self: hdu.header._mod = 0 hdu.header.ascard._mod = 0 hdu._new =", "num.IntegralType): dummy = num.around(dummy) self._parent.field(indx)[:] = dummy del dummy # ASCII table does", "!= '': dirName += '/' _name = dirName + os.path.basename(tempfile.mktemp()) if not os.path.exists(_name):", "to make mask array indexing work properly.\"\"\" hdu = new_table(self._coldefs, nrows=shape[0]) return hdu.data", "own _mod attribute since it has methods to change # the content of", "and binary tables if _number or _str: if _number and (_scale or _zero):", "file does not exist and the provided header is not a Primary header,", "r'(?P<code>[ADEFI])(?P<width>\\d+)(?:\\.(?P<prec>\\d+))?') def __init__(self, data=None, header=None, name=None): \"\"\"data: data of the table header: header", "= self.data.copy() else: _data = None return self.__class__(data=_data, header=self.header.copy()) def writeto(self, name, output_verify='exception',", "_format = self.columns.formats # if data is not touched yet, use header info.", "CardList(_cards, self._keylist[start:end]) return result def __setitem__(self, key, value): \"\"\"Set a Card by indexing", "Flag that when true indicates that all of the required data has been", "= i*8 _max = min((i+1)*8, nx) for j in range(_min, _max): num.bitwise_and(input[...,i], pow2[j-i*8],", "right after the last field if self._tbtype == 'TableHDU': last_end = 0 attr", "+ _width - 1 self.spans[i] = _end - last_end last_end = _end self._Formats", "_scale = (max - min) / (2.**(8*_type.bytes) - 2) # Do the scaling", "can be a Card or just # a number/string for cname in _commonNames:", "\"\"\"Set an HDU attribute.\"\"\" if attr == 'name' and value: if not isinstance(value,", "layer over the RecArray, so we can deal with scaled columns. 
\"\"\" def", "'+ pos if not eval(test_pos): err_text = \"'%s' card at the wrong place", "parse the arguments header = None if len(ext) > 0: if isinstance(ext[0], Header):", "is still a \"view\" (for now) hcopy = header.copy() hcopy._strip() _list.extend(hcopy.ascardlist()) self.header =", "header=header) self.name = 'PRIMARY' # insert the keywords EXTEND if header is None:", "= _TempHDU() hdu._raw = '' # continue reading header blocks until END card", "argument(s): %s' % ext2 elif n_ext1 == 2: if n_ext2 == 0: ext", "False hdulist, _ext = _getext(filename, 'readonly', *ext, **extkeys) hdu = hdulist[_ext] _data =", "\"\"\" self._add_commentary('comment', value, before=before, after=after) def add_blank(self, value='', before=None, after=None): \"\"\"Add a blank", "valfmt % val_list[i] output = output + '%-80s' % (headstr + valstr) #", "a (new) card image from the attributes: key, value, and comment. Core code", "__getattr__(self, attr): \"\"\"Get the data attribute.\"\"\" if attr == 'section': return Section(self) elif", "Functions def _padLength(stringLen): \"\"\"Bytes needed to pad the input stringLen to the next", "in self.keys()] else: self._keylist = keylist # find out how many blank cards", "if not the real CONTINUE card, skip to the next card to search", "BZERO in self.header. This method should only be used right before writing to", "python # $Id: NA_pyfits.py 329 2007-07-06 13:11:54Z jtaylor2 $ \"\"\" A module for", "= item.__str__(tab=tab+1) # print out a message only if there is something if", "0, so we skip NAXIS1. if naxis > 1: size = 1 for", "\"\"\" # decide which kind of header it belongs to try: if cards[0].key", "card is reached while 1: # find the END card mo = end_RE.search(block)", "_coldefs elif attr == '_theap': self.__dict__[attr] = 0 try: return self.__dict__[attr] except KeyError:", "the comment attribute.\"\"\" if isinstance(val,str): self._checkText(val) else: if val is not None: raise", "location. If no \"before\" or \"after\" is specified, it will be appended at", "self.data /= _scale self.header.update('BSCALE', _scale) else: del self.header['BSCALE'] if self.data._type != _type: self.data", "long.\" % self.key if len(output) <= Card.length: output = \"%-80s\" % output #", "_fmt) _formats += data_fmt gcount = input.shape[0] for i in range(npars): _cols.append(Column(name='c'+`i+1`, format", "of requiring data to all be written at once. The following psudo code", "example: >>> update(file, dat, hdr, 'sci') # update the 'sci' extension >>> update(file,", "or float \"\"\" _hdr = getheader(filename, *ext, **extkeys) return _hdr[key] def _makehdu(data, header):", "= self['TFIELDS'] del self['NAXIS'] for i in range(_naxis): del self['NAXIS'+`i+1`] if issubclass(self._hdutype, PrimaryHDU):", "data will be accepted. An attempt to write more data after the stream", "# it will be constructed from the card list. if keylist is None:", "groups data object. 
Allows structured access to FITS Group data in a manner", "_python_mode.keys(): raise \"Mode '%s' not recognized\" % mode if mode != 'append' and", "def __init__(self, data=None, header=None): self._file, self._datLoc = None, None if header is not", "break super(CardList, self).insert(i+1, card) self._keylist.insert(i+1, card.key.upper()) if useblanks: self._use_blanks(card._ncards()) self.count_blanks() self._mod = 1", "next FITS block.\"\"\" return (_blockLen - stringLen%_blockLen) % _blockLen def _tmpName(input): \"\"\"Create a", "table is the same as 7A in # binary table, so both will", "i in range(hdu.data._nfields): if isinstance(hdu.data._coldefs.formats[i], _FormatP): key = hdu.header['TFORM'+`i+1`] hdu.header['TFORM'+`i+1`] = key[:key.find('(')+1] +", "type is created for the supplied data. This argument is optional. @keyword clobber:", "return result def setpar(self, parName, value): \"\"\"Set the group parameter values.\"\"\" if isinstance(parName,", "TZERO if _scale or _zero: for i in range(len(self._parent)): dummy[i][:] = dummy[i]*bscale+bzero #", "self._keylist.insert(pos, card.key) # update the keylist self.count_blanks() if useblanks: self._use_blanks(card._ncards()) self.count_blanks() self._mod =", "for i in range(_max): _where = _keyList[_start:].index('CONTINUE') + _start for nc in range(1,", "the shape temporarily to save memory dims = self.data.getshape() self.data.setshape(self.data.nelements()) min = num.minimum.reduce(self.data)", "# not using numarray.strings's num2char because the # result is not allowed to", "class.\"\"\" _dict = {8:'B', 16:'I', 32:'J', 64:'K', -32:'E', -64:'D'} def __init__(self, data=None, header=None,", "= 'unfixable' if option in ['warn', 'exception']: #raise VerifyError, _text #elif option ==", "implies the primary header >>> getdata('in.fits') By extension number: >>> getdata('in.fits', 0) #", "_keyNames = ['TTYPE', 'TFORM', 'TUNIT', 'TNULL', 'TSCAL', 'TZERO', 'TDISP', 'TBCOL', 'TDIM'] # mapping", "\"descriptor\" array of data type 2Int32 dtype: data type of the variable array", "'== '+`naxis+3`, _isInt+\" and val >= 0\", 0, option, _err) self.req_cards('GCOUNT', '== '+`naxis+4`,", "\"\"\" if attr == 'data': # same code as in _TableBaseHDU size =", "as the first extension. If the file does already exist, but the provided", "_text else: exec(fix) #if option != 'silentfix': _text += ' ' + fix_text", "parbscales[i], bzero = parbzeros[i])) _cols.append(Column(name='data', format = fits_fmt, bscale = bscale, bzero =", "a Primary Header. If not we will need # to prepend a default", "each extension. @type filename: string @param filename: input FITS file name \"\"\" f", "the string if it is not the length of a card image (80", "class _SteppedSlice(_KeyType): pass class Section: \"\"\"Image section.\"\"\" def __init__(self, hdu): self.hdu = hdu", "cases if self._ffile.memmap: self.data = raw_data.copy() # if not memmap, use the space", "to the column definition.\"\"\" return self+column def del_col(self, col_name): \"\"\"Delete (the definition of)", "and appended to the end of the file. \"\"\" self.header = header.copy() #", "FITS file name to be written to. output_verify: output verification option, default =", "self.names: raise ValueError, 'New name %s already exists.' % new_name else: self.change_attrib(col_name, 'name',", "key: keyword name. If given an index, always returns 0. 
\"\"\" try: key", "self.req_cards('GCOUNT', '== '+`naxis+4`, _isInt+\" and val == 1\", 1, option, _err) return _err", "a header of the appropriate type is created for the supplied data. This", "(new) card image from the attributes: key, value, and comment. Core code for", "'' ncards = self._ncards() for i in range(ncards): # take each 80-char card", "after it. try: _dum = self.header['EXTEND'] #_after += 1 except: pass _pos =", "if isinstance(item, _ErrList): _dummy = item.__str__(tab=tab+1) # print out a message only if", "_CorruptedHDU, _ExtensionHDU, GroupsHDU, ImageHDU, _ImageBaseHDU, PrimaryHDU, TableHDU, _TableBaseHDU, _TempHDU, _ValidHDU @group Table-related Classes:", "after=after) def get_history(self): \"\"\"Get all histories as a list of string texts.\"\"\" output", "= getattr(self, cname+'s') del attr[indx] del self._arrays[indx] self._nfields -= 1 def change_attrib(self, col_name,", "if mo is not None: gcount = int(mo.group(1)) else: gcount = 1 mo", "= list(self.data.getshape()) _format = `self.data.type()` _shape.reverse() _shape = tuple(_shape) _format = _format[_format.rfind('.')+1:] #", "get_history(self): \"\"\"Get all histories as a list of string texts.\"\"\" output = []", "\"before\" or \"after\" is specified, it will be appended at the end. key:", "def __getattr__(self, attr): \"\"\"Get the data attribute.\"\"\" if attr == 'section': return Section(self)", "self.array = array def __repr__(self): text = '' for cname in _commonNames: value", "self.data = [col.copy() for col in input.data] # if the input is a", "printable string.\"\"\" kard = self._cardimage output = '' for i in range(len(kard)/80): output", "rest of the header can be used to reconstruct another kind of header.", "'comment': _comm = _card.comment if isinstance(_comm, str) and _comm != '': longstring =", "respective HDU classes, # so the checking is in order, in case of", "None if header is not None: if not isinstance(header, Header): raise ValueError, \"header", "return self.__dict__['_cardimage'] def _ascardimage(self): \"\"\"Generate a (new) card image from the attributes: key,", "exists, must be after it. try: _dum = self.header['EXTEND'] #_after += 1 except:", "= \"%-s\" if not comm == '': nlines = len(comm) / comm_len +", "rename_key(self, oldkey, newkey, force=0): \"\"\"Rename a card's keyword in the header. 
oldkey: old", "re.compile(r'(?P<repeat>^[0-9]*)(?P<dtype>[A-Za-z])(?P<option>[!-~]*)') # table definition keyword regular expression _tdef_re = re.compile(r'(?P<label>^T[A-Z]*)(?P<num>[1-9][0-9 ]*$)') def _parse_tformat(tform):", "output = output[:Card.length] self.__dict__['_cardimage'] = output def _checkText(self, val): \"\"\"Verify val to be", "# each element calls their own verify for i in range(len(self)): if i", "in range(self.header['NAXIS']): if isinstance(self, GroupsHDU) and j == 0: continue _shape += (self.header['NAXIS'+`j+1`],)", "num.array(self._file, type=self._coldefs._recformats[indx]._dtype, shape=self._parent.field(indx)[i,0]) dummy[i]._byteorder = 'big' # scale by TSCAL and TZERO if", "return (repeat, dtype, option) def _convert_format(input_format, reverse=0): \"\"\"Convert FITS format spec to record", "their own verify for i in range(len(self)): if i > 0 and (not", "'s': list[i]=list[i][:-1] for att in list: if att not in _commonNames: print \"'%s'", "return self.update_tbhdu() if output_verify == 'warn': output_verify = 'exception' self.verify(option=output_verify) # check if", "_longstring = _cardList[_where-1]._cardimage for c in _cardList[_where:_where+nc]: _longstring += c._cardimage _cardList[_where-1] = _Card_with_continue().fromstring(_longstring)", "_File(oldName, mode=\"update\", memmap=oldMemmap) self.__file = ffo if (verbose): print \"reopen the newly renamed", "and type for each extension. @type filename: string @param filename: input FITS file", "\"\"\"Wrapper for _verify.\"\"\" _option = option.lower() if _option not in ['fix', 'silentfix', 'ignore',", "standard # appears vague on this issue and only states that a #", "ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED", "primary header needs the keyword EXTEND or if it has the proper value.", "be reset by user. _isInt = \"isinstance(val, (int, long))\" # Functions def _padLength(stringLen):", "if result is not None: _str = result.group('comm') if _str is not None:", "0 def rename_key(self, oldkey, newkey, force=0): \"\"\"Rename a card's keyword in the header.", "of a card length = 80 # String for a FITS standard compliant", "bscale = bscale, bzero = bzero)) self._coldefs = ColDefs(_cols) self.parnames = [i.lower() for", "self.header.ascard.append(Card('EXTNAME', value, 'extension name')) self.__dict__[attr] = value def _verify(self, option='warn'): _err = _ValidHDU._verify(self,", "pass the attributes for attr in ['formats', 'names']: setattr(_data, attr, getattr(tmp, attr)) for", "for _card in self.ascardlist(): if _card.key == 'HISTORY': output.append(_card.value) return output def get_comment(self):", "RecArray.\"\"\" # input should be a record array self.__setstate__(input.__getstate__()) # _parent is the", "Only the amount of data specified in the header provided to the class", "True and if filename already exists, it will overwrite the file. Default is", "the file to be updated data: the new data used for updating The", "'warn': pass # fix the value elif option == 'unfixable': _text = \"Unfixable", "the opposite if reverse = 1. \"\"\" fmt = input_format (repeat, dtype, option)", "not, check to see # if we were provided with a Primary Header.", "*)' r'(?P<comm_field>' r'(?P<sepr>/ *)' r'(?P<comm>[!-~][ -~]*)?' 
[n-gram list continues: shuffled, overlapping code fragments drawn from the PyFITS `pyfits.py` source, spanning Card/Header parsing regular expressions, HDU and CardList handling, table column (ColDefs/Column) definitions, random-group data, and FITS file I/O. The fragments are unordered shingles and do not form reconstructable source code or prose.]
The", "_err def req_cards(self, keywd, pos, test, fix_value, option, errlist): \"\"\"Check the existence, location,", "\"update/append\" mode # CardList needs its own _mod attribute since it has methods", "0) if naxis < 1000: for j in range(3, naxis+3): self.req_cards('NAXIS'+`j-2`, '== '+`j`,", "\"\"\"Locate the equal sign in the card image before column 10 and return", "GroupData): self.header.update('GROUPS', True, after='NAXIS'+`len(axes)`) self.header.update('PCOUNT', len(self.data.parnames), after='GROUPS') self.header.update('GCOUNT', len(self.data), after='PCOUNT') npars = len(self.data.parnames)", "0 if dim > nrows: nrows = dim if tbtype == 'TableHDU': _formats", "!= self._xtn: hdr[0] = self._xtn hdr.ascard[0].comment = 'binary table extension' class StreamingHDU: \"\"\"", "dims = self._dimShape() code = _ImageBaseHDU.NumCode[self.header['BITPIX']] if self._ffile.memmap: _mmap = self._ffile._mm[self._datLoc:self._datLoc+self._datSpan] raw_data =", "class\"\"\" def __init__(self, name, mode='copyonwrite', memmap=0): if mode not in _python_mode.keys(): raise \"Mode", "in self: hdu.header._mod = 0 hdu.header.ascard._mod = 0 if singleThread: if keyboardInterruptSent: raise", "CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY", "!= '': self._setkey(key) self._setvalue(value) self._setcomment(comment) # for commentary cards, value can only be", "'' if valu is not None: _comm = valu.group('comm') if isinstance(_comm, str): self.__dict__['comment']", "= errlist fix = '' cards = self.header.ascard try: _index = cards.index_of(keywd) except:", "else: # if the supposed location is specified if pos is not None:", "*)' r'(?P<comm>.*)' r')?$') # keys of commentary cards _commentaryKeys = ['', 'COMMENT', 'HISTORY']", "re.compile(r'PCOUNT =\\s*(-?\\d+)') re_groups = re.compile(r'GROUPS =\\s*(T)') simple = re_simple.search(block[:80]) mo = re_bitpix.search(block) if", "nrows == 0: for arr in tmp._arrays: if arr is not None: dim", "str) in case of control character if Card._keywd_FSC_RE.match(val) is None: self.__dict__['_err_text'] = 'Illegal", "commentary cards, value can only be strings and there # is no comment", "keys can not be renamed to each other.' elif (force == 0) and", "if isinstance(_comm, str): self.__dict__['comment'] = _comm.rstrip() def _fixValue(self, input): \"\"\"Fix the card image", "end, even if there are blank cards in front of END. \"\"\" if", ">= 0 and indx < naxis: if naxis > 1: return _SinglePoint(1, indx)", "valStr = str(self.value) # put all parts together output = keyStr + eqStr", "in hdu: if not isinstance(item, _AllHDU): raise ValueError, \"%s is not an HDU.\"", "if the string value can fit in one line. # Instead, just truncate", "break at the blank space between words. So it may not look pretty.", "image and return the string before the equal sign. If there is no", "how to scale the data # bscale and bzero takes priority if (bscale", "option, _err) _after = self.header['NAXIS'] + 3 # if the card EXTEND exists,", "_formats += 'a%d,' % tmp.spans[i] _itemsize += tmp.spans[i] hdu.data = FITS_rec(rec.array(' '*_itemsize*nrows, formats=_formats[:-1],", "If there are two or more attribute names, they must be separated by", "1: # find the END card mo = end_RE.search(block) if mo is None:", "FITS block and no more data will be accepted. An attempt to write", "for card in self: block = block + repr(card) return block def __str__(self):", "'Attribute %s not defined.' 
% name self.__dict__[name] = attr return self.__dict__[name] \"\"\" #", "elif isinstance(value, chararray.CharArray) and value.itemsize() == 1: pass elif self._dtype == 'a': value", "put each card into a list of cards. Will deal with CONTINUE cards", "oldMemmap = self.__file.memmap _name = _tmpName(oldName) _hduList = open(_name, mode=\"append\") if (verbose): print", "def update(filename, data, *ext, **extkeys): \"\"\"Update the specified extension with the input data/header.", "to be more than one 80-char \"physical\" cards. _max = _keyList.count('CONTINUE') _start =", "for i in range(ncards): # take each 80-char card as a regular card", "_ncols = self.header['TFIELDS'] _format = '[' for j in range(_ncols): _format += self.header['TFORM'+`j+1`]", "if self._coldefs._tbtype == 'TableHDU': _loc = [1] _width = [] for i in", "extension number: >>> getdata('in.fits', 0) # the primary header >>> getdata('in.fits', 2) #", "the delayed data for i in range(len(tmp)): _arr = tmp._arrays[i] if isinstance(_arr, Delayed):", "useblanks != 0, and if there are blank cards directly before END, it", "array of shape (s, nx) nx: number of bits \"\"\" pow2 = [128,", "of cards into a printable string.\"\"\" kard = self._cardimage output = '' for", "\"\"\" val_len = 67 comm_len = 64 output = '' # do the", "naxis): \"\"\"Set the slice's start/stop in the regular range.\"\"\" def _normalize(indx, npts): if", "= bzero not in ['', None, 0] # ensure bscale/bzero are numbers if", "Image Transport System (FITS) files. This file format was endorsed by the International", "a header. :Parameters: name : string The name of the file to which", "_err class GroupsHDU(PrimaryHDU): \"\"\"FITS Random Groups HDU class.\"\"\" _dict = {8:'B', 16:'I', 32:'J',", "name @param ext: The rest of the arguments are for extension specification. See", "result.group('comm') if _str is not None: self._checkText(_str) def fromstring(self, input): \"\"\"Construct a Card", "% key else: # multiple match raise NameError, \"Ambiguous key name '%s'.\" %", "+ _numr_NFSC + ')|' r'(?P<cplx>\\( *' r'(?P<real>' + _numr_NFSC + ') *, *(?P<imag>'", "size cannot be calculated or the 'END' card is not found. In the", "sliced FITS_rec will view the same scaled columns as # the original dummy", "to the stream. :Parameters: data : NumArray Data to stream to the file.", "else: repeat = eval(repeat) return (repeat, dtype, option) def _convert_format(input_format, reverse=0): \"\"\"Convert FITS", "indexed by number or name.\"\"\" key = self.index_of(key) _item = super(HDUList, self).__getitem__(key) if", "nfound += 1 else: # if the keyword EXTVER does not exist, default", "with the field method), it will try to match the exact name first,", "getdata('in.fits', 0) # the primary header >>> getdata('in.fits', 2) # the second extension", "% array array._dtype = recfmt._dtype else: raise ValueError, \"Data is inconsistent with the", "it contains CONTINUE card(s). \"\"\" self.__dict__['_cardimage'] = _pad(input) if self._cardimage[:8].upper() == 'HIERARCH': self.__class__", "isinstance(parName, (int, long)): result = self.field(parName) else: indx = self._unique[parName.lower()] if len(indx) ==", "<= 999\", 0, option, _err) naxis = self.header.get('NAXIS', 0) if naxis < 1000:", "__str__(self): return self.ascard.__str__() def ascardlist(self): \"\"\"Returns a CardList.\"\"\" return self.ascard def items(self): \"\"\"Return", "is to speed up the open. Any header will not be initialized till", "since there is no way to communicate back to the _keylist. 
self._checkKey(self.key) #", "= head.strip().upper() def _extractValueComment(self, name): \"\"\"Exatrct the keyword value or comment from the", "used for all docstrings in this module. @group Header-related Classes: Card, CardList, _Card_with_continue,", "% _expValStr valStr = '%-20s' % valStr # must be before int checking", "self.__dict__.has_key('comment') or self.__dict__.has_key('_cardimage'): if self.comment in [None, '']: commentStr = '' else: commentStr", "range(len(indx)): self.field(indx[i])[:] = value[i] else: raise ValueError, \"parameter value must be a sequence", "= \"CONTINUE '&' / \" + commfmt % i output = output +", "isinstance(self, _Hierarch): self.__dict__['key'] = head.strip() else: self.__dict__['key'] = head.strip().upper() def _extractValueComment(self, name): \"\"\"Exatrct", "raise NameError, \"Illegal key '%s'.\" % `key` return indx def _unwrapx(input, output, nx):", "data. \"\"\" if attr == 'data': # same code as in _TableBaseHDU size", "%s\" % fmt else: if dtype == 'a': output_format = option+_rec2fits[dtype] elif isinstance(dtype,", "return ColDefs(tmp) def __radd__(self, other): return self.__add__(other, 'right') def __sub__(self, other): if not", "name == 'value': _val = re.sub(\"''\", \"'\", _card.value).rstrip() # drop the ending \"&\"", "will be in the order of NAXIS's which is the # reverse of", "= len(tmp._arrays[i]) n = min(size, nrows) if fill: n = 0 (_scale, _zero,", "'P' + _convert_format(val._dtype, reverse=1) + '(%d)' % VLdata._max else: val = _convert_format(val, reverse=1)", "and maximum of the data to scale. The option will be overwritten by", "key: keyword name value: keyword value (to be used for updating) comment: keyword", "+ \"%-3d %s\\n\"%(j, self[j]._summary()) results = results[:-1] print results def open(name, mode=\"copyonwrite\", memmap=0):", "data in this HDU.' if _gethdr: _hdr = hdu.header hdulist.close() if _gethdr: return", "'unfixable': _text = \"Unfixable error: %s\" % _text else: exec(fix) #if option !=", "string %s' % repr(val) self.__dict__['_fixable'] = 0 raise ValueError, self._err_text def _checkKey(self, val):", "methods. _card = Card().fromstring(self._cardimage[i*80:(i+1)*80]) if i > 0 and _card.key != 'CONTINUE': raise", "of null strings.\"\"\" for cname in _commonNames: setattr(self, cname+'s', ['']*self._nfields) setattr(self, '_arrays', [None]*self._nfields)", "Columns or a ColDefs object. 
header: header to be used to populate the", "= super(HDUList, self).__getitem__(key) if isinstance(_item, _TempHDU): super(HDUList, self).__setitem__(key, _item.setupHDU()) return super(HDUList, self).__getitem__(key) def", "case of a missing 'END' card, the Header may also contain the binary", "append(self, hdu): \"\"\"Append a new HDU to the HDUList.\"\"\" if isinstance(hdu, _AllHDU): super(HDUList,", "# insert the keywords EXTEND if header is None: dim = `self.header['NAXIS']` if", "= key _ver = None if not isinstance(_key, str): raise KeyError, key _key", "r'(?P<bool>[FT])|' r'(?P<numr>' + _numr_FSC + ')|' r'(?P<cplx>\\( *' r'(?P<real>' + _numr_FSC + ')", "corrupt the original array if bzero not in ['', None, 0] or bscale", "of parameters', after='NAXIS'+dim) if not self.header.has_key('GCOUNT'): self.header.update('GCOUNT', 1, 'number of groups', after='PCOUNT') self._ffo", "# if image, need to deal with byte order if isinstance(hdu, _ImageBaseHDU): if", "# update TFORM for variable length columns for i in range(hdu.data._nfields): if isinstance(hdu.data._coldefs.formats[i],", "# --------------------------Table related code---------------------------------- # lists of column/field definition common names and keyword", "_File): _data._byteorder = 'big' # pass datLoc, for P format _data._heapoffset = hdu._theap", "the header provided in the constructor. \"\"\" size = 0 naxis = self.header.get('NAXIS',", "<= Card.length: output = \"%-80s\" % output # longstring case (CONTINUE card) else:", "width is None: self.data[i].format = ascii_fmt[self.data[i].format[0]] elif isinstance(input, _TableBaseHDU): hdr = input.header _nfields", "append(filename, data, header=None): \"\"\"Append the header/data to FITS file if filename exists, create", "or by the keyword name.\"\"\" _key = self.index_of(key) return super(CardList, self).__getitem__(_key) def __getslice__(self,", "source and binary forms, with or without modification, are permitted provided that the", "of the input data does not match what is expected by the header,", "!= 1: self.data /= _scale self.header.update('BSCALE', _scale) else: del self.header['BSCALE'] if self.data._type !=", "== 0: ext = ext1[0] else: if isinstance(ext1[0], (int, tuple)): raise KeyError, 'Redundant/conflicting", "specifications will raise an exception, e.g., >>> getdata('in.fits', ext=('sci',1), extname='err', extver=2) @return: an", "for hdu in self: if (verbose): try: _extver = `hdu.header['extver']` except: _extver =", "def close(self, output_verify='exception', verbose=0): \"\"\"Close the associated FITS file and memmap object, if", "hdu.header hdulist.close() return hdr def getdata(filename, *ext, **extkeys): \"\"\"Get the data from an", "unchanged else: self.header = header else: # construct a list of cards of", "has the proper value. \"\"\" hdr = self[0].header if hdr.has_key('extend'): if (hdr['extend'] ==", "a default PrimaryHDU to the file before writing the # given header. #", "else: raise TypeError, 'Wrong type of input' if option == 'left': tmp =", "record array from a RecArray.\"\"\" # input should be a record array self.__setstate__(input.__getstate__())", "card self._blanks = 0 self.count_blanks() def __getitem__(self, key): \"\"\"Get a Card by indexing", "if isinstance(hdu._ffile, _File): _data._byteorder = 'big' # pass datLoc, for P format _data._heapoffset", "structure with corresponding indentations. 
A tricky use of __str__, since normally __str__ has", "the group data itself (a numarray) or a record array (FITS_rec) which will", "> 1: if coldata._byteorder != 'big': coldata.byteswap() coldata._byteorder = 'big' if coldata2._type.bytes >", "1 mo = re_pcount.search(block) if mo is not None: pcount = int(mo.group(1)) else:", "_format = self._coldefs._Formats[indx].strip() _lead = self._coldefs.starts[indx] - _loc[indx] if _lead < 0: raise", "name. \"\"\" oldkey = oldkey.strip().upper() newkey = newkey.strip().upper() if newkey == 'CONTINUE': raise", "size = 1 for j in range(groups,naxis): size = size * self.header['NAXIS'+`j+1`] bitpix", "TFIELDS and NAXIS2 hdu.header['TFIELDS'] = hdu.data._nfields hdu.header['NAXIS2'] = hdu.data.shape[0] # calculate PCOUNT, for", "!= data.type(): raise TypeError, \"Supplied data is not the correct type.\" if data._byteorder", "value.\"\"\" pass class Delayed: \"\"\"Delayed file-reading data.\"\"\" def __init__(self, hdu=None, field=None): self.hdu =", "_val = UNDEFINED self.__dict__['value'] = _val if '_valuestring' not in self.__dict__: self.__dict__['_valuestring'] =", "in comm_list: commstr = \"CONTINUE '&' / \" + commfmt % i output", "formats=tmp._recformats, names=tmp.names, shape=tmp._shape) if isinstance(hdu._ffile, _File): _data._byteorder = 'big' # pass datLoc, for", "self.data return new_table(self.columns, header=self.header, tbtype=self.columns._tbtype) def _verify(self, option='warn'): \"\"\"_TableBaseHDU verify method.\"\"\" _err =", "shape=dims) raw_data._byteorder = 'big' return raw_data class _ImageBaseHDU(_ValidHDU): \"\"\"FITS image HDU base class.\"\"\"", "valu.group('cplx') != None: # Check for numbers with leading 0s. real = Card._number_NFSC_RE.match(valu.group('real'))", "if option == 'ignore': return elif option == 'parse': # check the value", "self[key] except: return default def update(self, key, value, comment=None, before=None, after=None): \"\"\"Update one", "any. output_verify: output verification option, default = 'exception'. verbose: print out verbose messages?", "tform if repeat == '': repeat = 1 else: repeat = eval(repeat) return", "_card in hdr.ascardlist(): _key = _tdef_re.match(_card.key) try: keyword = _key.group('label') except: continue #", "self.field(indx[0]) # if more than one group parameter have the same name else:", "the column definitions.\"%att continue print \"%s:\" % att print ' ', getattr(self, att+'s')", "will overwrite the file. Default is False. 
\"\"\" if header is None: if", "string @param filename: name of the new FITS file to write to @type", "_list.extend(hcopy.ascardlist()) self.header = Header(_list) self._bzero = self.header.get('BZERO', 0) self._bscale = self.header.get('BSCALE', 1) if", "this HDUList.\"\"\" if self.__file is None: _name = '(No file associated with this", "itemsize=eval(recfmt[1:])) # then try variable length array except: if isinstance(recfmt, _FormatP): try: _func", "# calculate the starting point and width of each field for ASCII table", "not implemented for mode `%s`.\" % mode else: if os.path.splitext(self.name)[1] == '.gz': #", "if not isinstance(value, str): raise TypeError, 'bad value type' value = value.upper() if", "the EXTEND keyword is in primary HDU if there is extension if len(self)", "if not eval(test_pos): err_text = \"'%s' card at the wrong place (card %d).\"", "mo.end(0) dims[int(mo.group(1))-1] = int(mo.group(2)) datasize = reduce(operator.mul, dims[groups:]) size = abs(bitpix) * gcount", "or a record array (FITS_rec) which will contain both group parameter info and", "data is not touched yet, use header info. else: _shape = () _nrows", "key, hdu): \"\"\"Set an HDU to the HDUList, indexed by number or name.\"\"\"", "re.compile( r'(?P<code>[ADEFI])(?P<width>\\d+)(?:\\.(?P<prec>\\d+))?') def __init__(self, data=None, header=None, name=None): \"\"\"data: data of the table header:", "pieces in a list tmp = input[xoffset:offset] list.append(tmp) if len(input) == offset: break", "try: indx = nameList.index(key.rstrip()) except ValueError: # try to match case-insentively, _key =", "to avoid infinite loops if not (self.__dict__.has_key('value') or self.__dict__.has_key('_cardimage')): valStr = '' #", "isinstance(hdu, (GroupsHDU, _TableBaseHDU)) and hdu.data is not None: hdu.data._scale_back() if isinstance(hdu, _TableBaseHDU) and", "argument `before' takes precedence over `after' if both specified. default=None. after: name of", "the end. \"\"\" new_card = Card(key, value) if before != None or after", "data ASCIITNULL = 0 # value for ASCII table cell with value =", "0 self.count_blanks() def __getitem__(self, key): \"\"\"Get a Card by indexing or by the", "self._keylist[_key] # update the keylist self.count_blanks() self._mod = 1 def count_blanks(self): \"\"\"Find out", "appended after the last non-commentary card. If =1, the card will be appended", "Corrupted HDU class.\"\"\" \"\"\" This class is used when one or more mandatory", "_SteppedSlice(_KeyType): pass class Section: \"\"\"Image section.\"\"\" def __init__(self, hdu): self.hdu = hdu def", "(dtype, width) = _re.match(input_format.strip()).groups() dtype = ascii2rec[dtype] if width == '': width =", "unique mapping. If there is a field named \"XYZ\" and no other field", "will be delayed for col in range(_nfields): dict[col]['array'] = Delayed(input, col) # now", "len(indx) == 1: result = self.field(indx[0]) # if more than one group parameter", "already exist, it will be created and if the header represents a Primary", "the scaling the first time and store it in _convert self._convert[indx] = num.array(dummy,", "key): \"\"\"Get an HDU from the HDUList, indexed by number or name.\"\"\" key", "ImageHDU(data, header) f = open(filename, mode='update') f.append(hdu) f.close() def update(filename, data, *ext, **extkeys):", "of range.' 
% indx elif isinstance(indx, slice): indx = _normalize_slice(indx, naxis) if (indx.start", "0: _bitpix = self.header['BITPIX'] self._file.seek(self._datLoc) if isinstance(self, GroupsHDU): dims = self.size()*8/abs(_bitpix) else: dims", "ascardimage(self, option='silentfix'): \"\"\"Generate a (new) card image from the attributes: key, value, and", "'TableHDU': for i in range(len(self)): (type, width) = _convert_ASCII_format(self.data[i].format) if width is None:", "new table for i in range(len(tmp)): if tmp._arrays[i] is None: size = 0", "2'), Card('PCOUNT', 0, 'number of group parameters'), Card('GCOUNT', 1, 'number of groups'), Card('TFIELDS',", "complete FITS block and no more data will be accepted. An attempt to", "%d: %d' % (_blockLen, len(block)) elif (blocks[:8] not in ['SIMPLE ', 'XTENSION']): raise", "if size: self._file.seek(self._datLoc) data = _get_tbdata(self) data._coldefs = self.columns else: data = None", "0) else: raise IndexError, 'Index %s out of range.' % indx elif isinstance(indx,", "in indx[1:]: result += self.field(i) return result def setpar(self, parName, value): \"\"\"Set the", "key) if isinstance(key, slice): out = tmp out._parent = rec.RecArray.__getitem__(self._parent, key) out._convert =", "Technology publication, NOST 100-2.0. License: http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE For detailed examples of usage, see the", "0.6.5.5 def size(self): \"\"\"Size (in bytes) of the data portion of the HDU.\"\"\"", "self: hdu.header._mod = 0 hdu.header.ascard._mod = 0 if singleThread: if keyboardInterruptSent: raise KeyboardInterrupt", "numbers if self._coldefs._tbtype == 'TableHDU': _dict = {'I':num.Int32, 'F':num.Float32, 'E':num.Float32, 'D':num.Float64} _type =", "_cardList = [] _keyList = [] blocks = self._raw if (len(blocks) % _blockLen)", "integer.' % input return slice(_start, _stop, _step) class _KeyType: def __init__(self, npts, offset):", "def rename_key(self, oldkey, newkey, force=0): \"\"\"Rename a card's keyword in the header. oldkey:", "= 0 else: self.writeComplete = 1 def write(self,data): \"\"\" Write the given data", "be used in \"update/append\" mode # CardList needs its own _mod attribute since", "_key == 'END': break else: _cardList.append(_card) _keyList.append(_key) # Deal with CONTINUE cards #", "input: a sequence of variable-sized elements. \"\"\" objects.ObjectArray.__init__(self, input) self._max = 0 def", "extension specification(s). Header and extension specs can also be keyword arguments. For example:", "0.6.5.5 def size(self): \"\"\"Returns the size (in bytes) of the HDU's data part.\"\"\"", "raise EOFError hdu = _TempHDU() hdu._raw = '' # continue reading header blocks", "= self.header.get('NAXIS', 0) if naxis > 0: simple = self.header.get('SIMPLE','F') randomGroups = self.header.get('GROUPS','F')", "search # to avoid starting at the same CONTINUE card else: _start =", "Card): _key = self.index_of(key) # only set if the value is different from", "class_name = str(self.__class__) type = class_name[class_name.rfind('.')+1:] # if data is touched, use data", "_zero: bzero = 0 return (_str, _bool, _number, _scale, _zero, bscale, bzero) def", "new '%s' card.\" % keywd if fixable: # use repr to accomodate both", "= \"HDUList's element %s is not an extension HDU.\" % `i` _text =", "_data = hdu.data except IndexError: raise IndexError, 'No data in this HDU.' 
if", "len(input) # check for one word longer than strlen, break in the middle", "self['TFIELDS'] del self['NAXIS'] for i in range(_naxis): del self['NAXIS'+`i+1`] if issubclass(self._hdutype, PrimaryHDU): del", "array type\" self.header['NAXIS'] = len(axes) # add NAXISi if it does not exist", "numarray typecodes NumCode = {8:'UInt8', 16:'Int16', 32:'Int32', 64:'Int64', -32:'Float32', -64:'Float64'} ImgCode = {'UInt8':8,", "_out # make a copy if scaled, so as not to corrupt the", "an array.\"\"\" indx = _get_index(self._coldefs.names, key) if (self._convert[indx] is None): # for X", "if not self.header.has_key('SIMPLE'): hdulist = HDUList([PrimaryHDU()]) hdulist.writeto(name, 'exception') else: if self.header.has_key('SIMPLE') and os.path.getsize(name)", "self: # Header: # Add 1 to .ascard to include the END card", "temp file\", _name for hdu in self: (hdu._hdrLoc, hdu._datLoc, hdu._datSpan) = _hduList.__file.writeHDU(hdu) _hduList.__file.close()", "input else: return input + ' ' * (Card.length-strlen) # minimum length is", "self.value.replace(\"'\", \"''\") val_list = self._words_group(val, val_len) for i in range(len(val_list)): if i ==", "if i._type.bytes > 1: if i._byteorder != 'big': i.byteswap() i._byteorder = 'big' else:", "= chararray.array(self._file, itemsize=self._parent.field(indx)[i,0], shape=1) else: dummy[i] = num.array(self._file, type=self._coldefs._recformats[indx]._dtype, shape=self._parent.field(indx)[i,0]) dummy[i]._byteorder = 'big'", "@group HDU Classes: _AllHDU, BinTableHDU, _CorruptedHDU, _ExtensionHDU, GroupsHDU, ImageHDU, _ImageBaseHDU, PrimaryHDU, TableHDU, _TableBaseHDU,", "\"data\" array of data type dtype. The descriptor location will have a zero", "the first case. bitpix: data type as expressed in FITS BITPIX value (8,", "image dimensions, reverse the order of NAXIS.\"\"\" naxis = self.header['NAXIS'] axes = naxis*[0]", "!= 0: self.data += self._bzero # delete the keywords BSCALE and BZERO after", "(i.e. use tuple always) output_format = _FormatX(`(nbytes,)`+'u1') output_format._nx = repeat elif dtype ==", "of the file. If the file does not exist and the provided header", "a required Card.\"\"\" \"\"\"If pos = None, it can be anywhere. If the", "it to the right place (card %d).\" % insert_pos fix = \"_cards=self.header.ascard; dummy=_cards[%d];", "self.formats self._arrays[i] = input[i].array \"\"\" def __getitem__(self, key): x = self.data[key] if isinstance(key,", "_TableBaseHDU, _TempHDU, _ValidHDU @group Table-related Classes: ColDefs, Column, FITS_rec, _FormatP, _FormatX, _VLF \"\"\"", "fmt else: if dtype == 'a': output_format = option+_rec2fits[dtype] elif isinstance(dtype, _FormatX): print", "image data by using BSCALE/BZERO. 
Call to this method will scale self.data and", "if type is None: type = self.NumCode[self._bitpix] _type = getattr(num, type) # Determine", "[8, 16, 32, 64, -32, -64]\" # Verify location and value of mandatory", "# take each 80-char card as a regular card and use its methods.", "del self['SIMPLE'] del self['XTENSION'] del self['BITPIX'] _naxis = self['NAXIS'] if issubclass(self._hdutype, _TableBaseHDU): _tfields", "issubclass(self._hdutype, _ImageBaseHDU): del self['BSCALE'] del self['BZERO'] if issubclass(self._hdutype, _TableBaseHDU): del self['TFIELDS'] for name", "None self.header = header self.data = data self.name = None def size(self): \"\"\"Returns", "the header.\"\"\" re_extname = re.compile(r\"EXTNAME\\s*=\\s*'([ -&(-~]*)'\") re_extver = re.compile(r\"EXTVER\\s*=\\s*(\\d+)\") mo = re_extname.search(self._raw) if", "valid value/comment string. # The valu group will return a match if a", "can only be strings and there # is no comment if self.key in", "if _card.key == 'HISTORY': output.append(_card.value) return output def get_comment(self): \"\"\"Get all comments as", "no match if (keyword in _keyNames): _list.append(i) for i in _list: del self.header.ascard[i]", "__init__(self, name=None, format=None, unit=None, null=None, \\ bscale=None, bzero=None, disp=None, start=None, \\ dim=None, array=None):", "< 1000: for j in range(3, naxis+3): self.req_cards('NAXIS'+`j-2`, '== '+`j`, _isInt+\" and val>=", "\"\"\"Returns the size (in bytes) of the HDU's data part.\"\"\" size = 0", "card to search # to avoid starting at the same CONTINUE card else:", "'data': self.__dict__[attr] = None if self.header['NAXIS'] > 0: _bitpix = self.header['BITPIX'] self._file.seek(self._datLoc) if", "dummy return self._convert[indx] if _str: return self._parent.field(indx) # ASCII table, convert strings to", "= _keyNames[_commonNames.index(cname)]+`i+1` if cname == 'format' and isinstance(self, BinTableHDU): val = _cols._recformats[i] if", "= 0 hdu.header.ascard._mod = 0 except: pass return hdu class _ExtensionHDU(_ValidHDU): \"\"\"An extension", "the header: header.update(key,value,comment) shdu = pyfits.StreamingHDU('filename.fits',header) for each piece of data: shdu.write(data) shdu.close()", "continue _bytes = hdu.data._itemsize*hdu.data.nelements() _bytes = _bytes + _padLength(_bytes) if _bytes != hdu._datSpan:", "the data from an extension of a FITS file (and optionally the header).", "point overlaps to the previous column\" % indx+1 _trail = _loc[indx+1] - _width[indx]", "= [] else: dummy = map(lambda x, y: x-y, self.starts[1:], [1]+self.starts[1:-1]) dummy.append(self._width-self.starts[-1]+1) attr", "bzero hdu.data._convert[i][:n] = _arr else: hdu.data._parent.field(i)[:n] = tmp._arrays[i][:n] if n < nrows: if", "signal interput handler keyboardInterruptSent = False def New_SIGINT(*args): print \"KeyboardInterrupt ignored until flush", "1: self.mmobject = self.__file._mm if self.__file.mode in ['append', 'update']: self.flush(output_verify=output_verify, verbose=verbose) self.__file.close() #", "n < nrows: if tbtype == 'BinTableHDU': if isinstance(hdu.data._parent.field(i), num.NumArray): # make the", "have the same name, the # value must be a list (or tuple)", "0s. 
real = Card._number_NFSC_RE.match(valu.group('real')) _rdigt = real.group('digt').translate(_fix_table2, ' ') if real.group('sign') == None:", "Card('SIMPLE', True, 'conforms to FITS standard') _list = CardList([ c0, Card('BITPIX', 8, 'array", "self.data += self._bzero # delete the keywords BSCALE and BZERO after scaling del", "the _mm attribute.\"\"\" if attr == '_mm': self.__dict__[attr] = Memmap.open(self.name, mode=_memmap_mode[self.mode]) try: return", "closed and can no longer be written\" curDataSize = self._ffo.getfile().tell() - self._datLoc if", "preserve the one-to-one correspondence when updating the list(s). # Use lists, instead of", "not None or self.key in Card._commentaryKeys: return result else: if option in ['fix',", "to open a FITS file and return an HDUList object. name: Name of", "'HIERARCH': val = val[8:].strip() self.__class__ = _Hierarch else: raise ValueError, 'keyword name %s", "self.insert(loc, card, useblanks=useblanks) elif after != None: loc = self.index_of(after) self.insert(loc+1, card, useblanks=useblanks)", "the primary header needs the keyword EXTEND or if it has the proper", "[] for i in range(len(self.header.ascard)-1,-1,-1): _card = self.header.ascard[i] _key = _tdef_re.match(_card.key) try: keyword", "_hdr = hdu.header hdulist.close() if _gethdr: return _data, _hdr else: return _data def", "extname and extver if hdu.name == '': hdu.name, hdu._extver = hdu._getname() elif hdu.name", "setupHDU(self): \"\"\"Read one FITS HDU, data portions are not actually read here, but", "_OnePointAxis(_KeyType): pass class _LineSlice(_KeyType): pass class _SteppedSlice(_KeyType): pass class Section: \"\"\"Image section.\"\"\" def", "_size+_padLength(_size) def close(self): \"\"\"Close the 'physical' FITS file.\"\"\" self.__file.close() class HDUList(list, _Verify): \"\"\"HDU", "arrays/numbers.\" % len(indx) def _getitem(self, offset): row = (offset - self._byteoffset) / self._strides[0]", "size = 0 naxis = self.header.get('NAXIS', 0) if naxis > 0: size =", "option='warn'): _text = '' _err = _ErrList([], unit='HDU') # the first (0th) element", "._parent def __getitem__(self, key): tmp = rec.RecArray.__getitem__(self, key) if isinstance(key, slice): out =", "be written to the stream. 
If the provided data would cause the stream", "> 1: self.update_extend() hduList = open(name, mode=\"append\") for hdu in self: hduList.__file.writeHDU(hdu) hduList.close(output_verify=output_verify)", "= 0 if hdu.data is not None: # if image, need to deal", "if self._tbtype == 'TableHDU': last_end = 0 attr = [0] * len(self) for", "_iswholeline(key[j], _naxis) dims.append(indx.npts) if not isinstance(indx, _WholeLine): raise IndexError, 'Subsection data is not", "if _dummy.strip(): if self.unit: result += _INDENT*tab+\"%s %s:\\n\" % (self.unit, element) result +=", "len(coldata) > 0: coldata.tofile(self.__file) _shift = self.__file.tell() - _where hdu.data._heapsize = _shift -", "hdu.header.ascard._mod: hdu._file.seek(hdu._hdrLoc) self.__file.writeHDUheader(hdu) if (verbose): print \"update header in place: Name =\", hdu.name,", "re_simple = re.compile(r'SIMPLE =\\s*') re_bitpix = re.compile(r'BITPIX =\\s*(-?\\d+)') re_naxis = re.compile(r'NAXIS =\\s*(\\d+)') re_naxisn", "_checkKey(self, val): \"\"\"Verify the keyword to be FITS standard.\"\"\" # use repr (not", "len(data_output[i]) desp_output[i,1] = _offset _offset += len(data_output[i]) * _nbytes return data_output class _VLF(objects.ObjectArray):", "nrows=0, fill=0, tbtype='BinTableHDU'): \"\"\"Create a new table from the input column definitions.\"\"\" \"\"\"", "output_verify: output verification option, default='exception'. clobber: Overwrite the output file if exists, default", "or \"after\" is specified, it will be appended at the end. key: keyword", "match the exact name first, so in the example in (a), field('abc') will", "is not written. Once sufficient data has been written to the stream to", "= self.field(indx[0]) # if more than one group parameter have the same name", "def __getattr__(self, attr): \"\"\"Get the 'data' or 'columns' attribute. The data of random", "# data area size, including padding hdu._datSpan = _size + _padLength(_size) hdu._new =", "'E':'f4', 'D':'f8'} _re = re.compile(r'(?P<dtype>[AIFED])(?P<width>[0-9]*)') # Parse the TFORM value into data type", "insert_pos fix = \"_cards=self.header.ascard; dummy=_cards[%d]; del _cards[%d];_cards.insert(%d, dummy)\" % (_index, _index, insert_pos) _err.append(self.run_option(option,", "(above) _where = self.__file.tell() if isinstance(hdu, BinTableHDU): self.__file.write(hdu.data._gap*'\\0') for i in range(hdu.data._nfields): if", "isinstance(self, GroupsHDU): _shape = list(self.data.data.getshape())[1:] _format = `self.data._parent.field(0).type()` else: _shape = list(self.data.getshape()) _format", "in range(_tfields): del self[name+`i+1`] if issubclass(self._hdutype == TableHDU): for i in range(_tfields): del", "('append', 'update'): print \"flush for '%s' mode is not supported.\" % self.__file.mode return", "count_blanks(self): \"\"\"Find out how many blank cards are *directly* before the END card.\"\"\"", "than specified data size. File may have been truncated.' hdu._ffile = self return", "regular expression _tdef_re = re.compile(r'(?P<label>^T[A-Z]*)(?P<num>[1-9][0-9 ]*$)') def _parse_tformat(tform): \"\"\"Parse the TFORM value into", "BinTableHDU(data) else: raise KeyError, 'data must be numarray or table data.' 
else: hdu=header._hdutype(data=data,", "valStr = \"''\" else: _expValStr = self.value.replace(\"'\",\"''\") valStr = \"'%-8s'\" % _expValStr valStr", "fix_text=self._fix_text, fixable=self._fixable)) return _err class _Hierarch(Card): \"\"\"Cards begins with HIERARCH which allows keyword", "= _repeat+_fits2rec[dtype] elif dtype == 'X': nbytes = ((repeat-1) / 8) + 1", "indx+1 _trail = _loc[indx+1] - _width[indx] - self._coldefs.starts[indx] if _trail < 0: raise", "if dtype == 'a': _nbytes = 1 else: _nbytes = num.getType(dtype).bytes for i", "input _step = input.step if _step is None: _step = 1 elif isinstance(_step,", "self.NumCode[self._bitpix] _type = getattr(num, type) # Determine how to scale the data #", "to accomodate both the ASCII table and binary table column # format spec,", "i in range(len(self._parent)): _offset = self._parent.field(indx)[i,1] + self._heapoffset self._file.seek(_offset) if self._coldefs._recformats[indx]._dtype is 'a':", "if parbscales is None: parbscales = [None]*npars if parbzeros is None: parbzeros =", "val != '': keyword = _keyNames[_commonNames.index(cname)]+`i+1` if cname == 'format' and isinstance(self, BinTableHDU):", "data specified in the header provided to the class constructor may be written", "tuple of (string, integer). \"\"\" if isinstance(key, (int, slice)): return key elif isinstance(key,", "String for a FITS standard compliant (FSC) keyword. _keywd_FSC = r'[A-Z0-9_-]* *$' _keywd_FSC_RE", "is not supplied (as in reading in the FITS file), # it will", "(verbose): print \"update header in place: Name =\", hdu.name, _extver if 'data' in", "If the optional keyword 'header' is set to True, this function will return", "== _extver: found = j nfound += 1 if (nfound == 0): raise", "os.remove(self.__file.name) if (verbose): print \"delete the original file\", oldName # reopen the renamed", "input return slice(_start, _stop, _step) class _KeyType: def __init__(self, npts, offset): self.npts =", "self.header['BITPIX'] = _ImageBaseHDU.ImgCode[self.data.type()] axes = list(self.data.getshape()) axes.reverse() elif self.data is None: axes =", "self._err_text + '\\n%s' % self._cardimage # verify the comment (string), it is never", "if = 0, copy the data from input, undefined cells will still be", "'big' if (self._bzero != 0 or self._bscale != 1): if _bitpix > 0:", "first time print out all top level messages for item in self: if", "self['NAXIS'+`i+1`] if issubclass(self._hdutype, PrimaryHDU): del self['EXTEND'] del self['PCOUNT'] del self['GCOUNT'] if issubclass(self._hdutype, PrimaryHDU):", "second extension By name, i.e., EXTNAME value (if unique): >>> getdata('in.fits', 'sci') >>>", "where each part is no longer than strlen and no word is cut", "value of a required Card.\"\"\" \"\"\"If pos = None, it can be anywhere.", "j in range(naxis): axes[j] = self.header['NAXIS'+`j+1`] axes.reverse() return tuple(axes) def _summary(self): \"\"\"Summarize the", "new_name else: self.change_attrib(col_name, 'name', new_name) def change_unit(self, col_name, new_unit): \"\"\"Change a Column's unit.\"\"\"", "getval(filename, key, *ext, **extkeys): \"\"\"Get a keyword's value from a header in a", "(in bytes) of the data portion of the HDU.\"\"\" size = 0 naxis", "the # reverse of the numarray shape if isinstance(self, GroupsHDU): _shape = list(self.data.data.getshape())[1:]", "\"\"\"Split a long string into parts where each part is no longer than", "attr)) for i in range(len(tmp)): tmp._arrays[i] = _data.field(i) return FITS_rec(_data) def new_table (input,", "parameter 
value.\"\"\" return self.array.par(fieldName)[self.row] def setpar(self, fieldName, value): \"\"\"Set the group parameter value.\"\"\"", "the commonName list) of a Column.\"\"\" indx = _get_index(self.names, col_name) getattr(self, attrib+'s')[indx] =", "argument `before' takes precedence over `after' if both specified. They can be either", "'copyonwrite', 'update']: raise \"Memory mapping is not implemented for mode `%s`.\" % mode", "'*77) _hdrLoc = self.__file.tell() # Read the first header block. block = self.__file.read(_blockLen)", "= self.__file.tell() _size = 0 if hdu.data is not None: # if image,", "the info of the HDU's in this HDUList.\"\"\" if self.__file is None: _name", "else: ext = ext2['extname'] else: raise KeyError, 'Insufficient keyword argument: %s' % ext2", "cards after the first card.' if not isinstance(_card.value, str): raise ValueError, 'Cards with", "found.' % `key` else: raise KeyError, 'Illegal key data type %s' % type(key)", "'TZERO', 'TNULL', 'TTYPE', 'TUNIT']: for i in range(_tfields): del self[name+`i+1`] if issubclass(self._hdutype, BinTableHDU):", "_TableBaseHDU): del self['TFIELDS'] for name in ['TFORM', 'TSCAL', 'TZERO', 'TNULL', 'TTYPE', 'TUNIT']: for", "# beginning of the data area # data area size, including padding hdu._datSpan", "string into parts where each part is no longer than strlen and no", "+ '%-80s' % commstr return output def _words_group(self, input, strlen): \"\"\"Split a long", "self._get_scale_factors(npars)[3:5] if _scale or _zero: self._convert[npars] = input else: self._parent.field(npars)[:] = input else:", "table, convert strings to numbers if self._coldefs._tbtype == 'TableHDU': _dict = {'I':num.Int32, 'F':num.Float32,", "'' if repeat != 1: _repeat = `repeat` output_format = _repeat+_fits2rec[dtype] elif dtype", "values from all Columns. \"\"\" def __init__(self, input, tbtype='BinTableHDU'): \"\"\"input: a list of", "a (new) card image from the attributes: key, value, and comment, or from", "blanks. If there are two or more attribute names, they must be separated", "next column\" % indx+1 if 'A' in _format: _pc = '%-' else: _pc", "may be extra bytes after the last HDU or the file is corrupted.'", "<= 999\", 1, option, _err) self.req_cards('NAXIS1', '== 3', _isInt+\" and val == 0\",", "base class for the TableHDU, ImageHDU, and BinTableHDU classes. 
\"\"\" def __init__(self, data=None,", "not exist, a new card will be created and it will be placed", "as a list of string texts.\"\"\" output = [] for _card in self.ascardlist():", "__coerce__(self, other): pass # needed for __add__ def __add__(self, other, option='left'): if isinstance(other,", "\"%-80s\" % output # longstring case (CONTINUE card) else: # try not to", "self._unique[parName.lower()] if len(indx) == 1: result = self.field(indx[0]) # if more than one", "_ncols = len(self.columns.formats) _format = self.columns.formats # if data is not touched yet,", "= _convert_format(val, reverse=1) #_update(keyword, val) _append(Card(keyword, val)) def copy(self): \"\"\"Make a copy of", "(nfound == 0): raise KeyError, 'extension %s not found' % `key` elif (nfound", "_scale = 1 _zero = 0 else: # flat the shape temporarily to", "# shift the unused bits num.lshift(output[...,i], unused, output[...,i]) def _makep(input, desp_output, dtype): \"\"\"Construct", "_hduList.__file.close() self.__file.close() os.remove(self.__file.name) if (verbose): print \"delete the original file\", oldName # reopen", "the group parameter value.\"\"\" return self.array.par(fieldName)[self.row] def setpar(self, fieldName, value): \"\"\"Set the group", "an HDU to the HDUList, indexed by number or name.\"\"\" _key = self.index_of(key)", "= val.upper() if val == 'END': raise ValueError, \"keyword 'END' not allowed\" self._checkKey(val)", "'END' card, the Header may also contain the binary data(*). (*) In future", "bzero if _scale: dummy /= bscale elif self._coldefs._tbtype == 'TableHDU': dummy = self._convert[indx]", "self.__dict__['_cardimage'] def _ascardimage(self): \"\"\"Generate a (new) card image from the attributes: key, value,", "a blank card. value: Text to be added. before: [same as in update()]", "\"\"\" header: header to be used data: data to be used name: name", "'ColDefs'+ `tuple(self.data)` def __coerce__(self, other): pass # needed for __add__ def __add__(self, other,", "array += -bzero if bscale not in ['', None, 1]: array /= bscale", "derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED", "specified. They can be either a keyword name or index. \"\"\" if before", "= '%20d' % self.value # XXX need to consider platform dependence of the", "self.change_attrib(col_name, 'unit', new_unit) def info(self, attrib='all'): \"\"\"Get attribute(s) information of the column definition.\"\"\"", "positive.' % input else: raise IndexError, 'Illegal slice %s, step must be integer.'", "A corrupted HDU usually means that the data size cannot be calculated or", "i in range(len(self)): (type, width) = _convert_ASCII_format(self.data[i].format) if width is None: self.data[i].format =", "isinstance(data, num.NumArray): hdu = ImageHDU(data) elif isinstance(data, FITS_rec): hdu = BinTableHDU(data) else: raise", "self.formats if len(self) == 1: dummy = [] else: dummy = map(lambda x,", "if 'header' in keys: header = keys['header'] hdu=_makehdu(data, header) if not isinstance(hdu, PrimaryHDU):", "\"\"\"FITS table extension base HDU class.\"\"\" def __init__(self, data=None, header=None, name=None): \"\"\" header:", "indx = _normalize_slice(indx, naxis) if (indx.start == 0) and (indx.stop == naxis) and", "indexing. 
_list = [] for i in range(len(self.header.ascard)-1,-1,-1): _card = self.header.ascard[i] _key =", "= self.header.get('BZERO', 0) self._bscale = self.header.get('BSCALE', 1) if (data is DELAYED): return self.data", "order of NAXIS's which is the # reverse of the numarray shape if", "valstr = valfmt % val_list[i] output = output + '%-80s' % (headstr +", "value): \"\"\"Set a Card by indexing or by the keyword name.\"\"\" if isinstance", "only be used right before writing to the output file, as the data", "PrimaryHDU, TableHDU, _TableBaseHDU, _TempHDU, _ValidHDU @group Table-related Classes: ColDefs, Column, FITS_rec, _FormatP, _FormatX,", "self.header['BSCALE'] del self.header['BZERO'] def update_header(self): \"\"\"Update the header keywords to agree with the", "if the keyword EXTVER does not exist, default it to 1 _extver =", "self.header['TFORM'+`j+1`] + ', ' _format = _format[:-2] + ']' _dims = \"%dR x", "stream is full so pad the data to the next FITS block #", "list of conditions and the following disclaimer. 2. Redistributions in binary form must", "self.__dict__['comment'] = _comm.rstrip() def _fixValue(self, input): \"\"\"Fix the card image for fixable non-standard", "Construct a StreamingHDU object given a file name and a header. :Parameters: name", "getattr(self[i], cname) if val != None: attr[i] = val elif name == '_arrays':", "str): self.__dict__['comment'] = _comm.rstrip() def _fixValue(self, input): \"\"\"Fix the card image for fixable", "In the case of a missing 'END' card, the Header may also contain", "comment. Core code for ascardimage. \"\"\" # keyword string if self.__dict__.has_key('key') or self.__dict__.has_key('_cardimage'):", "cards=[], keylist=None): \"\"\"Construct the CardList object from a list of Cards. cards: A", "if isinstance(hdu.data._coldefs._recformats[i], _FormatP): for j in range(len(hdu.data.field(i))): coldata = hdu.data.field(i)[j] if len(coldata) >", "it gets the decimal point.\"\"\" valueStr = \"%.16G\" % value if \".\" not", "and writing FITS files and manipulating their contents. A module for reading and", "should never happen if header is None: raise ValueError, \"No header to setup", "nrows) if fill: n = 0 (_scale, _zero, bscale, bzero) = hdu.data._get_scale_factors(i)[3:] if", "Primary header provided into an image # extension header. # self.header.update('XTENSION','IMAGE','Image extension', after='SIMPLE')", "the FITS file), # it will be constructed from the card list. if", "'': output_format = _fits2rec[dtype]+`int(option)` # make sure option is integer else: _repeat =", "data is not NDarray, make it to be one, i.e. # input arrays", "the string before column 9. \"\"\" eqLoc = self._locateEq() if eqLoc is None:", "else: _gcount = '' return \"%-10s %-11s %5d %-12s %s%s\" % \\ (self.name,", "in 1999 and mandated by NASA as the standard format for storing high", "= operator.countOf(_list, _key) # occurrence of _key in _list if _count == 1:", "_format: self._parent.field(indx).sub('E', 'D') # binary table else: if isinstance(self._parent.field(indx)._type, num.IntegralType): dummy = num.around(dummy)", "KeyError, 'Redundant/conflicting keyword argument(s): %s' % ext2 elif n_ext1 == 2: if n_ext2", "a list of cards of minimal header _list = CardList([ Card('XTENSION', '', ''),", "not be initialized till the HDU is accessed. 
\"\"\" def _getname(self): \"\"\"Get the", "self.has_key(key): j = self.ascard.index_of(key) if comment is not None: _comment = comment else:", "+ _comm.rstrip() + ' ' self.__dict__[name] = longstring.rstrip() def _breakup_strings(self): \"\"\"Break up long", "key elif isinstance(key, str): _key = key.strip().upper() if _key[:8] == 'HIERARCH': _key =", "else: output = hdu.data output.tofile(self.__file) _size = output.nelements() * output._itemsize # write out", "keys.get('clobber', False) hdu.writeto(filename, clobber=clobber) def append(filename, data, header=None): \"\"\"Append the header/data to FITS", "_pc = '%-' else: _pc = '%' _fmt = ' '*_lead + _pc", "\"KeyboardInterrupt ignored until flush is complete!\" keyboardInterruptSent = True # Install new handler", "_commentaryKeys = ['', 'COMMENT', 'HISTORY'] def __init__(self, key='', value='', comment=''): \"\"\"Construct a card", "= 0 if self._cardimage[:8].upper() == 'HIERARCH': _start = 8 self.__class__ = _Hierarch return", "commstr = \"CONTINUE '&' / \" + commfmt % i output = output", "definition dictionaries for each field for _card in hdr.ascardlist(): _key = _tdef_re.match(_card.key) try:", "min) / 2. # throw away -2^N _scale = (max - min) /", "extension HDU class.\"\"\" def __init__(self, data=None, header=None, name=None): \"\"\"Construct an image HDU. data:", "HDU\", hdu.name, _extver hdu._new = 0 elif self.__file.mode == 'update': if not self._resize:", "a Boolean array. input: input Uint8 array of shape (s, nbytes) output: output", "be integer.' % input return slice(_start, _stop, _step) class _KeyType: def __init__(self, npts,", "more than one group parameter have the same name, the # value must", "(Card.length-10): self.__class__ = _Card_with_continue output = self._breakup_strings() else: print 'card is too long,", "a printable string.\"\"\" output = '' for card in self: output += str(card)", "# bscale and bzero takes priority if (bscale != 1 or bzero !=0):", "tbtype='BinTableHDU'): \"\"\"Create a new table from the input column definitions.\"\"\" \"\"\" input: a", "raise ValueError, \"Illegal format %s\" % fmt return output_format def _convert_ASCII_format(input_format): \"\"\"Convert ASCII", "data will overflow the stream\" if _ImageBaseHDU.NumCode[self.header['BITPIX']] != data.type(): raise TypeError, \"Supplied data", "header is not None: if not isinstance(header, Header): raise ValueError, \"header must be", "for j in range(naxis): axes[j] = self.header['NAXIS'+`j+1`] axes.reverse() return tuple(axes) def _summary(self): \"\"\"Summarize", "# if not parsable (i.e. everything else) result = None return result else:", "1 self._resize = 1 else: raise \"HDUList can only append an HDU\" #", "\"\"\"Get a Card by indexing or by the keyword name.\"\"\" _key = self.index_of(key)", "value, before=None, after=None): \"\"\"Add a HISTORY card. value: History text to be added.", "_FormatX): print 'X format' elif dtype+option in _rec2fits.keys(): # record format _repeat =", "def _setkey(self, val): \"\"\"Set the key attribute, surrogate for the __setattr__ key case.\"\"\"", "print 'Output verification result:' print x if _option == 'exception' and x: raise", "self._cardimage.find('=') != 8: if option in ['exception', 'warn']: self.__dict__['_err_text'] = 'Card image is", "Header(_list) if (data is not DELAYED): if isinstance(data, rec.RecArray): self.header['NAXIS1'] = data._itemsize self.header['NAXIS2']", "format spec. Do the opposite if reverse = 1. 
\"\"\" fmt = input_format", "val != None: attr[i] = val elif name == '_arrays': attr = [col.array", "in range(len(tmp)): _formats += 'a%d,' % tmp.spans[i] _itemsize += tmp.spans[i] hdu.data = FITS_rec(rec.array('", "== 0: ext = _Zero() elif 'ext' in keys: if n_ext2 == 1:", "hcopy._strip() _list.extend(hcopy.ascardlist()) self.header = Header(_list) if (data is not DELAYED): if isinstance(data, rec.RecArray):", "is cut into two pieces. But if there is one single word which", "messages for item in self: if not isinstance(item, _ErrList): result += _INDENT*tab+\"%s\\n\" %", "'Redundant/conflicting keyword argument(s): %s' % ext2 else: if 'extname' in keys: if 'extver'", "_comm != '': longstring = longstring + _comm.rstrip() + ' ' self.__dict__[name] =", "\\ dim=None, array=None): \"\"\"Construct a Column by specifying attributes. All attributes except format", "\"\"\" self._add_commentary('history', value, before=before, after=after) def add_comment(self, value, before=None, after=None): \"\"\"Add a COMMENT", "data used for updating The rest of the arguments are flexible: the 3rd", "code for ascardimage. \"\"\" # keyword string if self.__dict__.has_key('key') or self.__dict__.has_key('_cardimage'): if isinstance(self,", "desc[:len(_npts),0] = _npts _dtype = num.getType(self._coldefs._recformats[indx]._dtype) desc[1:,1] = num.add.accumulate(desc[:-1,0])*_dtype.bytes desc[:,1][:] += self._heapsize self._heapsize", "Card._comment_FSC_RE.match(val) is None: self.__dict__['_err_text'] = 'Unprintable string %s' % repr(val) self.__dict__['_fixable'] = 0", "will scale self.data and update the keywords of BSCALE and BZERO in self.header.", "how many blank cards are *directly* before the END card.\"\"\" for i in", "exceptions to be raised when a file specified by a URL cannot be", "urllib._urlopener.tempcache = {} # Initialize tempcache with an empty # dictionary to enable", "it does not exist for j in range(len(axes)): try: self.header['NAXIS'+`j+1`] = axes[j] except:", "% str(val) self.__dict__['value'] = val def _setcomment(self, val): \"\"\"Set the comment attribute.\"\"\" if", "[same as in update()] \"\"\" self._add_commentary('comment', value, before=before, after=after) def add_blank(self, value='', before=None,", "a list (or tuple) containing arrays else: if isinstance(value, (list, tuple)) and len(indx)", "Cards Dimensions Format\\n\" % _name for j in range(len(self)): results = results +", "bscale = 1 if not _zero: bzero = 0 return (_str, _bool, _number,", "{} for i in range(len(self.parnames)): _name = self.parnames[i] if _name in _unique: _unique[_name].append(i)", "its parent card if nc > 0: _longstring = _cardList[_where-1]._cardimage for c in", "= self._dimShape() code = _ImageBaseHDU.NumCode[self.header['BITPIX']] if self._ffile.memmap: _mmap = self._ffile._mm[self._datLoc:self._datLoc+self._datSpan] raw_data = num.array(_mmap,", "= _size + _shift # pad the FITS data block if _size >", "# parse the extension spec if n_ext1 > 2: raise ValueError, \"too many", "+ ' ' * (Card.length-strlen) def _floatFormat(value): \"\"\"Format the floating number to make", "not isinstance(indx, _WholeLine): raise IndexError, 'Subsection data is not contiguous.' # the offset", "'TSCAL', 'TZERO', 'TNULL', 'TTYPE', 'TUNIT']: for i in range(_tfields): del self[name+`i+1`] if issubclass(self._hdutype,", "except: try: # then try to conver it to a strings array array", "file (for append and update modes only). 
output_verify: output verification option, default =", "(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF", "the keyword to be FITS standard.\"\"\" # use repr (not str) in case", "__getattr__(self, attr): \"\"\"Get the _mm attribute.\"\"\" if attr == '_mm': self.__dict__[attr] = Memmap.open(self.name,", "_dict = {8:'B', 16:'I', 32:'J', 64:'K', -32:'E', -64:'D'} def __init__(self, data=None, header=None, name=None):", "+ fix_text return _text def verify (self, option='warn'): \"\"\"Wrapper for _verify.\"\"\" _option =", "option, _err) self.req_cards('NAXIS1', '== 3', _isInt+\" and val == 0\", 0, option, _err)", "if the HDUList is resized, need to write it to a tmp file,", "for x in _other: indx.remove(x) tmp = [self[i] for i in indx] return", "_end self._width = _end else: raise KeyError, 'Attribute %s not defined.' % name", "first case. bitpix: data type as expressed in FITS BITPIX value (8, 16,", "dummy = self._convert[indx].copy() if _zero: dummy -= bzero if _scale: dummy /= bscale", "card must have string value. \"\"\" def __str__(self): \"\"\"Format a list of cards", "if _ver == None: found = j nfound += 1 else: # if", "xtension == 'TABLE': self._hdutype = TableHDU elif xtension == 'IMAGE': self._hdutype = ImageHDU", "self._keylist[:] # make a copy _keylist.reverse() try: _indx = _keylist.index(_key) if backward: _indx", "in _other: indx.remove(x) tmp = [self[i] for i in indx] return ColDefs(tmp) def", "a printable string.\"\"\" kard = self._cardimage output = '' for i in range(len(kard)/80):", "== 0: datasize = 0 else: dims = [0]*naxis for i in range(naxis):", "bscale = _bscale, bzero = _bzero)) _coldefs = ColDefs(_cols) _coldefs._shape = self.header['GCOUNT'] _coldefs._dat_format", "(value or comment) is changed, will reconstructe # the card image. self._ascardimage() def", "value): \"\"\"Set an HDU attribute.\"\"\" if attr == 'name' and value: if not", "X format column Boolean array into an UInt8 array. input: input Boolean array", "ttype, tform, etc. and the array. Does not support theap yet. \"\"\" def", "and BZERO del self.header['BSCALE'] del self.header['BZERO'] def update_header(self): \"\"\"Update the header keywords to", "binary table's data. \"\"\" if attr == 'data': # same code as in", "'B':'u1', 'I':'i2', 'E':'f4', 'D':'f8', 'J':'i4', 'A':'a', 'C':'c8', 'M':'c16', 'K':'i8'} # the reverse dictionary", "if tbtype == 'TableHDU': _formats = '' _itemsize = 0 for i in", "comment for 'parse' result = Card._value_NFSC_RE.match(self._getValueCommentString()) # if not parsable (i.e. everything else)", "elif naxis == 1: return _OnePointAxis(1, 0) else: raise IndexError, 'Index %s out", "attr, getattr(tmp, attr)) for i in range(len(tmp)): tmp._arrays[i] = _data.field(i) return FITS_rec(_data) def", "else: _key = key _ver = None if not isinstance(_key, str): raise KeyError,", "self.ascard.append(Card(key, value, comment)) self._mod = 1 def add_history(self, value, before=None, after=None): \"\"\"Add a", "% _name for j in range(len(self)): results = results + \"%-3d %s\\n\"%(j, self[j]._summary())", "raised and the data is not written. Once sufficient data has been written", "has been written to the stream to satisfy the amount specified in the", "file: The opened physical file associated with the HDUList. Default = None. 
\"\"\"", "_shape += (self.header['NAXIS'+`j+1`],) _format = self.NumCode[self.header['BITPIX']] if isinstance(self, GroupsHDU): _gcount = ' %d", "# make a copy if scaled, so as not to corrupt the original", "datLoc, for P format _data._heapoffset = hdu._theap + hdu._datLoc _data._file = hdu._file _tbsize", "val): \"\"\"Set the value attribute.\"\"\" if isinstance(val, (str, int, long, float, complex, bool,", "*0*(?P<digt>' + _digits_NFSC + ')') # FSC commentary card string which must contain", "a ColDefs object. header: header to be used to populate the non-required keywords", "= self._xtn hdr.ascard[0].comment = 'binary table extension' class StreamingHDU: \"\"\" A class that", "self._checkText(val) else: if val is not None: raise ValueError, 'comment %s is not", "format, corresponding to TDISP keyword start: column starting position (ASCII table only), corresponding", "only states that a # string should not end with two single quotes,", "the card will be appended after the last non-commentary card. If =1, the", "'' if repeat != 1: _repeat = `repeat` output_format = _repeat+_rec2fits[dtype+option] else: raise", "= getattr(self, cname) if value != None: text += cname + ' =", "or _zero: for i in range(len(self._parent)): dummy[i][:] = dummy[i]*bscale+bzero # Boolean (logical) column", "It has a nested list structure constructed by error messages generated by verifications", "card else: _start = _where + 1 if _keyList[_start:].count('CONTINUE') == 0: break #", "NumCode = {8:'UInt8', 16:'Int16', 32:'Int32', 64:'Int64', -32:'Float32', -64:'Float64'} ImgCode = {'UInt8':8, 'Int16':16, 'Int32':32,", "in input: if not isinstance(col, Column): raise \"Element %d in the ColDefs input", "and indx < naxis: if naxis > 1: return _SinglePoint(1, indx) elif naxis", "group parameter values.\"\"\" if isinstance(parName, (int, long)): result = self.field(parName) else: indx =", "%s, stop < start.' % input _step = input.step if _step is None:", "tmp = [self[i] for i in indx] return ColDefs(tmp) def _setup(self): \"\"\" Initialize", "+ self._heapoffset self._file.seek(_offset) if self._coldefs._recformats[indx]._dtype is 'a': dummy[i] = chararray.array(self._file, itemsize=self._parent.field(indx)[i,0], shape=1) else:", "else: n = hdr['naxis'] hdr.update('extend', True, after='naxis'+`n`) def writeto(self, name, output_verify='exception', clobber=False): \"\"\"Write", "is found, though the # comment maybe an empty string. _value_FSC_RE = re.compile(", "'T' and randomGroups == 'T': groups = 1 else: groups = 0 size", "self.__file = __builtin__.open(self.name, _python_mode[mode]) # For 'ab+' mode, the pointer is at the", "1 else: raise \"HDUList can only append an HDU\" # make sure the", "FITS file instead of requiring data to all be written at once. 
The", "r'(?P<sepr>/ *)' r'(?P<comm>.*)' r')?$') # keys of commentary cards _commentaryKeys = ['', 'COMMENT',", "pointer is at the end after the open in # Linux, but is", "name = '' mo = re_extver.search(self._raw) if mo: extver = int(mo.group(1)) else: extver", "rec.Record.__init__(self, input, row) def par(self, fieldName): \"\"\"Get the group parameter value.\"\"\" return self.array.par(fieldName)[self.row]", "for the parameters parbzeros: list of bzeros for the parameters \"\"\" if isinstance(input,", "if (verbose): try: _extver = `hdu.header['extver']` except: _extver = '' # only append", "# delete the original file, and rename the tmp to the original file", "name of the file to append to @type data: array, table, or group", "_Hierarch else: raise ValueError, 'keyword name %s is too long (> 8), use", "close. It appears to find the # end of a string rather well,", "data array: %s\" % array array._dtype = recfmt._dtype else: raise ValueError, \"Data is", "array, # _convert is the scaled (physical) array. self._parent = input self._convert =", "% dummy[i] if len(x) > (_loc[indx+1]-_loc[indx]): raise ValueError, \"number `%s` does not fit", "if (len(blocks) % _blockLen) != 0: raise IOError, 'Header size is not multiple", "have one extension with # that name if _ver == None: found =", "# given header. # if not os.path.exists(name): if not self.header.has_key('SIMPLE'): hdulist = HDUList([PrimaryHDU()])", "to allow IOError exceptions to be raised when a file specified by a", "(`input.shape[1:]`, _fmt) _formats += data_fmt gcount = input.shape[0] for i in range(npars): _cols.append(Column(name='c'+`i+1`,", "a record array self.__setstate__(input.__getstate__()) # _parent is the original (storage) array, # _convert", "i in range(0, len(blocks), Card.length): _card = Card('').fromstring(blocks[i:i+Card.length]) _key = _card.key if _key", "val = getattr(self[i], cname) if val != None: attr[i] = val elif name", "def _getitem(self, offset): row = (offset - self._byteoffset) / self._strides[0] return _Group(self, row)", "_INDENT*tab+\"%s\\n\" % item # second time go through the next level items, each", "_step <= 0: raise IndexError, 'Illegal slice %s, step must be positive.' %", "try: if cards[0].key == 'SIMPLE': if 'GROUPS' in cards._keylist and cards['GROUPS'].value == True:", "return [self[i] for i in range(len(self))].__iter__() def __getitem__(self, key): \"\"\"Get an HDU from", "array if bzero not in ['', None, 0] or bscale not in ['',", "_zero = min _scale = (max - min) / (2.**8 - 1) else:", "else: raise AttributeError, name return getattr(self, name) def _setkey(self, val): \"\"\"Set the key", "True # Install new handler signal.signal(signal.SIGINT,New_SIGINT) if self.__file.mode not in ('append', 'update'): print", "urllib.urlretrieve(name) else: self.name = name self.mode = mode self.memmap = memmap if memmap", "dat, 'sci', 2) # update the 2nd SCI extension >>> update(file, dat, 3,", "allowed) to insert. The new card will be inserted before it. 
card: The", "val def _setvalue(self, val): \"\"\"Set the value attribute.\"\"\" if isinstance(val, (str, int, long,", "len(valStr) > (Card.length-10): self.__class__ = _Card_with_continue output = self._breakup_strings() else: print 'card is", "= _VLF([None]*len(self._parent)) dummy._dtype = self._coldefs._recformats[indx]._dtype for i in range(len(self._parent)): _offset = self._parent.field(indx)[i,1] +", "raw_data try: return self.__dict__[attr] except KeyError: raise AttributeError(attr) def _dimShape(self): \"\"\"Returns a tuple", "dtype == 'a': _nbytes = 1 else: _nbytes = num.getType(dtype).bytes for i in", "FITS_rec(rec.array(None, formats=_formats, shape=gcount, names= self._coldefs.names)) self.__setstate__(tmp.__getstate__()) for i in range(npars): (_scale, _zero) =", "'_theap': self.__dict__[attr] = self.header.get('THEAP', self.header['NAXIS1']*self.header['NAXIS2']) elif attr == '_pcount': self.__dict__[attr] = self.header.get('PCOUNT', 0)", "[] _keyList = [] blocks = self._raw if (len(blocks) % _blockLen) != 0:", "a list of cards. Will deal with CONTINUE cards in a later stage", "a keyword value from the CardList. If no keyword is found, return the", "other] indx=range(len(self)) for x in _other: indx.remove(x) tmp = [self[i] for i in", "return the HDUList and the extension.\"\"\" hdulist = open(filename, mode=mode) n_ext1 = len(ext1)", "[i.lower() for i in parnames] tmp = FITS_rec(rec.array(None, formats=_formats, shape=gcount, names= self._coldefs.names)) self.__setstate__(tmp.__getstate__())", "else: if 'extname' in keys: if 'extver' in keys: ext = ext2['extname'], ext2['extver']", "definitions.\"\"\" return self.columns def update(self): \"\"\" Update header keywords to reflect recent changes", "self.array.par(fieldName)[self.row] def setpar(self, fieldName, value): \"\"\"Set the group parameter value.\"\"\" self.array[self.row:self.row+1].setpar(fieldName, value) class", "str): raise TypeError, 'bad value type' value = value.upper() if self.header.has_key('EXTNAME'): self.header['EXTNAME'] =", "in place: Name =\", hdu.name, _extver # reset the modification attributes after updating", "_AllHDU: \"\"\"Base class for all HDU (header data unit) classes.\"\"\" pass class _CorruptedHDU(_AllHDU):", "_indx - 1 return _indx except: raise KeyError, 'Keyword %s not found.' %", "\"\"\"Break up long string value/comment into CONTINUE cards. This is a primitive implementation,", "Card.length: raise ValueError, \"The keyword %s with its value is too long.\" %", "Card._value_NFSC_RE.match(self._getValueCommentString()) # if not parsable (i.e. everything else) result = None return result", "will be appended at the end. key: keyword name value: keyword value (to", "#_after += 1 except: pass _pos = '>= '+`_after` self.req_cards('GCOUNT', _pos, _isInt, 1,", "_card.comment if isinstance(_comm, str) and _comm != '': longstring = longstring + _comm.rstrip()", "=1, the card will be appended after the last non-blank card. \"\"\" if", "isinstance(tmp._recformats[i], _FormatP): hdu.data._convert[i] = _makep(tmp._arrays[i][:n], hdu.data._parent.field(i)[:n], tmp._recformats[i]._dtype) else: if tbtype == 'TableHDU': #", "= rec.RecArray(_mmap, formats=tmp._recformats, names=tmp.names, shape=tmp._shape) else: _data = rec.array(hdu._file, formats=tmp._recformats, names=tmp.names, shape=tmp._shape) if", "# match. 
r'\\'(?P<strg>([ -~]+?|\\'\\'|)) *?\\'(?=$|/| )|' r'(?P<bool>[FT])|' r'(?P<numr>' + _numr_FSC + ')|' r'(?P<cplx>\\(", "= hdu.header['TFORM'+`i+1`] hdu.header['TFORM'+`i+1`] = key[:key.find('(')+1] + `hdu.data.field(i)._max` + ')' def flush(self, output_verify='exception', verbose=0):", "except: _extver = '' if hdu.header._mod or hdu.header.ascard._mod: hdu._file.seek(hdu._hdrLoc) self.__file.writeHDUheader(hdu) if (verbose): print", "self.tfile = tempfile.NamedTemporaryFile('rb+',-1,'.fits') self.name = self.tfile.name self.__file = self.tfile.file self.__file.write(zfile.read()) zfile.close() elif os.path.splitext(self.name)[1]", "\"\"\" If the keyword already exists, it's value/comment will be updated. If it", "self.offset = offset class _WholeLine(_KeyType): pass class _SinglePoint(_KeyType): pass class _OnePointAxis(_KeyType): pass class", "blocks + _padLength(len(blocks))*' ' if len(blocks)%_blockLen != 0: raise IOError self.__file.flush() loc =", "else: commentStr = ' / ' + self.comment else: commentStr = '' #", "== '_theap': self.__dict__[attr] = 0 try: return self.__dict__[attr] except KeyError: raise AttributeError(attr) #", "type=recfmt) num.where(array==0, ord('F'), ord('T'), _out) array = _out # make a copy if", "npars = len(pardata) if parbscales is None: parbscales = [None]*npars if parbzeros is", "Format\\n\" % _name for j in range(len(self)): results = results + \"%-3d %s\\n\"%(j,", "to the input string to be multiple of 80.\"\"\" _len = len(input) if", "the CardList. card: The Card to be appended. useblanks: Use any *extra* blank", "blank_loc[loc-1] + 1 if loc == 0: offset = -1 except: offset =", "_TempHDU(_ValidHDU): \"\"\"Temporary HDU, used when the file is first opened. This is to", "\"No header to setup HDU.\" # if the file is read the first", "self._coldefs._recformats[indx]._nx) continue (_str, _bool, _number, _scale, _zero, bscale, bzero) = self._get_scale_factors(indx) # add", "+= _dummy element += 1 return result class _Verify: \"\"\"Shared methods for verification.\"\"\"", "self.parnames = [i.lower() for i in parnames] tmp = FITS_rec(rec.array(None, formats=_formats, shape=gcount, names=", "== 'P': output_format = _FormatP('2i4') output_format._dtype = _fits2rec[option[0]] elif dtype == 'F': output_format", "to decipher where the last block of the Header ends, but this task", "Initialize all attributes to be a list of null strings.\"\"\" for cname in", "= '' else: dim = str(dim) self.header.update('PCOUNT', 0, 'number of parameters', after='NAXIS'+dim) if", "self.req_cards('GROUPS', _pos, 'val == True', True, option, _err) return _err # --------------------------Table related", "# variable length column if isinstance(self._coldefs._recformats[indx], _FormatP): desc = self._parent.field(indx) desc[:] = 0", "'' for card in self: block = block + repr(card) return block def", "hdu._file.seek(hdu._hdrLoc) self.__file.writeHDUheader(hdu) if (verbose): print \"update header in place: Name =\", hdu.name, _extver", "'(' + _floatFormat(self.value.real) + ', ' + _floatFormat(self.value.imag) + ')' valStr = '%20s'", "'TableHDU': _dict = {'I':num.Int32, 'F':num.Float32, 'E':num.Float32, 'D':num.Float64} _type = _dict[self._coldefs._Formats[indx][0]] # if the", "comment != '': self._setkey(key) self._setvalue(value) self._setcomment(comment) # for commentary cards, value can only", "\"\"\" Get the index of the key in the name list. 
The key", "any of the input argument (except array) can be a Card or just", "if size: self._file.seek(self._datLoc) data = GroupData(_get_tbdata(self)) data._coldefs = self.columns data.parnames = self.columns._pnames else:", "val): \"\"\"Set the key attribute, surrogate for the __setattr__ key case.\"\"\" if isinstance(val,", "header object associated with the data to be written to the file. :Returns:", "== 0: for arr in tmp._arrays: if arr is not None: dim =", "str): if n_ext2 == 1 and 'extver' in keys: ext = ext1[0], ext2['extver']", "verify (self, option='warn'): \"\"\"Wrapper for _verify.\"\"\" _option = option.lower() if _option not in", "object or None @param header: the header associated with 'data', if None, a", "self.__dict__['_valueModified'] = 0 elif name == 'comment': self.__dict__['comment'] = '' if valu is", "= not(_bool or _str) bscale = self._coldefs.bscales[indx] bzero = self._coldefs.bzeros[indx] _scale = bscale", "(FSC) keyword. _keywd_FSC = r'[A-Z0-9_-]* *$' _keywd_FSC_RE = re.compile(_keywd_FSC) # A number sub-string,", "string after the equal sign. If there is no equal sign, return the", "no card (or blank card), append at the end. \"\"\" new_card = Card(key,", "data_fmt gcount = input.shape[0] for i in range(npars): _cols.append(Column(name='c'+`i+1`, format = fits_fmt, bscale", "from a (raw) string. It will pad the string if it is not", "DELAYED): return self.data = data # update the header self.update_header() self._bitpix = self.header['BITPIX']", "all remaining axes else: offset *= _naxis if dims == []: dims =", "if _zero: self._convert[indx] += bzero elif _bool: self._convert[indx] = num.equal(dummy, ord('T')) else: return", "['TFORM', 'TSCAL', 'TZERO', 'TNULL', 'TTYPE', 'TUNIT']: for i in range(_tfields): del self[name+`i+1`] if", "# try to find exact match first try: indx = nameList.index(key.rstrip()) except ValueError:", "Checks for a valid value/comment string. 
It returns a match object # for", "an HDU.\" % hdu try: super(HDUList, self).__setitem__(_key, hdu) except IndexError: raise IndexError, 'Extension", "_File: \"\"\"A file I/O class\"\"\" def __init__(self, name, mode='copyonwrite', memmap=0): if mode not", "deal with var length table if isinstance(coldata, _VLF): for i in coldata: if", "TypeError, 'Wrong type of input' if option == 'left': tmp = list(self.data) +", "for i in parnames] tmp = FITS_rec(rec.array(None, formats=_formats, shape=gcount, names= self._coldefs.names)) self.__setstate__(tmp.__getstate__()) for", "update()] after: [same as in update()] \"\"\" self._add_commentary('history', value, before=before, after=after) def add_comment(self,", "each part is no longer than strlen and no word is cut into", "*= _naxis if dims == []: dims = [1] npt = 1 for", "hdu._file = ffo.getfile() # if not resized, update in place else: for hdu", "self._xtn hdr.ascard[0].comment = 'binary table extension' class StreamingHDU: \"\"\" A class that provides", "try: _extver = `hdu.header['extver']` except: _extver = '' # only append HDU's which", "self.header = Header(_list) if (data is not DELAYED): if isinstance(data, rec.RecArray): self.header['NAXIS1'] =", "isinstance(hdu, GroupsHDU): tmp._recformats[-1] = `hdu._dimShape()[:-1]` + tmp._dat_format if hdu._ffile.memmap: _mmap = hdu._ffile._mm[hdu._datLoc:hdu._datLoc+hdu._datSpan] _data", "= _tmp[:slashLoc].strip() self.__dict__['comment'] = _tmp[slashLoc+1:].strip() except: self.__dict__['value'] = _tmp.strip() elif input.group('numr') != None:", "isinstance(self, _ExtensionHDU): firstkey = 'XTENSION' firstval = self._xtn else: firstkey = 'SIMPLE' firstval", "sequence of variable-sized elements. \"\"\" objects.ObjectArray.__init__(self, input) self._max = 0 def __setitem__(self, key,", "self.header.update('BSCALE', _scale) else: del self.header['BSCALE'] if self.data._type != _type: self.data = num.array(num.around(self.data), type=_type)", "storage values if there is bscale/bzero if isinstance(array, num.NumArray): # boolean needs to", "ValueError, self._err_text + '\\n%s' % self._cardimage # verify the comment (string), it is", "= None if self.header['NAXIS'] > 0: _bitpix = self.header['BITPIX'] self._file.seek(self._datLoc) if isinstance(self, GroupsHDU):", "containing ASCII data. \"\"\" def __init__(self, data=None, header=None): self._file, self._offset, self._datLoc = None,", "_File(name, 'append') self._ffo.getfile().seek(0,2) self._hdrLoc = self._ffo.writeHDUheader(self) self._datLoc = self._ffo.getfile().tell() self._size = self.size() if", "slice, do this because Record has no __getstate__. # also more efficient. else:", "__getitem__(self, key): \"\"\"Get an HDU from the HDUList, indexed by number or name.\"\"\"", "of the input header, since it # may get modified. the data is", "= min((i+1)*8, nx) for j in range(_min, _max): num.bitwise_and(input[...,i], pow2[j-i*8], output[...,j]) def _wrapx(input,", "to TTYPE keyword format: column format, corresponding to TFORM keyword unit: column unit,", "keywords. # Do the first card here, instead of in the respective HDU", "the associated FITS file and memmap object, if any. 
output_verify: output verification option,", "exact name matched, it will try to match the name with case insensitivity.", "*?\\'(?=$|/| )|' r'(?P<bool>[FT])|' r'(?P<numr>' + _numr_NFSC + ')|' r'(?P<cplx>\\( *' r'(?P<real>' + _numr_NFSC", "and (threadName.getName() == 'MainThread') if singleThread: # Define new signal interput handler keyboardInterruptSent", "International Astronomical Union in 1999 and mandated by NASA as the standard format", "'column definitions have a different table type' elif isinstance(input, FITS_rec): # input is", "if _card.key == 'COMMENT': output.append(_card.value) return output def _add_commentary(self, key, value, before=None, after=None):", "\"\"\"Base class for all HDUs which are not corrupted.\"\"\" # 0.6.5.5 def size(self):", "if output_verify == 'warn': output_verify = 'exception' self.verify(option=output_verify) # check if the output", "including padding hdu._datSpan = _size + _padLength(_size) hdu._new = 0 self.__file.seek(hdu._datSpan, 1) if", "'END' card is not found. In the case of a missing 'END' card,", "[None]*len(self) for i in range(len(self)): cards[i]=Card('').fromstring(str(self[i])) return CardList(cards) def __repr__(self): \"\"\"Format a list", "_str is not None: self._checkText(_str) def fromstring(self, input): \"\"\"Construct a Card object from", "smaller than specified data size. File may have been truncated.' hdu._ffile = self", "try: _func = lambda x: num.array(x, type=recfmt._dtype) array = _VLF(map(_func, array)) except: try:", "def _breakup_strings(self): \"\"\"Break up long string value/comment into CONTINUE cards. This is a", "not to use CONTINUE if the string value can fit in one line.", "= self.formats self._arrays[i] = input[i].array \"\"\" def __getitem__(self, key): x = self.data[key] if", "if (verbose): print \"update header in place: Name =\", hdu.name, _extver if 'data'", "self._datLoc if curDataSize + data.itemsize()*data._size > self._size: raise IOError, \"Supplied data will overflow", "== 1: return _OnePointAxis(1, 0) else: raise IndexError, 'Index %s out of range.'", "= list(self.data.data.getshape())[1:] _format = `self.data._parent.field(0).type()` else: _shape = list(self.data.getshape()) _format = `self.data.type()` _shape.reverse()", "1] _zero = bzero not in ['', None, 0] # ensure bscale/bzero are", "of group parameters'), Card('GCOUNT', 1, 'number of groups'), Card('TFIELDS', 0, 'number of table", "= rec.RecArray.copy(self) r.__class__ = rec.RecArray r._coldefs = self._coldefs f = FITS_rec(r) f._convert =", "!= 0, and if there are blank cards directly before END, it will", "if isinstance(hdu, _ImageBaseHDU): if hdu.data._byteorder != 'big': output = hdu.data.byteswapped() else: output =", "a file name and a header. :Parameters: name : string The name of", "sure to preserve the one-to-one correspondence when updating the list(s). # Use lists,", "string. option: verification option, default=silentfix. \"\"\" # Only if the card image already", "re.compile(r'NAXIS =\\s*(\\d+)') re_naxisn = re.compile(r'NAXIS(\\d) =\\s*(\\d+)') re_gcount = re.compile(r'GCOUNT =\\s*(-?\\d+)') re_pcount = re.compile(r'PCOUNT", "after the stream has been filled will raise an IOError exception. 
If the", "KeyError, 'there are %d extensions of %s' % (nfound, `key`) else: return found", "# for the unparsable case if input is None: _tmp = self._getValueCommentString() try:", "len(self) > 1: self.update_extend() def index_of(self, key): \"\"\"Get the index of an HDU", "=\\s*(-?\\d+)') re_pcount = re.compile(r'PCOUNT =\\s*(-?\\d+)') re_groups = re.compile(r'GROUPS =\\s*(T)') simple = re_simple.search(block[:80]) mo", "not correct for all cases, but # it comes pretty darn close. It", "pass datLoc, for P format _data._heapoffset = hdu._theap + hdu._datLoc _data._file = hdu._file", "a match object # for a valid value/comment string. # The valu group", "= results + \"%-3d %s\\n\"%(j, self[j]._summary()) results = results[:-1] print results def open(name,", "the keyword name.\"\"\" _key = self.index_of(key) return super(CardList, self).__getitem__(_key) def __getslice__(self, start, end):", "slice %s, start must be integer.' % input _stop = input.stop if _stop", "_memmap_mode = {'readonly':'r', 'copyonwrite':'c', 'update':'r+'} TRUE = True # deprecated FALSE = False", "field # translation table for floating value string _fix_table = maketrans('de', 'DE') _fix_table2", "order if isinstance(hdu, _ImageBaseHDU): if hdu.data._byteorder != 'big': output = hdu.data.byteswapped() else: output", "offset = xoffset + strlen # collect the pieces in a list tmp", "= map(lambda x: x.lower().rstrip(), nameList) _count = operator.countOf(_list, _key) # occurrence of _key", "data unit) classes.\"\"\" pass class _CorruptedHDU(_AllHDU): \"\"\"A Corrupted HDU class.\"\"\" \"\"\" This class", "errcode, errmsg, headers): raise IOError, (errcode, errmsg, url) urllib._urlopener = ErrorURLopener() # Assign", "# NFSC allows lower case of DE for exponent, allows space between sign,", "to be used to populate the non-required keywords nrows: number of rows in", "\"&\" if _val[-1] == '&': _val = _val[:-1] longstring = longstring + _val", "CardList. key: the keyword name (a string) or the index (an integer). backward:", "self.array[self.row:self.row+1].setpar(fieldName, value) class _TableBaseHDU(_ExtensionHDU): \"\"\"FITS table extension base HDU class.\"\"\" def __init__(self, data=None,", "where the last block of the Header ends, but this task may be", "preferred order. _commonNames = ['name', 'format', 'unit', 'null', 'bscale', 'bzero', 'disp', 'start', 'dim']", "self.field(i) if self._convert[i] is not None: out._convert[i] = ndarray.NDArray.__getitem__(self._convert[i], key) del dummy return", "pass elif self._dtype == 'a': value = chararray.array(value, itemsize=1) else: value = num.array(value,", "% i output = output + '%-80s' % commstr return output def _words_group(self,", "hduList.append(ffo._readHDU()) except EOFError: break # check in the case there is extra space", "super(HDUList, self).__getitem__(key) if isinstance(_item, _TempHDU): super(HDUList, self).__setitem__(key, _item.setupHDU()) return super(HDUList, self).__getitem__(key) def __getslice__(self,", "will try to match the name with case insensitivity. So, in the last", "oldName # reopen the renamed new file with \"update\" mode os.rename(_name, oldName) ffo", "primary HDU.\" fix_text = 'Fixed by inserting one as 0th HDU.' fix =", "The Card to be inserted. useblanks: Use any *extra* blank cards? default=1. 
If", "parameter have the same name, the # value must be a list (or", "self.header.get('SIMPLE','F') randomGroups = self.header.get('GROUPS','F') if simple == 'T' and randomGroups == 'T': groups", "(_blockLen, len(block)) elif (blocks[:8] not in ['SIMPLE ', 'XTENSION']): raise IOError, 'Block does", "Return the size (in bytes) of the data portion of the HDU. :Parameters:", "if scaled, so as not to corrupt the original array if bzero not", "for j in range(groups,naxis): size = size * self.header['NAXIS'+`j+1`] bitpix = self.header['BITPIX'] gcount", "not self._resize: # determine if any of the HDU is resized for hdu", "heap area for each # variable length column if isinstance(self._coldefs._recformats[indx], _FormatP): desc =", "_File has ts own private attribute __file. \"\"\" if self.__file != None: if", "= self._bscale _zero = self._bzero elif option == 'minmax': if isinstance(_type, num.FloatingType): _scale", "= UNDEFINED self.__dict__['value'] = _val if '_valuestring' not in self.__dict__: self.__dict__['_valuestring'] = valu.group('valu')", "is # a null string elif isinstance(self.value, str): if self.value == '': valStr", "\"\"\"Return a list of all keyword-value pairs from the CardList.\"\"\" pairs = []", "x = self.data[key] if isinstance(key, (int, long)): return x else: return ColDefs(x) def", "3rd argument can be the header associated with the data. If the 3rd", "extension of a FITS file (and optionally the header). @type filename: string @param", "Card._commentaryKeys: # do NOT use self.key commentStr = '' elif self.__dict__.has_key('comment') or self.__dict__.has_key('_cardimage'):", "in a later stage as CONTINUE cards may span across blocks. \"\"\" if", "if str(self[_key]) != str(value): super(CardList, self).__setitem__(_key, value) self._keylist[_key] = value.key.upper() self.count_blanks() self._mod =", "option. \"\"\" self.__dict__['_err_text'] = '' self.__dict__['_fix_text'] = '' self.__dict__['_fixable'] = 1 if option", "card(s). elif len(self._cardimage) > Card.length: self.__class__ = _Card_with_continue # remove the key/value/comment attributes,", "'' # continue reading header blocks until END card is reached while 1:", "if option == 'left': tmp = list(self.data) + b else: tmp = b", "NOT use self.key commentStr = '' elif self.__dict__.has_key('comment') or self.__dict__.has_key('_cardimage'): if self.comment in", "return map(lambda x: getattr(x,'key'), self) def index_of(self, key, backward=0): \"\"\"Get the index of", "cards are *directly* before the END card self._blanks = 0 self.count_blanks() def __getitem__(self,", "HDU usually means that the data size cannot be calculated or the 'END'", "_iswholeline(key[i], _naxis) offset = offset * _naxis + indx.offset # all elements after", "in a # preferred order. _commonNames = ['name', 'format', 'unit', 'null', 'bscale', 'bzero',", "FITS file if filename exists, create if not. If only data is supplied,", "field. \"\"\" if isinstance(key, (int, long)): indx = int(key) elif isinstance(key, str): #", "# if the HDUList is resized, need to write it to a tmp", "parameters'), Card('GCOUNT', 1, 'number of groups'), Card('TFIELDS', 0, 'number of table fields') ])", "format in variable length table.\"\"\" pass # TFORM regular expression _tformat_re = re.compile(r'(?P<repeat>^[0-9]*)(?P<dtype>[A-Za-z])(?P<option>[!-~]*)')", "ext2 elif n_ext1 == 0: if n_ext2 == 0: ext = _Zero() elif", "private attribute __file. 
\"\"\" if self.__file != None: if self.__file.memmap == 1: self.mmobject", "_err) return _err # --------------------------Table related code---------------------------------- # lists of column/field definition common", "if Card._keywd_FSC_RE.match(val) is None: self.__dict__['_err_text'] = 'Illegal keyword name %s' % repr(val) self.__dict__['_fixable']", "If given an index, always returns 0. \"\"\" try: key = key.strip().upper() if", "== '_theap': self.__dict__[attr] = self.header.get('THEAP', self.header['NAXIS1']*self.header['NAXIS2']) elif attr == '_pcount': self.__dict__[attr] = self.header.get('PCOUNT',", "= 1 if option == 'ignore': return elif option == 'parse': # check", "= ' ' def __setattr__(self, attr, value): \"\"\"Set an HDU attribute.\"\"\" if attr", "disp=None, start=None, \\ dim=None, array=None): \"\"\"Construct a Column by specifying attributes. All attributes", "part.\"\"\" self.__file.flush() loc = self.__file.tell() _size = 0 if hdu.data is not None:", "= _Zero() elif 'ext' in keys: if n_ext2 == 1: ext = ext2['ext']", "_shape.reverse() _shape = tuple(_shape) _format = _format[_format.rfind('.')+1:] # if data is not touched", "blocks): \"\"\"Read blocks of header, and put each card into a list of", "a scaled column may have wrong byteorder if coldata2._byteorder != 'big': coldata2.byteswap() coldata2._byteorder", "The name of the file to which the header and data will be", "Any specifed arguments, except defaults, must be compliant to FITS standard. key: keyword", "GroupsHDU._dict[self.header['BITPIX']] for i in range(self.header['PCOUNT']): _bscale = self.header.get('PSCAL'+`i+1`, 1) _bzero = self.header.get('PZERO'+`i+1`, 0)", "a Column by specifying attributes. All attributes except format can be optional. name:", "Check for numbers with leading 0s. real = Card._number_NFSC_RE.match(valu.group('real')) _rdigt = real.group('digt').translate(_fix_table2, '", "+ _convert_format(val._dtype, reverse=1) + '(%d)' % VLdata._max else: val = _convert_format(val, reverse=1) #_update(keyword,", "header >>> getdata('in.fits', 2) # the second extension >>> getdata('in.fits', ext=2) # the", "or name.\"\"\" key = self.index_of(key) _item = super(HDUList, self).__getitem__(key) if isinstance(_item, _TempHDU): super(HDUList,", "bitpix = int(mo.group(1)) else: raise ValueError(\"BITPIX not found where expected\") mo = re_gcount.search(block)", "= value # if more than one group parameter have the same name,", "Delayed): tmp._arrays[i] = _arr.hdu.data._parent.field(_arr.field) # use the largest column shape as the shape", "n_ext2 == 1 and 'extver' in keys: ext = ext1[0], ext2['extver'] raise KeyError,", "if i > 0 and (not isinstance(self[i], _ExtensionHDU)): err_text = \"HDUList's element %s", "undefined. The comment field will # return a match if the comment separator", "\"Illegal key '%s'.\" % `key` return indx def _unwrapx(input, output, nx): \"\"\"Unwrap the", "if isinstance(self, _Hierarch): valStr = valStr.strip() # comment string if keyStr.strip() in Card._commentaryKeys:", "self.__file hdu._hdrLoc = _hdrLoc # beginning of the header area hdu._datLoc = self.__file.tell()", "if self.key not in Card._commentaryKeys and self._cardimage.find('=') != 8: if option in ['exception',", "force: if new key name already exist, force to have duplicate name. 
\"\"\"", ">= 0\", 0, option, _err) self.req_cards('GCOUNT', '== '+`naxis+4`, _isInt+\" and val == 1\",", "pos = mo.end(0) dims[int(mo.group(1))-1] = int(mo.group(2)) datasize = reduce(operator.mul, dims[groups:]) size = abs(bitpix)", "% indx+1 _trail = _loc[indx+1] - _width[indx] - self._coldefs.starts[indx] if _trail < 0:", "(card %d).\" % (keywd, _index) fix_text = \"Fixed by moving it to the", "string which must contain printable ASCII characters. _ASCII_text = r'[ -~]*$' _comment_FSC_RE =", "self[name+`i+1`] if issubclass(self._hdutype == TableHDU): for i in range(_tfields): del self['TBCOL'+`i+1`] except: pass", "isinstance(_stop, (int, long)): _stop = _normalize(_stop, naxis) else: raise IndexError, 'Illegal slice %s,", "= [] # can take one HDU, as well as a list of", "because Record has no __getstate__. # also more efficient. else: return tmp def", "all attributes to be a list of null strings.\"\"\" for cname in _commonNames:", "bytes after the last HDU or the file is corrupted.' % (len(hduList)+1) break", "cards[i]=Card('').fromstring(str(self[i])) return CardList(cards) def __repr__(self): \"\"\"Format a list of cards into a string.\"\"\"", "npars = 0 else: npars = len(pardata) if parbscales is None: parbscales =", "argument has wrong data type.' if 'header' in extkeys: header = extkeys['header'] del", "file. :Parameters: None :Returns: None \"\"\" self._ffo.close() class ErrorURLopener(urllib.FancyURLopener): \"\"\"A class to use", "not fixable: option = 'unfixable' if option in ['warn', 'exception']: #raise VerifyError, _text", "cards in wrong order. if isinstance(self, _ExtensionHDU): firstkey = 'XTENSION' firstval = self._xtn", "fix_value is not None # if pos is a string, it must be", "= _VLF(map(_func, array)) except: raise ValueError, \"Inconsistent input data array: %s\" % array", "be scaled and is therefore not very usable after the call. type (string):", "CardList.\"\"\" return map(lambda x: getattr(x,'key'), self) def index_of(self, key, backward=0): \"\"\"Get the index", "TBCOL keyword dim: column dimension corresponding to TDIM keyword \"\"\" # any of", "`int(num.array(data_shape).sum())` + _format _bscale = self.header.get('BSCALE', 1) _bzero = self.header.get('BZERO', 0) _cols.append(Column(name='data', format", "one or more mandatory Cards are corrupted (unparsable), such as the 'BITPIX', 'NAXIS',", "be created and it will be placed before or after the specified location.", "__add__ def __add__(self, other, option='left'): if isinstance(other, Column): b = [other] elif isinstance(other,", "'exception']: #raise VerifyError, _text #elif option == 'warn': pass # fix the value", "no need to check key and comment for 'parse' result = Card._value_NFSC_RE.match(self._getValueCommentString()) #", "card). If there is no card (or blank card), append at the end.", "offset * _naxis + indx.offset # all elements after the first WholeLine must", "__getstate__. # also more efficient. else: return tmp def _get_scale_factors(self, indx): \"\"\" Get", "(string): destination data type, use numarray attribute format, (e.g. 'UInt8', 'Int16', 'Float32' etc.).", "after which the new card will be placed. default=None. \"\"\" if self.has_key(key): j", "tuple. 
\"\"\" if 'header' in extkeys: _gethdr = extkeys['header'] del extkeys['header'] else: _gethdr", "a RecArray.\"\"\" # input should be a record array self.__setstate__(input.__getstate__()) # _parent is", "_scale or _zero: self._convert[i] = pardata[i] else: self._parent.field(i)[:] = pardata[i] (_scale, _zero) =", "'silentfix']: result = self._check('parse') self._fixValue(result) if option == 'fix': self.__dict__['_fix_text'] = 'Fixed card", "data = None self.__dict__[attr] = data elif attr == 'columns': _cols = []", "is None): # for X format if isinstance(self._coldefs._recformats[indx], _FormatX): _nx = self._coldefs._recformats[indx]._nx dummy", "value and _cardimage attributes are missing, # to avoid infinite loops if not", "header=None): \"\"\"Construct a primary HDU. data: the data in the HDU, default=None. header:", "IOError, \"The stream is closed and can no longer be written\" curDataSize =", "equal sign for commentary cards (i.e. part of the string value) _key =", "= value self._mod = 1 def __delitem__(self, key): \"\"\"Delete card(s) with the name", "= ext1 else: raise KeyError, 'Redundant/conflicting keyword argument(s): %s' % ext2 elif n_ext1", "of the file. If the file does not already exist, it will be", "key): \"\"\"Get the index of an HDU from the HDUList. The key can", "of the header can be used to reconstruct another kind of header. \"\"\"", "if imag.group('sign') == None: _val += eval(_idigt)*1j else: _val += eval(imag.group('sign') + _idigt)*1j", "if _number and (_scale or _zero): # only do the scaling the first", "and the whole card must have string value. \"\"\" def __str__(self): \"\"\"Format a", "# from a table parent data, just pass it hdu.data._parent.field(i)[:n] = tmp._arrays[i][:n] elif", "to numbers if self._coldefs._tbtype == 'TableHDU': _dict = {'I':num.Int32, 'F':num.Float32, 'E':num.Float32, 'D':num.Float64} _type", "ValueError, \"column `%s` starting point overlaps to the previous column\" % indx+1 _trail", "found, otherwise, 0. key: keyword name. If given an index, always returns 0.", "if cname == 'format' and isinstance(self, BinTableHDU): val = _cols._recformats[i] if isinstance(val, _FormatX):", "= super(HDUList, self).__getslice__(start,end) result = HDUList(_hdus) return result def __setitem__(self, key, hdu): \"\"\"Set", "i in range(1, len(self)): if str(self[-i]) != ' '*Card.length: self._blanks = i -", "_err = errlist fix = '' cards = self.header.ascard try: _index = cards.index_of(keywd)", "not fit into the output's itemsize of %s\" % (x, _width[indx]) else: self._parent.field(indx)[i]", "% format else: raise ValueError, \"Must specify format to construct Column\" # scale", "len(input) == offset: break xoffset = offset return list class Header: \"\"\"FITS header", "takes precedence over `after' if both specified. default=None. after: name of the keyword,", "cards. 
A corrupted HDU usually means that the data size cannot be calculated", "_format, bscale = _bscale, bzero = _bzero)) data_shape = self._dimShape()[:-1] dat_format = `int(num.array(data_shape).sum())`", "= re.sub(\"''\", \"'\", _card.value).rstrip() # drop the ending \"&\" if _val[-1] == '&':", "_err.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix)) # if value checking is specified if test: val", "if block == '': break else: break hdu._raw += block _size, hdu.name =", "0] # ensure bscale/bzero are numbers if not _scale: bscale = 1 if", "= numr.group('digt').translate(_fix_table, ' ') if numr.group('sign') is not None: _valStr = numr.group('sign')+_valStr elif", "for one word longer than strlen, break in the middle if offset <=", "extension with the input data/header. @type filename: string @param filename: name of the", "remove the key/value/comment attributes, some of them may not exist for name in", "(keywd, val) fix_text = \"Fixed by setting a new value '%s'.\" % fix_value", "self.header.get('NAXIS', 0) # for random group image, NAXIS1 should be 0, so we", "try: _last = self.ascard.index_of(key, backward=1) self.ascard.insert(_last+1, new_card) except: self.ascard.append(new_card, bottom=1) self._mod = 1", "if attr == 'name' and value: if not isinstance(value, str): raise TypeError, 'bad", "None \"\"\" self._ffo.close() class ErrorURLopener(urllib.FancyURLopener): \"\"\"A class to use with urlretrieve to allow", "part.\"\"\" self._file.seek(0, 2) return self._file.tell() - self._datLoc def _summary(self): return \"%-10s %-11s\" %", "raise IndexError, 'Subsection data must be contiguous.' for j in range(i+1,naxis): _naxis =", "[['a','b','c']] # equally, beautiful! _func = lambda x: chararray.array(x, itemsize=1) array = _VLF(map(_func,", "if filename exists, create if not. If only data is supplied, a minimal", "[col.array for col in self.data] elif name == '_recformats': if self._tbtype == 'BinTableHDU':", "isinstance(other, (list, tuple)): other = [other] _other = [_get_index(self.names, key) for key in", "file to append to @type data: array, table, or group data object @param", "%s is not a string' % val self.__dict__['comment'] = val def __setattr__(self, name,", "item.__str__(tab=tab+1) # print out a message only if there is something if _dummy.strip():", "standard.: %s' % self.key # verify the key, it is never fixable #", "= '' else: commentStr = ' / ' + self.comment else: commentStr =", "return size def copy(self): \"\"\"Make a copy of the HDU, both header and", "_file: file associated with array (None) _datLoc: starting byte location of data block", "FITS, the close() call can also close the mm object. try: self.mmobject.close() except:", "be FITS standard.\"\"\" # use repr (not str) in case of control character", "attributes are missing, # to avoid infinite loops if not (self.__dict__.has_key('value') or self.__dict__.has_key('_cardimage')):", "not a Card\" % str(card) def _pos_insert(self, card, before, after, useblanks=1): \"\"\"Insert a", "% input.index(col) self.data = [col.copy() for col in input] # if the format", "'TABLE' if self.header[0].rstrip() != self._xtn: self.header[0] = self._xtn self.header.ascard[0].comment = 'ASCII table extension'", "FITS standard compliant (FSC) keyword. 
_keywd_FSC = r'[A-Z0-9_-]* *$' _keywd_FSC_RE = re.compile(_keywd_FSC) #", "_start = 0 if self._cardimage[:8].upper() == 'HIERARCH': _start = 8 self.__class__ = _Hierarch", "' ') if imag.group('sign') is not None: _imagStr = imag.group('sign') + _imagStr _valStr", "created (BinTableHDU or TableHDU) \"\"\" # construct a table HDU hdu = eval(tbtype)(header=header)", "2) return self._file.tell() - self._datLoc def _summary(self): return \"%-10s %-11s\" % (self.name, \"CorruptedHDU\")", "parameters \"\"\" if isinstance(input, num.NumArray): _formats = '' _cols = [] if pardata", "format = fits_fmt, bscale = parbscales[i], bzero = parbzeros[i])) _cols.append(Column(name='data', format = fits_fmt,", "provided into an image # extension header. # self.header.update('XTENSION','IMAGE','Image extension', after='SIMPLE') del self.header['SIMPLE']", "else: # verify the equal sign position if self.key not in Card._commentaryKeys and", "self.header.get('NAXIS', 0) self.req_cards('PCOUNT', '== '+`naxis+3`, _isInt+\" and val >= 0\", 0, option, _err)", "after the open in # Linux, but is at the beginning in Solaris.", "DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,", "after='PCOUNT') self._ffo = _File(name, 'append') self._ffo.getfile().seek(0,2) self._hdrLoc = self._ffo.writeHDUheader(self) self._datLoc = self._ffo.getfile().tell() self._size", "name=None): PrimaryHDU.__init__(self, data=data, header=header) self.header._hdutype = GroupsHDU self.name = name if self.header['NAXIS'] <=", "a new '%s' card.\" % keywd if fixable: # use repr to accomodate", "== 2 and 'extver' in keys: ext = ext2['ext'], ext2['extver'] else: raise KeyError,", "1): raise KeyError, 'there are %d extensions of %s' % (nfound, `key`) else:", "# Initialize tempcache with an empty # dictionary to enable file cacheing class", "ext1[0], ext2['extver'] raise KeyError, 'Redundant/conflicting keyword argument(s): %s' % ext2 elif n_ext1 ==", "else: raise TypeError, \"table data has incorrect type\" # set extension name if", "self._resize = 1 if verbose: print \"One or more header is resized.\" break", "= self.size()*8/abs(_bitpix) else: dims = self._dimShape() code = _ImageBaseHDU.NumCode[self.header['BITPIX']] if self._ffile.memmap: _mmap =", "existence, location, and value of a required Card.\"\"\" \"\"\"If pos = None, it", "value def _verify(self, option='warn'): _err = _ValidHDU._verify(self, option=option) # Verify location and value", "not isinstance(item, _ErrList): result += _INDENT*tab+\"%s\\n\" % item # second time go through", "list) of a Column.\"\"\" indx = _get_index(self.names, col_name) getattr(self, attrib+'s')[indx] = new_value def", "x: print 'Output verification result:' print x if _option == 'exception' and x:", "array \"\"\" _offset = 0 data_output = _VLF([None]*len(input)) data_output._dtype = dtype if dtype", "tmp def _strip(self): \"\"\"Strip cards specific to a certain kind of header. 
Strip", "used to endorse or promote products derived from this software without specific prior", "self.__dict__['key'] = head.strip() else: self.__dict__['key'] = head.strip().upper() def _extractValueComment(self, name): \"\"\"Exatrct the keyword", "str(value): super(CardList, self).__setitem__(_key, value) self._keylist[_key] = value.key.upper() self.count_blanks() self._mod = 1 else: raise", "None: _valStr = numr.group('sign')+_valStr elif input.group('cplx') != None: real = Card._number_NFSC_RE.match(input.group('real')) _realStr =", "is the original (storage) array, # _convert is the scaled (physical) array. self._parent", "_err = _ExtensionHDU._verify(self, option=option) self.req_cards('PCOUNT', None, _isInt+\" and val == 0\", 0, option,", "else: _cardList.append(_card) _keyList.append(_key) # Deal with CONTINUE cards # if a long string", "of the key in the name list. The key can be an integer", "class. It has this two-tier calls because _File has ts own private attribute", "#%d.\\n There may be extra bytes after the last HDU or the file", "# it comes pretty darn close. It appears to find the # end", "list of Columns or a ColDefs object. header: header to be used to", "isinstance(self._coldefs._recformats[indx], _FormatP): dummy = _VLF([None]*len(self._parent)) dummy._dtype = self._coldefs._recformats[indx]._dtype for i in range(len(self._parent)): _offset", "EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS", "will be inserted before it. card: The Card to be inserted. useblanks: Use", "indx = _get_index(self.names, col_name) getattr(self, attrib+'s')[indx] = new_value def change_name(self, col_name, new_name): \"\"\"Change", "None, 'val == 0', 0, option, _err) tfields = self.header['TFIELDS'] for i in", "by indexing or by the keyword name.\"\"\" if isinstance (value, Card): _key =", "hdu in self: # Header: # Add 1 to .ascard to include the", "+ hdu.data._gap if _pcount > 0: hdu.header['PCOUNT'] = _pcount # update TFORM for", "attribute __file. \"\"\" if self.__file != None: if self.__file.memmap == 1: self.mmobject =", "'BinTableHDU': _str = 'a' in self._coldefs.formats[indx] _bool = self._coldefs._recformats[indx][-2:] == _booltype else: _str", "arr = chararray.array(input+' ', itemsize=1) # locations of the blanks blank_loc = num.nonzero(arr", "# a null string elif isinstance(self.value, str): if self.value == '': valStr =", "to match case-insentively, _key = key.lower().rstrip() _list = map(lambda x: x.lower().rstrip(), nameList) _count", "_cols.append(Column(name='c'+`i+1`, format = fits_fmt, bscale = parbscales[i], bzero = parbzeros[i])) _cols.append(Column(name='data', format =", "dummy[i]*bscale+bzero # Boolean (logical) column if self._coldefs._recformats[indx]._dtype is _booltype: for i in range(len(self._parent)):", "# check TFIELDS and NAXIS2 hdu.header['TFIELDS'] = hdu.data._nfields hdu.header['NAXIS2'] = hdu.data.shape[0] # calculate", "# self._ffo.getfile().write(_padLength(self._size)*'\\0') self.writeComplete = 1 self._ffo.getfile().flush() return self.writeComplete def size(self): \"\"\" Return the", "num.NumArray): # make the scaled data = 0, not the stored data hdu.data._parent.field(i)[n:]", "name with case insensitivity. So, in the last example, field('Abc') will cause an", "in _list if _count == 1: indx = _list.index(_key) elif _count == 0:", "data to all be written at once. 
The following psudo code illustrates its", "single word which is longer than strlen, then it will be split in", "beginning of the file and the provided header will be added as the", "= str(self.value) # put all parts together output = keyStr + eqStr +", "self.header.update('BSCALE', self.data._coldefs.bscales[npars]) if _zero: self.header.update('BZERO', self.data._coldefs.bzeros[npars]) for i in range(npars): self.header.update('PTYPE'+`i+1`, self.data.parnames[i]) (_scale,", "be scaled too if recfmt == _booltype: _out = num.zeros(array.shape, type=recfmt) num.where(array==0, ord('F'),", "_bytes != hdu._datSpan: self._resize = 1 if verbose: print \"One or more data", "filename: string @param filename: name of the file to be updated data: the", "dummy[i] = num.array(self._file, type=self._coldefs._recformats[indx]._dtype, shape=self._parent.field(indx)[i,0]) dummy[i]._byteorder = 'big' # scale by TSCAL and", "range(min(self._blanks, how_many)): del self[-1] # it also delete the keylist item def keys(self):", "\"\"\"Construct a FITS record array from a RecArray.\"\"\" # input should be a", "= re.compile(r'GROUPS =\\s*(T)') simple = re_simple.search(block[:80]) mo = re_bitpix.search(block) if mo is not", "if isinstance(parName, (int, long)): self.field(parName)[:] = value else: indx = self._unique[parName] if len(indx)", "avoid circular reference of _pcount # pass the attributes for attr in ['formats',", "fixable: # use repr to accomodate both string and non-string types # Boolean", "Overwrite the output file if exists, default = False. \"\"\" if isinstance(self, _ExtensionHDU):", "data from input (an HDU object).\"\"\" tmp = hdu.columns # get the right", "= _File(name, mode=mode, memmap=memmap) hduList = HDUList(file=ffo) # read all HDU's while 1:", "None: _comm = valu.group('comm') if isinstance(_comm, str): self.__dict__['comment'] = _comm.rstrip() def _fixValue(self, input):", "has_key(self, key): \"\"\"Check for existence of a keyword. Returns 1 if found, otherwise,", "KeyError, 'Insufficient keyword argument: %s' % ext2 return hdulist, ext def getheader(filename, *ext,", "_extractValueComment(self, name): \"\"\"Exatrct the keyword value or comment from the card image.\"\"\" longstring", "in self.__dict__: self.__dict__['_valueModified'] = 0 elif name == 'comment': self.__dict__['comment'] = '' if", "Get the table data from input (an HDU object).\"\"\" tmp = hdu.columns #", "This is a convenience method to provide a user easier output interface if", "\"\"\" A module for reading and writing FITS files and manipulating their contents.", "beginning locations are computed. 
\"\"\" _cardList = [] _keyList = [] blocks =", ".name), Each attribute in ColDefs is a list of corresponding attribute values from", "data: the new data used for appending @type header: L{Header} object or None", "isinstance(indx, (int, long)): if indx >= 0 and indx < naxis: if naxis", "(self._bzero != 0 or self._bscale != 1): if _bitpix > 0: # scale", "all the cards you need in the header: header.update(key,value,comment) shdu = pyfits.StreamingHDU('filename.fits',header) for", "(list, tuple)): for col in input: if not isinstance(col, Column): raise \"Element %d", "HDU.\" # if the file is read the first time, no need to", "= _dict[self._coldefs._Formats[indx][0]] # if the string = TNULL, return ASCIITNULL nullval = self._coldefs.nulls[indx].strip()", "both SIMPLE and XTENSION to accomodate Extension # and Corrupted cases del self['SIMPLE']", "the data is still a \"view\" (for now) hcopy = header.copy() hcopy._strip() _list.extend(hcopy.ascardlist())", "in the constructor. \"\"\" size = 0 naxis = self.header.get('NAXIS', 0) if naxis", "%s already exists in header.' % newkey _index = self.ascard.index_of(oldkey) _comment = self.ascard[_index].comment", "appending after these blank cards, so the total space will not increase (default).", "find the END card mo = end_RE.search(block) if mo is None: hdu._raw +=", "= _iswholeline(key[j], _naxis) dims.append(indx.npts) if not isinstance(indx, _WholeLine): raise IndexError, 'Subsection data is", "i in range(len(dummy)): x = _fmt % dummy[i] if len(x) > (_loc[indx+1]-_loc[indx]): raise", "print 'X format' elif dtype+option in _rec2fits.keys(): # record format _repeat = ''", "of an ASCII column has no width, add one if tbtype == 'TableHDU':", "Binary table byteswap elif isinstance(hdu, BinTableHDU): for i in range(hdu.data._nfields): coldata = hdu.data.field(i)", "extension is a TableHDU containing ASCII data. \"\"\" def __init__(self, data=None, header=None): self._file,", "value, corresponding to TNULL keyword bscale: bscale value, corresponding to TSCAL keyword bzero:", "of the file. \"\"\" self.header = header.copy() # # Check if the file", "offset class _WholeLine(_KeyType): pass class _SinglePoint(_KeyType): pass class _OnePointAxis(_KeyType): pass class _LineSlice(_KeyType): pass", "option == 'minmax': if isinstance(_type, num.FloatingType): _scale = 1 _zero = 0 else:", "a temporary file name which should not already exist. Use the directory of", "NAXISi's for j in range(len(axes)+1, old_naxis+1): try: del self.header.ascard['NAXIS'+`j`] except KeyError: pass if", "'big': coldata2.byteswap() coldata2._byteorder = 'big' # In case the FITS_rec was created in", "self.index_of(key) if isinstance(hdu, (slice, list)): if isinstance(_key, int): raise ValueError, \"An element in", "_coldefs = ColDefs(_cols) _coldefs._shape = self.header['GCOUNT'] _coldefs._dat_format = _fits2rec[_format] _coldefs._pnames = _pnames self.__dict__[attr]", "_verify(self, option='warn'): \"\"\"No verification (for now).\"\"\" return _ErrList([]) class _Card_with_continue(Card): \"\"\"Cards having more", "return _err class GroupsHDU(PrimaryHDU): \"\"\"FITS Random Groups HDU class.\"\"\" _dict = {8:'B', 16:'I',", "== 'F': output_format = 'f8' else: raise ValueError, \"Illegal format %s\" % fmt", "the random group data.\"\"\" def __init__(self, input, row=0): rec.Record.__init__(self, input, row) def par(self,", "added. 
before: [same as in update()] after: [same as in update()] \"\"\" self._add_commentary('comment',", "ext2['extver'] else: raise KeyError, 'Redundant/conflicting keyword argument(s): %s' % ext2 else: if 'extname'", "list of Cards, default=[]. \"\"\" # decide which kind of header it belongs", "(int, long)): _start = _normalize(_start, naxis) else: raise IndexError, 'Illegal slice %s, start", "0 for i in range(len(tmp)): _formats += 'a%d,' % tmp.spans[i] _itemsize += tmp.spans[i]", "*ext, **extkeys) hdu = hdulist[_ext] hdr = hdu.header hdulist.close() return hdr def getdata(filename,", "in this module. @group Header-related Classes: Card, CardList, _Card_with_continue, Header, _Hierarch @group HDU", "len(self.data) _ncols = len(self.columns.formats) _format = self.columns.formats # if data is not touched", "contains CONTINUE card(s). elif len(self._cardimage) > Card.length: self.__class__ = _Card_with_continue # remove the", "return self._file.tell() - self._datLoc def _summary(self): return \"%-10s %-11s\" % (self.name, \"CorruptedHDU\") def", "= abs(bitpix) * gcount * (pcount + size) / 8 return size def", "== '': valStr = \"''\" else: _expValStr = self.value.replace(\"'\",\"''\") valStr = \"'%-8s'\" %", "option, _err) self.req_cards('GCOUNT', '== '+`naxis+4`, _isInt+\" and val == 1\", 1, option, _err)", "_end self._Formats = self.formats self._arrays[i] = input[i].array \"\"\" def __getitem__(self, key): x =", "\"\"\"Verify exception class.\"\"\" pass class _ErrList(list): \"\"\"Verification errors list class. It has a", "is None: _shape, _format = (), '' else: # the shape will be", "= self.hdu.header['NAXIS'+`naxis-i`] indx = _iswholeline(key[i], _naxis) offset = offset * _naxis + indx.offset", "= 1 return name, extver def _getsize(self, block): \"\"\"Get the size from the", "and the header appended to the end of the file. If the file", "abs(bitpix) * gcount * (pcount + datasize) / 8 if simple and not", "'Float32':-32, 'Float64':-64} def __init__(self, data=None, header=None): self._file, self._datLoc = None, None if header", "_update('naxis1', self.data._itemsize, after='naxis') _update('naxis2', self.data._shape[0], after='naxis1') _update('tfields', len(_cols), after='gcount') # Wipe out the", "_realStr + ', ' + _imagStr + ')' self.__dict__['_valuestring'] = _valStr self._ascardimage() def", "it is not the length of a card image (80 columns). If the", "%s%s\" % \\ (self.name, type, len(self.header.ascard), _shape, _format, _gcount) def scale(self, type=None, option=\"old\",", "== 1: pass elif self._dtype == 'a': value = chararray.array(value, itemsize=1) else: value", "be opened and the header appended to the end of the file. If", "HDU's which are \"new\" if hdu._new: self.__file.writeHDU(hdu) if (verbose): print \"append HDU\", hdu.name,", "range(len(self._parent)): dummy[i] = num.equal(dummy[i], ord('T')) self._convert[indx] = dummy return self._convert[indx] if _str: return", "[] _nblanks = input.count(' ') nmax = max(_nblanks, len(input)/strlen+1) arr = chararray.array(input+' ',", "header will not be initialized till the HDU is accessed. \"\"\" def _getname(self):", "used name: name to be populated in EXTNAME keyword \"\"\" if header is", "for j in range(naxis): size = size * self.header['NAXIS'+`j+1`] bitpix = self.header['BITPIX'] gcount", "key != '' or value != '' or comment != '': self._setkey(key) self._setvalue(value)", "# deprecated FALSE = False # deprecated _INDENT = \" \" DELAYED =", "can be a name or index. newkey: new keyword, must be a string.", "'ABC' respectively. 
(b) When you *refer* to a field (presumably with the field", "j in range(self.header['TFIELDS']): bcol = self.header['TBCOL'+`j+1`] valu = self.header['TFORM'+`j+1`] fmt = self.__format_RE.match(valu) if", "products derived from this software without specific prior written permission. THIS SOFTWARE IS", "extension being referenced If the optional keyword 'header' is set to True, this", "the largest column shape as the shape of the record if nrows ==", "created in a LittleEndian machine hdu.data._byteorder = 'big' hdu.data._parent._byteorder = 'big' output =", "file does already exist, but the provided header represents a Primary header, the", "0 or self._bscale != 1): if _bitpix > 0: # scale integers to", "all HDU's into memory.\"\"\" for i in range(len(self)): if self[i].data is not None:", "if not self._resize: # determine if any of the HDU is resized for", "get the data (does not include bscale/bzero for now XXX) _bitpix = self.hdu.header['BITPIX']", "# Boolean is also OK in this constructor _card = \"Card('%s', %s)\" %", "= \"Fixed by setting a new value '%s'.\" % fix_value if fixable: fix", "part of the random group, # since binary table does not support ND", "self.name = name def __getattr__(self, attr): \"\"\"Get the 'data' or 'columns' attribute.\"\"\" if", "long string has CONTINUE cards, the \"Card\" is considered # to be more", "same as 7A in # binary table, so both will produce 'a7'. if", "memmap: Is memmory mapping to be used? default=0. \"\"\" # instantiate a FITS", "if isinstance(self, _ExtensionHDU): hdulist = HDUList([PrimaryHDU(), self]) elif isinstance(self, PrimaryHDU): hdulist = HDUList([self])", "# if the column data is not NDarray, make it to be one,", "= hdu.columns = input._coldefs else: # input is a list of Columns tmp", "to agree with the data.\"\"\" old_naxis = self.header.get('NAXIS', 0) if isinstance(self.data, GroupData): self.header['BITPIX']", "new_format): #new_format = _convert_format(new_format) #self.change_attrib(col_name, 'format', new_format) def _get_tbdata(hdu): \"\"\" Get the table", "\"\"\"Summarize the info of the HDU's in this HDUList.\"\"\" if self.__file is None:", "or bzero !=0): _scale = bscale _zero = bzero else: if option ==", "None: size = 0 else: size = len(tmp._arrays[i]) n = min(size, nrows) if", "return self.__dict__[name] \"\"\" # make sure to consider the case that the starting", "if data._byteorder != 'big': # # byteswap little endian arrays before writing #", "parent array, using the (latest) scaled array.\"\"\" _dict = {'A':'s', 'I':'d', 'F':'f', 'E':'E',", "> npts: indx = npts return indx _start = input.start if _start is", "mode else: if os.path.splitext(self.name)[1] == '.gz': # Handle gzip files if mode in", "print 'card is too long, comment is truncated.' output = output[:Card.length] self.__dict__['_cardimage'] =", "raise ValueError, \"No header to setup HDU.\" # if the file is read", "# scale integers to Float32 self.data = num.array(raw_data, type=num.Float32) else: # floating point", "def __len__(self): return len(self.data) def __repr__(self): return 'ColDefs'+ `tuple(self.data)` def __coerce__(self, other): pass", "in one line. 
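A minimal sketch of this header-editing interface, written against the Python 2-era pyfits API; the file name and keyword values are hypothetical:

    import pyfits

    f = pyfits.open('example.fits', mode='update')
    hdr = f[0].header

    # update() changes an existing card in place, or appends a new one;
    # after=/before= control where a brand-new card is inserted.
    hdr.update('OBSERVER', 'E. Hubble', 'who took the data', after='NAXIS')
    hdr.add_history('flat-field correction applied')
    hdr.add_comment('keywords follow the FITS standard, NOST 100-2.0')

    print hdr['OBSERVER']
    f.close()              # 'update' mode flushes the modified header to disk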
When a card is rendered back into its image, numeric values are right-justified in a fixed 20-column field, floating-point exponents are normalized so that platform differences (E-009 versus E-09) do not leak into the file, string values are quoted with embedded quotes doubled, and a comment that does not fit on the card is truncated rather than wrapped onto another card.

The module as a whole reads and writes Flexible Image Transport System (FITS) files and manipulates their contents, following the NASA/Science Office of Standards and Technology publication NOST 100-2.0 (license: http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE). Table data is returned as a FITS_rec, a layer over a record array whose columns are fetched with field(), by name or by 0-based index. Field names are matched case-insensitively: field('xyz'), field('Xyz'), and field('XYZ') all return the column named XYZ; case matters only when two columns differ solely by case (say 'abc' and 'ABC'), in which case field('abc') returns the first field and field('ABC') the second.
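A short sketch of reading a table extension this way; the file and column names are hypothetical:

    import pyfits

    f = pyfits.open('table.fits')
    tbdata = f[1].data                 # a FITS_rec record array

    # Columns by name (case-insensitive) or by 0-based index; TSCAL/TZERO
    # scaling and ASCII-table number formatting are applied on access.
    flux = tbdata.field('flux')
    first = tbdata.field(0)

    print f[1].columns.names           # ColDefs keeps per-column attribute lists
    f.close()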
On top of the HDUList returned by open(filename, mode='copyonwrite', memmap=0), the module provides convenience functions for one-shot access: getheader() returns the header of a chosen extension, getdata() returns its data (and the header too when called with header=True), getval() returns a single keyword value, and writeto(), append(), and update() write a data/header pair to a new file, to the end of an existing file, or into an existing extension. writeto() takes a clobber keyword: if True and the output file already exists, it is overwritten. In all of these the extension may be given as a number, an EXTNAME string, or an (EXTNAME, EXTVER) pair, and EXTNAME matching is not case sensitive; when only data is supplied where a header is needed, a minimal appropriate header is created for the data object.
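A sketch of the convenience functions; every file name, extension name, and keyword here is hypothetical:

    import pyfits

    hdr  = pyfits.getheader('in.fits')            # primary header
    dat2 = pyfits.getdata('in.fits', 2)           # data of the second extension
    sci  = pyfits.getdata('in.fits', 'sci')       # data of the SCI extension
    shdr = pyfits.getheader('in.fits', 'sci')     # ... and its header
    dateobs = pyfits.getval('in.fits', 'DATE-OBS')

    # Write the SCI array to a new file, overwriting any existing one,
    # then push a modified version back into the SCI extension in place.
    pyfits.writeto('out.fits', sci, shdr, clobber=True)
    pyfits.update('in.fits', sci * 2, shdr, 'sci')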
Each HDU verifies the presence, position, and value of its mandatory keywords (SIMPLE or XTENSION first, then BITPIX, NAXIS and the NAXISn cards, and for tables and random groups PCOUNT, GCOUNT, and TFIELDS); depending on the verification option a violation raises an exception, prints a warning, or is fixed silently. For ASCII tables, a column whose starting position is not given is placed right after the previous field, and the field spans are recomputed from the starting positions and format widths.

StreamingHDU writes an HDU to a file incrementally instead of holding the whole data array in memory. Fill in all the cards you need in the header with header.update(key, value, comment), construct the StreamingHDU from a file name and that header, then call write() for each piece of data. write() raises IOError if the stream has already been closed or if the supplied data would overflow the size implied by the header, and TypeError if the data type does not match BITPIX; close() finishes the HDU. If the named file does not exist it is created, and when the header describes an extension a default primary HDU is prepended first; if the file already exists, the new HDU is appended to its end.
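A sketch of streaming an image in chunks; the output file name is hypothetical, and the example assumes the numarray package this module is written against:

    import pyfits
    import numarray

    # The header must fully describe the HDU before any data is written.
    hdr = pyfits.Header()
    hdr.update('SIMPLE', True)
    hdr.update('BITPIX', 16)
    hdr.update('NAXIS', 2)
    hdr.update('NAXIS1', 100)
    hdr.update('NAXIS2', 100)

    shdu = pyfits.StreamingHDU('stream.fits', hdr)
    for i in range(10):
        # ten 10x100 chunks add up to exactly NAXIS1 * NAXIS2 Int16 values
        shdu.write(numarray.zeros((10, 100), type=numarray.Int16))
    shdu.close()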
Internally a FITS_rec keeps two views of every column: the parent (storage) array exactly as it is laid out in the file, and a converted (physical) array produced on first access by applying byte order, scaling, Boolean decoding, X-format bit unpacking (a UInt8 array of shape (s, nbytes) is unwrapped into a Boolean array of shape (s, nx), where nx is the number of bits, and wrapped back on output), and heap lookups for variable-length (P-format) columns. Slicing a FITS_rec keeps the sliced converted columns tied to the sliced parent, so scaled data is viewed rather than copied.

Columns are described by Column objects whose attributes map onto table keywords: name (TTYPE), format (TFORM), unit (TUNIT), null (TNULL), bscale (TSCAL), bzero (TZERO), disp (TDISP), start (the starting column of an ASCII-table field), and dim, plus an optional array holding the column data, which may be given as a plain list or tuple. A ColDefs collects Columns for a binary or ASCII table; each Column attribute appears in the ColDefs as a list (names, formats, units, and so on). new_table() builds a BinTableHDU (the default) or TableHDU from a ColDefs, a list of Columns, or an existing table HDU, optionally with more rows (nrows) than the input arrays provide; with fill=1 every cell is zero or blank filled instead of copied from the input.
An open file behaves as an HDUList, a list of HDUs that can be indexed by number, by EXTNAME, or by an (EXTNAME, EXTVER) tuple. info() prints a one-line summary of each HDU, append() adds an HDU to the list, and writeto() writes the whole list to a new file, making sure the primary header carries the EXTEND keyword whenever extensions are present. Image HDUs also support sections, so a rectangular piece of a large image can be read without loading the entire data array; section indices are normalized like ordinary Python slices, but stepped slices are rejected and the requested subsection must be contiguous in the file.
the arguments are flexible:", "= '' elif self.__dict__.has_key('comment') or self.__dict__.has_key('_cardimage'): if self.comment in [None, '']: commentStr =", "minimal header _list = CardList([ Card('XTENSION', '', ''), Card('BITPIX', 8, 'array data type'),", "def __delitem__(self, key): \"\"\"Delete card(s) with the name 'key'.\"\"\" # delete ALL cards", "hdu.data = FITS_rec(rec.array(' '*_itemsize*nrows, formats=_formats[:-1], names=tmp.names, shape=nrows)) else: hdu.data = FITS_rec(rec.array(None, formats=tmp._recformats, names=tmp.names,", "(FITS) files. This file format was endorsed by the International Astronomical Union in", "[0]*naxis for i in range(naxis): mo = re_naxisn.search(block, pos) pos = mo.end(0) dims[int(mo.group(1))-1]", "array into an UInt8 array. input: input Boolean array of shape (s, nx)", "to TSCAL keyword bzero: bzero value, corresponding to TZERO keyword disp: display format,", "the provided header will be added as the first extension. If the file", "__iter__(self): return [self[i] for i in range(len(self))].__iter__() def __getitem__(self, key): \"\"\"Get an HDU", "is not an extension HDU.\" % `i` _text = self.run_option(option, err_text=err_text, fixable=0) _err.append(_text)", "ext=('sci',1), extname='err', extver=2) @return: an array, record array (i.e. table), or groups data", "The option will be overwritten by any user specified bscale/bzero values. bscale/bzero: user", "except: raise ValueError, \"Inconsistent input data array: %s\" % array array._dtype = recfmt._dtype", "# preferred order. _commonNames = ['name', 'format', 'unit', 'null', 'bscale', 'bzero', 'disp', 'start',", "It will pad the string if it is not the length of a", "reverse the order of NAXIS.\"\"\" naxis = self.header['NAXIS'] axes = naxis*[0] for j", "it # may get modified. the data is still a \"view\" (for now)", "raise TypeError, \"table data has incorrect type\" # set extension name if not", "= cards[0].value.rstrip() if xtension == 'TABLE': self._hdutype = TableHDU elif xtension == 'IMAGE':", "bitpix = self.header['BITPIX'] gcount = self.header.get('GCOUNT', 1) pcount = self.header.get('PCOUNT', 0) size =", "chararray.array(value, itemsize=1) else: value = num.array(value, type=self._dtype) objects.ObjectArray.__setitem__(self, key, value) self._max = max(self._max,", "communicate back to the _keylist. self._checkKey(self.key) # verify the value, it may be", "\"\"\"Make sure if the primary header needs the keyword EXTEND or if it", "Read the first header block. block = self.__file.read(_blockLen) if block == '': raise", "the size (in bytes) of the data portion of the HDU. :Parameters: None", "is not None: if not isinstance(header, Header): raise ValueError, \"header must be a", "twice, first time print out all top level messages for item in self:", "of the data bzero: BZERO of the data parbscales: list of bscales for", "_card = Card('').fromstring(blocks[i:i+Card.length]) _key = _card.key if _key == 'END': break else: _cardList.append(_card)", "and val <= 999\", 0, option, _err) tfields = self.header['TFIELDS'] for i in", "structure of the HDU.\"\"\" end_RE = re.compile('END'+' '*77) _hdrLoc = self.__file.tell() # Read", "set extension name if not name and self.header.has_key('EXTNAME'): name = self.header['EXTNAME'] self.name =", "class _Verify: \"\"\"Shared methods for verification.\"\"\" def run_option(self, option=\"warn\", err_text=\"\", fix_text=\"Fixed.\", fix =", "name cannot be reset.' elif name == 'value': self._setvalue(val) elif name == 'comment':", "the proper value. 
\"\"\" hdr = self[0].header if hdr.has_key('extend'): if (hdr['extend'] == False):", "self.__file.tell() # beginning of the data area # data area size, including padding", "value): \"\"\"Set a header keyword value.\"\"\" self.ascard[key].value = value self._mod = 1 def", "'&' / \" + commfmt % i output = output + '%-80s' %", "and val == 0\", 0, option, _err) return _err class GroupsHDU(PrimaryHDU): \"\"\"FITS Random", "try not to use CONTINUE if the string value can fit in one", "OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY", "-32:'Float32', -64:'Float64'} ImgCode = {'UInt8':8, 'Int16':16, 'Int32':32, 'Int64':64, 'Float32':-32, 'Float64':-64} def __init__(self, data=None,", "FITS_rec tmp = hdu.columns = input._coldefs else: # input is a list of", "HDU.\"\"\" end_RE = re.compile('END'+' '*77) _hdrLoc = self.__file.tell() # Read the first header", "num.where(array==0, ord('F'), ord('T'), _out) array = _out # make a copy if scaled,", "swap unswapped # deal with var length table if isinstance(coldata, _VLF): for i", "self._convert = [None]*self._nfields self.names = self._names def copy(self): r = rec.RecArray.copy(self) r.__class__ =", "' / ' + self.comment else: commentStr = '' # equal sign string", "isinstance(_start, (int, long)): _start = _normalize(_start, naxis) else: raise IndexError, 'Illegal slice %s,", "A module for reading and writing FITS files and manipulating their contents. A", "IndexError, 'Subsection data must be contiguous.' for j in range(i+1,naxis): _naxis = self.hdu.header['NAXIS'+`naxis-j`]", "'0': dim = '' self.header.update('EXTEND', True, after='NAXIS'+dim) class ImageHDU(_ExtensionHDU, _ImageBaseHDU): \"\"\"FITS image extension", "shape (s, nx) output: output Uint8 array of shape (s, nbytes) nx: number", "n_ext2 == 0: ext = _Zero() elif 'ext' in keys: if n_ext2 ==", "newkey.strip().upper() if newkey == 'CONTINUE': raise ValueError, 'Can not rename to CONTINUE' if", "not case sensitive By combination of EXTNAME and EXTVER, as separate arguments or", "the ending \"&\" if _val[-1] == '&': _val = _val[:-1] longstring = longstring", "\"Illegal format `%s`.\" % format self.format = format # does not include Object", "self._mod = 1 else: raise SyntaxError, \"%s is not a Card\" % str(card)", "self.header['BITPIX'] # delete the keywords BSCALE and BZERO del self.header['BSCALE'] del self.header['BZERO'] def", "dirName = os.path.dirname(input) if dirName != '': dirName += '/' _name = dirName", "if _trail < 0: raise ValueError, \"column `%s` ending point overlaps to the", "self.name = name self.mode = mode self.memmap = memmap if memmap and mode", "def _extractValueComment(self, name): \"\"\"Exatrct the keyword value or comment from the card image.\"\"\"", "keyword comment (to be used for updating), default=None. before: name of the keyword,", "\"minmax\", use the minimum and maximum of the data to scale. The option", "the file is written. input: input object array desp_output: output \"descriptor\" array of", "def run_option(self, option=\"warn\", err_text=\"\", fix_text=\"Fixed.\", fix = \"pass\", fixable=1): \"\"\"Execute the verification with", "comm = '' else: comm = self.comment commfmt = \"%-s\" if not comm", "== None: _val = eval(_rdigt) else: _val = eval(real.group('sign')+_rdigt) imag = Card._number_NFSC_RE.match(valu.group('imag')) _idigt", "is not None: # Make a \"copy\" (not just a view) of the", "\"\"\"Rename a card's keyword in the header. 
oldkey: old keyword, can be a", "[] _pcount = self.header['PCOUNT'] _format = GroupsHDU._dict[self.header['BITPIX']] for i in range(self.header['PCOUNT']): _bscale =", "= 1 def count_blanks(self): \"\"\"Find out how many blank cards are *directly* before", "``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,", "= _ErrList([], unit='Card') isValid = \"val in [8, 16, 32, 64, -32, -64]\"", "else: _str = self._coldefs.formats[indx][0] == 'A' _bool = 0 # there is no", "verify method.\"\"\" _err = _ExtensionHDU._verify(self, option=option) self.req_cards('NAXIS', None, 'val == 2', 2, option,", "its value is too long.\" % self.key if len(output) <= Card.length: output =", "created and it will be placed before or after the specified location. If", "not an HDU.\" % hdu try: super(HDUList, self).__setitem__(_key, hdu) except IndexError: raise IndexError,", "del self['BITPIX'] _naxis = self['NAXIS'] if issubclass(self._hdutype, _TableBaseHDU): _tfields = self['TFIELDS'] del self['NAXIS']", "return an HDUList object. name: Name of the FITS file to be opened.", "(repeat, dtype, option) def _convert_format(input_format, reverse=0): \"\"\"Convert FITS format spec to record format", "of the table header: header to be used for the HDU name: the", "fitsopen = open # Convenience functions class _Zero(int): def __init__(self): self = 0", "the pointer is at the end after the open in # Linux, but", "the column right after the last field if self._tbtype == 'TableHDU': last_end =", "class ImageHDU(_ExtensionHDU, _ImageBaseHDU): \"\"\"FITS image extension HDU class.\"\"\" def __init__(self, data=None, header=None, name=None):", "Core code for ascardimage. \"\"\" # keyword string if self.__dict__.has_key('key') or self.__dict__.has_key('_cardimage'): if", "_where+nc >= len(_keyList): break if _cardList[_where+nc]._cardimage[:10].upper() != 'CONTINUE ': break # combine contiguous", "naxis) else: raise IndexError, 'Illegal slice %s, start must be integer.' % input", "break else: _cardList.append(_card) _keyList.append(_key) # Deal with CONTINUE cards # if a long", "= name[:-1] if cname in _commonNames: attr = [''] * len(self) for i", "sign in the card image before column 10 and return its location. It", "in ['update', 'append']: raise \"Writing to zipped fits files is not supported\" zfile", "'Illegal slice %s, start must be integer.' % input _stop = input.stop if", "raise ValueError, self._err_text + '\\n%s' % self._cardimage # verify the comment (string), it", "array.copy() if bzero not in ['', None, 0]: array += -bzero if bscale", "self._coldefs._tbtype == 'TableHDU': dummy = self._convert[indx] else: continue # ASCII table, convert numbers", "isinstance(self, GroupsHDU): dims = self.size()*8/abs(_bitpix) else: dims = self._dimShape() code = _ImageBaseHDU.NumCode[self.header['BITPIX']] if", "*(?P<imag>' + _numr_NFSC + ') *\\))' r')? 
*)' r'(?P<comm_field>' r'(?P<sepr>/ *)' r'(?P<comm>.*)' r')?$')", "# self.header.update('XTENSION','IMAGE','Image extension', after='SIMPLE') del self.header['SIMPLE'] if not self.header.has_key('PCOUNT'): dim = self.header['NAXIS'] if", "self.__dict__[attr] = self.header.get('THEAP', self.header['NAXIS1']*self.header['NAXIS2']) elif attr == '_pcount': self.__dict__[attr] = self.header.get('PCOUNT', 0) try:", "nbytes) output: output Boolean array of shape (s, nx) nx: number of bits", "raise an exception, e.g., >>> getdata('in.fits', ext=('sci',1), extname='err', extver=2) @return: an array, record", "array dimensions'), ]) if isinstance(self, GroupsHDU): _list.append(Card('GROUPS', True, 'has groups')) if isinstance(self, (_ExtensionHDU,", "in Card._commentaryKeys: if not isinstance(self.value, str): raise ValueError, 'Value in a commentary card", "FITS_rec was created in a LittleEndian machine hdu.data._byteorder = 'big' hdu.data._parent._byteorder = 'big'", "When useblanks == 0, the card will be appended at the end, even", "FITS standard # appears vague on this issue and only states that a", "that the following conditions are met: 1. Redistributions of source code must retain", "else: if isinstance(self._parent.field(indx)._type, num.IntegralType): dummy = num.around(dummy) self._parent.field(indx)[:] = dummy del dummy #", "output verification option, default='exception'. clobber: Overwrite the output file if exists, default =", "scale the data: if \"old\", use the original BSCALE and BZERO values when", "of the HDU.\"\"\" end_RE = re.compile('END'+' '*77) _hdrLoc = self.__file.tell() # Read the", "after=None): \"\"\"Add a blank card. value: Text to be added. before: [same as", "== 'BinTableHDU': _str = 'a' in self._coldefs.formats[indx] _bool = self._coldefs._recformats[indx][-2:] == _booltype else:", "FITS block size _python_mode = {'readonly':'rb', 'copyonwrite':'rb', 'update':'rb+', 'append':'ab+'} # open modes _memmap_mode", "to FITS standard. key: keyword name, default=''. value: keyword value, default=''. comment: comment,", "not in Card._commentaryKeys: break super(CardList, self).insert(i+1, card) self._keylist.insert(i+1, card.key.upper()) if useblanks: self._use_blanks(card._ncards()) self.count_blanks()", "HDU from the HDUList. The key can be an integer, a string, or", "\"\"\"Set the comment attribute.\"\"\" if isinstance(val,str): self._checkText(val) else: if val is not None:", "commentStr = '' elif self.__dict__.has_key('comment') or self.__dict__.has_key('_cardimage'): if self.comment in [None, '']: commentStr", "tmp = tmp[:loc+7] + `self._coldefs.names` + ')' return tmp # synchronize the sliced", "(len(hduList)+1) break # initialize/reset attributes to be used in \"update/append\" mode # CardList", "THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.", "corresponding to TFORM keyword unit: column unit, corresponding to TUNIT keyword null: null", "in the example in (a), field('abc') will get the first field, and field('ABC')", "if self.__file is None: _name = '(No file associated with this HDUList)' else:", "location will have a zero offset for all columns after this call. The", "else: self.__dict__['key'] = head.strip().upper() def _extractValueComment(self, name): \"\"\"Exatrct the keyword value or comment", "the same name (except blank card). 
If there is no card (or blank", "\\ bscale=None, bzero=None, disp=None, start=None, \\ dim=None, array=None): \"\"\"Construct a Column by specifying", "number only.\"\"\" super(HDUList, self).__delslice__(i, j) self._resize = 1 def _verify (self, option='warn'): _text", "header._hdutype(data=DELAYED, header=header) # pass these attributes hdu._file = self._file hdu._hdrLoc = self._hdrLoc hdu._datLoc", "name): \"\"\"Exatrct the keyword value or comment from the card image.\"\"\" # for", "pass # TFORM regular expression _tformat_re = re.compile(r'(?P<repeat>^[0-9]*)(?P<dtype>[A-Za-z])(?P<option>[!-~]*)') # table definition keyword regular", "HDU.\" % hdus.index(hdu) list.__init__(self, hdus) def __iter__(self): return [self[i] for i in range(len(self))].__iter__()", "isinstance(key, (int, long)): indx = int(key) elif isinstance(key, str): # try to find", "the group parameter values.\"\"\" if isinstance(parName, (int, long)): result = self.field(parName) else: indx", "= 0 hdu.header.ascard._mod = 0 hdu._new = 0 hdu._file = ffo.getfile() # if", "than FITS, the close() call can also close the mm object. try: self.mmobject.close()", "the header associated with 'data', if None, an appropriate header will be created", "the X format column into a Boolean array. input: input Uint8 array of", "'copyonwrite':'rb', 'update':'rb+', 'append':'ab+'} # open modes _memmap_mode = {'readonly':'r', 'copyonwrite':'c', 'update':'r+'} TRUE =", "range(_nfields): dict[col]['array'] = Delayed(input, col) # now build the columns tmp = [Column(**attrs)", "of the above _rec2fits = {} for key in _fits2rec.keys(): _rec2fits[_fits2rec[key]]=key class _FormatX(str):", "tuple of image dimensions, reverse the order of NAXIS.\"\"\" naxis = self.header['NAXIS'] axes", "def _verify(self, option='warn'): \"\"\"TableHDU verify method.\"\"\" _err = _TableBaseHDU._verify(self, option=option) self.req_cards('PCOUNT', None, 'val", "64, -32, or -64) pardata: parameter data, as a list of (numeric) arrays.", "of the data area return loc, _size+_padLength(_size) def close(self): \"\"\"Close the 'physical' FITS", "associated with this HDUList)' else: _name = self.__file.name results = \"Filename: %s\\nNo. Name", "(AURA) Redistribution and use in source and binary forms, with or without modification,", "that the starting column of # a field may not be the column", "in ['>=', '==']: insert_pos = eval(_parse[1]) # if the card does not exist", "image is not FITS standard (unparsable value string).' raise ValueError, self._err_text + '\\n%s'", "a match if the comment separator is found, though the # comment maybe", "name. If given an index, always returns 0. \"\"\" try: key = key.strip().upper()", "provided header is not a Primary header, a default Primary HDU will be", "EXTVER, as separate arguments or as a tuple: >>> getdata('in.fits', 'sci', 2) #", "the first card.' if not isinstance(_card.value, str): raise ValueError, 'Cards with CONTINUE must", "_repeat = `repeat` output_format = _repeat+_fits2rec[dtype] elif dtype == 'X': nbytes = ((repeat-1)", "data=None, header=None, name=None): \"\"\"data: data of the table header: header to be used", "self.data._coldefs.bzeros[npars]) for i in range(npars): self.header.update('PTYPE'+`i+1`, self.data.parnames[i]) (_scale, _zero) = self.data._get_scale_factors(i)[3:5] if _scale:", "may also contain the binary data(*). (*) In future it may be possible", "disclaimer. 2. 
Redistributions in binary form must reproduce the above copyright notice, this", "i in range(npars): self.header.update('PTYPE'+`i+1`, self.data.parnames[i]) (_scale, _zero) = self.data._get_scale_factors(i)[3:5] if _scale: self.header.update('PSCAL'+`i+1`, self.data._coldefs.bscales[i])", "=\\s*(\\d+)') re_naxisn = re.compile(r'NAXIS(\\d) =\\s*(\\d+)') re_gcount = re.compile(r'GCOUNT =\\s*(-?\\d+)') re_pcount = re.compile(r'PCOUNT =\\s*(-?\\d+)')", "= _Hierarch # for card image longer than 80, assume it contains CONTINUE", "# further conversion for both ASCII and binary tables if _number and (_scale", "also be keyword arguments. For example: >>> update(file, dat, hdr, 'sci') # update", "one 80-char \"physical\" cards, the cards after the first one must start with", "data in this HDU.' if _data is None: raise IndexError, 'No data in", "of the arguments are for extension specification. They are flexible and are best", "dims = self.data.getshape() self.data.setshape(self.data.nelements()) min = num.minimum.reduce(self.data) max = num.maximum.reduce(self.data) self.data.setshape(dims) if `_type`", "location, and value of a required Card.\"\"\" \"\"\"If pos = None, it can", "= _fmt % dummy[i] if len(x) > (_loc[indx+1]-_loc[indx]): raise ValueError, \"number `%s` does", "after, useblanks=1): \"\"\"Insert a Card to the location specified by before or after.", "of (string, integer). \"\"\" if isinstance(key, (int, slice)): return key elif isinstance(key, tuple):", "'val == 0', 0, option, _err) tfields = self.header['TFIELDS'] for i in range(tfields):", "after the first WholeLine must be WholeLine or # OnePointAxis if isinstance(indx, (_WholeLine,", "0 xoffset = 0 for i in range(nmax): try: loc = num.nonzero(blank_loc >=", "if the value is different from the old one if str(self[_key]) != str(value):", "result else: if option in ['fix', 'silentfix']: result = self._check('parse') self._fixValue(result) if option", "_bytes = hdu.data._itemsize*hdu.data.nelements() _bytes = _bytes + _padLength(_bytes) if _bytes != hdu._datSpan: self._resize", "def _padLength(stringLen): \"\"\"Bytes needed to pad the input stringLen to the next FITS", "'%20s' % _tmp else: valStr = '%20s' % self._valuestring elif isinstance(self.value, Undefined): valStr", "not an HDU.\" % hdus.index(hdu) list.__init__(self, hdus) def __iter__(self): return [self[i] for i", "before=None, after=None): \"\"\"Update one header card.\"\"\" \"\"\" If the keyword already exists, it's", "tbtype=self.columns._tbtype) def _verify(self, option='warn'): \"\"\"_TableBaseHDU verify method.\"\"\" _err = _ExtensionHDU._verify(self, option=option) self.req_cards('NAXIS', None,", "offset): self.npts = npts self.offset = offset class _WholeLine(_KeyType): pass class _SinglePoint(_KeyType): pass", "type\" # set extension name if not name and self.header.has_key('EXTNAME'): name = self.header['EXTNAME']", "0 # go through the list twice, first time print out all top", "if os.path.exists(name): if clobber: print \"Overwrite existing file '%s'.\" % name os.remove(name) else:", "= self[i]._verify(option) if _result: _err.append(_result) return _err def append(self, hdu): \"\"\"Append a new", "file. Default is False. 
\"\"\" if header is None: if 'header' in keys:", "bottom=1) else: try: _last = self.ascard.index_of(key, backward=1) self.ascard.insert(_last+1, new_card) except: self.ascard.append(new_card, bottom=1) self._mod", "oldkey in Card._commentaryKeys: if not (newkey in Card._commentaryKeys and oldkey in Card._commentaryKeys): raise", "commstr return output def _words_group(self, input, strlen): \"\"\"Split a long string into parts", "Card._commentaryKeys: break super(CardList, self).insert(i+1, card) self._keylist.insert(i+1, card.key.upper()) if useblanks: self._use_blanks(card._ncards()) self.count_blanks() self._mod =", "*)' r'(?P<comm_field>' r'(?P<sepr>/ *)' r'(?P<comm>.*)' r')?$') # keys of commentary cards _commentaryKeys =", "setattr(self, cname+'s', ['']*self._nfields) setattr(self, '_arrays', [None]*self._nfields) def add_col(self, column): \"\"\"Append one Column to", "Strip cards like SIMPLE, BITPIX, etc. so the rest of the header can", "self._setcomment(comment) # for commentary cards, value can only be strings and there #", "or index of the Card before which the new card will be placed.", "recfmt._dtype else: raise ValueError, \"Data is inconsistent with the format `%s`.\" % format", "nullval: dummy[i] = float(self._parent.field(indx)[i].replace('D', 'E')) else: dummy = self._parent.field(indx) # further conversion for", "contain both group parameter info and the data. The rest of the arguments", "None, 1] _zero = bzero not in ['', None, 0] # ensure bscale/bzero", "= re_simple.search(block[:80]) mo = re_bitpix.search(block) if mo is not None: bitpix = int(mo.group(1))", "fix=fix, fixable=fixable)) return _err class _TempHDU(_ValidHDU): \"\"\"Temporary HDU, used when the file is", "there is something if _dummy.strip(): if self.unit: result += _INDENT*tab+\"%s %s:\\n\" % (self.unit,", "(optionally) comment. Any specifed arguments, except defaults, must be compliant to FITS standard.", "in range(naxis): mo = re_naxisn.search(block, pos) pos = mo.end(0) dims[int(mo.group(1))-1] = int(mo.group(2)) datasize", "= 0 return (_str, _bool, _number, _scale, _zero, bscale, bzero) def field(self, key):", "not eval(test_pos): err_text = \"'%s' card at the wrong place (card %d).\" %", "header is not a Primary header, a default Primary HDU will be inserted", "check TFIELDS and NAXIS2 hdu.header['TFIELDS'] = hdu.data._nfields hdu.header['NAXIS2'] = hdu.data.shape[0] # calculate PCOUNT,", "break # check in the case there is extra space after the last", "_format = (), '' else: # the shape will be in the order", "ColDefs(_cols) self.parnames = [i.lower() for i in parnames] tmp = FITS_rec(rec.array(None, formats=_formats, shape=gcount,", "after='PCOUNT') npars = len(self.data.parnames) (_scale, _zero) = self.data._get_scale_factors(npars)[3:5] if _scale: self.header.update('BSCALE', self.data._coldefs.bscales[npars]) if", "calculate the starting point and width of each field for ASCII table if", "'_unique': _unique = {} for i in range(len(self.parnames)): _name = self.parnames[i] if _name", "IOError exception is raised and the data is not written. Once sufficient data", "the table header: header to be used for the HDU name: the EXTNAME", "None: _stop = naxis elif isinstance(_stop, (int, long)): _stop = _normalize(_stop, naxis) else:", "# result is not allowed to expand (as C/Python does). 
for i in", "ASCII table cell with value = TNULL # this can be reset by", "_dims = \"%dR x %dC\" % (_nrows, _ncols) return \"%-10s %-11s %5d %-12s", "raise \"Element %d in the ColDefs input is not a Column.\" % input.index(col)", "If header=None, a minimal Header will be provided. \"\"\" _ImageBaseHDU.__init__(self, data=data, header=header) self.name", "self._ffo.close() class ErrorURLopener(urllib.FancyURLopener): \"\"\"A class to use with urlretrieve to allow IOError exceptions", "Card(key, value) if before != None or after != None: self.ascard._pos_insert(new_card, before=before, after=after)", "string. force: if new key name already exist, force to have duplicate name.", "name.\"\"\" _key = self.index_of(key) return super(CardList, self).__getitem__(_key) def __getslice__(self, start, end): _cards =", "= self._ffile._mm[self._datLoc:self._datLoc+self._datSpan] raw_data = num.array(_mmap, type=code, shape=dims) else: raw_data = num.fromfile(self._file, type=code, shape=dims)", "\"%s is not an HDU.\" % hdu try: super(HDUList, self).__setitem__(_key, hdu) except IndexError:", "= (_key.strip()).upper() nfound = 0 for j in range(len(self)): _name = self[j].name if", "keyboardInterruptSent = True # Install new handler signal.signal(signal.SIGINT,New_SIGINT) if self.__file.mode not in ('append',", "writeto(filename, data, header) else: hdu=_makehdu(data, header) if isinstance(hdu, PrimaryHDU): hdu = ImageHDU(data, header)", "dict] self.data = tmp else: raise TypeError, \"input to ColDefs must be a", "if keylist is None: self._keylist = [k.upper() for k in self.keys()] else: self._keylist", "the data, so it's defined (in the case of reading from a #", "is not None: _str = result.group('comm') if _str is not None: self._checkText(_str) def", "== 'MainThread') if singleThread: # Define new signal interput handler keyboardInterruptSent = False", "isinstance(value, chararray.CharArray) and value.itemsize() == 1: pass elif self._dtype == 'a': value =", "self.__setstate__(input.__getstate__()) def __getattr__(self, attr): if attr == 'data': self.__dict__[attr] = self.field('data') elif attr", "data to a FITS file instead of requiring data to all be written", "= output def _checkText(self, val): \"\"\"Verify val to be printable ASCII text.\"\"\" if", "_Card_with_continue(Card): \"\"\"Cards having more than one 80-char \"physical\" cards, the cards after the", "'f8' else: raise ValueError, \"Illegal format %s\" % fmt else: if dtype ==", "fieldName): \"\"\"Get the group parameter value.\"\"\" return self.array.par(fieldName)[self.row] def setpar(self, fieldName, value): \"\"\"Set", "the correct location before calling this method. \"\"\" if isinstance(hdu, _ImageBaseHDU): hdu.update_header() return", "%-11s\" % (self.name, \"CorruptedHDU\") def verify(self): pass class _ValidHDU(_AllHDU, _Verify): \"\"\"Base class for", "('BINTABLE', 'A3DTABLE'): self._hdutype = BinTableHDU else: self._hdutype = _ExtensionHDU else: self._hdutype = _ValidHDU", "and width. try: (dtype, width) = _re.match(input_format.strip()).groups() dtype = ascii2rec[dtype] if width ==", "= list(self.data) + b else: tmp = b + list(self.data) return ColDefs(tmp) def", "across blocks. 
\"\"\" if len(block) != _blockLen: raise IOError, 'Block length is not", "length field object.\"\"\" def __init__(self, input): \"\"\" input: a sequence of variable-sized elements.", "# delete extra NAXISi's for j in range(len(axes)+1, old_naxis+1): try: del self.header.ascard['NAXIS'+`j`] except", "self.data.field(i) VLdata._max = max(map(len, VLdata)) val = 'P' + _convert_format(val._dtype, reverse=1) + '(%d)'", "if self._resize: oldName = self.__file.name oldMemmap = self.__file.memmap _name = _tmpName(oldName) _hduList =", "will return a (data, header) tuple. \"\"\" if 'header' in extkeys: _gethdr =", "with \"update\" mode os.rename(_name, oldName) ffo = _File(oldName, mode=\"update\", memmap=oldMemmap) self.__file = ffo", "In case the FITS_rec was created in a LittleEndian machine hdu.data._byteorder = 'big'", "clobber: print \"Overwrite existing file '%s'.\" % name os.remove(name) else: raise IOError, \"File", "super(HDUList, self).__delitem__(key) self._resize = 1 def __delslice__(self, i, j): \"\"\"Delete a slice of", "the new item has consistent data type to avoid misalignment. \"\"\" if isinstance(value,", "a card image (80 columns). If the card image is longer than 80,", "= blank_loc[loc-1] + 1 if loc == 0: offset = -1 except: offset", "the stored data hdu.data._parent.field(i)[n:] = -bzero/bscale else: hdu.data._parent.field(i)[n:] = '' hdu.update() return hdu", "(ord('F'),ord('T'))) class GroupData(FITS_rec): \"\"\"Random groups data object. Allows structured access to FITS Group", "naxis): size = size * self.header['NAXIS'+`j+1`] bitpix = self.header['BITPIX'] gcount = self.header.get('GCOUNT', 1)", "arguments are used only for the first case. bitpix: data type as expressed", "option, _err) tfields = self.header['TFIELDS'] for i in range(tfields): self.req_cards('TBCOL'+`i+1`, None, _isInt, None,", "written\" curDataSize = self._ffo.getfile().tell() - self._datLoc if curDataSize + data.itemsize()*data._size > self._size: raise", "materials provided with the distribution. 3. The name of AURA and its representatives", "than one group parameter have the same name else: result = self.field(indx[0]).astype('f8') for", "if a long string has CONTINUE cards, the \"Card\" is considered # to", "If \"minmax\", use the minimum and maximum of the data to scale. 
The", "return self.writeComplete def size(self): \"\"\" Return the size (in bytes) of the data", "0, copy the data from input, undefined cells will still be filled with", "in ['TFORM', 'TSCAL', 'TZERO', 'TNULL', 'TTYPE', 'TUNIT']: for i in range(_tfields): del self[name+`i+1`]", "(indx.step == 1): return _WholeLine(naxis, 0) else: if indx.step == 1: return _LineSlice(indx.stop-indx.start,", "one Column to the column definition.\"\"\" return self+column def del_col(self, col_name): \"\"\"Delete (the", "# flush, to make sure the content is written self.__file.flush() # return both", "open modes _memmap_mode = {'readonly':'r', 'copyonwrite':'c', 'update':'r+'} TRUE = True # deprecated FALSE", "self.header.ascard[0].comment = 'ASCII table extension' ''' def format(self): strfmt, strlen = '', 0", "def getdata(filename, *ext, **extkeys): \"\"\"Get the data from an extension of a FITS", "'%-80s' % commstr return output def _words_group(self, input, strlen): \"\"\"Split a long string", "ValueError, \"header must be a Header object\" if data is DELAYED: # this", "HDUList(file=ffo) # read all HDU's while 1: try: hduList.append(ffo._readHDU()) except EOFError: break #", "The valu group will return a match if a FITS string, boolean, #", "touched yet, use header info. else: _shape = () _nrows = self.header['NAXIS2'] _ncols", "= 1 hdu._file = self.__file hdu._hdrLoc = _hdrLoc # beginning of the header", "list of cards into a string.\"\"\" block = '' for card in self:", "val = val.strip() if len(val) <= 8: val = val.upper() if val ==", "FITS_rec(rec.RecArray): \"\"\"FITS record array class. FITS record array is the data part of", "0: # scale integers to Float32 self.data = num.array(raw_data, type=num.Float32) else: # floating", "numr.group('sign')+_valStr elif input.group('cplx') != None: real = Card._number_NFSC_RE.match(input.group('real')) _realStr = real.group('digt').translate(_fix_table, ' ')", "word is cut into two pieces. But if there is one single word", "= 7 return self._cardimage[eqLoc+1:] def _check(self, option='ignore'): \"\"\"Verify the card image with the", "the end. \"\"\" if isinstance(key, (int, long)): return key elif isinstance(key, str): _key", "each card for _card in self.header.ascard: _err.append(_card._verify(option)) return _err def req_cards(self, keywd, pos,", "raise ValueError(\"NAXIS not found where expected\") if naxis == 0: datasize = 0", "_card.key if _key == 'END': break else: _cardList.append(_card) _keyList.append(_key) # Deal with CONTINUE", "CONTINUE cards with its parent card if nc > 0: _longstring = _cardList[_where-1]._cardimage", "_update('naxis2', self.data._shape[0], after='naxis1') _update('tfields', len(_cols), after='gcount') # Wipe out the old table definition", "name will not be allowed) to insert. The new card will be inserted", "isinstance(pos, str): _parse = pos.split() if _parse[0] in ['>=', '==']: insert_pos = eval(_parse[1])", "the specified option. \"\"\" self.__dict__['_err_text'] = '' self.__dict__['_fix_text'] = '' self.__dict__['_fixable'] = 1", "will cause an exception since there is no unique mapping. If there is", "field. 
\"\"\" if self._coldefs._tbtype == 'BinTableHDU': _str = 'a' in self._coldefs.formats[indx] _bool =", "in range(_ncols): _format += self.header['TFORM'+`j+1`] + ', ' _format = _format[:-2] + ']'", "return _err def append(self, hdu): \"\"\"Append a new HDU to the HDUList.\"\"\" if", "option is integer else: _repeat = '' if repeat != 1: _repeat =", "(hdu._hdrLoc, hdu._datLoc, hdu._datSpan) = _hduList.__file.writeHDU(hdu) _hduList.__file.close() self.__file.close() os.remove(self.__file.name) if (verbose): print \"delete the", "This argument is optional. @keyword clobber: (optional) if True and if filename already", "update the 3rd extension >>> update(file, dat, 'sci', 2) # update the 2nd", "ascii_fmt[self.data[i].format[0]] elif isinstance(input, _TableBaseHDU): hdr = input.header _nfields = hdr['TFIELDS'] self._width = hdr['NAXIS1']", "class to use with urlretrieve to allow IOError exceptions to be raised when", "namelist = zfile.namelist() if len(namelist) != 1: raise \"Zip files with multiple members", "option, default=silentfix. \"\"\" # Only if the card image already exist (to avoid", "FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE", "than 80, assume it contains CONTINUE card(s). \"\"\" self.__dict__['_cardimage'] = _pad(input) if self._cardimage[:8].upper()", "= key.strip().upper() if key[:8] == 'HIERARCH': key = key[8:].strip() _index = self.ascard._keylist.index(key) return", "conserve space for HIERARCH cards if isinstance(self, _Hierarch): valStr = valStr.strip() # comment", "= tmp._arrays[i][:n] if n < nrows: if tbtype == 'BinTableHDU': if isinstance(hdu.data._parent.field(i), num.NumArray):", "'M':'c16', 'K':'i8'} # the reverse dictionary of the above _rec2fits = {} for", "= data._nfields self.data = data self.columns = data._coldefs self.update() elif data is None:", "slice(_start, _stop, _step) class _KeyType: def __init__(self, npts, offset): self.npts = npts self.offset", "heap of variable length array columns # this has to be done after", "header associated with the data. If the 3rd argument is not a header,", "= self.run_option(option, err_text=err_text, fixable=0) _err.append(_text) else: _result = self[i]._verify(option) if _result: _err.append(_result) return", "= 0 def _getext(filename, mode, *ext1, **ext2): \"\"\"Open the input file, return the", "mode='copyonwrite', memmap=0): if mode not in _python_mode.keys(): raise \"Mode '%s' not recognized\" %", "self._coldefs._tbtype == 'BinTableHDU': _str = 'a' in self._coldefs.formats[indx] _bool = self._coldefs._recformats[indx][-2:] == _booltype", "self.header. This method should only be used right before writing to the output", "val, unit=\"Element\"): list.__init__(self, val) self.unit = unit def __str__(self, tab=0): \"\"\"Print out nested", "(i.e. table), or groups data object depending on the type of the extension", "in range(tfields): self.req_cards('TBCOL'+`i+1`, None, _isInt, None, option, _err) return _err class BinTableHDU(_TableBaseHDU): \"\"\"Binary", "output = '' # do the value string valfmt = \"'%-s&'\" val =", "of the file to which the header and data will be streamed. 
header", "') if imag.group('sign') == None: _val += eval(_idigt)*1j else: _val += eval(imag.group('sign') +", "def __init__(self, name, header): \"\"\" Construct a StreamingHDU object given a file name", "for fmt in self.formats] elif self._tbtype == 'TableHDU': self._Formats = self.formats if len(self)", "keyword is there if there is extension if len(self) > 1: self.update_extend() hduList", "two single quotes, # whereas it should not end with an even number", "comment, default=''. \"\"\" if key != '' or value != '' or comment", "into CONTINUE cards. This is a primitive implementation, it will put the value", "----- Only the amount of data specified in the header provided to the", "= '(No file associated with this HDUList)' else: _name = self.__file.name results =", "range(tfields): self.req_cards('TBCOL'+`i+1`, None, _isInt, None, option, _err) return _err class BinTableHDU(_TableBaseHDU): \"\"\"Binary table", "elif attr == '_theap': self.__dict__[attr] = 0 try: return self.__dict__[attr] except KeyError: raise", "fixable: option = 'unfixable' if option in ['warn', 'exception']: #raise VerifyError, _text #elif", "no longer be written\" curDataSize = self._ffo.getfile().tell() - self._datLoc if curDataSize + data.itemsize()*data._size", "else: self.__file = __builtin__.open(self.name, _python_mode[mode]) # For 'ab+' mode, the pointer is at", "= [] for _card in self.ascardlist(): if _card.key == 'COMMENT': output.append(_card.value) return output", "is not None # if pos is a string, it must be of", "used for updating The rest of the arguments are flexible: the 3rd argument", "= \"HDUList's 0th element is not a primary HDU.\" fix_text = 'Fixed by", "= re_extver.search(self._raw) if mo: extver = int(mo.group(1)) else: extver = 1 return name,", "there if there is extension if len(self) > 1: self.update_extend() hduList = open(name,", "column, e.g. ttype, tform, etc. and the array. Does not support theap yet.", "header is None: if isinstance(data, num.NumArray): hdu = ImageHDU(data) elif isinstance(data, FITS_rec): hdu", "else: size = len(tmp._arrays[i]) n = min(size, nrows) if fill: n = 0", "**extkeys): \"\"\"Get the data from an extension of a FITS file (and optionally", "for __add__ def __add__(self, other, option='left'): if isinstance(other, Column): b = [other] elif", "data to scale. The option will be overwritten by any user specified bscale/bzero", "# drop the ending \"&\" if _val[-1] == '&': _val = _val[:-1] longstring", "else: if tbtype == 'TableHDU': # string no need to convert, if isinstance(tmp._arrays[i],", "self._convert[indx] = dummy return self._convert[indx] if _str: return self._parent.field(indx) # ASCII table, convert", "ValueError: print 'Warning: Required keywords missing when trying to read HDU #%d.\\n There", "= hdu.data output.tofile(self.__file) _size = output.nelements() * output._itemsize # write out the heap", "= ext[1:] elif not isinstance(ext[0], (int, long, str, tuple)): raise KeyError, 'Input argument", "null string elif isinstance(self.value, str): if self.value == '': valStr = \"''\" else:", "= mo.end(0) else: raise ValueError(\"NAXIS not found where expected\") if naxis == 0:", "f = open(filename) f.info() f.close() UNDEFINED = Undefined() __credits__=\"\"\" Copyright (C) 2004 Association", "scaled too if recfmt == _booltype: _out = num.zeros(array.shape, type=recfmt) num.where(array==0, ord('F'), ord('T'),", "return the default value. 
key: keyword name or index default: if no keyword", "hdu.data.field(i)[j] if len(coldata) > 0: coldata.tofile(self.__file) _shift = self.__file.tell() - _where hdu.data._heapsize =", "non-string types # Boolean is also OK in this constructor _card = \"Card('%s',", "single-quote after # the comment separator resulting in an incorrect # match. r'\\'(?P<strg>([", "should occupies at least 8 columns, unless it is # a null string", "if self[i].key not in Card._commentaryKeys: break super(CardList, self).insert(i+1, card) self._keylist.insert(i+1, card.key.upper()) if useblanks:", "blank card. value: Text to be added. before: [same as in update()] after:", "= self._coldefs f = FITS_rec(r) f._convert = copy.deepcopy(self._convert) return f def _clone(self, shape):", "\"\"\"Construct a HDUList object. hdus: Input, can be a list of HDU's or", "`i` _text = self.run_option(option, err_text=err_text, fixable=0) _err.append(_text) else: _result = self[i]._verify(option) if _result:", "before and after are None, add to the last occurrence of cards of", "datasize = reduce(operator.mul, dims[groups:]) size = abs(bitpix) * gcount * (pcount + datasize)", "\"\"\"Generate a (new) card image from the attributes: key, value, and comment. Core", "is a primitive implementation, it will put the value string in one block", "new key name already exist, force to have duplicate name. \"\"\" oldkey =", "group parameter value.\"\"\" self.array[self.row:self.row+1].setpar(fieldName, value) class _TableBaseHDU(_ExtensionHDU): \"\"\"FITS table extension base HDU class.\"\"\"", "\"\" element = 0 # go through the list twice, first time print", "retain the above copyright notice, this list of conditions and the following disclaimer.", "if isinstance(_comm, str) and _comm != '': longstring = longstring + _comm.rstrip() +", "i.byteswap() i._byteorder = 'big' else: if coldata._type.bytes > 1: if coldata._byteorder != 'big':", "\"\"\"Make a copy of the Header.\"\"\" tmp = Header(self.ascard.copy()) # also copy the", "' + _floatFormat(self.value.imag) + ')' valStr = '%20s' % _tmp else: valStr =", "[self[i] for i in indx] return ColDefs(tmp) def _setup(self): \"\"\" Initialize all attributes", "and 'extver' in keys: ext = ext2['ext'], ext2['extver'] else: raise KeyError, 'Redundant/conflicting keyword", "is not allowed to expand (as C/Python does). for i in range(len(dummy)): x", "cards in front of END. \"\"\" if isinstance (card, Card): super(CardList, self).insert(pos, card)", "a list of Columns, an (table) HDU tbtype: which table HDU, 'BinTableHDU' (default)", "None or after != None: self.ascard._pos_insert(new_card, before=before, after=after) else: if key[0] == '", "cname in _commonNames: setattr(self, cname+'s', ['']*self._nfields) setattr(self, '_arrays', [None]*self._nfields) def add_col(self, column): \"\"\"Append", "break else: break hdu._raw += block _size, hdu.name = hdu._getsize(hdu._raw) # get extname", "self.mode = mode self.memmap = memmap if memmap and mode not in ['readonly',", "FITS HDU header part.\"\"\" blocks = repr(hdu.header.ascard) + _pad('END') blocks = blocks +", "1: # do the _parent too, otherwise the _parent # of a scaled", "exist. 
Use the directory of the input file and the base name of", "verbose=0): \"\"\"Force a write of the HDUList back to the file (for append", "not supported.\" % self.__file.mode return self.update_tbhdu() self.verify(option=output_verify) if self.__file.mode == 'append': for hdu", "type=code, shape=dims) raw_data._byteorder = 'big' return raw_data class _ImageBaseHDU(_ValidHDU): \"\"\"FITS image HDU base", "if len(x) > (_loc[indx+1]-_loc[indx]): raise ValueError, \"number `%s` does not fit into the", "type 2Int32 dtype: data type of the variable array \"\"\" _offset = 0", "getattr(self, attrib+'s')[indx] = new_value def change_name(self, col_name, new_name): \"\"\"Change a Column's name.\"\"\" if", "the file is corrupted.' % (len(hduList)+1) break # initialize/reset attributes to be used", "else: list = attrib.split(',') for i in range(len(list)): list[i]=list[i].strip().lower() if list[i][-1] == 's':", "Determine how to scale the data # bscale and bzero takes priority if", "-~]*$' _comment_FSC_RE = re.compile(_ASCII_text) # Checks for a valid value/comment string. It returns", "Boolean array of shape (s, nx) output: output Uint8 array of shape (s,", "+ _digits_NFSC # This regex helps delete leading zeros from numbers, otherwise #", "\"\"\" if isinstance(value, num.NumArray) and value.type() == self._dtype: pass elif isinstance(value, chararray.CharArray) and", "col in range(_nfields): dict[col]['array'] = Delayed(input, col) # now build the columns tmp", "'': self.starts[i] = last_end + 1 _end = self.starts[i] + _width - 1", "to reconstruct another kind of header. \"\"\" try: # have both SIMPLE and", "`hdu._dimShape()[:-1]` + tmp._dat_format if hdu._ffile.memmap: _mmap = hdu._ffile._mm[hdu._datLoc:hdu._datLoc+hdu._datSpan] _data = rec.RecArray(_mmap, formats=tmp._recformats, names=tmp.names,", "_dict = {'A':'s', 'I':'d', 'F':'f', 'E':'E', 'D':'E'} # calculate the starting point and", "= None, i.e. an empty HDUList. file: The opened physical file associated with", "% name os.remove(name) else: raise IOError, \"File '%s' already exist.\" % name #", "index of the Card after which the new card will be placed. default=None.", "to use CONTINUE if the string value can fit in one line. #", "_pnames self.__dict__[attr] = _coldefs elif attr == '_theap': self.__dict__[attr] = 0 try: return", "Determine the destination (numarray) data type if type is None: type = self.NumCode[self._bitpix]", "regex helps delete leading zeros from numbers, otherwise # Python might evaluate them", "bscale: BSCALE of the data bzero: BZERO of the data parbscales: list of", "return _hdr[key] def _makehdu(data, header): if header is None: if isinstance(data, num.NumArray): hdu", "be constructed from the card list. 
[Extraction residue: the remainder of this section consists of shuffled, overlapping fragments of the PyFITS source module (pyfits.py). The recoverable topics are the header machinery (Card, CardList, Header, including HIERARCH and long-string CONTINUE handling), the HDU classes (PrimaryHDU, ImageHDU, GroupsHDU, TableHDU, BinTableHDU, StreamingHDU) collected in an HDUList, the table column machinery (Column, ColDefs, FITS_rec and the TFORM/record-format conversions), the module-level convenience functions (open, getdata, getheader, getval, writeto, append, update, info), the AURA BSD-style license text, and a pointer to the NOST 100-2.0 FITS standard. The fragments are out of order and duplicated, so no continuous prose or code could be restored from them.]
Once sufficient data has been written to the stream", "make sure to consider the case that the starting column of # a", "= header.copy() hcopy._strip() _list.extend(hcopy.ascardlist()) self.header = Header(_list) self._bzero = self.header.get('BZERO', 0) self._bscale =", "self.comment is None: comm = '' else: comm = self.comment commfmt = \"%-s\"", "IOError, \"File '%s' already exist.\" % name # make sure the EXTEND keyword", "1 and val <= 999\", 1, option, _err) self.req_cards('NAXIS1', '== 3', _isInt+\" and", "data is written (above) _where = self.__file.tell() if isinstance(hdu, BinTableHDU): self.__file.write(hdu.data._gap*'\\0') for i", "the input file, return the HDUList and the extension.\"\"\" hdulist = open(filename, mode=mode)", "= \"\" element = 0 # go through the list twice, first time", "change_unit(self, col_name, new_unit): \"\"\"Change a Column's unit.\"\"\" self.change_attrib(col_name, 'unit', new_unit) def info(self, attrib='all'):", "a missing 'END' card, the Header may also contain the binary data(*). (*)", "# occurrence of _key in _list if _count == 1: indx = _list.index(_key)", "'comment']: self._extractValueComment(name) else: raise AttributeError, name return getattr(self, name) def _setkey(self, val): \"\"\"Set", "when the data was read/created. If \"minmax\", use the minimum and maximum of", ">>> getdata('in.fits', ext=2) # the second extension By name, i.e., EXTNAME value (if", "have string value.' if name == 'value': _val = re.sub(\"''\", \"'\", _card.value).rstrip() #", "table header: header to be used for the HDU name: the EXTNAME value", "unit: column unit, corresponding to TUNIT keyword null: null value, corresponding to TNULL", "= '' self.__dict__['_fixable'] = 1 if option == 'ignore': return elif option ==", "raise ValueError, 'Cards with CONTINUE must have string value.' if name == 'value':", "list. if keylist is None: self._keylist = [k.upper() for k in self.keys()] else:", "# string length of a card length = 80 # String for a", "return self.__dict__[attr] except KeyError: raise AttributeError(attr) def getfile(self): return self.__file def _readheader(self, cardList,", "self return hdu def writeHDU(self, hdu): \"\"\"Write *one* FITS HDU. Must seek to", "'name' and value: if not isinstance(value, str): raise TypeError, 'bad value type' value", "if isinstance(key, (int, long)): return x else: return ColDefs(x) def __len__(self): return len(self.data)", "will be updated. If it does not exist, a new card will be", "def _fixValue(self, input): \"\"\"Fix the card image for fixable non-standard compliance.\"\"\" _valStr =", "# remove the key/value/comment attributes, some of them may not exist for name", "as objects import numarray.memmap as Memmap from string import maketrans import copy import", "__getattr__(self, attr): \"\"\"Get the 'data' or 'columns' attribute.\"\"\" if attr == 'data': size", "exceptions import operator import __builtin__ import urllib import tempfile import gzip import zipfile", "if self.data is not None: _data = self.data.copy() else: _data = None return", "will still be filled with zeros/blanks. 
tbtype: table type to be created (BinTableHDU", "STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT", "and width of each field for ASCII table if self._coldefs._tbtype == 'TableHDU': _loc", "# string no need to convert, if isinstance(tmp._arrays[i], chararray.CharArray): hdu.data._parent.field(i)[:n] = tmp._arrays[i][:n] else:", "header.update(key,value,comment) shdu = pyfits.StreamingHDU('filename.fits',header) for each piece of data: shdu.write(data) shdu.close() \"\"\" def", "hdu._raw += block block = self.__file.read(_blockLen) if block == '': break else: break", "@type data: array, record array, or groups data object @param data: data to", "values.\"\"\" if isinstance(parName, (int, long)): result = self.field(parName) else: indx = self._unique[parName.lower()] if", "# find out how many blank cards are *directly* before the END card", "_commonNames. The default is \"all\" which will print out all attributes. It forgives", "to endorse or promote products derived from this software without specific prior written", "if self._bzero != 0: self.data += self._bzero # delete the keywords BSCALE and", "an independent # attribute of mmobject so if the HDUList object is created", "if isinstance(value, Card): setattr(self, cname, value.value) else: setattr(self, cname, value) # if the", "in self.ascardlist(): if _card.key == 'COMMENT': output.append(_card.value) return output def _add_commentary(self, key, value,", "to speed up the open. Any header will not be initialized till the", "be raised when a file specified by a URL cannot be accessed\"\"\" def", "self.__file = ffo if (verbose): print \"reopen the newly renamed file\", oldName #", "None: _val = eval(_digt) else: _val = eval(numr.group('sign')+_digt) elif valu.group('cplx') != None: #", "i in range(len(self.parnames)): _name = self.parnames[i] if _name in _unique: _unique[_name].append(i) else: _unique[_name]", "list of cards into a printable string.\"\"\" kard = self._cardimage output = ''", "_other: indx.remove(x) tmp = [self[i] for i in indx] return ColDefs(tmp) def _setup(self):", "a table parent data, just pass it hdu.data._parent.field(i)[:n] = tmp._arrays[i][:n] elif isinstance(tmp._recformats[i], _FormatP):", "= self.header.ascard.append _cols = self.columns _update('naxis1', self.data._itemsize, after='naxis') _update('naxis2', self.data._shape[0], after='naxis1') _update('tfields', len(_cols),", "pardata is None: npars = 0 else: npars = len(pardata) if parbscales is", "only do the scaling the first time and store it in _convert self._convert[indx]", "for card image longer than 80, assume it contains CONTINUE card(s). elif len(self._cardimage)", "or _zero): dummy = self._convert[indx].copy() if _zero: dummy -= bzero if _scale: dummy", "option, _err) # verify each card for _card in self.header.ascard: _err.append(_card._verify(option)) return _err", "_format = self.NumCode[self.header['BITPIX']] if isinstance(self, GroupsHDU): _gcount = ' %d Groups %d Parameters'", "exist if _index is None: err_text = \"'%s' card does not exist.\" %", "will be overwritten by any user specified bscale/bzero values. bscale/bzero: user specified BSCALE", "easier output interface if only one HDU needs to be written to a", "amount specified in the header, the stream is padded to fill a complete", "#!/usr/bin/env python # $Id: NA_pyfits.py 329 2007-07-06 13:11:54Z jtaylor2 $ \"\"\" A module", "data. 
If the 3rd argument is not a header, it (and other positional", "eqLoc = None else: if _key == 'HIERARCH': _limit = Card.length else: _limit", "name already exist, force to have duplicate name. \"\"\" oldkey = oldkey.strip().upper() newkey", "Header): raise ValueError, \"header must be a Header object\" if data is DELAYED:", "no need to run _ExtensionHDU.__init__ since it is not doing anything. _ImageBaseHDU.__init__(self, data=data,", "(int, tuple)): raise KeyError, 'Redundant/conflicting keyword argument(s): %s' % ext2 if isinstance(ext1[0], str):", "key/value/comment attributes, some of them may not exist for name in ['key', 'value',", "or bscale not in ['', None, 1]: array = array.copy() if bzero not", "kard[i*80:(i+1)*80] + '\\n' return output[:-1] def _extractValueComment(self, name): \"\"\"Exatrct the keyword value or", "zfile.close() elif os.path.splitext(self.name)[1] == '.zip': # Handle zip files if mode in ['update',", "for updating) comment: keyword comment (to be used for updating), default=None. before: name", "over `after' if both specified. They can be either a keyword name or", "= _card.key if _key == 'END': break else: _cardList.append(_card) _keyList.append(_key) # Deal with", "ColDefs(tmp) def _setup(self): \"\"\" Initialize all attributes to be a list of null", "_npts _dtype = num.getType(self._coldefs._recformats[indx]._dtype) desc[1:,1] = num.add.accumulate(desc[:-1,0])*_dtype.bytes desc[:,1][:] += self._heapsize self._heapsize += desc[:,0].sum()*_dtype.bytes", "class Undefined: \"\"\"Undefined value.\"\"\" pass class Delayed: \"\"\"Delayed file-reading data.\"\"\" def __init__(self, hdu=None,", "if len(ext) > 0: if isinstance(ext[0], Header): header = ext[0] ext = ext[1:]", "j in range(len(hdu.data.field(i))): coldata = hdu.data.field(i)[j] if len(coldata) > 0: coldata.tofile(self.__file) _shift =", "re.compile( r'(?P<valu_field> *' r'(?P<valu>' # The <strg> regex is not correct for all", "\"\"\" if self.writeComplete: raise IOError, \"The stream is closed and can no longer", "'Fixed card to be FITS standard.: %s' % self.key # verify the key,", "either a keyword name or index. \"\"\" if before != None: loc =", "\"silentfix\") and x: print 'Output verification result:' print x if _option == 'exception'", "_expValStr = self.value.replace(\"'\",\"''\") valStr = \"'%-8s'\" % _expValStr valStr = '%-20s' % valStr", "before calling this method. 
\"\"\" if isinstance(hdu, _ImageBaseHDU): hdu.update_header() return (self.writeHDUheader(hdu),) + self.writeHDUdata(hdu)", "are None, add to the last occurrence of cards of the same name", "_comment = self.ascard[_index].comment _value = self.ascard[_index].value self.ascard[_index] = Card(newkey, _value, _comment) # self.ascard[_index].__dict__['key']=newkey", "None, None self.header = header self.data = data self._xtn = ' ' def", "data _file: file associated with array (None) _datLoc: starting byte location of data", "((repeat-1) / 8) + 1 # use an array, even if it is", "(_scale, _zero) = self.data._get_scale_factors(npars)[3:5] if _scale: self.header.update('BSCALE', self.data._coldefs.bscales[npars]) if _zero: self.header.update('BZERO', self.data._coldefs.bzeros[npars]) for", "data=None, header=None, name=None): PrimaryHDU.__init__(self, data=data, header=header) self.header._hdutype = GroupsHDU self.name = name if", "_TableBaseHDU size = self.size() if size: self._file.seek(self._datLoc) data = GroupData(_get_tbdata(self)) data._coldefs = self.columns", "hdu: if not isinstance(item, _AllHDU): raise ValueError, \"%s is not an HDU.\" %", "chararray.array(x, itemsize=1) array = _VLF(map(_func, array)) except: raise ValueError, \"Inconsistent input data array:", "code, width, prec = fmt.group('code', 'width', 'prec') else: raise ValueError, valu size =", "'big' else: if coldata._type.bytes > 1: if coldata._byteorder != 'big': coldata.byteswap() coldata._byteorder =", "width) def _get_index(nameList, key): \"\"\" Get the index of the key in the", "from the old one if str(self[_key]) != str(value): super(CardList, self).__setitem__(_key, value) self._keylist[_key] =", "_commonNames: value = getattr(self, cname) if value != None: text += cname +", "data: data to write to the new file @type header: L{Header} object or", "provided data would cause the stream to overflow, an IOError exception is raised", "Boolean is also OK in this constructor _card = \"Card('%s', %s)\" % (keywd,", "if attr == 'data': # same code as in _TableBaseHDU size = self.size()", "as separate arguments or as a tuple: >>> getdata('in.fits', 'sci', 2) # EXTNAME='SCI'", "self._ffile.memmap: _mmap = self._ffile._mm[self._datLoc:self._datLoc+self._datSpan] raw_data = num.array(_mmap, type=code, shape=dims) else: raw_data = num.fromfile(self._file,", "reading and writing FITS files and manipulating their contents. A module for reading", "(_blockLen, len(blocks)) elif (blocks[:8] not in ['SIMPLE ', 'XTENSION']): raise IOError, 'Block does", "equal sign in the card image before column 10 and return its location.", "long string value/comment into CONTINUE cards. This is a primitive implementation, it will", "cards? default=1. If useblanks != 0, and if there are blank cards directly", "table.' 
% input_format return (dtype, width) def _get_index(nameList, key): \"\"\" Get the index", "self.__dict__['value'] = _tmp[:slashLoc].strip() self.__dict__['comment'] = _tmp[slashLoc+1:].strip() except: self.__dict__['value'] = _tmp.strip() elif input.group('numr') !=", "cards[0].key == 'XTENSION': xtension = cards[0].value.rstrip() if xtension == 'TABLE': self._hdutype = TableHDU", "mode # CardList needs its own _mod attribute since it has methods to", "if dirName != '': dirName += '/' _name = dirName + os.path.basename(tempfile.mktemp()) if", "raise TypeError, 'bad value type' value = value.upper() if self.header.has_key('EXTNAME'): self.header['EXTNAME'] = value", "Column's data as an array.\"\"\" indx = _get_index(self._coldefs.names, key) if (self._convert[indx] is None):", "__init__(self, input): \"\"\" input: a sequence of variable-sized elements. \"\"\" objects.ObjectArray.__init__(self, input) self._max", "a complete FITS block and no more data will be accepted. An attempt", "raise ValueError, \"Illegal format `%s`.\" % format self.format = format # does not", "reached while 1: # find the END card mo = end_RE.search(block) if mo", "# since there is no way to communicate back to the _keylist. self._checkKey(self.key)", "_stop = naxis elif isinstance(_stop, (int, long)): _stop = _normalize(_stop, naxis) else: raise", "Column): b = [other] elif isinstance(other, ColDefs): b = list(other.data) else: raise TypeError,", "tmp._recformats[i]._nx) else: # from a table parent data, just pass it hdu.data._parent.field(i)[:n] =", "header. oldkey: old keyword, can be a name or index. newkey: new keyword,", "attribute names, they must be separated by comma(s). \"\"\" if attrib.strip().lower() in ['all',", "HDUList is resized, need to write it to a tmp file, # delete", "else: return dummy return self._convert[indx] def _scale_back(self): \"\"\"Update the parent array, using the", "key: keyword name @param ext: The rest of the arguments are for extension", "data area is resized.\" break # if the HDUList is resized, need to", "tmp.spans[i] hdu.data = FITS_rec(rec.array(' '*_itemsize*nrows, formats=_formats[:-1], names=tmp.names, shape=nrows)) else: hdu.data = FITS_rec(rec.array(None, formats=tmp._recformats,", "a throw-away format tmp.__dict__=self.__dict__.copy() return tmp class ColDefs(object): \"\"\"Column definitions class. It has", ">>> update(file, dat, 'sci', 2) # update the 2nd SCI extension >>> update(file,", "= [_convert_format(fmt) for fmt in self.formats] elif self._tbtype == 'TableHDU': self._Formats = self.formats", "if input is None: _tmp = self._getValueCommentString() try: slashLoc = _tmp.index(\"/\") self.__dict__['value'] =", "- self._datLoc if curDataSize + data.itemsize()*data._size > self._size: raise IOError, \"Supplied data will", "fixable=fixable)) return _err class _TempHDU(_ValidHDU): \"\"\"Temporary HDU, used when the file is first", "sign, return the string after column 8. \"\"\" eqLoc = self._locateEq() if eqLoc", "!= None: loc = self.index_of(before) self.insert(loc, card, useblanks=useblanks) elif after != None: loc", "HDU or a list of Columns\" def __getattr__(self, name): \"\"\"Populate the attributes.\"\"\" cname", "values. \"\"\" if self.data is None: return # Determine the destination (numarray) data", "HDUList object. name: Name of the FITS file to be opened. 
mode: Open", "self.__class__ = _Hierarch return self._cardimage[_start:eqLoc] def _getValueCommentString(self): \"\"\"Locate the equal sign in the", "'X' elif isinstance(val, _FormatP): VLdata = self.data.field(i) VLdata._max = max(map(len, VLdata)) val =", "data # update the header self.update_header() self._bitpix = self.header['BITPIX'] # delete the keywords", "%-11s %5d %-12s %s%s\" % \\ (self.name, type, len(self.header.ascard), _shape, _format, _gcount) def", "P format in variable length table.\"\"\" pass # TFORM regular expression _tformat_re =", "if hdu.data._byteorder != 'big': output = hdu.data.byteswapped() else: output = hdu.data # Binary", "exists, create if not. If only data is supplied, a minimal header is", "or promote products derived from this software without specific prior written permission. THIS", "== 0\", 0, option, _err) _after = self.header['NAXIS'] + 3 # if the", "self._bscale _zero = self._bzero elif option == 'minmax': if isinstance(_type, num.FloatingType): _scale =", "other): return self.__add__(other, 'right') def __sub__(self, other): if not isinstance(other, (list, tuple)): other", "value (if unique): >>> getdata('in.fits', 'sci') >>> getdata('in.fits', extname='sci') # equivalent Note EXTNAMEs", "_index = cards.index_of(keywd) except: _index = None fixable = fix_value is not None", "argument(s): %s' % ext2 if isinstance(ext1[0], str): if n_ext2 == 1 and 'extver'", "END, it will use this space first, instead of appending after these blank", "one if str(self[_key]) != str(value): super(CardList, self).__setitem__(_key, value) self._keylist[_key] = value.key.upper() self.count_blanks() self._mod", "def _check(self, option='ignore'): \"\"\"Verify the card image with the specified option. \"\"\" self.__dict__['_err_text']", "after: [same as in update()] \"\"\" self._add_commentary('history', value, before=before, after=after) def add_comment(self, value,", "+ strfmt[:-1] return strfmt ''' def _verify(self, option='warn'): \"\"\"TableHDU verify method.\"\"\" _err =", "_start = 0 elif isinstance(_start, (int, long)): _start = _normalize(_start, naxis) else: raise", "_coldefs._dat_format = _fits2rec[_format] _coldefs._pnames = _pnames self.__dict__[attr] = _coldefs elif attr == '_theap':", "FITS record array is the data part of a table HDU's data part.", "modes only). output_verify: output verification option, default = 'exception'. verbose: print out verbose", "urllib import tempfile import gzip import zipfile import numarray as num import numarray.generic", "['>=', '==']: insert_pos = eval(_parse[1]) # if the card does not exist if", "until END card is reached while 1: # find the END card mo", "= [128, 64, 32, 16, 8, 4, 2, 1] nbytes = ((nx-1) /", "loc = num.nonzero(blank_loc >= strlen+offset)[0][0] offset = blank_loc[loc-1] + 1 if loc ==", "= [] if not isinstance(key, tuple): key = (key,) naxis = self.hdu.header['NAXIS'] if", "0: simple = self.header.get('SIMPLE','F') randomGroups = self.header.get('GROUPS','F') if simple == 'T' and randomGroups", ".names while Column has .name), Each attribute in ColDefs is a list of", "CONTINUE cards may span across blocks. \"\"\" if len(block) != _blockLen: raise IOError,", "with urlretrieve to allow IOError exceptions to be raised when a file specified", "Card to the location specified by before or after. 
The argument `before' takes", "16:'Int16', 32:'Int32', 64:'Int64', -32:'Float32', -64:'Float64'} ImgCode = {'UInt8':8, 'Int16':16, 'Int32':32, 'Int64':64, 'Float32':-32, 'Float64':-64}", "# does not include Object array because there is no guarantee # the", "string The name of the file to which the header and data will", "+ _pad('END') blocks = blocks + _padLength(len(blocks))*' ' if len(blocks)%_blockLen != 0: raise", "info of the HDU's in this HDUList.\"\"\" if self.__file is None: _name =", "# make a copy _keylist.reverse() try: _indx = _keylist.index(_key) if backward: _indx =", "= _heapstart - _tbsize _pcount = hdu.data._heapsize + hdu.data._gap if _pcount > 0:", "hdu in self: hdu.header._mod = 0 hdu.header.ascard._mod = 0 hdu._new = 0 hdu._file", "includes the name, type, length of header, data shape and type for each", "for i in range(len(val_list)): if i == 0: headstr = \"%-8s= \" %", "an HDU attribute.\"\"\" if attr == 'name' and value: if not isinstance(value, str):", "hdu.header._mod = 0 hdu.header.ascard._mod = 0 hdu._new = 0 hdu._file = ffo.getfile() #", "produce 'a7'. if fmt.lstrip()[0] == 'A' and option != '': output_format = _fits2rec[dtype]+`int(option)`", "'update']: raise \"Memory mapping is not implemented for mode `%s`.\" % mode else:", "if attr == '_mm': self.__dict__[attr] = Memmap.open(self.name, mode=_memmap_mode[self.mode]) try: return self.__dict__[attr] except KeyError:", "not isinstance(_key, str): raise KeyError, key _key = (_key.strip()).upper() nfound = 0 for", "end, even if there are blank cards in front of END. bottom: If", "array array = chararray.array(array, itemsize=eval(recfmt[1:])) # then try variable length array except: if", "zip files if mode in ['update', 'append']: raise \"Writing to zipped fits files", "j in range(_min, _max): num.bitwise_and(input[...,i], pow2[j-i*8], output[...,j]) def _wrapx(input, output, nx): \"\"\"Wrap the", "of data ASCIITNULL = 0 # value for ASCII table cell with value", "if _option in ['fix', 'silentfix'] and x.find('Unfixable') != -1: raise VerifyError, '\\n'+x if", "columns, unless it is # a null string elif isinstance(self.value, str): if self.value", "= self.ascard[_index].value self.ascard[_index] = Card(newkey, _value, _comment) # self.ascard[_index].__dict__['key']=newkey # self.ascard[_index].ascardimage() # self.ascard._keylist[_index]", "else: self.__dict__['_cardimage'] = ' '*80 def __repr__(self): return self._cardimage def __getattr__(self, name): \"\"\"", "= 1 else: raise SyntaxError, \"%s is not a Card\" % str(value) def", "format = _convert_format(recfmt, reverse=1) except: raise ValueError, \"Illegal format `%s`.\" % format self.format", "the stream to overflow, an IOError exception is raised and the data is", "(nfound, `key`) else: return found def readall(self): \"\"\"Read data of all HDU's into", "if isinstance(parName, (int, long)): result = self.field(parName) else: indx = self._unique[parName.lower()] if len(indx)", "_valStr = '(' + _realStr + ', ' + _imagStr + ')' self.__dict__['_valuestring']", "be used (as a template), default=None. If header=None, a minimal Header will be", "0, 'length of dimension 2'), Card('PCOUNT', 0, 'number of group parameters'), Card('GCOUNT', 1,", "file is first opened. This is to speed up the open. Any header", "\"%s\" is not recognized.' 
% tform if repeat == '': repeat = 1", "else: dim = str(dim) self.header.update('PCOUNT', 0, 'number of parameters', after='NAXIS'+dim) if not self.header.has_key('GCOUNT'):", "a primitive implementation, it will put the value string in one block and", "reverse=1) except: raise ValueError, \"Illegal format `%s`.\" % format self.format = format #", "through the list twice, first time print out all top level messages for", "== 'BinTableHDU': attr = [_convert_format(fmt) for fmt in self.formats] elif self._tbtype == 'TableHDU':", "'name', new_name) def change_unit(self, col_name, new_unit): \"\"\"Change a Column's unit.\"\"\" self.change_attrib(col_name, 'unit', new_unit)", "to conver it to a strings array array = chararray.array(array, itemsize=eval(recfmt[1:])) # then", "card image from the attributes: key, value, and comment, or from raw string.", "a HDUList object. hdus: Input, can be a list of HDU's or a", "_ErrList): _dummy = item.__str__(tab=tab+1) # print out a message only if there is", "default=None. \"\"\" if self.has_key(key): j = self.ascard.index_of(key) if comment is not None: _comment", "is None: hdu._raw += block block = self.__file.read(_blockLen) if block == '': break", "each piece of data: shdu.write(data) shdu.close() \"\"\" def __init__(self, name, header): \"\"\" Construct", "cardlist self.ascard = CardList(cards) def __getitem__ (self, key): \"\"\"Get a header keyword value.\"\"\"", "err_text=err_text, fixable=0) _err.append(_text) else: _result = self[i]._verify(option) if _result: _err.append(_result) return _err def", "< 0: raise ValueError, \"column `%s` ending point overlaps to the next column\"", "if (len(self) == 0): print \"There is nothing to write.\" return self.update_tbhdu() if", "associated with 'data', if None, an appropriate header will be created for the", "INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT", "provided to the class constructor may be written to the stream. If the", "for scaled fields.\"\"\" for hdu in self: if 'data' in dir(hdu): if isinstance(hdu,", "f.close() UNDEFINED = Undefined() __credits__=\"\"\" Copyright (C) 2004 Association of Universities for Research", "= imag.group('digt').translate(_fix_table2, ' ') if imag.group('sign') == None: _val += eval(_idigt)*1j else: _val", "= header self.data = data self.name = None def size(self): \"\"\"Returns the size", "\"'%-s&'\" val = self.value.replace(\"'\", \"''\") val_list = self._words_group(val, val_len) for i in range(len(val_list)):", "return (self.writeHDUheader(hdu),) + self.writeHDUdata(hdu) def writeHDUheader(self, hdu): \"\"\"Write FITS HDU header part.\"\"\" blocks", "ImageHDU elif xtension in ('BINTABLE', 'A3DTABLE'): self._hdutype = BinTableHDU else: self._hdutype = _ExtensionHDU", "and non-string types # Boolean is also OK in this constructor _card =", "list (or tuple) containing arrays else: if isinstance(value, (list, tuple)) and len(indx) ==", "BinTableHDU): for name in ['TDISP', 'TDIM', 'THEAP']: for i in range(_tfields): del self[name+`i+1`]", "\"Supplied data is not the correct type.\" if data._byteorder != 'big': # #", "card.value)) return pairs def has_key(self, key): \"\"\"Check for existence of a keyword. Returns", "by moving it to the right place (card %d).\" % insert_pos fix =", "rec.RecArray.__getitem__(self, key) if isinstance(key, slice): out = tmp out._parent = rec.RecArray.__getitem__(self._parent, key) out._convert", "call can also close the mm object. 
try: self.mmobject.close() except: pass def info(self):", "+ size) / 8 return size def _verify(self, option='warn'): _err = PrimaryHDU._verify(self, option=option)", "+ '\\n' return text[:-1] def copy(self): tmp = Column(format='I') # just use a", "format spec to record format spec. Do the opposite if reverse = 1.", "_setup(self): \"\"\" Initialize all attributes to be a list of null strings.\"\"\" for", "if newkey in Card._commentaryKeys or oldkey in Card._commentaryKeys: if not (newkey in Card._commentaryKeys", "self._coldefs._tbtype == 'TableHDU': _loc = [1] _width = [] for i in range(self._nfields):", "class.\"\"\" pass class _ErrList(list): \"\"\"Verification errors list class. It has a nested list", "its value when created. Also check the card's value by using the \"test\"", "ImageHDU, _ImageBaseHDU, PrimaryHDU, TableHDU, _TableBaseHDU, _TempHDU, _ValidHDU @group Table-related Classes: ColDefs, Column, FITS_rec,", "self._extractValueComment(name) else: raise AttributeError, name return getattr(self, name) def _setkey(self, val): \"\"\"Set the", "int): raise ValueError, \"An element in the HDUList must be an HDU.\" for", "input else: self._parent.field(npars)[:] = input else: self.__setstate__(input.__getstate__()) def __getattr__(self, attr): if attr ==", "_zero, bscale, bzero) = self._get_scale_factors(indx) # add the location offset of the heap", "for j in range(len(hdu.data.field(i))): coldata = hdu.data.field(i)[j] if len(coldata) > 0: coldata.tofile(self.__file) _shift", "_extver = `hdu.header['extver']` except: _extver = '' if hdu.header._mod or hdu.header.ascard._mod: hdu._file.seek(hdu._hdrLoc) self.__file.writeHDUheader(hdu)", "0 # there is no boolean in ASCII table _number = not(_bool or", "of Cards, default=[]. \"\"\" # decide which kind of header it belongs to", "written to the file. :Returns: None Notes ----- The file will be opened", "re.compile(r'(?P<label>^T[A-Z]*)(?P<num>[1-9][0-9 ]*$)') def _parse_tformat(tform): \"\"\"Parse the TFORM value into repeat, data type, and", "(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF", "= _get_tbdata(self) data._coldefs = self.columns else: data = None self.__dict__[attr] = data elif", "record if nrows == 0: for arr in tmp._arrays: if arr is not", "if it is not the length of a card image (80 columns). If", "the 'data' or 'columns' attribute.\"\"\" if attr == 'data': size = self.size() if", "__setattr__ key case.\"\"\" if isinstance(val, str): val = val.strip() if len(val) <= 8:", "usually means that the data size cannot be calculated or the 'END' card", "1) pcount = self.header.get('PCOUNT', 0) size = abs(bitpix) * gcount * (pcount +", "unit def __str__(self, tab=0): \"\"\"Print out nested structure with corresponding indentations. A tricky", "can be a list of HDU's or a single HDU. Default = None,", "' '*80 self.ascard.append(new_card, useblanks=useblanks, bottom=1) else: try: _last = self.ascard.index_of(key, backward=1) self.ascard.insert(_last+1, new_card)", "range(i+1,naxis): _naxis = self.hdu.header['NAXIS'+`naxis-j`] indx = _iswholeline(key[j], _naxis) dims.append(indx.npts) if not isinstance(indx, _WholeLine):", "self._convert[indx] = dummy return self._convert[indx] (_str, _bool, _number, _scale, _zero, bscale, bzero) =", "self[j]._summary()) results = results[:-1] print results def open(name, mode=\"copyonwrite\", memmap=0): \"\"\"Factory function to", ">>> update(file, dat, 3, header=hdr) # update the 3rd extension >>> update(file, dat,", "in the name list. The key can be an integer or string. 
If", "type'), Card('NAXIS', 0, 'number of array dimensions'), ]) if isinstance(self, GroupsHDU): _list.append(Card('GROUPS', True,", "= _fits2rec[dtype]+`int(option)` # make sure option is integer else: _repeat = '' if", "range(len(self)): (type, width) = _convert_ASCII_format(self.data[i].format) if width is None: self.data[i].format = ascii_fmt[self.data[i].format[0]] elif", "keyword to be FITS standard.\"\"\" # use repr (not str) in case of", "= 0 naxis = self.header.get('NAXIS', 0) if naxis > 0: size = 1", "will be appended after the last non-blank card. \"\"\" if isinstance (card, Card):", "8: val = val.upper() if val == 'END': raise ValueError, \"keyword 'END' not", "is None: size = 0 else: size = len(tmp._arrays[i]) n = min(size, nrows)", "n = hdr['naxis'] hdr.update('extend', True, after='naxis'+`n`) def writeto(self, name, output_verify='exception', clobber=False): \"\"\"Write the", "%s with its value is too long.\" % self.key if len(output) <= Card.length:", "raise ValueError, \"Must specify format to construct Column\" # scale the array back", "value. key: keyword name or index default: if no keyword is found, the", "integer). \"\"\" if isinstance(key, (int, slice)): return key elif isinstance(key, tuple): _key =", "i*8 _max = min((i+1)*8, nx) for j in range(_min, _max): if j !=", "default = False. \"\"\" if (len(self) == 0): print \"There is nothing to", "IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED", "ndarray import numarray.strings as chararray import numarray.records as rec import numarray.objects as objects", "None: raise ValueError, \"Unparsable card, fix it first with .verify('fix').\" if valu.group('bool') !=", "range(len(kard)/80): output += kard[i*80:(i+1)*80] + '\\n' return output[:-1] def _extractValueComment(self, name): \"\"\"Exatrct the", "the pieces in a list tmp = input[xoffset:offset] list.append(tmp) if len(input) == offset:", "\"\"\" hdr = self[0].header if hdr.has_key('extend'): if (hdr['extend'] == False): hdr['extend'] = True", "(in bytes) of the HDU's data part.\"\"\" self._file.seek(0, 2) return self._file.tell() - self._datLoc", "tables.\"\"\" pass class _FormatP(str): \"\"\"For P format in variable length table.\"\"\" pass #", "with two single quotes, # whereas it should not end with an even", "be either a keyword name or index. \"\"\" if before != None: loc", "the CardList. 
pos: The position (index, keyword name will not be allowed) to", "elif name == 'value': self._setvalue(val) elif name == 'comment': self._setcomment(val) else: raise AttributeError,", "if the file is read the first time, no need to copy, and", "_nx = self._coldefs._recformats[indx]._nx dummy = num.zeros(self._parent.shape+(_nx,), type=num.Bool) _unwrapx(self._parent.field(indx), dummy, _nx) self._convert[indx] = dummy", "self.req_cards('BITPIX', None, 'val == 8', 8, option, _err) self.req_cards('TFIELDS', '== 7', _isInt+\" and", "file @type header: L{Header} object or None @param header: the header associated with", "return _err # --------------------------Table related code---------------------------------- # lists of column/field definition common names", "67 comm_len = 64 output = '' # do the value string valfmt", "_text #elif option == 'warn': pass # fix the value elif option ==", "input, strlen): \"\"\"Split a long string into parts where each part is no", "if indx >= 0 and indx < naxis: if naxis > 1: return", "= self._coldefs._recformats[indx][-2:] == _booltype else: _str = self._coldefs.formats[indx][0] == 'A' _bool = 0", "the second extension >>> getdata('in.fits', ext=2) # the second extension By name, i.e.,", "value of the keywod EXTNAME, default=None. \"\"\" # no need to run _ExtensionHDU.__init__", "i.e. an empty HDUList. file: The opened physical file associated with the HDUList.", "% self._cardimage # verify the comment (string), it is never fixable if result", "1 else: raise SyntaxError, \"%s is not a Card\" % str(card) def _use_blanks(self,", "for i in range(len(tmp)): _arr = tmp._arrays[i] if isinstance(_arr, Delayed): tmp._arrays[i] = _arr.hdu.data._parent.field(_arr.field)", "raise IOError self.__file.flush() loc = self.__file.tell() self.__file.write(blocks) # flush, to make sure the", "is found, otherwise it will return # None, meaning the keyword is undefined.", "if header is not None: if not isinstance(header, Header): raise ValueError, \"header must", "'TZERO', 'TDISP', 'TBCOL', 'TDIM'] # mapping from TFORM data type to numarray data", "FITS file and return an HDUList object. name: Name of the FITS file", "there are two or more attribute names, they must be separated by comma(s).", "val elif name == '_arrays': attr = [col.array for col in self.data] elif", "format(self): strfmt, strlen = '', 0 for j in range(self.header['TFIELDS']): bcol = self.header['TBCOL'+`j+1`]", "hdu._new = 0 elif self.__file.mode == 'update': if not self._resize: # determine if", "written. 
input: input object array desp_output: output \"descriptor\" array of data type 2Int32", "_idigt = imag.group('digt').translate(_fix_table2, ' ') if imag.group('sign') == None: _val += eval(_idigt)*1j else:", "data block if _size > 0: self.__file.write(_padLength(_size)*'\\0') # flush, to make sure the", "properly.\"\"\" hdu = new_table(self._coldefs, nrows=shape[0]) return hdu.data def __repr__(self): tmp = rec.RecArray.__repr__(self) loc", "i in range(len(tmp)): _arr = tmp._arrays[i] if isinstance(_arr, Delayed): tmp._arrays[i] = _arr.hdu.data._parent.field(_arr.field) #", "n_ext1 == 0: if n_ext2 == 0: ext = _Zero() elif 'ext' in", "0) if naxis > 0: size = 1 for j in range(naxis): size", "= self._coldefs.bzeros[indx] _scale = bscale not in ['', None, 1] _zero = bzero", "= r'[ -~]*$' _comment_FSC_RE = re.compile(_ASCII_text) # Checks for a valid value/comment string.", "self.name = None def size(self): \"\"\"Returns the size (in bytes) of the HDU's", "mode `%s`.\" % mode else: if os.path.splitext(self.name)[1] == '.gz': # Handle gzip files", "if this is a single treaded application threadName = threading.currentThread() singleThread = (threading.activeCount()", "xtension in ('BINTABLE', 'A3DTABLE'): self._hdutype = BinTableHDU else: self._hdutype = _ExtensionHDU else: self._hdutype", "_bzero)) data_shape = self._dimShape()[:-1] dat_format = `int(num.array(data_shape).sum())` + _format _bscale = self.header.get('BSCALE', 1)", "data does not match what is expected by the header, a TypeError exception", "\"\"\"Image section.\"\"\" def __init__(self, hdu): self.hdu = hdu def __getitem__(self, key): dims =", "assume it contains CONTINUE card(s). elif len(self._cardimage) > Card.length: self.__class__ = _Card_with_continue #", "found. In the case of a missing 'END' card, the Header may also", "fits_fmt, bscale = parbscales[i], bzero = parbzeros[i])) _cols.append(Column(name='data', format = fits_fmt, bscale =", "= data self.columns = data._coldefs self.update() elif data is None: pass else: raise", "== '.gz': # Handle gzip files if mode in ['update', 'append']: raise \"Writing", "elif os.path.splitext(self.name)[1] == '.zip': # Handle zip files if mode in ['update', 'append']:", "in primary HDU if there is extension if len(self) > 1: self.update_extend() def", "raise IndexError, 'Illegal slice %s, stop < start.' % input _step = input.step", "offset will be calculated when the file is written. input: input object array", "starting column of # a field may not be the column right after", "of the Card before which the new card will be placed. 
The argument", "is specified if pos is not None: test_pos = '_index '+ pos if", "the resize attributes after updating self._resize = 0 for hdu in self: hdu.header._mod", "def writeHDUheader(self, hdu): \"\"\"Write FITS HDU header part.\"\"\" blocks = repr(hdu.header.ascard) + _pad('END')", "self._heapsize self._heapsize += desc[:,0].sum()*_dtype.bytes # conversion for both ASCII and binary tables if", "= num.array(raw_data, type=num.Float32) else: # floating point cases if self._ffile.memmap: self.data = raw_data.copy()", "_commonNames = ['name', 'format', 'unit', 'null', 'bscale', 'bzero', 'disp', 'start', 'dim'] _keyNames =", "# Only if the card image already exist (to avoid infinite loop), #", "ColDefs input is not a Column.\" % input.index(col) self.data = [col.copy() for col", "# read the delayed data for i in range(len(tmp)): _arr = tmp._arrays[i] if", "self.__class__ = _Hierarch # for card image longer than 80, assume it contains", "self._coldefs._recformats[indx][-2:] == _booltype else: _str = self._coldefs.formats[indx][0] == 'A' _bool = 0 #", "= self.header.update _append = self.header.ascard.append _cols = self.columns _update('naxis1', self.data._itemsize, after='naxis') _update('naxis2', self.data._shape[0],", "= '' return valu = self._check(option='parse') if name == 'value': if valu is", "in the FITS file), # it will be constructed from the card list.", "= _arr.hdu.data._parent.field(_arr.field) # use the largest column shape as the shape of the", "elif input.group('cplx') != None: real = Card._number_NFSC_RE.match(input.group('real')) _realStr = real.group('digt').translate(_fix_table, ' ') if", "update the keylist self.count_blanks() self._mod = 1 def count_blanks(self): \"\"\"Find out how many", "self.__file.name results = \"Filename: %s\\nNo. Name Type\"\\ \" Cards Dimensions Format\\n\" % _name", "format spec to record format spec. \"\"\" ascii2rec = {'A':'a', 'I':'i4', 'F':'f4', 'E':'f4',", "clobber = keys.get('clobber', False) hdu.writeto(filename, clobber=clobber) def append(filename, data, header=None): \"\"\"Append the header/data", "the key/value/comment attributes, some of them may not exist for name in ['key',", "_iswholeline(indx, naxis): if isinstance(indx, (int, long)): if indx >= 0 and indx <", "copy _keylist.reverse() try: _indx = _keylist.index(_key) if backward: _indx = len(_keylist) - _indx", "for col in input.data] # if the input is a list of Columns", "bscale, bzero) def field(self, key): \"\"\"A view of a Column's data as an", "# the second extension >>> getdata('in.fits', ext=2) # the second extension By name,", "further if self.key in Card._commentaryKeys: self.__dict__['value'] = self._cardimage[8:].rstrip() self.__dict__['comment'] = '' return valu", "= _size + _padLength(_size) hdu._new = 0 self.__file.seek(hdu._datSpan, 1) if self.__file.tell() > self._size:", "of the HDU, both header and data are copied.\"\"\" if self.data is not", "ValueError, \"Inconsistent input data array: %s\" % array array._dtype = recfmt._dtype else: raise", "`hdu.header['extver']` except: _extver = '' if hdu.header._mod or hdu.header.ascard._mod: hdu._file.seek(hdu._hdrLoc) self.__file.writeHDUheader(hdu) if (verbose):", "as its value when created. Also check the card's value by using the", "the output file if exists, default = False. \"\"\" if isinstance(self, _ExtensionHDU): hdulist", "if isinstance(_arr, Delayed): tmp._arrays[i] = _arr.hdu.data._parent.field(_arr.field) # use the largest column shape as", "key): \"\"\"Check for existence of a keyword. 
Returns 1 if found, otherwise, 0.", "0) size = abs(bitpix) * gcount * (pcount + size) / 8 return", "groups'), Card('TFIELDS', 0, 'number of table fields') ]) if header is not None:", "groups data object @param data: data to write to the new file @type", "self.writeComplete = 1 def write(self,data): \"\"\" Write the given data to the stream.", "\"''\") val_list = self._words_group(val, val_len) for i in range(len(val_list)): if i == 0:", "* gcount * (pcount + datasize) / 8 if simple and not groups:", "= _len % Card.length if strlen == 0: return input else: return input", "dim == 0: dim = '' else: dim = str(dim) self.header.update('PCOUNT', 0, 'number", "'%s' mode is not supported.\" % self.__file.mode return self.update_tbhdu() self.verify(option=output_verify) if self.__file.mode ==", "is None: return # Determine the destination (numarray) data type if type is", "= strfmt + 's'+str(size) + ',' strlen = strlen + size else: strfmt", "either the group data itself (a numarray) or a record array (FITS_rec) which", "if \"old\", use the original BSCALE and BZERO values when the data was", "nrows: nrows = dim if tbtype == 'TableHDU': _formats = '' _itemsize =", "self.header['NAXIS'+`j+1`] = axes[j] except: if (j == 0): _after = 'naxis' else :", "tmp = [Column(**attrs) for attrs in dict] self.data = tmp else: raise TypeError,", "If =0 (default) the card will be appended after the last non-commentary card.", "Table-related Classes: ColDefs, Column, FITS_rec, _FormatP, _FormatX, _VLF \"\"\" \"\"\" Do you mean:", "hdu.header.ascard._mod = 0 if singleThread: if keyboardInterruptSent: raise KeyboardInterrupt signal.signal(signal.SIGINT,signal.getsignal(signal.SIGINT)) def update_extend(self): \"\"\"Make", "comment (to be used for updating), default=None. before: name of the keyword, or", "= r'[+-]? *' + _digits_NFSC # This regex helps delete leading zeros from", "numarray data type (code) _booltype = 'i1' _fits2rec = {'L':_booltype, 'B':'u1', 'I':'i2', 'E':'f4',", "hdr['NAXIS1'] self._shape = hdr['NAXIS2'] # go through header keywords to pick out column", "= self.hdu.header['NAXIS'+`naxis-j`] indx = _iswholeline(key[j], _naxis) dims.append(indx.npts) if not isinstance(indx, _WholeLine): raise IndexError,", "one field. indx is the index of the field. \"\"\" if self._coldefs._tbtype ==", "0 hdu._file = ffo.getfile() # if not resized, update in place else: for", "OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED", "data, *ext, **extkeys): \"\"\"Update the specified extension with the input data/header. @type filename:", "the correct type.\" if data._byteorder != 'big': # # byteswap little endian arrays", "NAXIS2 hdu.header['TFIELDS'] = hdu.data._nfields hdu.header['NAXIS2'] = hdu.data.shape[0] # calculate PCOUNT, for variable length", "== True: self._hdutype = GroupsHDU elif cards[0].value == True: self._hdutype = PrimaryHDU else:", "Flexible Image Transport System (FITS) files. This file format was endorsed by the", "by before or after. The argument `before' takes precedence over `after' if both", "string.\"\"\" output = '' for card in self: output += str(card) + '\\n'", "last_end = _end self._Formats = self.formats self._arrays[i] = input[i].array \"\"\" def __getitem__(self, key):", "it will try to match the name with case insensitivity. 
So, in the", "[] else: raise ValueError, \"incorrect array type\" self.header['NAXIS'] = len(axes) # add NAXISi", "cardList, keyList, blocks): \"\"\"Read blocks of header, and put each card into a", "re_pcount = re.compile(r'PCOUNT =\\s*(-?\\d+)') re_groups = re.compile(r'GROUPS =\\s*(T)') simple = re_simple.search(block[:80]) mo =", "new_format) def _get_tbdata(hdu): \"\"\" Get the table data from input (an HDU object).\"\"\"", "if (verbose): print \"update data in place: Name =\", hdu.name, _extver # reset", "'big' output = hdu.data else: output = hdu.data output.tofile(self.__file) _size = output.nelements() *", "_result = self[i]._verify(option) if _result: _err.append(_result) return _err def append(self, hdu): \"\"\"Append a", "self.size() if size: self._file.seek(self._datLoc) data = _get_tbdata(self) data._coldefs = self.columns else: data =", "list of corresponding attribute values from all Columns. \"\"\" def __init__(self, input, tbtype='BinTableHDU'):", "tmp._recformats[i]._dtype) else: if tbtype == 'TableHDU': # string no need to convert, if", "option != 'silentfix': _text += ' ' + fix_text return _text def verify", "and XTENSION to accomodate Extension # and Corrupted cases del self['SIMPLE'] del self['XTENSION']", "_parent # of a scaled column may have wrong byteorder if coldata2._byteorder !=", "a FITS file is opened, a HDUList object is returned. \"\"\" def __init__(self,", "location of data block in file (None) \"\"\" # mappings between FITS and", "' % self.key else: keyStr = '%-8s' % self.key else: keyStr = '", "delete from the end so as not to confuse the indexing. _list =", "insert_pos) _err.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix)) # if value checking is specified if test:", "table HDU, both header and data are copied.\"\"\" # touch the data, so", "default='exception'. clobber: Overwrite the output file if exists, default = False. \"\"\" if", "range(groups,naxis): size = size * self.header['NAXIS'+`j+1`] bitpix = self.header['BITPIX'] gcount = self.header.get('GCOUNT', 1)", "the Header.\"\"\" tmp = Header(self.ascard.copy()) # also copy the class tmp._hdutype = self._hdutype", "os.path.exists(name): self.name, fileheader = urllib.urlretrieve(name) else: self.name = name self.mode = mode self.memmap", "= mode self.memmap = memmap if memmap and mode not in ['readonly', 'copyonwrite',", "0 if singleThread: if keyboardInterruptSent: raise KeyboardInterrupt signal.signal(signal.SIGINT,signal.getsignal(signal.SIGINT)) def update_extend(self): \"\"\"Make sure if", "setpar(self, parName, value): \"\"\"Set the group parameter values.\"\"\" if isinstance(parName, (int, long)): self.field(parName)[:]", "'D':num.Float64} _type = _dict[self._coldefs._Formats[indx][0]] # if the string = TNULL, return ASCIITNULL nullval", "recent changes of columns.\"\"\" _update = self.header.update _append = self.header.ascard.append _cols = self.columns", "_append = self.header.ascard.append _cols = self.columns _update('naxis1', self.data._itemsize, after='naxis') _update('naxis2', self.data._shape[0], after='naxis1') _update('tfields',", "SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,", "History text to be added. before: [same as in update()] after: [same as", "is longer than 80, assume it contains CONTINUE card(s). \"\"\" self.__dict__['_cardimage'] = _pad(input)", "to be written to a file. 
name: output FITS file name to be", "comm = self.comment commfmt = \"%-s\" if not comm == '': nlines =", "True self.req_cards(firstkey, '== 0', '', firstval, option, _err) self.req_cards('BITPIX', '== 1', _isInt+\" and", "num.zeros(len(self._parent), type=_type) dummy[:] = ASCIITNULL self._convert[indx] = dummy for i in range(len(self._parent)): if", "to the last occurrence of cards of the same name (except blank card).", "See L{getdata} for explanations/examples. @return: keyword value @rtype: string, integer, or float \"\"\"", "numarray attribute format, (e.g. 'UInt8', 'Int16', 'Float32' etc.). If is None, use the", "all attributes. It forgives plurals and blanks. If there are two or more", "if (verbose): print \"delete the original file\", oldName # reopen the renamed new", "range(self._nfields): _loc.append(_loc[-1]+self._parent.field(i).itemsize()) _width.append(_convert_ASCII_format(self._coldefs._Formats[i])[1]) self._heapsize = 0 for indx in range(self._nfields): if (self._convert[indx] is", "extkeys['header'] del extkeys['header'] new_hdu=_makehdu(data, header) hdulist, _ext = _getext(filename, 'update', *ext, **extkeys) hdulist[_ext]", "\"_cards=self.header.ascard; dummy=_cards[%d]; del _cards[%d];_cards.insert(%d, dummy)\" % (_index, _index, insert_pos) _err.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix))", "back to storage values if there is bscale/bzero if isinstance(array, num.NumArray): # boolean", "+ commfmt % i output = output + '%-80s' % commstr return output", "\"\"\" def _verify(self, option='warn'): \"\"\"No verification (for now).\"\"\" return _ErrList([]) class _Card_with_continue(Card): \"\"\"Cards", "verify the comment (string), it is never fixable if result is not None:", "return Section(self) elif attr == 'data': self.__dict__[attr] = None if self.header['NAXIS'] > 0:", "or groups data object @param data: data to write to the new file", "open. Any header will not be initialized till the HDU is accessed. \"\"\"", "try: header = Header(CardList(_cardList, keylist=_keyList)) hdu = header._hdutype(data=DELAYED, header=header) # pass these attributes", "if width == '': width = None else: width = eval(width) except: raise", "arguments or as a tuple: >>> getdata('in.fits', 'sci', 2) # EXTNAME='SCI' & EXTVER=2", "is the top-level FITS object. When a FITS file is opened, a HDUList", "= '' self.__dict__['_fix_text'] = '' self.__dict__['_fixable'] = 1 if option == 'ignore': return", "Verify location and value of mandatory keywords. # Do the first card here,", "and cards['GROUPS'].value == True: self._hdutype = GroupsHDU elif cards[0].value == True: self._hdutype =", "for j in range(len(axes)+1, old_naxis+1): try: del self.header.ascard['NAXIS'+`j`] except KeyError: pass if isinstance(self.data,", "HDUs from the HDUList, indexed by number only.\"\"\" super(HDUList, self).__delslice__(i, j) self._resize =", "del dummy # ASCII table does not have Boolean type elif _bool: self._parent.field(indx)[:]", "'big' return raw_data class _ImageBaseHDU(_ValidHDU): \"\"\"FITS image HDU base class.\"\"\" \"\"\"Attributes: header: image", "fix_text = \"Fixed by setting a new value '%s'.\" % fix_value if fixable:", "copyright notice, this list of conditions and the following disclaimer in the documentation", "be calculated or the 'END' card is not found. In the case of", "is touched, use data info. 
if 'data' in dir(self): if self.data is None:", "if self._ffile.memmap: _mmap = self._ffile._mm[self._datLoc:self._datLoc+self._datSpan] raw_data = num.array(_mmap, type=code, shape=dims) else: raw_data =", "valu is None: raise ValueError, \"Unparsable card, fix it first with .verify('fix').\" if", "self.req_cards('BITPIX', '== 1', _isInt+\" and \"+isValid, 8, option, _err) self.req_cards('NAXIS', '== 2', _isInt+\"", "be added. before: [same as in update()] after: [same as in update()] \"\"\"", "= [col.array for col in self.data] elif name == '_recformats': if self._tbtype ==", "def _pad(input): \"\"\"Pad balnk space to the input string to be multiple of", "useblanks=useblanks, bottom=1) else: try: _last = self.ascard.index_of(key, backward=1) self.ascard.insert(_last+1, new_card) except: self.ascard.append(new_card, bottom=1)", "first header block. block = self.__file.read(_blockLen) if block == '': raise EOFError hdu", "missing when trying to read HDU #%d.\\n There may be extra bytes after", "correct for all cases, but # it comes pretty darn close. It appears", "text.\"\"\" if Card._comment_FSC_RE.match(val) is None: self.__dict__['_err_text'] = 'Unprintable string %s' % repr(val) self.__dict__['_fixable']", "self.starts[i] + _width - 1 self.spans[i] = _end - last_end last_end = _end", "or just # a number/string for cname in _commonNames: value = eval(cname) #", "if n_ext2 == 1: ext = ext2['ext'] elif n_ext2 == 2 and 'extver'", "be separated by comma(s). \"\"\" if attrib.strip().lower() in ['all', '']: list = _commonNames", "Columns elif isinstance(input, (list, tuple)): for col in input: if not isinstance(col, Column):", "you can have two different columns called 'abc' and 'ABC' respectively. (b) When", "= 0 else: npars = len(pardata) if parbscales is None: parbscales = [None]*npars", "top-level FITS object. When a FITS file is opened, a HDUList object is", "_verify(self, option='warn'): _err = _ErrList([], unit='Card') isValid = \"val in [8, 16, 32,", "so we skip NAXIS1. if naxis > 1: size = 1 for j", "'data' in dir(self): if self.data is None: _shape, _format = (), '' _nrows", "data elif attr == 'columns': class_name = str(self.__class__) class_name = class_name[class_name.rfind('.')+1:] self.__dict__[attr] =", "append at the end. \"\"\" new_card = Card(key, value) if before != None", "COMMENT card. value: Comment text to be added. before: [same as in update()]", "_normalize_slice(indx, naxis) if (indx.start == 0) and (indx.stop == naxis) and (indx.step ==", "the name with case insensitivity. So, in the last example, field('Abc') will cause", "the required data has been written to the stream. 
The factory function open(name, mode='copyonwrite', memmap=0), also exported as fitsopen, opens a FITS file and returns an HDUList. name is the file to be opened (a URL is first fetched with urllib); mode may be 'readonly', 'copyonwrite' (the signature default), 'update', or 'append'; memmap requests memory mapping and is only allowed with the 'readonly', 'copyonwrite', and 'update' modes. Gzip- and zip-compressed inputs are unpacked to a temporary file, zip archives with more than one member are rejected, and compressed files cannot be opened for 'update' or 'append'. HDUList is the top-level FITS object: it behaves like a Python list of HDUs, indexable by number or by EXTNAME (and EXTVER), and adds info(), readall(), flush(), writeto(), and close(). The header and data locations of each HDU are recorded when the file is opened, but headers are only parsed and data only read when they are actually accessed.

Three convenience functions give quick access without managing an HDUList explicitly. getdata(filename, *ext, **extkeys) returns the data of the requested extension, getheader(filename, *ext, **extkeys) returns its header, and getval(filename, key, *ext, **extkeys) returns the value of one keyword (a string, integer, or float), implemented as getheader(filename, *ext, **extkeys)[key]. The extension may be specified in several equivalent ways, illustrated here for getdata:

    >>> getdata('in.fits')                           # no arguments: the primary HDU
    >>> getdata('in.fits', 0)                        # by extension number
    >>> getdata('in.fits', 'sci')                    # by EXTNAME value (if unique; not case sensitive)
    >>> getdata('in.fits', 'sci', 2)                 # by EXTNAME and EXTVER as separate arguments
    >>> getdata('in.fits', extname='sci', extver=2)  # ... or as keyword arguments

EXTNAME and EXTVER may also be passed together as a tuple. If the primary HDU is requested by default but holds no data, getdata falls back to the first extension, and raises IndexError ('No data in this HDU.') only when that also fails.
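As a small sketch (assuming the module is importable as pyfits, the name the PyFITS package uses, and that 'in.fits' has a 'SCI' extension), the two access styles look like this:

    import pyfits  # assumed import name for this module

    # object-oriented access through an HDUList
    hdulist = pyfits.open('in.fits')        # signature default mode is 'copyonwrite'
    hdulist.info()                          # one-line summary of each HDU
    primary_header = hdulist[0].header
    sci_data = hdulist['sci'].data          # index by EXTNAME, not case sensitive
    hdulist.close()

    # one-shot convenience functions
    sci_data = pyfits.getdata('in.fits', 'sci')
    primary_header = pyfits.getheader('in.fits', 0)
    naxis = pyfits.getval('in.fits', 'NAXIS')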
Four more convenience functions create or modify files directly. writeto(filename, data, header=None, **keys) builds an HDU from the array (or table or groups data object) and the optional header and writes it to a new file; passing clobber=True overwrites an existing file (the default is False). append(filename, data, header=None) adds the header/data to an existing file as a new extension, or simply behaves like writeto() when the file does not yet exist. update(filename, data, *ext, **extkeys) replaces the contents of an existing extension; the extension is selected the same way as for getdata(), the new header may be given as the third positional argument, and if that argument is not a header it (and the remaining arguments) are treated as the extension specification:

    >>> update(file, dat, hdr, 'sci')   # update the 'sci' extension with new data and header
    >>> update(file, dat, 3)            # update extension 3 with new data, keeping its header

Finally, info(filename) prints a one-line summary of every HDU, equivalent to f = open(filename); f.info(); f.close().
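A sketch of the writing functions under the same assumed pyfits import name (file names and keyword values are only illustrative):

    import numarray as num
    import pyfits  # assumed import name

    data = num.arange(100)
    hdr = pyfits.Header()
    hdr.update('OBSERVER', 'Edwin Hubble')

    pyfits.writeto('new.fits', data, hdr, clobber=True)  # create (or overwrite) the file
    pyfits.append('new.fits', data)                      # append the array as a new extension
    pyfits.update('new.fits', data * 2, 1)               # replace the data of extension 1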
Headers are manipulated through Card and CardList objects. A Card is one 80-column card image (Card.length is 80) holding a keyword, a value, and an optional comment. Long string values are spread over CONTINUE cards and handled by the _Card_with_continue subclass; keywords longer than eight characters use the HIERARCH convention and the _Hierarch subclass; the blank keyword, 'COMMENT', and 'HISTORY' are the commentary keys, whose cards carry no equal sign. Whenever a card's value or comment is changed the card image is rebuilt by ascardimage(): booleans are written as T or F, strings are quoted (with embedded single quotes doubled) and padded to at least eight characters, and floating-point values are run through _floatFormat to keep the exponent field consistent. A CardList is a list of Cards with keyword-based lookup, append(card, useblanks=1, bottom=0), insert(pos, card, useblanks=1), and index_of(key, backward=0); when useblanks is set, new cards consume blank cards sitting directly before the END card instead of growing the header.
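A minimal card-building sketch (pyfits import name assumed):

    import pyfits  # assumed import name

    card = pyfits.Card('EXPTIME', 1200.0, 'exposure time in seconds')
    image = str(card)       # the formatted, padded card image
    assert len(image) == 80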
The Header class wraps a CardList and adds keyword-level access: header[key] reads or sets a value, get(key, default) returns a default instead of raising when the keyword is missing, and items(), keys(), and ascardlist() expose the underlying cards. update(key, value, comment=None, before=None, after=None) modifies an existing keyword or appends a new card, optionally positioning it relative to another keyword; all specified arguments other than the defaults must be FITS compliant. Commentary text is added with add_history(value, before=None, after=None), add_comment(...), and add_blank(...); with no position given the text is appended after the last occurrence of cards of the same name, and get_history() returns the accumulated HISTORY entries as a list of strings. rename_key renames a keyword, but regular and commentary keys cannot be renamed to each other, and renaming onto an existing keyword requires force. From its first card a header knows which HDU class it belongs to (SIMPLE implies PrimaryHDU or GroupsHDU, XTENSION selects ImageHDU, TableHDU, or BinTableHDU), and _strip() removes structural cards such as SIMPLE, BITPIX, and NAXIS so that the remaining cards can be reused to build another kind of header.
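Typical header editing, sketched under the same assumptions (Header() starting from an empty card list, as in released PyFITS):

    import pyfits  # assumed import name

    hdr = pyfits.Header()
    hdr.update('TELESCOP', 'HST', 'telescope used')
    hdr.update('EXPTIME', 1200.0, comment='exposure time (s)', after='TELESCOP')
    hdr.add_history('flat fielded with the standard calibration')
    hdr.add_comment('header assembled programmatically')

    exptime = hdr.get('EXPTIME', 0.0)   # 1200.0; the default is used only if the keyword is absent
    cards = hdr.ascardlist()            # the underlying CardList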
Table extensions are described by Column and ColDefs objects. A Column records the properties of one field: name (the TTYPE keyword), format (TFORM), unit (TUNIT), null (TNULL), bscale (TSCAL), bzero (TZERO), disp (TDISP), start (TBCOL, ASCII tables only), and dim (TDIM), plus the data array for that field. ColDefs(input, tbtype='BinTableHDU') bundles a list of Columns or extracts the definitions from an existing table HDU, and offers add_col(), change_attrib(), change_unit(), and info(); two ColDefs objects (or a ColDefs and a list of Columns) can be concatenated with +. new_table(input, header=None, nrows=0, fill=0, tbtype='BinTableHDU') creates a new table HDU from the column definitions: header populates the non-required keywords, nrows sets the number of rows, and fill=1 initializes every cell to zeros or blanks instead of copying from the input columns. The resulting data object is a FITS_rec, a layer over the record array that transparently applies column scalings (TSCAL/TZERO), ASCII-table number formatting, bit ('X') columns, and variable-length ('P') columns. Fields are addressed by name with data.field(name); lookup first tries an exact match and then a case-insensitive one, so two columns whose names differ only in case (say 'abc' and 'ABC') remain individually reachable by their exact names.
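A sketch of building a small binary table, assuming the pyfits import name and that Column accepts an array argument for the field data, as the released PyFITS interface does:

    import numarray as num
    import numarray.strings as chararray
    import pyfits  # assumed import name

    c1 = pyfits.Column(name='target', format='10A',
                       array=chararray.array(['NGC1001', 'NGC1002']))
    c2 = pyfits.Column(name='flux', format='E', unit='counts',
                       array=num.array([31.2, 29.7]))

    tbhdu = pyfits.new_table(pyfits.ColDefs([c1, c2]))  # a BinTableHDU
    tbhdu.data.field('flux')[1] = 30.1                  # fields are addressed by name
    tbhdu.writeto('targets.fits', clobber=True)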
Image HDUs (PrimaryHDU and ImageHDU, both built on _ImageBaseHDU) read their data lazily: the header is parsed when the file is opened, but the data attribute is only materialized, from the recorded data location and with BSCALE/BZERO applied, when it is first touched; with memmap the array is backed by a memory map instead of a fresh read. The section attribute reads a subsection of the array directly from the file without loading the whole image; the requested slice must be contiguous on disk, otherwise an IndexError ('Subsection data is not contiguous.') is raised. update_header() synchronizes NAXIS, the NAXISn keywords, and, for random-groups data, the GROUPS/PCOUNT/GCOUNT cards with the current shape and type of the data, and the summary printed by info() uses the in-memory data when they have been loaded and the header otherwise. scale(type=None, option='old', bscale=1, bzero=0) rescales the data for storage: explicit bscale/bzero values take priority, option='old' reuses the scaling the data were read with, and option='minmax' derives BSCALE/BZERO from the data minimum and maximum; type is a numarray type name such as 'UInt8', 'Int16', or 'Float32' and defaults to the type implied by the current BITPIX.
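A sketch of rescaling an image for integer storage (pyfits import name assumed; the input file is hypothetical):

    import pyfits  # assumed import name

    hdulist = pyfits.open('float_image.fits')
    hdu = hdulist[0]
    hdu.scale('Int16', option='minmax')   # derive BSCALE/BZERO from the data range
    hdu.writeto('int_image.fits', clobber=True)
    hdulist.close()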
Output is checked through a shared verification framework. HDUs, headers, and cards implement _verify(), which returns an _ErrList, and the user-facing entry points (verify() and the output_verify argument of writeto(), flush(), and close()) accept 'fix', 'silentfix', 'ignore', 'warn', or 'exception'; the default for output is 'exception'. Mandatory cards (SIMPLE or XTENSION, BITPIX, NAXIS and the NAXISn cards, PCOUNT, GCOUNT, TFIELDS and the per-column TFORMn/TBCOLn cards) are checked for presence, position, and value by req_cards(), and fixable problems such as a missing card or a card in the wrong place are repaired when a fix option is chosen. In 'update' mode, flush() writes modified headers back in place when nothing has changed size; when an HDU has grown or shrunk, the whole list is written to a temporary file, the original file is deleted, and the temporary file is renamed over it and reopened in update mode. In 'append' mode, flush() writes any newly added HDUs at the end of the file. HDUList.writeto(name, output_verify='exception') calls update_extend() to keep the primary header's EXTEND card consistent and then writes every HDU to a freshly opened file, and close(output_verify=...) verifies, flushes, and releases the underlying file, including any memory map.
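In-place editing and explicit verification, sketched under the same assumptions:

    import pyfits  # assumed import name

    # modify a keyword in place; flush() rewrites only what changed unless the file must be resized
    hdulist = pyfits.open('in.fits', mode='update')
    hdulist[0].header.update('OBJECT', 'M31')
    hdulist.flush()
    hdulist.close()

    # check (and repair) standard compliance before writing a copy
    suspect = pyfits.open('suspect.fits')
    suspect.verify('fix')
    suspect.writeto('repaired.fits', output_verify='warn')
    suspect.close()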
value: History", "= _ImageBaseHDU.ImgCode[self.data.data.type()] axes = list(self.data.data.getshape())[1:] axes.reverse() axes = [0] + axes elif isinstance(self.data,", "even if there are blank cards in front of END. bottom: If =0", "_re.match(input_format.strip()).groups() dtype = ascii2rec[dtype] if width == '': width = None else: width", "in self.ascard._keylist): raise ValueError, 'Intended keyword %s already exists in header.' % newkey", "== TableHDU): for i in range(_tfields): del self['TBCOL'+`i+1`] except: pass class CardList(list): \"\"\"FITS", "part. This is a layer over the RecArray, so we can deal with", "for i in indx[1:]: result += self.field(i) return result def setpar(self, parName, value):", "pos = mo.end(0) else: raise ValueError(\"NAXIS not found where expected\") if naxis ==", "value: History text to be added. before: [same as in update()] after: [same", "= (), '' else: # the shape will be in the order of", "self._file.seek(_offset) if self._coldefs._recformats[indx]._dtype is 'a': dummy[i] = chararray.array(self._file, itemsize=self._parent.field(indx)[i,0], shape=1) else: dummy[i] =", "len(self)): if str(self[-i]) != ' '*Card.length: self._blanks = i - 1 break def", "< start.' % input _step = input.step if _step is None: _step =", "return loc, _size+_padLength(_size) def close(self): \"\"\"Close the 'physical' FITS file.\"\"\" self.__file.close() class HDUList(list,", "can also close the mm object. try: self.mmobject.close() except: pass def info(self): \"\"\"Summarize", "(*) In future it may be possible to decipher where the last block", "try: _extver = `hdu.header['extver']` except: _extver = '' if hdu.header._mod or hdu.header.ascard._mod: hdu._file.seek(hdu._hdrLoc)", "= __builtin__.open(self.name, _python_mode[mode]) # For 'ab+' mode, the pointer is at the end", "@rtype: string, integer, or float \"\"\" _hdr = getheader(filename, *ext, **extkeys) return _hdr[key]", "image, NAXIS1 should be 0, so we skip NAXIS1. if naxis > 1:", "map(lambda x: x.lower().rstrip(), nameList) _count = operator.countOf(_list, _key) # occurrence of _key in", "range(self._nfields): if (self._convert[indx] is not None): if isinstance(self._coldefs._recformats[indx], _FormatX): _wrapx(self._convert[indx], self._parent.field(indx), self._coldefs._recformats[indx]._nx) continue", "match is done for a string, # since a greedy match will find", "the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright", "%s' % (nfound, `key`) else: return found def readall(self): \"\"\"Read data of all", "data_fmt = '%s%s' % (`input.shape[1:]`, _fmt) _formats += data_fmt gcount = input.shape[0] for", "return self._convert[indx] (_str, _bool, _number, _scale, _zero, bscale, bzero) = self._get_scale_factors(indx) # for", "stream to satisfy the amount specified in the header, the stream is padded", "not in _commonNames: print \"'%s' is not an attribute of the column definitions.\"%att", "result def __setitem__(self, key, hdu): \"\"\"Set an HDU to the HDUList, indexed by", "def get_coldefs(self): \"\"\"Returns the table's column definitions.\"\"\" return self.columns def update(self): \"\"\" Update", "string eqStr = '= ' if keyStr.strip() in Card._commentaryKeys: # not using self.key", "if (verbose): print \"open a temp file\", _name for hdu in self: (hdu._hdrLoc,", "not isinstance(self, _Hierarch): self.__class__ = Card else: # does not support CONTINUE for", "to a certain kind of header. Strip cards like SIMPLE, BITPIX, etc. 
so", "an attribute (value or comment) is changed, will reconstructe # the card image.", "'has groups')) if isinstance(self, (_ExtensionHDU, GroupsHDU)): _list.append(Card('PCOUNT', 0, 'number of parameters')) _list.append(Card('GCOUNT', 1,", "_data.field(i) return FITS_rec(_data) def new_table (input, header=None, nrows=0, fill=0, tbtype='BinTableHDU'): \"\"\"Create a new", "found, though the # comment maybe an empty string. _value_FSC_RE = re.compile( r'(?P<valu_field>", "if self.__dict__.has_key(name): delattr(self, name) return self def _ncards(self): return len(self._cardimage) / Card.length def", "isinstance(array, num.NumArray): # boolean needs to be scaled too if recfmt == _booltype:", "eval(real.group('sign')+_rdigt) imag = Card._number_NFSC_RE.match(valu.group('imag')) _idigt = imag.group('digt').translate(_fix_table2, ' ') if imag.group('sign') == None:", "bscale value, corresponding to TSCAL keyword bzero: bzero value, corresponding to TZERO keyword", "the random group, # since binary table does not support ND yet if", "# scale by TSCAL and TZERO if _scale or _zero: for i in", "option in ['warn', 'exception']: #raise VerifyError, _text #elif option == 'warn': pass #", "HDU class.\"\"\" __format_RE = re.compile( r'(?P<code>[ADEFI])(?P<width>\\d+)(?:\\.(?P<prec>\\d+))?') def __init__(self, data=None, header=None, name=None): \"\"\"data: data", "self.header.update('GCOUNT', 1, 'number of groups', after='PCOUNT') self._ffo = _File(name, 'append') self._ffo.getfile().seek(0,2) self._hdrLoc =", "Columns. \"\"\" def __init__(self, input, tbtype='BinTableHDU'): \"\"\"input: a list of Columns, an (table)", "ValueError, \"%s is not an HDU.\" % item else: if not isinstance(hdu, _AllHDU):", "output = output + '%-80s' % (headstr + valstr) # do the comment", "% (keywd, _index) fix_text = \"Fixed by moving it to the right place", "make sure the content is written self.__file.flush() # return both the location and", "_ImageBaseHDU): del self['BSCALE'] del self['BZERO'] if issubclass(self._hdutype, _TableBaseHDU): del self['TFIELDS'] for name in", "method. \"\"\" if isinstance(hdu, _ImageBaseHDU): hdu.update_header() return (self.writeHDUheader(hdu),) + self.writeHDUdata(hdu) def writeHDUheader(self, hdu):", "= 'naxis'+`j` self.header.update('naxis'+`j+1`, axes[j], after = _after) # delete extra NAXISi's for j", "a string.\"\"\" block = '' for card in self: block = block +", "%s' % repr(val) self.__dict__['_fixable'] = 0 raise ValueError, self._err_text def _checkKey(self, val): \"\"\"Verify", "self._max = 0 def __setitem__(self, key, value): \"\"\"To make sure the new item", "from the input column definitions.\"\"\" \"\"\" input: a list of Columns or a", "since it has methods to change # the content of header without being", "of the column definition.\"\"\" \"\"\"The attrib can be one or more of the", "not None: # if image, need to deal with byte order if isinstance(hdu,", "return CardList(cards) def __repr__(self): \"\"\"Format a list of cards into a string.\"\"\" block", "a tuple of (string, integer). 
\"\"\" if isinstance(key, (int, slice)): return key elif", "no boolean in ASCII table _number = not(_bool or _str) bscale = self._coldefs.bscales[indx]", "parnames=[], bscale=None, bzero=None, parbscales=None, parbzeros=None): \"\"\"input: input data, either the group data itself", "filename: input FITS file name @type key: string @param key: keyword name @param", "= input.header _nfields = hdr['TFIELDS'] self._width = hdr['NAXIS1'] self._shape = hdr['NAXIS2'] # go", "format = fits_fmt, bscale = bscale, bzero = bzero)) self._coldefs = ColDefs(_cols) self.parnames", "update(self, key, value, comment=None, before=None, after=None): \"\"\"Update one header card.\"\"\" \"\"\" If the", "output Uint8 array of shape (s, nbytes) nx: number of bits \"\"\" output[...]", "bzero)) self._coldefs = ColDefs(_cols) self.parnames = [i.lower() for i in parnames] tmp =", "not in ['', None, 0] # ensure bscale/bzero are numbers if not _scale:", "extver def _getsize(self, block): \"\"\"Get the size from the first block of the", "*)' r'(?P<comm>[!-~][ -~]*)?' r')?$') _value_NFSC_RE = re.compile( r'(?P<valu_field> *' r'(?P<valu>' r'\\'(?P<strg>([ -~]+?|\\'\\'|)) *?\\'(?=$|/|", "There may be extra bytes after the last HDU or the file is", "of # quotes to be precise. # # Note that a non-greedy match", "default: if no keyword is found, the value to be returned. \"\"\" try:", "next FITS block # self._ffo.getfile().write(_padLength(self._size)*'\\0') self.writeComplete = 1 self._ffo.getfile().flush() return self.writeComplete def size(self):", "% `key` elif (nfound > 1): raise KeyError, 'there are %d extensions of", "new file. This is a convenience method to provide a user easier output", "_val if '_valuestring' not in self.__dict__: self.__dict__['_valuestring'] = valu.group('valu') if '_valueModified' not in", "str(card) def _use_blanks(self, how_many): if self._blanks > 0: for i in range(min(self._blanks, how_many)):", "is a layer over the RecArray, so we can deal with scaled columns.", "Open mode, 'readonly' (default), 'update', or 'append'. 
memmap: Is memmory mapping to be", "'HISTORY': output.append(_card.value) return output def get_comment(self): \"\"\"Get all comments as a list of", "self.ascard._pos_insert(new_card, before=before, after=after) else: if key[0] == ' ': useblanks = new_card._cardimage !=", "if not isinstance(_card.value, str): raise ValueError, 'Cards with CONTINUE must have string value.'", "range(npars): self.header.update('PTYPE'+`i+1`, self.data.parnames[i]) (_scale, _zero) = self.data._get_scale_factors(i)[3:5] if _scale: self.header.update('PSCAL'+`i+1`, self.data._coldefs.bscales[i]) if _zero:", "in the last example, field('Abc') will cause an exception since there is no", "_loc[indx] if _lead < 0: raise ValueError, \"column `%s` starting point overlaps to", "return ColDefs(tmp) def _setup(self): \"\"\" Initialize all attributes to be a list of", "_ErrList([], unit='HDU') # the first (0th) element must be a primary HDU if", "value or comment from the card image.\"\"\" # for commentary cards, no need", "= self.field(parName) else: indx = self._unique[parName.lower()] if len(indx) == 1: result = self.field(indx[0])", "= self.data.field(i) VLdata._max = max(map(len, VLdata)) val = 'P' + _convert_format(val._dtype, reverse=1) +", "'(No file associated with this HDUList)' else: _name = self.__file.name results = \"Filename:", "= data output.tofile(self._ffo.getfile()) if self._ffo.getfile().tell() - self._datLoc == self._size: # # the stream", "'END': break else: _cardList.append(_card) _keyList.append(_key) # Deal with CONTINUE cards # if a", "the HDUList.\"\"\" if isinstance(hdu, _AllHDU): super(HDUList, self).append(hdu) hdu._new = 1 self._resize = 1", "else: if not isinstance(hdu, _AllHDU): raise ValueError, \"%s is not an HDU.\" %", "def __str__(self): \"\"\"Format a list of cards into a printable string.\"\"\" kard =", "range(self.header['TFIELDS']): bcol = self.header['TBCOL'+`j+1`] valu = self.header['TFORM'+`j+1`] fmt = self.__format_RE.match(valu) if fmt: code,", "to avoid out of range error for BZERO = +32768 self.header.update('BZERO', _zero) else:", "block = '' for card in self: block = block + repr(card) return", "' def __setattr__(self, attr, value): \"\"\"Set an HDU attribute.\"\"\" if attr == 'name'", "def __getitem__ (self, key): \"\"\"Get a header keyword value.\"\"\" return self.ascard[key].value def __setitem__", "_ImageBaseHDU.NumCode[self.header['BITPIX']] if self._ffile.memmap: _mmap = self._ffile._mm[self._datLoc:self._datLoc+self._datSpan] raw_data = num.array(_mmap, type=code, shape=dims) else: raw_data", "'data' in dir(hdu): if isinstance(hdu, (GroupsHDU, _TableBaseHDU)) and hdu.data is not None: hdu.data._scale_back()", "if len(output) <= Card.length: output = \"%-80s\" % output # longstring case (CONTINUE", "\"Filename: %s\\nNo. Name Type\"\\ \" Cards Dimensions Format\\n\" % _name for j in", "if dims == []: dims = [1] npt = 1 for n in", "new card will be placed. default=None. \"\"\" if self.has_key(key): j = self.ascard.index_of(key) if", "useblanks=1): \"\"\"Insert a Card to the CardList. pos: The position (index, keyword name", "= ImageHDU elif xtension in ('BINTABLE', 'A3DTABLE'): self._hdutype = BinTableHDU else: self._hdutype =", "X format column into a Boolean array. input: input Uint8 array of shape", "the original dummy = self.field(i) if self._convert[i] is not None: out._convert[i] = ndarray.NDArray.__getitem__(self._convert[i],", "os.path.getsize(name) > 0: # # This will not be the first extension in", "random group FITS file will be like a binary table's data. 
\"\"\" if", "binary table, so both will produce 'a7'. if fmt.lstrip()[0] == 'A' and option", "in the card image and return the string after the equal sign. If", "element must be a primary HDU if len(self) > 0 and (not isinstance(self[0],", "if the output file already exists if os.path.exists(name): if clobber: print \"Overwrite existing", "name matched, it will try to match the name with case insensitivity. So,", "GroupsHDU(PrimaryHDU): \"\"\"FITS Random Groups HDU class.\"\"\" _dict = {8:'B', 16:'I', 32:'J', 64:'K', -32:'E',", "else: name = '' return size, name def setupHDU(self): \"\"\"Read one FITS HDU,", "be inserted before it. card: The Card to be inserted. useblanks: Use any", "keyword-value pairs from the CardList.\"\"\" pairs = [] for card in self.ascard: pairs.append((card.key,", "= option+_rec2fits[dtype] elif isinstance(dtype, _FormatX): print 'X format' elif dtype+option in _rec2fits.keys(): #", "self._getValueCommentString() try: slashLoc = _tmp.index(\"/\") self.__dict__['value'] = _tmp[:slashLoc].strip() self.__dict__['comment'] = _tmp[slashLoc+1:].strip() except: self.__dict__['value']", "self.count_blanks() self._mod = 1 else: raise SyntaxError, \"%s is not a Card\" %", "def __getitem__(self, key): \"\"\"Get a Card by indexing or by the keyword name.\"\"\"", "the arguments are used only for the first case. bitpix: data type as", "return _data def getval(filename, key, *ext, **extkeys): \"\"\"Get a keyword's value from a", "= _bzero)) _coldefs = ColDefs(_cols) _coldefs._shape = self.header['GCOUNT'] _coldefs._dat_format = _fits2rec[_format] _coldefs._pnames =", "number or name.\"\"\" key = self.index_of(key) super(HDUList, self).__delitem__(key) self._resize = 1 def __delslice__(self,", "dim = `self.header['NAXIS']` if dim == '0': dim = '' # set extension", "self.header._hdutype = ImageHDU # insert the require keywords PCOUNT and GCOUNT dim =", "_start: raise IndexError, 'Illegal slice %s, stop < start.' % input _step =", "the input stringLen to the next FITS block.\"\"\" return (_blockLen - stringLen%_blockLen) %", "# any of the input argument (except array) can be a Card or", "the HDU.\"\"\" size = 0 naxis = self.header.get('NAXIS', 0) if naxis > 0:", "strings.\"\"\" for cname in _commonNames: setattr(self, cname+'s', ['']*self._nfields) setattr(self, '_arrays', [None]*self._nfields) def add_col(self,", "= fmt.group('code', 'width', 'prec') else: raise ValueError, valu size = eval(width)+1 strfmt =", "ErrorURLopener(urllib.FancyURLopener): \"\"\"A class to use with urlretrieve to allow IOError exceptions to be", "!= None: _val = re.sub(\"''\", \"'\", valu.group('strg')) elif valu.group('numr') != None: # Check", "= `self.header['NAXIS']` if dim == '0': dim = '' self.header.update('EXTEND', True, after='NAXIS'+dim) class", "= attr return self.__dict__[name] \"\"\" # make sure to consider the case that", "Card(newkey, _value, _comment) # self.ascard[_index].__dict__['key']=newkey # self.ascard[_index].ascardimage() # self.ascard._keylist[_index] = newkey def get(self,", "isinstance(_arr, Delayed): tmp._arrays[i] = _arr.hdu.data._parent.field(_arr.field) # use the largest column shape as the", "do this because Record has no __getstate__. # also more efficient. else: return", "HDUList([PrimaryHDU(), self]) elif isinstance(self, PrimaryHDU): hdulist = HDUList([self]) hdulist.writeto(name, output_verify, clobber=clobber) def _verify(self,", "will raise an IOError exception. 
If the dtype of the input data does", "oldName) ffo = _File(oldName, mode=\"update\", memmap=oldMemmap) self.__file = ffo if (verbose): print \"reopen", "table does not support ND yet if isinstance(hdu, GroupsHDU): tmp._recformats[-1] = `hdu._dimShape()[:-1]` +", "_err) self.req_cards('GCOUNT', '== '+`naxis+4`, _isInt+\" and val == 1\", 1, option, _err) return", "return hdu.data def __repr__(self): tmp = rec.RecArray.__repr__(self) loc = tmp.rfind('\\nnames=') tmp = tmp[:loc+7]", "naxis < len(key): raise IndexError, 'too many indices.' elif naxis > len(key): key", "if cname in _commonNames: attr = [''] * len(self) for i in range(len(self)):", "equal sign, return the string after column 8. \"\"\" eqLoc = self._locateEq() if", "input): \"\"\" input: a sequence of variable-sized elements. \"\"\" objects.ObjectArray.__init__(self, input) self._max =", "self._size: # # the stream is full so pad the data to the", "slice %s, step must be positive.' % input else: raise IndexError, 'Illegal slice", "None: continue def update_tbhdu(self): \"\"\"Update all table HDU's for scaled fields.\"\"\" for hdu", "in dir(hdu): if isinstance(hdu, (GroupsHDU, _TableBaseHDU)) and hdu.data is not None: hdu.data._scale_back() if", "xtension == 'IMAGE': self._hdutype = ImageHDU elif xtension in ('BINTABLE', 'A3DTABLE'): self._hdutype =", "the end so as not to confuse the indexing. _list = [] for", "columns called 'abc' and 'ABC' respectively. (b) When you *refer* to a field", "_start is None: _start = 0 elif isinstance(_start, (int, long)): _start = _normalize(_start,", "option if (_option == \"ignore\"): return x = str(self._verify(_option)).rstrip() if _option in ['fix',", "= self.columns data.parnames = self.columns._pnames else: data = None self.__dict__[attr] = data elif", "columns tmp = [Column(**attrs) for attrs in dict] self.data = tmp else: raise", "width, add one if tbtype == 'TableHDU': for i in range(len(self)): (type, width)", "to TNULL keyword bscale: bscale value, corresponding to TSCAL keyword bzero: bzero value,", "created and if the header represents a Primary header, it will be written", "self def _ncards(self): return len(self._cardimage) / Card.length def _verify(self, option='warn'): \"\"\"Card class verification", "name, header): \"\"\" Construct a StreamingHDU object given a file name and a", "else: raise ValueError, \"incorrect array type\" self.header['NAXIS'] = len(axes) # add NAXISi if", "elif attr == 'columns': class_name = str(self.__class__) class_name = class_name[class_name.rfind('.')+1:] self.__dict__[attr] = ColDefs(self,", "not include Object array because there is no guarantee # the elements in", "raise \"Mode '%s' not recognized\" % mode if mode != 'append' and not", "eval(test): err_text = \"'%s' card has invalid value '%s'.\" % (keywd, val) fix_text", "# update the keylist self.count_blanks() if useblanks: self._use_blanks(card._ncards()) self.count_blanks() self._mod = 1 else:", "= bscale, bzero = bzero)) self._coldefs = ColDefs(_cols) self.parnames = [i.lower() for i", "': useblanks = new_card._cardimage != ' '*80 self.ascard.append(new_card, useblanks=useblanks, bottom=1) else: try: _last", "old keyword, can be a name or index. 
newkey: new keyword, must be", "key): \"\"\"Get a header keyword value.\"\"\" return self.ascard[key].value def __setitem__ (self, key, value):", "= _tmp.index(\"/\") self.__dict__['value'] = _tmp[:slashLoc].strip() self.__dict__['comment'] = _tmp[slashLoc+1:].strip() except: self.__dict__['value'] = _tmp.strip() elif", "for j in range(self.header['TFIELDS']): bcol = self.header['TBCOL'+`j+1`] valu = self.header['TFORM'+`j+1`] fmt = self.__format_RE.match(valu)", "if isinstance(hdu, BinTableHDU): self.__file.write(hdu.data._gap*'\\0') for i in range(hdu.data._nfields): if isinstance(hdu.data._coldefs._recformats[i], _FormatP): for j", "file=None): \"\"\"Construct a HDUList object. hdus: Input, can be a list of HDU's", "variable array \"\"\" _offset = 0 data_output = _VLF([None]*len(input)) data_output._dtype = dtype if", "= _getext(filename, 'update', *ext, **extkeys) hdulist[_ext] = new_hdu hdulist.close() def info(filename): \"\"\"Print the", "\"\"\" _offset = 0 data_output = _VLF([None]*len(input)) data_output._dtype = dtype if dtype ==", "\"Ambiguous key name '%s'.\" % key else: raise NameError, \"Illegal key '%s'.\" %", "for numbers with leading 0s. numr = Card._number_NFSC_RE.match(valu.group('numr')) _digt = numr.group('digt').translate(_fix_table2, ' ')", "the 'sci' extension >>> update(file, dat, 3) # update the 3rd extension >>>", "not in ['', None, 1] _zero = bzero not in ['', None, 0]", "card, useblanks=useblanks) elif after != None: loc = self.index_of(after) self.insert(loc+1, card, useblanks=useblanks) def", "NASA as the standard format for storing high energy astrophysics data. For details", "self._bzero elif option == 'minmax': if isinstance(_type, num.FloatingType): _scale = 1 _zero =", "0\", 1, option, _err) # verify each card for _card in self.header.ascard: _err.append(_card._verify(option))", "clobber: Overwrite the output file if exists, default = False. \"\"\" if (len(self)", "hdu in self: hdu.header._mod = 0 hdu.header.ascard._mod = 0 if singleThread: if keyboardInterruptSent:", "key.strip().upper() if key[:8] == 'HIERARCH': key = key[8:].strip() _index = self.ascard._keylist.index(key) return 1", "block if _size > 0: self.__file.write(_padLength(_size)*'\\0') # flush, to make sure the content", "_size = 0 if hdu.data is not None: # if image, need to", "del self['BSCALE'] del self['BZERO'] if issubclass(self._hdutype, _TableBaseHDU): del self['TFIELDS'] for name in ['TFORM',", "as in update()] \"\"\" self._add_commentary('history', value, before=before, after=after) def add_comment(self, value, before=None, after=None):", "size = self.size() if size: self._file.seek(self._datLoc) data = GroupData(_get_tbdata(self)) data._coldefs = self.columns data.parnames", "Card._commentaryKeys: self.__dict__['value'] = self._cardimage[8:].rstrip() self.__dict__['comment'] = '' return valu = self._check(option='parse') if name", "_number or _str: if _number and (_scale or _zero): dummy = self._convert[indx].copy() if", "match raise NameError, \"Ambiguous key name '%s'.\" % key else: raise NameError, \"Illegal", "keywords of BSCALE and BZERO in self.header. This method should only be used", "DELAYED = \"delayed\" # used for lazy instantiation of data ASCIITNULL = 0", "else: _zero = (max + min) / 2. # throw away -2^N _scale", "the cards after the first one must start with CONTINUE and the whole", "The argument `before' takes precedence over `after' if both specified. default=None. 
after: name", "not isinstance(header, Header): raise ValueError, \"header must be a Header object\" if data", "only ONE u1 (i.e. use tuple always) output_format = _FormatX(`(nbytes,)`+'u1') output_format._nx = repeat", "if repeat != 1: _repeat = `repeat` output_format = _repeat+_rec2fits[dtype+option] else: raise ValueError,", "file name to be written to. output_verify: output verification option, default='exception'. clobber: Overwrite", "reduce(operator.mul, dims[groups:]) size = abs(bitpix) * gcount * (pcount + datasize) / 8", "License: http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE For detailed examples of usage, see the I{PyFITS User's Manual} available", "if not self.header.has_key('GCOUNT'): self.header.update('GCOUNT', 1, 'number of groups', after='PCOUNT') self._ffo = _File(name, 'append')", "if isinstance(_item, _TempHDU): super(HDUList, self).__setitem__(key, _item.setupHDU()) return super(HDUList, self).__getitem__(key) def __getslice__(self, start, end):", "val): if name == 'key': raise SyntaxError, 'keyword name cannot be reset.' elif", "for '%s' mode is not supported.\" % self.__file.mode return self.update_tbhdu() self.verify(option=output_verify) if self.__file.mode", "messages generated by verifications at different class levels. \"\"\" def __init__(self, val, unit=\"Element\"):", "return self._convert[indx] if _str: return self._parent.field(indx) # ASCII table, convert strings to numbers", "this two-tier calls because _File has ts own private attribute __file. \"\"\" if", "field name is a case variant of \"XYZ\", then field('xyz'), field('Xyz'), etc. will", "if (name is None) and self.header.has_key('EXTNAME'): name = self.header['EXTNAME'] self.name = name def", "return result def __setitem__(self, key, hdu): \"\"\"Set an HDU to the HDUList, indexed", "this list of conditions and the following disclaimer in the documentation and/or other", "_isInt, 0, option, _err) self.req_cards('GROUPS', _pos, 'val == True', True, option, _err) return", "% (keywd, `fix_value`) _err.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix, fixable=fixable)) return _err class _TempHDU(_ValidHDU): \"\"\"Temporary", "before or after the specified location. 
If no \"before\" or \"after\" is specified,", "not found where expected\") if naxis == 0: datasize = 0 else: dims", "field(self, key): \"\"\"A view of a Column's data as an array.\"\"\" indx =", "hdu.header['NAXIS1']*hdu.header['NAXIS2'] _heapstart = hdu.header.get('THEAP', _tbsize) hdu.data._gap = _heapstart - _tbsize _pcount = hdu.data._heapsize", "x = str(self._verify(_option)).rstrip() if _option in ['fix', 'silentfix'] and x.find('Unfixable') != -1: raise", "the reverse dictionary of the above _rec2fits = {} for key in _fits2rec.keys():", "if the HDUList object is created from files # other than FITS, the", "= hdulist[_ext] hdr = hdu.header hdulist.close() return hdr def getdata(filename, *ext, **extkeys): \"\"\"Get", "= longstring + _val elif name == 'comment': _comm = _card.comment if isinstance(_comm,", "isinstance(self.value, str) and len(valStr) > (Card.length-10): self.__class__ = _Card_with_continue output = self._breakup_strings() else:", "min((i+1)*8, nx) for j in range(_min, _max): num.bitwise_and(input[...,i], pow2[j-i*8], output[...,j]) def _wrapx(input, output,", "output += str(card) + '\\n' return output[:-1] # ----------------------------- HDU classes ------------------------------------ class", "% self.key else: headstr = \"CONTINUE \" valstr = valfmt % val_list[i] output", "--------------------------Table related code---------------------------------- # lists of column/field definition common names and keyword names,", "urlretrieve to allow IOError exceptions to be raised when a file specified by", "for i in range(0, len(_blockLen), Card.length): _card = Card('').fromstring(block[i:i+Card.length]) _key = _card.key cardList.append(_card)", "'' self.__dict__['_fixable'] = 1 if option == 'ignore': return elif option == 'parse':", "not multiple of %d: %d' % (_blockLen, len(blocks)) elif (blocks[:8] not in ['SIMPLE", "otherwise the _parent # of a scaled column may have wrong byteorder if", "for commentary cards, value can only be strings and there # is no", "1, option, _err) self.req_cards('PCOUNT', _pos, _isInt, 0, option, _err) self.req_cards('GROUPS', _pos, 'val ==", "_format) def get_coldefs(self): \"\"\"Returns the table's column definitions.\"\"\" return self.columns def update(self): \"\"\"", "(None) _datLoc: starting byte location of data block in file (None) \"\"\" #", "64, -32, -64]\" # Verify location and value of mandatory keywords. # Do", "not os.path.exists(filename): writeto(filename, data, header) else: hdu=_makehdu(data, header) if isinstance(hdu, PrimaryHDU): hdu =", "# Checks for a valid value/comment string. 
It returns a match object #", "range(len(dummy)): x = _fmt % dummy[i] if len(x) > (_loc[indx+1]-_loc[indx]): raise ValueError, \"number", "(blocks[:8] not in ['SIMPLE ', 'XTENSION']): raise IOError, 'Block does not begin with", "2Int32 dtype: data type of the variable array \"\"\" _offset = 0 data_output", "the size (in bytes) of the HDU's data part.\"\"\" size = 0 naxis", "= _ImageBaseHDU.ImgCode[input.type()] fits_fmt = GroupsHDU._dict[bitpix] # -32 -> 'E' _fmt = _fits2rec[fits_fmt] #", "hdu._theap + hdu._datLoc _data._file = hdu._file _tbsize = hdu.header['NAXIS1']*hdu.header['NAXIS2'] _data._gap = hdu._theap -", "# A number sub-string, either an integer or a float in fixed or", "_comment) # self.ascard[_index].__dict__['key']=newkey # self.ascard[_index].ascardimage() # self.ascard._keylist[_index] = newkey def get(self, key, default=None):", "n_ext2 == 0: ext = ext1 else: raise KeyError, 'Redundant/conflicting keyword argument(s): %s'", "!= _min: num.lshift(output[...,i], 1, output[...,i]) num.add(output[...,i], input[...,j], output[...,i]) # shift the unused bits", "GroupData(FITS_rec): \"\"\"Random groups data object. Allows structured access to FITS Group data in", "except KeyError: raise AttributeError(attr) def par(self, parName): \"\"\"Get the group parameter values.\"\"\" if", "card image. self._ascardimage() def ascardimage(self, option='silentfix'): \"\"\"Generate a (new) card image from the", "be more than one 80-char \"physical\" cards. _max = _keyList.count('CONTINUE') _start = 0", "needed to pad the input stringLen to the next FITS block.\"\"\" return (_blockLen", "= -bzero/bscale else: hdu.data._parent.field(i)[n:] = '' hdu.update() return hdu class FITS_rec(rec.RecArray): \"\"\"FITS record", "+ ')') # FSC commentary card string which must contain printable ASCII characters.", "'F':'f', 'E':'E', 'D':'E'} # calculate the starting point and width of each field", "data bzero: BZERO of the data parbscales: list of bscales for the parameters", "already exists.' % new_name else: self.change_attrib(col_name, 'name', new_name) def change_unit(self, col_name, new_unit): \"\"\"Change", "self.tfile.file self.__file.write(zfile.read(namelist[0])) zfile.close() else: self.__file = __builtin__.open(self.name, _python_mode[mode]) # For 'ab+' mode, the", "fits files is not supported\" zfile = zipfile.ZipFile(self.name) namelist = zfile.namelist() if len(namelist)", "be streamed. header : Header The header object associated with the data to", "col_name): \"\"\"Delete (the definition of) one Column.\"\"\" indx = _get_index(self.names, col_name) for cname", "str(self.__class__) class_name = class_name[class_name.rfind('.')+1:] self.__dict__[attr] = ColDefs(self, tbtype=class_name) elif attr == '_theap': self.__dict__[attr]", "extension >>> getdata('in.fits', ext=2) # the second extension By name, i.e., EXTNAME value", "Now, get the data (does not include bscale/bzero for now XXX) _bitpix =", "self._hdutype = GroupsHDU elif cards[0].value == True: self._hdutype = PrimaryHDU else: self._hdutype =", "option) = _tformat_re.match(tform.strip()).groups() except: print 'Format \"%s\" is not recognized.' 
% tform if", "if isinstance(self, _Hierarch): keyStr = 'HIERARCH %s ' % self.key else: keyStr =", "# so the sliced FITS_rec will view the same scaled columns as #", "it is never fixable if result is not None: _str = result.group('comm') if", "'P': output_format = _FormatP('2i4') output_format._dtype = _fits2rec[option[0]] elif dtype == 'F': output_format =", "return super(HDUList, self).__getitem__(key) def __getslice__(self, start, end): _hdus = super(HDUList, self).__getslice__(start,end) result =", "r'(?P<comm>[!-~][ -~]*)?' r')?$') _value_NFSC_RE = re.compile( r'(?P<valu_field> *' r'(?P<valu>' r'\\'(?P<strg>([ -~]+?|\\'\\'|)) *?\\'(?=$|/| )|'", "exist, the new card will have the fix_value as its value when created.", "scaled, so as not to corrupt the original array if bzero not in", "value): \"\"\"Set the group parameter value.\"\"\" self.array[self.row:self.row+1].setpar(fieldName, value) class _TableBaseHDU(_ExtensionHDU): \"\"\"FITS table extension", "['', None, 0] or bscale not in ['', None, 1]: array = array.copy()", "be written to the file. :Returns: None Notes ----- The file will be", "= '' _err = _ErrList([], unit='HDU') # the first (0th) element must be", "strlen # collect the pieces in a list tmp = input[xoffset:offset] list.append(tmp) if", "if not isinstance(indx, _WholeLine): raise IndexError, 'Subsection data is not contiguous.' # the", "9. \"\"\" eqLoc = self._locateEq() if eqLoc is None: eqLoc = 8 _start", "keyword name if isinstance(key, str): while 1: try: del self.ascard[key] self._mod = 1", "bscale and bzero takes priority if (bscale != 1 or bzero !=0): _scale", "not self.header.has_key('GCOUNT'): self.header.update('GCOUNT', 1, 'number of groups', after='PCOUNT') self._ffo = _File(name, 'append') self._ffo.getfile().seek(0,2)", "keyword name.\"\"\" _key = self.index_of(key) return super(CardList, self).__getitem__(_key) def __getslice__(self, start, end): _cards", "Card._commentaryKeys and self._cardimage.find('=') != 8: if option in ['exception', 'warn']: self.__dict__['_err_text'] = 'Card", "raw_data if self._bscale != 1: num.multiply(self.data, self._bscale, self.data) if self._bzero != 0: self.data", "= int(mo.group(1)) else: raise ValueError(\"BITPIX not found where expected\") mo = re_gcount.search(block) if", "= '[' for j in range(_ncols): _format += self.header['TFORM'+`j+1`] + ', ' _format", "have Boolean type elif _bool: self._parent.field(indx)[:] = num.choose(self._convert[indx], (ord('F'),ord('T'))) class GroupData(FITS_rec): \"\"\"Random groups", "in range(len(tmp)): if tmp._arrays[i] is None: size = 0 else: size = len(tmp._arrays[i])", "= self._parent.field(indx) # further conversion for both ASCII and binary tables if _number", "_keylist = self._keylist if backward: _keylist = self._keylist[:] # make a copy _keylist.reverse()", "arrays. parnames: list of parameter names. 
bscale: BSCALE of the data bzero: BZERO", "_gethdr: return _data, _hdr else: return _data def getval(filename, key, *ext, **extkeys): \"\"\"Get", "self.ascard._keylist[_index] = newkey def get(self, key, default=None): \"\"\"Get a keyword value from the", "self.comment in [None, '']: commentStr = '' else: commentStr = ' / '", "# put all parts together output = keyStr + eqStr + valStr +", "pass class _FormatP(str): \"\"\"For P format in variable length table.\"\"\" pass # TFORM", "% `key` else: raise KeyError, 'Illegal key data type %s' % type(key) def", "\"\"\"Set the group parameter values.\"\"\" if isinstance(parName, (int, long)): self.field(parName)[:] = value else:", "tmp else: raise TypeError, \"input to ColDefs must be a table HDU or", "per the header provided in the constructor. \"\"\" size = 0 naxis =", "del self.header.ascard['NAXIS'+`j`] except KeyError: pass if isinstance(self.data, GroupData): self.header.update('GROUPS', True, after='NAXIS'+`len(axes)`) self.header.update('PCOUNT', len(self.data.parnames),", "= ext1[0] else: if isinstance(ext1[0], (int, tuple)): raise KeyError, 'Redundant/conflicting keyword argument(s): %s'", "AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED", "self.req_cards('NAXIS', None, 'val == 2', 2, option, _err) self.req_cards('BITPIX', None, 'val == 8',", "of Columns\" def __getattr__(self, name): \"\"\"Populate the attributes.\"\"\" cname = name[:-1] if cname", "mode=\"append\") for hdu in self: hduList.__file.writeHDU(hdu) hduList.close(output_verify=output_verify) def close(self, output_verify='exception', verbose=0): \"\"\"Close the", "_err = _ErrList([], unit='HDU') # the first (0th) element must be a primary", "(_scale, _zero) = self.data._get_scale_factors(i)[3:5] if _scale: self.header.update('PSCAL'+`i+1`, self.data._coldefs.bscales[i]) if _zero: self.header.update('PZERO'+`i+1`, self.data._coldefs.bzeros[i]) def", "int(mo.group(1)) else: gcount = 1 mo = re_pcount.search(block) if mo is not None:", "texts.\"\"\" output = [] for _card in self.ascardlist(): if _card.key == 'HISTORY': output.append(_card.value)", ":Returns: writeComplete : integer Flag that when true indicates that all of the", "= min(size, nrows) if fill: n = 0 (_scale, _zero, bscale, bzero) =", "expression _tformat_re = re.compile(r'(?P<repeat>^[0-9]*)(?P<dtype>[A-Za-z])(?P<option>[!-~]*)') # table definition keyword regular expression _tdef_re = re.compile(r'(?P<label>^T[A-Z]*)(?P<num>[1-9][0-9", "list or tuple, not required to be NDArray if format is not None:", "for the TableHDU, ImageHDU, and BinTableHDU classes. 
\"\"\" def __init__(self, data=None, header=None): self._file,", "_get_index(nameList, key): \"\"\" Get the index of the key in the name list.", "== 'SIMPLE': if 'GROUPS' in cards._keylist and cards['GROUPS'].value == True: self._hdutype = GroupsHDU", "i in coldata: if not isinstance(i, chararray.CharArray): if i._type.bytes > 1: if i._byteorder", "i == 0: headstr = \"%-8s= \" % self.key else: headstr = \"CONTINUE", "block size _python_mode = {'readonly':'rb', 'copyonwrite':'rb', 'update':'rb+', 'append':'ab+'} # open modes _memmap_mode =", "the _parent too, otherwise the _parent # of a scaled column may have", "# pad the FITS data block if _size > 0: self.__file.write(_padLength(_size)*'\\0') # flush,", "\" % self.key else: headstr = \"CONTINUE \" valstr = valfmt % val_list[i]", "ext2 if isinstance(ext1[0], str): if n_ext2 == 1 and 'extver' in keys: ext", "# value must be a list (or tuple) containing arrays else: if isinstance(value,", "%s out of range.' % indx elif isinstance(indx, slice): indx = _normalize_slice(indx, naxis)", "= hdu.data.field(i)[j] if len(coldata) > 0: coldata.tofile(self.__file) _shift = self.__file.tell() - _where hdu.data._heapsize", "resized for hdu in self: # Header: # Add 1 to .ascard to", "#raise VerifyError, _text #elif option == 'warn': pass # fix the value elif", "it is a commentary card. \"\"\" # no equal sign for commentary cards", "size def close(self): \"\"\" Close the 'physical' FITS file. :Parameters: None :Returns: None", "'update': if not self._resize: # determine if any of the HDU is resized", "type, use numarray attribute format, (e.g. 'UInt8', 'Int16', 'Float32' etc.). If is None,", "to be populated in EXTNAME keyword \"\"\" if header is not None: if", "isinstance(header, Header): raise ValueError, \"header must be a Header object\" if data is", "= _CorruptedHDU # populate the cardlist self.ascard = CardList(cards) def __getitem__ (self, key):", "Primary header, it will be written to the beginning of the file. If", "not isinstance(coldata, chararray.CharArray): # only swap unswapped # deal with var length table", "self.ascard._keylist.index(key) return 1 except: return 0 def rename_key(self, oldkey, newkey, force=0): \"\"\"Rename a", "'HIERARCH %s ' % self.key else: keyStr = '%-8s' % self.key else: keyStr", "self.__file.close() os.remove(self.__file.name) if (verbose): print \"delete the original file\", oldName # reopen the", "\"Fixed by moving it to the right place (card %d).\" % insert_pos fix", "= Card._value_NFSC_RE.match(self._getValueCommentString()) # if not parsable (i.e. everything else) result = None return", "in the ColDefs input is not a Column.\" % input.index(col) self.data = [col.copy()", "valu = self._check(option='parse') if name == 'value': if valu is None: raise ValueError,", "found where expected\") if naxis == 0: datasize = 0 else: dims =", "self.size()*8/abs(_bitpix) else: dims = self._dimShape() code = _ImageBaseHDU.NumCode[self.header['BITPIX']] if self._ffile.memmap: _mmap = self._ffile._mm[self._datLoc:self._datLoc+self._datSpan]", "the card list. 
if keylist is None: self._keylist = [k.upper() for k in", "= 0 data_output = _VLF([None]*len(input)) data_output._dtype = dtype if dtype == 'a': _nbytes", "= [1] _width = [] for i in range(self._nfields): _loc.append(_loc[-1]+self._parent.field(i).itemsize()) _width.append(_convert_ASCII_format(self._coldefs._Formats[i])[1]) self._heapsize =", "is not DELAYED): if isinstance(data, rec.RecArray): self.header['NAXIS1'] = data._itemsize self.header['NAXIS2'] = data._shape[0] self.header['TFIELDS']", "attr = [''] * len(self) for i in range(len(self)): val = getattr(self[i], cname)", "print \"'%s' is not an attribute of the column definitions.\"%att continue print \"%s:\"", "include the END card _nch80 = reduce(operator.add, map(Card._ncards, hdu.header.ascard)) _bytes = (_nch80+1) *", "= num.zeros(nrows, type=tmp._arrays[i].type()) if _scale or _zero: _arr = tmp._arrays[i].copy() else: _arr =", "contains CONTINUE card(s). \"\"\" self.__dict__['_cardimage'] = _pad(input) if self._cardimage[:8].upper() == 'HIERARCH': self.__class__ =", "value='', before=None, after=None): \"\"\"Add a blank card. value: Text to be added. before:", "its use: header = pyfits.Header() for all the cards you need in the", "input argument (except array) can be a Card or just # a number/string", "i in range(len(self)): (_format, _width) = _convert_ASCII_format(self.formats[i]) if self.starts[i] is '': self.starts[i] =", "_booltype = 'i1' _fits2rec = {'L':_booltype, 'B':'u1', 'I':'i2', 'E':'f4', 'D':'f8', 'J':'i4', 'A':'a', 'C':'c8',", "+ ')' def flush(self, output_verify='exception', verbose=0): \"\"\"Force a write of the HDUList back", "_TableBaseHDU): hdr = input.header _nfields = hdr['TFIELDS'] self._width = hdr['NAXIS1'] self._shape = hdr['NAXIS2']", "_data._gap = hdu._theap - _tbsize # comment out to avoid circular reference of", "attr = [0] * len(self) for i in range(len(self)): (_format, _width) = _convert_ASCII_format(self.formats[i])", "list = [] _nblanks = input.count(' ') nmax = max(_nblanks, len(input)/strlen+1) arr =", "self[-1] # it also delete the keylist item def keys(self): \"\"\"Return a list", "\"\"\"Construct the CardList object from a list of Cards. cards: A list of", "dat, 3) # update the 3rd extension >>> update(file, dat, hdr, 3) #", "have string value. \"\"\" def __str__(self): \"\"\"Format a list of cards into a", "try to find exact match first try: indx = nameList.index(key.rstrip()) except ValueError: #", "num.zeros(array.shape, type=recfmt) num.where(array==0, ord('F'), ord('T'), _out) array = _out # make a copy", "no equal sign for commentary cards (i.e. part of the string value) _key", "of one column, e.g. ttype, tform, etc. and the array. Does not support", "Astronomical Union in 1999 and mandated by NASA as the standard format for", "which is the # reverse of the numarray shape if isinstance(self, GroupsHDU): _shape", "self._parent = input self._convert = [None]*self._nfields self.names = self._names def copy(self): r =", "not None: _str = result.group('comm') if _str is not None: self._checkText(_str) def fromstring(self,", "compliant to FITS standard. key: keyword name, default=''. value: keyword value, default=''. 
comment:", "self.header.get('THEAP', self.header['NAXIS1']*self.header['NAXIS2']) elif attr == '_pcount': self.__dict__[attr] = self.header.get('PCOUNT', 0) try: return self.__dict__[attr]", "self._size != 0: self.writeComplete = 0 else: self.writeComplete = 1 def write(self,data): \"\"\"", "the equal sign in the card image and return the string after the", "groups')) if header is not None: hcopy = header.copy() hcopy._strip() _list.extend(hcopy.ascardlist()) self.header =", "keywords dict = [{} for i in range(_nfields)] # definition dictionaries for each", "mode=\"update\", memmap=oldMemmap) self.__file = ffo if (verbose): print \"reopen the newly renamed file\",", "should only be used right before writing to the output file, as the", "{8:'B', 16:'I', 32:'J', 64:'K', -32:'E', -64:'D'} def __init__(self, data=None, header=None, name=None): PrimaryHDU.__init__(self, data=data,", "_tmp.strip() elif input.group('numr') != None: numr = Card._number_NFSC_RE.match(input.group('numr')) _valStr = numr.group('digt').translate(_fix_table, ' ')", "is not %d: %d' % (_blockLen, len(block)) elif (blocks[:8] not in ['SIMPLE ',", "ValueError, 'Option %s not recognized.' % option if (_option == \"ignore\"): return x", "0: ext = ext1 else: raise KeyError, 'Redundant/conflicting keyword argument(s): %s' % ext2", "not include bscale/bzero for now XXX) _bitpix = self.hdu.header['BITPIX'] code = _ImageBaseHDU.NumCode[_bitpix] self.hdu._file.seek(self.hdu._datLoc+offset*abs(_bitpix)/8)", "name of the HDU, will be the value of the keywod EXTNAME, default=None.", "\"\"\" # any of the input argument (except array) can be a Card", "+= str(card) + '\\n' return output[:-1] # ----------------------------- HDU classes ------------------------------------ class _AllHDU:", "self.field(indx[0]).astype('f8') for i in indx[1:]: result += self.field(i) return result def setpar(self, parName,", "written at once. The following psudo code illustrates its use: header = pyfits.Header()", "data object @param data: the new data used for appending @type header: L{Header}", "CONTINUE cards after the first card.' if not isinstance(_card.value, str): raise ValueError, 'Cards", "if i._byteorder != 'big': i.byteswap() i._byteorder = 'big' else: if coldata._type.bytes > 1:", "string' % val self.__dict__['key'] = val def _setvalue(self, val): \"\"\"Set the value attribute.\"\"\"", "def close(self): \"\"\"Close the 'physical' FITS file.\"\"\" self.__file.close() class HDUList(list, _Verify): \"\"\"HDU list", "be a Card or just # a number/string for cname in _commonNames: value", "not allowed to expand (as C/Python does). for i in range(len(dummy)): x =", "or index. newkey: new keyword, must be a string. force: if new key", "errlist fix = '' cards = self.header.ascard try: _index = cards.index_of(keywd) except: _index", "# UInt8 case _zero = min _scale = (max - min) / (2.**8", "isinstance(self, _Hierarch): self.__class__ = Card else: # does not support CONTINUE for HIERARCH", "self._dtype: pass elif isinstance(value, chararray.CharArray) and value.itemsize() == 1: pass elif self._dtype ==", "one column, e.g. ttype, tform, etc. and the array. Does not support theap", "handles ['abc'] and [['a','b','c']] # equally, beautiful! 
_func = lambda x: chararray.array(x, itemsize=1)", "By combination of EXTNAME and EXTVER, as separate arguments or as a tuple:", "filename: name of the file to be updated data: the new data used", "= _offset _offset += len(data_output[i]) * _nbytes return data_output class _VLF(objects.ObjectArray): \"\"\"variable length", "value.\"\"\" self.ascard[key].value = value self._mod = 1 def __delitem__(self, key): \"\"\"Delete card(s) with", "overwritten by any user specified bscale/bzero values. bscale/bzero: user specified BSCALE and BZERO", "= _re.match(input_format.strip()).groups() dtype = ascii2rec[dtype] if width == '': width = None else:", "hdr[0] = self._xtn hdr.ascard[0].comment = 'binary table extension' class StreamingHDU: \"\"\" A class", "the FITS data block if _size > 0: self.__file.write(_padLength(_size)*'\\0') # flush, to make", "out of range error for BZERO = +32768 self.header.update('BZERO', _zero) else: del self.header['BZERO']", "clobber: (optional) if True and if filename already exists, it will overwrite the", "`%s` ending point overlaps to the next column\" % indx+1 if 'A' in", "default value. key: keyword name or index default: if no keyword is found,", "str(self._verify(_option)).rstrip() if _option in ['fix', 'silentfix'] and x.find('Unfixable') != -1: raise VerifyError, '\\n'+x", "# pass the attributes for attr in ['formats', 'names']: setattr(_data, attr, getattr(tmp, attr))", "must be after it. try: _dum = self.header['EXTEND'] #_after += 1 except: pass", "== 'format' and isinstance(self, BinTableHDU): val = _cols._recformats[i] if isinstance(val, _FormatX): val =", "If backward = 1, search from the end. \"\"\" if isinstance(key, (int, long)):", "open(_name, mode=\"append\") if (verbose): print \"open a temp file\", _name for hdu in", "call. type (string): destination data type, use numarray attribute format, (e.g. 'UInt8', 'Int16',", "!= None: # Check for numbers with leading 0s. numr = Card._number_NFSC_RE.match(valu.group('numr')) _digt", "number/string for cname in _commonNames: value = eval(cname) # get the argument's value", "offset = offset * _naxis + indx.offset # all elements after the first", "[''] * len(self) for i in range(len(self)): val = getattr(self[i], cname) if val", "in hdus: if not isinstance(hdu, _AllHDU): raise \"Element %d in the HDUList input", "\"%s is not an HDU.\" % item else: if not isinstance(hdu, _AllHDU): raise", "type=code, shape=dims) else: raw_data = num.fromfile(self._file, type=code, shape=dims) raw_data._byteorder = 'big' if (self._bzero", "scientific notation. One for FSC and one for non-FSC (NFSC) format: # NFSC", "using the supplied data/header. @type filename: string @param filename: name of the new", "len(self.header.ascard), _shape, _format, _gcount) def scale(self, type=None, option=\"old\", bscale=1, bzero=0): \"\"\"Scale image data", "0) self._bscale = self.header.get('BSCALE', 1) if (data is DELAYED): return self.data = data", "== 'a': _nbytes = 1 else: _nbytes = num.getType(dtype).bytes for i in range(len(input)):", "run_option(self, option=\"warn\", err_text=\"\", fix_text=\"Fixed.\", fix = \"pass\", fixable=1): \"\"\"Execute the verification with selected", "first array = num.array(array) except: try: # then try to conver it to", "defined.' 
% name self.__dict__[name] = attr return self.__dict__[name] \"\"\" # make sure to", "option=option) self.req_cards('PCOUNT', None, 'val == 0', 0, option, _err) tfields = self.header['TFIELDS'] for", "errmsg, url) urllib._urlopener = ErrorURLopener() # Assign the locally subclassed opener # class", "self.__file.seek(hdu._datSpan, 1) if self.__file.tell() > self._size: print 'Warning: File size is smaller than", "When an attribute (value or comment) is changed, will reconstructe # the card", "both the ASCII table and binary table column # format spec, i.e. A7", "_indx = len(_keylist) - _indx - 1 return _indx except: raise KeyError, 'Keyword", "setattr(_data, attr, getattr(tmp, attr)) for i in range(len(tmp)): tmp._arrays[i] = _data.field(i) return FITS_rec(_data)", "'', 0 for j in range(self.header['TFIELDS']): bcol = self.header['TBCOL'+`j+1`] valu = self.header['TFORM'+`j+1`] fmt", "jtaylor2 $ \"\"\" A module for reading and writing FITS files and manipulating", "HDUList)' else: _name = self.__file.name results = \"Filename: %s\\nNo. Name Type\"\\ \" Cards", "pass these attributes hdu._file = self._file hdu._hdrLoc = self._hdrLoc hdu._datLoc = self._datLoc hdu._datSpan", "_max+1): if _where+nc >= len(_keyList): break if _cardList[_where+nc]._cardimage[:10].upper() != 'CONTINUE ': break #", "or index of the Card after which the new card will be placed.", "cname + ' = ' + `value` + '\\n' return text[:-1] def copy(self):", "attr == 'data': self.__dict__[attr] = self.field('data') elif attr == '_unique': _unique = {}", "header in a FITS file. @type filename: string @param filename: input FITS file", "array of shape (s, nbytes) output: output Boolean array of shape (s, nx)", "not _zero: bzero = 0 return (_str, _bool, _number, _scale, _zero, bscale, bzero)", "used data: data to be used name: name to be populated in EXTNAME", "isinstance(input, ColDefs): self.data = [col.copy() for col in input.data] # if the input", "key): \"\"\"Get a Card by indexing or by the keyword name.\"\"\" _key =", "() _nrows = self.header['NAXIS2'] _ncols = self.header['TFIELDS'] _format = '[' for j in", "data type dtype. The descriptor location will have a zero offset for all", "if val is not None: raise ValueError, 'comment %s is not a string'", "_width - 1 self.spans[i] = _end - last_end last_end = _end self._Formats =", "= input[i].array \"\"\" def __getitem__(self, key): x = self.data[key] if isinstance(key, (int, long)):", "data after the stream has been filled will raise an IOError exception. If", "re.compile('END'+' '*77) _hdrLoc = self.__file.tell() # Read the first header block. block =", "*' r'(?P<valu>' r'\\'(?P<strg>([ -~]+?|\\'\\'|)) *?\\'(?=$|/| )|' r'(?P<bool>[FT])|' r'(?P<numr>' + _numr_NFSC + ')|' r'(?P<cplx>\\(", "Column.\"\"\" indx = _get_index(self.names, col_name) getattr(self, attrib+'s')[indx] = new_value def change_name(self, col_name, new_name):", "as a tuple: >>> getdata('in.fits', 'sci', 2) # EXTNAME='SCI' & EXTVER=2 >>> getdata('in.fits',", "For details of the FITS standard, see the NASA/Science Office of Standards and", "shape=gcount, names= self._coldefs.names)) self.__setstate__(tmp.__getstate__()) for i in range(npars): (_scale, _zero) = self._get_scale_factors(i)[3:5] if", "explanations/examples. @return: keyword value @rtype: string, integer, or float \"\"\" _hdr = getheader(filename,", "be overwritten by any user specified bscale/bzero values. 
bscale/bzero: user specified BSCALE and", "quotes, # whereas it should not end with an even number of #", "in indx] return ColDefs(tmp) def _setup(self): \"\"\" Initialize all attributes to be a", "1 for j in range(1, naxis): size = size * self.header['NAXIS'+`j+1`] bitpix =", "above copyright notice, this list of conditions and the following disclaimer in the", "__getitem__(self, key): dims = [] if not isinstance(key, tuple): key = (key,) naxis", "mo = re_extver.search(self._raw) if mo: extver = int(mo.group(1)) else: extver = 1 return", "_val elif name == 'comment': _comm = _card.comment if isinstance(_comm, str) and _comm", "'a'+`y`, dummy) elif name == 'spans': # make sure to consider the case", "data.\"\"\" old_naxis = self.header.get('NAXIS', 0) if isinstance(self.data, GroupData): self.header['BITPIX'] = _ImageBaseHDU.ImgCode[self.data.data.type()] axes =", "for i in range(naxis): mo = re_naxisn.search(block, pos) pos = mo.end(0) dims[int(mo.group(1))-1] =", "\"Zip files with multiple members are not supported.\" self.tfile = tempfile.NamedTemporaryFile('rb+',-1,'.fits') self.name =", "['', None, 0]: array += -bzero if bscale not in ['', None, 1]:", "tmp = rec.RecArray.__getitem__(self, key) if isinstance(key, slice): out = tmp out._parent = rec.RecArray.__getitem__(self._parent,", "len(val) <= 8: val = val.upper() if val == 'END': raise ValueError, \"keyword", "numarray.records as rec import numarray.objects as objects import numarray.memmap as Memmap from string", "self.data = tmp else: raise TypeError, \"input to ColDefs must be a table", "fit in one line. # Instead, just truncate the comment if isinstance(self.value, str)", "self: hdu.header._mod = 0 hdu.header.ascard._mod = 0 hdu._new = 0 hdu._file = ffo.getfile()", "3rd extension >>> update(file, dat, 'sci', 2) # update the 2nd SCI extension", "if exists, default = False. \"\"\" if (len(self) == 0): print \"There is", "specified option. \"\"\" self.__dict__['_err_text'] = '' self.__dict__['_fix_text'] = '' self.__dict__['_fixable'] = 1 if", "= 'exception'. verbose: print out verbose messages? default = 0. \"\"\" # Get", "tfields = self.header['TFIELDS'] for i in range(tfields): self.req_cards('TFORM'+`i+1`, None, None, None, option, _err)", "') *, *(?P<imag>' + _numr_NFSC + ') *\\))' r')? *)' r'(?P<comm_field>' r'(?P<sepr>/ *)'", "THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR", "self.header['NAXIS1'] = data._itemsize self.header['NAXIS2'] = data._shape[0] self.header['TFIELDS'] = data._nfields self.data = data self.columns", "HDUList, indexed by number or name.\"\"\" key = self.index_of(key) _item = super(HDUList, self).__getitem__(key)", "part of the string value) _key = self._cardimage[:8].strip().upper() if _key in Card._commentaryKeys: eqLoc", "= _val[:-1] longstring = longstring + _val elif name == 'comment': _comm =", "fix = '' cards = self.header.ascard try: _index = cards.index_of(keywd) except: _index =", "_pcount # pass the attributes for attr in ['formats', 'names']: setattr(_data, attr, getattr(tmp,", "the file so we # must change the Primary header provided into an", "constructor may be written to the stream. 
If the provided data would cause", "after = _after) # delete extra NAXISi's for j in range(len(axes)+1, old_naxis+1): try:", "_index = self.ascard.index_of(oldkey) _comment = self.ascard[_index].comment _value = self.ascard[_index].value self.ascard[_index] = Card(newkey, _value,", "if tmp._arrays[i][:n].shape[-1] == tmp._recformats[i]._nx: _wrapx(tmp._arrays[i][:n], hdu.data._parent.field(i)[:n], tmp._recformats[i]._nx) else: # from a table parent", "have both SIMPLE and XTENSION to accomodate Extension # and Corrupted cases del", "'_theap': self.__dict__[attr] = 0 try: return self.__dict__[attr] except KeyError: raise AttributeError(attr) # 0.6.5.5", "HDUList.\" for hdu in hdus: if not isinstance(hdu, _AllHDU): raise \"Element %d in", "columns for i in range(hdu.data._nfields): if isinstance(hdu.data._coldefs.formats[i], _FormatP): key = hdu.header['TFORM'+`i+1`] hdu.header['TFORM'+`i+1`] =", "permission. THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR", "from the CardList.\"\"\" pairs = [] for card in self.ascard: pairs.append((card.key, card.value)) return", "c0 = Card('SIMPLE', True, 'conforms to FITS standard') _list = CardList([ c0, Card('BITPIX',", "None: # check TFIELDS and NAXIS2 hdu.header['TFIELDS'] = hdu.data._nfields hdu.header['NAXIS2'] = hdu.data.shape[0] #", "key[:key.find('(')+1] + `hdu.data.field(i)._max` + ')' def flush(self, output_verify='exception', verbose=0): \"\"\"Force a write of", "= self.header.get('NAXIS', 0) if naxis > 0: size = 1 for j in", "output def _add_commentary(self, key, value, before=None, after=None): \"\"\"Add a commentary card. If before", "both will produce 'a7'. if fmt.lstrip()[0] == 'A' and option != '': output_format", "is bscale/bzero if isinstance(array, num.NumArray): # boolean needs to be scaled too if", "not a Card\" % str(card) def _use_blanks(self, how_many): if self._blanks > 0: for", "ValueError, \"Must specify format to construct Column\" # scale the array back to", "DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS", "return key elif isinstance(key, tuple): _key = key[0] _ver = key[1] else: _key", "x: chararray.array(x, itemsize=1) array = _VLF(map(_func, array)) except: raise ValueError, \"Inconsistent input data", "% \\ (self.name, type, len(self.header.ascard), _dims, _format) def get_coldefs(self): \"\"\"Returns the table's column", "# Data: if 'data' not in dir(hdu): continue if hdu.data is None: continue", "name.\"\"\" key = self.index_of(key) super(HDUList, self).__delitem__(key) self._resize = 1 def __delslice__(self, i, j):", "= open # Convenience functions class _Zero(int): def __init__(self): self = 0 def", "and self._cardimage.find('=') != 8: if option in ['exception', 'warn']: self.__dict__['_err_text'] = 'Card image", "Google Search, when asked for \"PyFITS\" \"\"\" import re, os, tempfile, exceptions import", "'%20s' % self._valuestring elif isinstance(self.value, Undefined): valStr = '' # conserve space for", "= tmp._arrays[i] if _scale: _arr *= bscale if _zero: _arr += bzero hdu.data._convert[i][:n]", "None: raise ValueError, 'comment %s is not a string' % val self.__dict__['comment'] =", "self.__file.read(_blockLen) if block == '': raise EOFError hdu = _TempHDU() hdu._raw = ''", "WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO", "= _pad(input) if self._cardimage[:8].upper() == 'HIERARCH': self.__class__ = _Hierarch # for card image", "a list of null strings.\"\"\" for cname in _commonNames: setattr(self, cname+'s', ['']*self._nfields) setattr(self,", "arr in tmp._arrays: if arr is not None: dim = arr._shape[0] else: dim", "self.header['EXTNAME'] self.name = name def _verify(self, option='warn'): \"\"\"ImageHDU verify method.\"\"\" _err = _ExtensionHDU._verify(self,", "determine if this is a single treaded application threadName = threading.currentThread() singleThread =", "self: if (verbose): try: _extver = `hdu.header['extver']` except: _extver = '' # only", "It returns a match object # for a valid value/comment string. # The", "self.npts = npts self.offset = offset class _WholeLine(_KeyType): pass class _SinglePoint(_KeyType): pass class", "self).__getitem__(key) def __getslice__(self, start, end): _hdus = super(HDUList, self).__getslice__(start,end) result = HDUList(_hdus) return", "-2^N _scale = (max - min) / (2.**(8*_type.bytes) - 2) # Do the", "complex value is found, otherwise it will return # None, meaning the keyword", "and val <= 999\", 0, option, _err) naxis = self.header.get('NAXIS', 0) if naxis", "= re.compile(r'(?P<label>^T[A-Z]*)(?P<num>[1-9][0-9 ]*$)') def _parse_tformat(tform): \"\"\"Parse the TFORM value into repeat, data type,", "= format format = _convert_format(recfmt, reverse=1) except: raise ValueError, \"Illegal format `%s`.\" %", "_key) # occurrence of _key in _list if _count == 1: indx =", "%dC\" % (_nrows, _ncols) return \"%-10s %-11s %5d %-12s %s\" % \\ (self.name,", "you need in the header: header.update(key,value,comment) shdu = pyfits.StreamingHDU('filename.fits',header) for each piece of", "result def __setitem__(self, key, value): \"\"\"Set a Card by indexing or by the", "PCOUNT and GCOUNT dim = `self.header['NAXIS']` if dim == '0': dim = ''", "def readall(self): \"\"\"Read data of all HDU's into memory.\"\"\" for i in range(len(self)):", "== 'HISTORY': output.append(_card.value) return output def get_comment(self): \"\"\"Get all comments as a list", "a Card to the CardList. card: The Card to be appended. useblanks: Use", "card, useblanks=useblanks) def insert(self, pos, card, useblanks=1): \"\"\"Insert a Card to the CardList.", "# Verify locations and values of mandatory keywords. 
self.req_cards('NAXIS', '== 2', _isInt+\" and", "del self.ascard[key] self._mod = 1 def __str__(self): return self.ascard.__str__() def ascardlist(self): \"\"\"Returns a", "= hdu def __getitem__(self, key): dims = [] if not isinstance(key, tuple): key", "name=None, format=None, unit=None, null=None, \\ bscale=None, bzero=None, disp=None, start=None, \\ dim=None, array=None): \"\"\"Construct", "option=option) self.req_cards('NAXIS', None, 'val == 2', 2, option, _err) self.req_cards('BITPIX', None, 'val ==", "class _TableBaseHDU(_ExtensionHDU): \"\"\"FITS table extension base HDU class.\"\"\" def __init__(self, data=None, header=None, name=None):", "+ _digits_NFSC + ')') # FSC commentary card string which must contain printable", "not exist.\" % key else: # multiple match raise NameError, \"Ambiguous key name", "__init__(self, data=None, header=None, name=None): \"\"\"data: data of the table header: header to be", "at the same CONTINUE card else: _start = _where + 1 if _keyList[_start:].count('CONTINUE')", "for col in range(_nfields): dict[col]['array'] = Delayed(input, col) # now build the columns", "extkeys['header'] new_hdu=_makehdu(data, header) hdulist, _ext = _getext(filename, 'update', *ext, **extkeys) hdulist[_ext] = new_hdu", "% self._cardimage elif option in ['fix', 'silentfix']: result = self._check('parse') self._fixValue(result) if option", "time go through the next level items, each of the next level #", "1'), Card('NAXIS2', 0, 'length of dimension 2'), Card('PCOUNT', 0, 'number of group parameters'),", "\"\"\"Get the index of an HDU from the HDUList. The key can be", "exist.\" % key else: # multiple match raise NameError, \"Ambiguous key name '%s'.\"", "< naxis: if naxis > 1: return _SinglePoint(1, indx) elif naxis == 1:", "(offset - self._byteoffset) / self._strides[0] return _Group(self, row) class _Group(rec.Record): \"\"\"One group of", "pcount = int(mo.group(1)) else: pcount = 0 mo = re_groups.search(block) if mo and", "total space will not increase (default). When useblanks == 0, the card will", "when asked for \"PyFITS\" \"\"\" import re, os, tempfile, exceptions import operator import", "which table HDU, 'BinTableHDU' (default) or 'TableHDU' (text table). \"\"\" ascii_fmt = {'A':'A1',", "= ffo if (verbose): print \"reopen the newly renamed file\", oldName # reset", "ColDefs(object): \"\"\"Column definitions class. It has attributes corresponding to the Column attributes (e.g.", "in the CardList. 
key: the keyword name (a string) or the index (an", "del self.header['BZERO'] self.header['BITPIX'] = _ImageBaseHDU.ImgCode[self.data.type()] else: self.data = raw_data try: return self.__dict__[attr] except", "keyword name.\"\"\" if isinstance (value, Card): _key = self.index_of(key) # only set if", "def _makep(input, desp_output, dtype): \"\"\"Construct the P format column array, both the data", "tmp._arrays[i][:n] else: hdu.data._convert[i] = num.zeros(nrows, type=tmp._arrays[i].type()) if _scale or _zero: _arr = tmp._arrays[i].copy()", "are numbers if not _scale: bscale = 1 if not _zero: bzero =", "hdu=header._hdutype(data=data, header=header) return hdu def writeto(filename, data, header=None, **keys): \"\"\"Create a new FITS", "new signal interput handler keyboardInterruptSent = False def New_SIGINT(*args): print \"KeyboardInterrupt ignored until", "_bitpix > 0: # scale integers to Float32 self.data = num.array(raw_data, type=num.Float32) else:", "to FITS Group data in a manner analogous to tables \"\"\" def __init__(self,", "data self.name = None def size(self): \"\"\"Returns the size (in bytes) of the", "in _cardList[_where:_where+nc]: _longstring += c._cardimage _cardList[_where-1] = _Card_with_continue().fromstring(_longstring) del _cardList[_where:_where+nc] del _keyList[_where:_where+nc] _start", "case variant of \"XYZ\", then field('xyz'), field('Xyz'), etc. will get this field. \"\"\"", "== 'TableHDU': # string no need to convert, if isinstance(tmp._arrays[i], chararray.CharArray): hdu.data._parent.field(i)[:n] =", "definition common names and keyword names, make # sure to preserve the one-to-one", "= result.group('comm') if _str is not None: self._checkText(_str) def fromstring(self, input): \"\"\"Construct a", "(80 columns). If the card image is longer than 80, assume it contains", "the unparsable case if input is None: _tmp = self._getValueCommentString() try: slashLoc =", "value, corresponding to TSCAL keyword bzero: bzero value, corresponding to TZERO keyword disp:", "zfile = zipfile.ZipFile(self.name) namelist = zfile.namelist() if len(namelist) != 1: raise \"Zip files", "#0.7.7.1 class PrimaryHDU(_ImageBaseHDU): \"\"\"FITS primary HDU class.\"\"\" def __init__(self, data=None, header=None): \"\"\"Construct a", "a name or index. newkey: new keyword, must be a string. force: if", "in hdr.ascardlist(): _key = _tdef_re.match(_card.key) try: keyword = _key.group('label') except: continue # skip", "key _ver = None if not isinstance(_key, str): raise KeyError, key _key =", "key data type %s' % type(key) def copy(self): \"\"\"Make a (deep)copy of the", "before the equal sign. 
If there is no equal sign, return the string", "also OK in this constructor _card = \"Card('%s', %s)\" % (keywd, `fix_value`) fix", "= 'PRIMARY' else: name = '' return size, name def setupHDU(self): \"\"\"Read one", "_shape = tuple(_shape) _format = _format[_format.rfind('.')+1:] # if data is not touched yet,", "*?\\'(?=$|/| )|' r'(?P<bool>[FT])|' r'(?P<numr>' + _numr_FSC + ')|' r'(?P<cplx>\\( *' r'(?P<real>' + _numr_FSC", "== 0: continue _shape += (self.header['NAXIS'+`j+1`],) _format = self.NumCode[self.header['BITPIX']] if isinstance(self, GroupsHDU): _gcount", "KeyError: raise AttributeError(attr) def getfile(self): return self.__file def _readheader(self, cardList, keyList, blocks): \"\"\"Read", "self.__file.tell() _size = 0 if hdu.data is not None: # if image, need", "EOFError: break # check in the case there is extra space after the", "= [0] + axes elif isinstance(self.data, num.NumArray): self.header['BITPIX'] = _ImageBaseHDU.ImgCode[self.data.type()] axes = list(self.data.getshape())", "!= _blockLen: raise IOError, 'Block length is not %d: %d' % (_blockLen, len(block))", "\"''\" else: _expValStr = self.value.replace(\"'\",\"''\") valStr = \"'%-8s'\" % _expValStr valStr = '%-20s'", "self._locateEq() if eqLoc is None: eqLoc = 7 return self._cardimage[eqLoc+1:] def _check(self, option='ignore'):", "of the Header ends, but this task may be difficult when the extension", "is _booltype: for i in range(len(self._parent)): dummy[i] = num.equal(dummy[i], ord('T')) self._convert[indx] = dummy", "hdu.data._nfields hdu.header['NAXIS2'] = hdu.data.shape[0] # calculate PCOUNT, for variable length tables _tbsize =", "try to conver it to a strings array array = chararray.array(array, itemsize=eval(recfmt[1:])) #", "1, output[...,i]) num.add(output[...,i], input[...,j], output[...,i]) # shift the unused bits num.lshift(output[...,i], unused, output[...,i])", "try: # this handles ['abc'] and [['a','b','c']] # equally, beautiful! _func = lambda", "by number or name.\"\"\" key = self.index_of(key) super(HDUList, self).__delitem__(key) self._resize = 1 def", "= getheader(filename, *ext, **extkeys) return _hdr[key] def _makehdu(data, header): if header is None:", "\"\"\" f = open(filename) f.info() f.close() UNDEFINED = Undefined() __credits__=\"\"\" Copyright (C) 2004", "\"\"\" _err = errlist fix = '' cards = self.header.ascard try: _index =", "Office of Standards and Technology publication, NOST 100-2.0. License: http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE For detailed examples", "keywords BSCALE and BZERO after scaling del self.header['BSCALE'] del self.header['BZERO'] self.header['BITPIX'] = _ImageBaseHDU.ImgCode[self.data.type()]", "_ErrList(list): \"\"\"Verification errors list class. It has a nested list structure constructed by", "input: input Uint8 array of shape (s, nbytes) output: output Boolean array of", "FITS file name \"\"\" f = open(filename) f.info() f.close() UNDEFINED = Undefined() __credits__=\"\"\"", "set extension name if (name is None) and self.header.has_key('EXTNAME'): name = self.header['EXTNAME'] self.name", "data. It returns the output \"data\" array of data type dtype. The descriptor", "Extension # and Corrupted cases del self['SIMPLE'] del self['XTENSION'] del self['BITPIX'] _naxis =", "-~]+?|\\'\\'|)) *?\\'(?=$|/| )|' r'(?P<bool>[FT])|' r'(?P<numr>' + _numr_NFSC + ')|' r'(?P<cplx>\\( *' r'(?P<real>' +", "up long string value/comment into CONTINUE cards. 
This is a primitive implementation, it", "\"\"\"Binary table HDU class.\"\"\" def __init__(self, data=None, header=None, name=None): \"\"\"data: data of the", ">= 0 and val <= 999\", 0, option, _err) tfields = self.header['TFIELDS'] for", "the example in (a), field('abc') will get the first field, and field('ABC') will", "_scale = (max - min) / (2.**8 - 1) else: _zero = (max", "Group data in a manner analogous to tables \"\"\" def __init__(self, input=None, bitpix=None,", "= 0 mo = re_naxis.search(block) if mo is not None: naxis = int(mo.group(1))", "self.header.has_key('EXTNAME'): name = self.header['EXTNAME'] self.name = name def _verify(self, option='warn'): \"\"\"ImageHDU verify method.\"\"\"", "or a single HDU. Default = None, i.e. an empty HDUList. file: The", "OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN", "{'UInt8':8, 'Int16':16, 'Int32':32, 'Int64':64, 'Float32':-32, 'Float64':-64} def __init__(self, data=None, header=None): self._file, self._datLoc =", "field('xyz'), field('Xyz'), etc. will get this field. \"\"\" if isinstance(key, (int, long)): indx", "self.header.update('PTYPE'+`i+1`, self.data.parnames[i]) (_scale, _zero) = self.data._get_scale_factors(i)[3:5] if _scale: self.header.update('PSCAL'+`i+1`, self.data._coldefs.bscales[i]) if _zero: self.header.update('PZERO'+`i+1`,", "in range(self._nfields): _loc.append(_loc[-1]+self._parent.field(i).itemsize()) _width.append(_convert_ASCII_format(self._coldefs._Formats[i])[1]) self._heapsize = 0 for indx in range(self._nfields): if (self._convert[indx]", "data._coldefs = self.columns data.parnames = self.columns._pnames else: data = None self.__dict__[attr] = data", "(for append and update modes only). output_verify: output verification option, default = 'exception'.", "index of the field. \"\"\" if self._coldefs._tbtype == 'BinTableHDU': _str = 'a' in", "% self.value # XXX need to consider platform dependence of the format (e.g.", "in the card image before column 10 and return its location. It returns", "is None: eqLoc = 8 _start = 0 if self._cardimage[:8].upper() == 'HIERARCH': _start", "been truncated.' hdu._ffile = self return hdu def writeHDU(self, hdu): \"\"\"Write *one* FITS", "# Wipe out the old table definition keywords. Mark them first, # then", "**ext2): \"\"\"Open the input file, return the HDUList and the extension.\"\"\" hdulist =", "# the FITS block size _python_mode = {'readonly':'rb', 'copyonwrite':'rb', 'update':'rb+', 'append':'ab+'} # open", "sure the EXTEND keyword is in primary HDU if there is extension if", "string, (a) Field (column) names are case sensitive: you can have two different", "= val[8:].strip() self.__class__ = _Hierarch else: raise ValueError, 'keyword name %s is too", "try: self._check(option) except: pass _err.append(self.run_option(option, err_text=self._err_text, fix_text=self._fix_text, fixable=self._fixable)) return _err class _Hierarch(Card): \"\"\"Cards", "attribute, surrogate for the __setattr__ key case.\"\"\" if isinstance(val, str): val = val.strip()", "re.compile(_keywd_FSC) # A number sub-string, either an integer or a float in fixed", "default=None. \"\"\" # no need to run _ExtensionHDU.__init__ since it is not doing", "= new_value def change_name(self, col_name, new_name): \"\"\"Change a Column's name.\"\"\" if new_name !=", "option: verification option, default=silentfix. \"\"\" # Only if the card image already exist", "in the card image and return the string before the equal sign. If", "be filled with zeros/blanks. 
tbtype: table type to be created (BinTableHDU or TableHDU)", "columns.\"\"\" _update = self.header.update _append = self.header.ascard.append _cols = self.columns _update('naxis1', self.data._itemsize, after='naxis')", "class levels. \"\"\" def __init__(self, val, unit=\"Element\"): list.__init__(self, val) self.unit = unit def", "user easier output interface if only one HDU needs to be written to", "the tmp to the original file if self._resize: oldName = self.__file.name oldMemmap =", "range(len(hdu.data.field(i))): coldata = hdu.data.field(i)[j] if len(coldata) > 0: coldata.tofile(self.__file) _shift = self.__file.tell() -", "'val == 8', 8, option, _err) self.req_cards('TFIELDS', '== 7', _isInt+\" and val >=", "PrimaryHDU else: self._hdutype = _ValidHDU elif cards[0].key == 'XTENSION': xtension = cards[0].value.rstrip() if", "bool, Undefined)): if isinstance(val, str): self._checkText(val) self.__dict__['_valueModified'] = 1 else: raise ValueError, 'Illegal", "_zero: self._convert[i] = pardata[i] else: self._parent.field(i)[:] = pardata[i] (_scale, _zero) = self._get_scale_factors(npars)[3:5] if", "self.__file.tell() > self._size: print 'Warning: File size is smaller than specified data size.", "card, useblanks=1): \"\"\"Insert a Card to the CardList. pos: The position (index, keyword", "if (nfound == 0): raise KeyError, 'extension %s not found' % `key` elif", "= PrimaryHDU._verify(self, option=option) # Verify locations and values of mandatory keywords. self.req_cards('NAXIS', '==", "'abc' and 'ABC' respectively. (b) When you *refer* to a field (presumably with", "_add_commentary(self, key, value, before=None, after=None): \"\"\"Add a commentary card. If before and after", "should not already exist. Use the directory of the input file and the", "slice %s, step must be integer.' % input return slice(_start, _stop, _step) class", "with array (None) _datLoc: starting byte location of data block in file (None)", "= [] _nblanks = input.count(' ') nmax = max(_nblanks, len(input)/strlen+1) arr = chararray.array(input+'", "= self.header['PCOUNT'] _format = GroupsHDU._dict[self.header['BITPIX']] for i in range(self.header['PCOUNT']): _bscale = self.header.get('PSCAL'+`i+1`, 1)", "if self._ffo.getfile().tell() - self._datLoc == self._size: # # the stream is full so", "signal.signal(signal.SIGINT,New_SIGINT) if self.__file.mode not in ('append', 'update'): print \"flush for '%s' mode is", "keywords BSCALE and BZERO del self.header['BSCALE'] del self.header['BZERO'] def update_header(self): \"\"\"Update the header", "'Int16':16, 'Int32':32, 'Int64':64, 'Float32':-32, 'Float64':-64} def __init__(self, data=None, header=None): self._file, self._datLoc = None,", "-64:'D'} def __init__(self, data=None, header=None, name=None): PrimaryHDU.__init__(self, data=data, header=header) self.header._hdutype = GroupsHDU self.name", "not in Card._commentaryKeys and self._cardimage.find('=') != 8: if option in ['exception', 'warn']: self.__dict__['_err_text']", "attribute.\"\"\" if attr == 'data': size = self.size() if size: self._file.seek(self._datLoc) data =", "the END card self._blanks = 0 self.count_blanks() def __getitem__(self, key): \"\"\"Get a Card", "else: return _data def getval(filename, key, *ext, **extkeys): \"\"\"Get a keyword's value from", "without specific prior written permission. THIS SOFTWARE IS PROVIDED BY AURA ``AS IS''", "data=None, header=None): \"\"\"Construct a primary HDU. 
data: the data in the HDU, default=None.", "_val = eval(real.group('sign')+_rdigt) imag = Card._number_NFSC_RE.match(valu.group('imag')) _idigt = imag.group('digt').translate(_fix_table2, ' ') if imag.group('sign')", "it does not exist, a new card will be created and it will", "value elif option == 'unfixable': _text = \"Unfixable error: %s\" % _text else:", "if _zero: _arr += bzero hdu.data._convert[i][:n] = _arr else: hdu.data._parent.field(i)[:n] = tmp._arrays[i][:n] if", "hdu._extver = 1 hdu._file = self.__file hdu._hdrLoc = _hdrLoc # beginning of the", "os.remove(name) else: raise IOError, \"File '%s' already exist.\" % name # make sure", "insert_pos = eval(_parse[1]) # if the card does not exist if _index is", "self._parent.field(indx)[i,1] + self._heapoffset self._file.seek(_offset) if self._coldefs._recformats[indx]._dtype is 'a': dummy[i] = chararray.array(self._file, itemsize=self._parent.field(indx)[i,0], shape=1)", "0): raise KeyError, 'extension %s not found' % `key` elif (nfound > 1):", "else: self._parent.field(i)[:] = pardata[i] (_scale, _zero) = self._get_scale_factors(npars)[3:5] if _scale or _zero: self._convert[npars]", "variable length columns for i in range(hdu.data._nfields): if isinstance(hdu.data._coldefs.formats[i], _FormatP): key = hdu.header['TFORM'+`i+1`]", "\"Overwrite existing file '%s'.\" % name os.remove(name) else: raise IOError, \"File '%s' already", "one if tbtype == 'TableHDU': for i in range(len(self)): (type, width) = _convert_ASCII_format(self.data[i].format)", "return valu = self._check(option='parse') if name == 'value': if valu is None: raise", "tmp = input[xoffset:offset] list.append(tmp) if len(input) == offset: break xoffset = offset return", "def _readHDU(self): \"\"\"Read the skeleton structure of the HDU.\"\"\" end_RE = re.compile('END'+' '*77)", "'TableHDU': self._Formats = self.formats if len(self) == 1: dummy = [] else: dummy", "raise IOError, 'Block length is not %d: %d' % (_blockLen, len(block)) elif (blocks[:8]", "+ '\\n%s' % self._cardimage # verify the comment (string), it is never fixable", "bscale/bzero if isinstance(array, num.NumArray): # boolean needs to be scaled too if recfmt", "self.__dict__[attr] except KeyError: raise AttributeError(attr) def getfile(self): return self.__file def _readheader(self, cardList, keyList,", "self.header['NAXIS'] axes = naxis*[0] for j in range(naxis): axes[j] = self.header['NAXIS'+`j+1`] axes.reverse() return", "to be used name: name to be populated in EXTNAME keyword \"\"\" if", "$Id: NA_pyfits.py 329 2007-07-06 13:11:54Z jtaylor2 $ \"\"\" A module for reading and", "if option in ['warn', 'exception']: #raise VerifyError, _text #elif option == 'warn': pass", "'append'. memmap: Is memmory mapping to be used? default=0. \"\"\" # instantiate a", "template), default=None. If header=None, a minimal Header will be provided. 
name: The name", "= self._ncards() for i in range(ncards): # take each 80-char card as a", "BinTableHDU): self.__file.write(hdu.data._gap*'\\0') for i in range(hdu.data._nfields): if isinstance(hdu.data._coldefs._recformats[i], _FormatP): for j in range(len(hdu.data.field(i))):", "if coldata2._type.bytes > 1: # do the _parent too, otherwise the _parent #", "be used for the HDU name: the EXTNAME value \"\"\" _TableBaseHDU.__init__(self, data=data, header=header,", "isinstance(input, ColDefs): if input._tbtype == tbtype: tmp = hdu.columns = input else: raise", "self.flush(output_verify=output_verify, verbose=verbose) self.__file.close() # close the memmap object, it is designed to use", "ASCII table, convert strings to numbers if self._coldefs._tbtype == 'TableHDU': _dict = {'I':num.Int32,", "* (Card.length-strlen) # minimum length is 80 else: strlen = _len % Card.length", "80-char card as a regular card and use its methods. _card = Card().fromstring(self._cardimage[i*80:(i+1)*80])", "elif dtype == 'F': output_format = 'f8' else: raise ValueError, \"Illegal format %s\"", "val = `val._nx` + 'X' elif isinstance(val, _FormatP): VLdata = self.data.field(i) VLdata._max =", "self._check(option) self._ascardimage() return self.__dict__['_cardimage'] def _ascardimage(self): \"\"\"Generate a (new) card image from the", "bzero not in ['', None, 0]: array += -bzero if bscale not in", "(as C/Python does). for i in range(len(dummy)): x = _fmt % dummy[i] if", "break in the middle if offset <= xoffset: offset = xoffset + strlen", "'number of groups', after='PCOUNT') self._ffo = _File(name, 'append') self._ffo.getfile().seek(0,2) self._hdrLoc = self._ffo.writeHDUheader(self) self._datLoc", "type to avoid misalignment. \"\"\" if isinstance(value, num.NumArray) and value.type() == self._dtype: pass", "_getname(self): \"\"\"Get the extname and extver from the header.\"\"\" re_extname = re.compile(r\"EXTNAME\\s*=\\s*'([ -&(-~]*)'\")", "+ datasize) / 8 if simple and not groups: name = 'PRIMARY' else:", "hdu.columns = input else: raise ValueError, 'column definitions have a different table type'", "mo = re_groups.search(block) if mo and simple: groups = 1 else: groups =", "the columns tmp = [Column(**attrs) for attrs in dict] self.data = tmp else:", "input Uint8 array of shape (s, nbytes) output: output Boolean array of shape", "to the file before writing the # given header. # if not os.path.exists(name):", "return output def _words_group(self, input, strlen): \"\"\"Split a long string into parts where", "hdr[0] != self._xtn: hdr[0] = self._xtn hdr.ascard[0].comment = 'binary table extension' class StreamingHDU:", "data=None, header=None, name=None): \"\"\"Construct an image HDU. data: the data in the HDU,", "if isinstance(self.data, GroupData): self.header.update('GROUPS', True, after='NAXIS'+`len(axes)`) self.header.update('PCOUNT', len(self.data.parnames), after='GROUPS') self.header.update('GCOUNT', len(self.data), after='PCOUNT') npars", "r'(\\.\\d+|\\d+(\\.\\d*)?)([DE][+-]?\\d+)?' _digits_NFSC = r'(\\.\\d+|\\d+(\\.\\d*)?) *([deDE] *[+-]? *\\d+)?' _numr_FSC = r'[+-]?' + _digits_FSC _numr_NFSC", "= 8 _start = 0 if self._cardimage[:8].upper() == 'HIERARCH': _start = 8 self.__class__", "'' or comment != '': self._setkey(key) self._setvalue(value) self._setcomment(comment) # for commentary cards, value", "all of the required data has been written to the stream. Notes -----", "the location specified by before or after. 
The argument `before' takes precedence over", "self.header['TBCOL'+`j+1`] valu = self.header['TFORM'+`j+1`] fmt = self.__format_RE.match(valu) if fmt: code, width, prec =", "header=hdr) # update the 3rd extension >>> update(file, dat, header=hdr, ext=5) # update", "it has the proper value. \"\"\" hdr = self[0].header if hdr.has_key('extend'): if (hdr['extend']", "integer, a string, or a tuple of (string, integer). \"\"\" if isinstance(key, (int,", "IndexError: raise IndexError, 'Extension %s is out of bound or not found.' %", "of a card image (80 columns). If the card image is longer than", "to be the extension specification(s). Header and extension specs can also be keyword", "try: loc = num.nonzero(blank_loc >= strlen+offset)[0][0] offset = blank_loc[loc-1] + 1 if loc", "Header The header object associated with the data to be written to the", "for i in range(nc-1, -1, -1): # locate last non-commentary card if self[i].key", "# set extension name if (name is None) and self.header.has_key('EXTNAME'): name = self.header['EXTNAME']", "to the file. :Returns: None Notes ----- The file will be opened and", "being referenced If the optional keyword 'header' is set to True, this function", "an HDU.\" for item in hdu: if not isinstance(item, _AllHDU): raise ValueError, \"%s", "\"Fixed by setting a new value '%s'.\" % fix_value if fixable: fix =", "is a FITS_rec tmp = hdu.columns = input._coldefs else: # input is a", "None: self.data[i].format = ascii_fmt[self.data[i].format[0]] elif isinstance(input, _TableBaseHDU): hdr = input.header _nfields = hdr['TFIELDS']", "else: raise IndexError, 'Illegal slice %s, stop must be integer.' % input if", "keyword in the CardList. key: the keyword name (a string) or the index", "self._raw if (len(blocks) % _blockLen) != 0: raise IOError, 'Header size is not", "@param filename: name of the new FITS file to write to @type data:", "file already exists. If it does not, check to see # if we", "(_option != \"silentfix\") and x: print 'Output verification result:' print x if _option", "or a float in fixed or # scientific notation. One for FSC and", "HDU, data portions are not actually read here, but the beginning locations are", "_ExtensionHDU): hdulist = HDUList([PrimaryHDU(), self]) elif isinstance(self, PrimaryHDU): hdulist = HDUList([self]) hdulist.writeto(name, output_verify,", "Card._number_NFSC_RE.match(input.group('numr')) _valStr = numr.group('digt').translate(_fix_table, ' ') if numr.group('sign') is not None: _valStr =", "the sliced FITS_rec and its ._parent def __getitem__(self, key): tmp = rec.RecArray.__getitem__(self, key)", "if _zero: self.header.update('PZERO'+`i+1`, self.data._coldefs.bzeros[i]) def __getattr__(self, attr): \"\"\"Get the data attribute.\"\"\" if attr", "self._getKeyString() if isinstance(self, _Hierarch): self.__dict__['key'] = head.strip() else: self.__dict__['key'] = head.strip().upper() def _extractValueComment(self,", "mode not in _python_mode.keys(): raise \"Mode '%s' not recognized\" % mode if mode", "def add_comment(self, value, before=None, after=None): \"\"\"Add a COMMENT card. value: Comment text to", "= _keylist.index(_key) if backward: _indx = len(_keylist) - _indx - 1 return _indx", "Card(_Verify): # string length of a card length = 80 # String for", "flush, to make sure the content is written self.__file.flush() return loc def writeHDUdata(self,", "by inserting one as 0th HDU.' 
fix = \"self.insert(0, PrimaryHDU())\" _text = self.run_option(option,", "_stop, _step) class _KeyType: def __init__(self, npts, offset): self.npts = npts self.offset =", "modification, are permitted provided that the following conditions are met: 1. Redistributions of", "+ ' ' self.__dict__[name] = longstring.rstrip() def _breakup_strings(self): \"\"\"Break up long string value/comment", "a Header from a CardList. cards: A list of Cards, default=[]. \"\"\" #", "written to the beginning of the file. If the file does not exist", "point.\"\"\" valueStr = \"%.16G\" % value if \".\" not in valueStr and \"E\"", "useblanks: Use any *extra* blank cards? default=1. If useblanks != 0, and if", "ext: The rest of the arguments are for extension specification. They are flexible", "in update()] after: [same as in update()] \"\"\" self._add_commentary('comment', value, before=before, after=after) def", "value for ASCII table cell with value = TNULL # this can be", "32, 64, -32, -64]\" # Verify location and value of mandatory keywords. #", "True # deprecated FALSE = False # deprecated _INDENT = \" \" DELAYED", "_commonNames: setattr(self, cname+'s', ['']*self._nfields) setattr(self, '_arrays', [None]*self._nfields) def add_col(self, column): \"\"\"Append one Column", "from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY", "+= -bzero if bscale not in ['', None, 1]: array /= bscale self.array", "j == 0: continue _shape += (self.header['NAXIS'+`j+1`],) _format = self.NumCode[self.header['BITPIX']] if isinstance(self, GroupsHDU):", "for i in range(self.header['PCOUNT']): _bscale = self.header.get('PSCAL'+`i+1`, 1) _bzero = self.header.get('PZERO'+`i+1`, 0) _pnames.append(self.header['PTYPE'+`i+1`].lower())", "variable length array columns # this has to be done after the \"regular\"", "self.data._coldefs.bzeros[i]) def __getattr__(self, attr): \"\"\"Get the data attribute.\"\"\" if attr == 'section': return", "if _stop < _start: raise IndexError, 'Illegal slice %s, stop < start.' %", "getattr(self, name) def _setkey(self, val): \"\"\"Set the key attribute, surrogate for the __setattr__", "Handle zip files if mode in ['update', 'append']: raise \"Writing to zipped fits", "will be like a binary table's data. \"\"\" if attr == 'data': #", "in range(self._nfields): # touch all fields to expand the original ._convert list #", "self.columns = data._coldefs self.update() elif data is None: pass else: raise TypeError, \"table", "if _key == 'END': break def _readHDU(self): \"\"\"Read the skeleton structure of the", "be appended after the last non-blank card. \"\"\" if isinstance (card, Card): nc", "(int, long)): return key elif isinstance(key, str): _key = key.strip().upper() if _key[:8] ==", "block of the Header ends, but this task may be difficult when the", "j in range(1, naxis): size = size * self.header['NAXIS'+`j+1`] bitpix = self.header['BITPIX'] gcount", "# # Check if the file already exists. If it does not, check", "# update the 3rd extension >>> update(file, dat, header=hdr, ext=5) # update the", "any user specified bscale/bzero values. bscale/bzero: user specified BSCALE and BZERO values. \"\"\"", "print results def open(name, mode=\"copyonwrite\", memmap=0): \"\"\"Factory function to open a FITS file", "= re.compile(r'(?P<dtype>[AIFED])(?P<width>[0-9]*)') # Parse the TFORM value into data type and width. try:", "in range(len(self.header.ascard)-1,-1,-1): _card = self.header.ascard[i] _key = _tdef_re.match(_card.key) try: keyword = _key.group('label') except:", "array. 
Does not support theap yet. \"\"\" def __init__(self, name=None, format=None, unit=None, null=None,", "the require keywords PCOUNT and GCOUNT dim = `self.header['NAXIS']` if dim == '0':", "0, option, _err) self.req_cards('GROUPS', _pos, 'val == True', True, option, _err) return _err", "a list of cards into a printable string.\"\"\" output = '' for card", "can be just list or tuple, not required to be NDArray if format", "= 0 # reset the output nbytes = ((nx-1) / 8) + 1", "= self.columns else: data = None self.__dict__[attr] = data elif attr == 'columns':", "new file @type header: L{Header} object or None @param header: the header associated", "new_hdu hdulist.close() def info(filename): \"\"\"Print the summary information on a FITS file. This", "which should not already exist. Use the directory of the input file and", "the original ._convert list # so the sliced FITS_rec will view the same", "image longer than 80, assume it contains CONTINUE card(s). elif len(self._cardimage) > Card.length:", "Column: \"\"\"Column class which contains the definition of one column, e.g. ttype, tform,", "scaled and is therefore not very usable after the call. type (string): destination", "record format _repeat = '' if repeat != 1: _repeat = `repeat` output_format", "elif attr == '_unique': _unique = {} for i in range(len(self.parnames)): _name =", "before=before, after=after) def get_history(self): \"\"\"Get all histories as a list of string texts.\"\"\"", "skeleton structure of the HDU.\"\"\" end_RE = re.compile('END'+' '*77) _hdrLoc = self.__file.tell() #", "\"\"\" input: a sequence of variable-sized elements. \"\"\" objects.ObjectArray.__init__(self, input) self._max = 0", "self.__file.flush() loc = self.__file.tell() self.__file.write(blocks) # flush, to make sure the content is", "in range(self.header['TFIELDS']): bcol = self.header['TBCOL'+`j+1`] valu = self.header['TFORM'+`j+1`] fmt = self.__format_RE.match(valu) if fmt:", "for i in indx] return ColDefs(tmp) def _setup(self): \"\"\" Initialize all attributes to", "(unparsable), such as the 'BITPIX', 'NAXIS', or 'END' cards. A corrupted HDU usually", "# this should never happen if header is None: raise ValueError, \"No header", "comment, or from raw string. option: verification option, default=silentfix. \"\"\" # Only if", "own verify for i in range(len(self)): if i > 0 and (not isinstance(self[i],", "inserted before it. card: The Card to be inserted. useblanks: Use any *extra*", "classes ------------------------------------ class _AllHDU: \"\"\"Base class for all HDU (header data unit) classes.\"\"\"", "of %d: %d' % (_blockLen, len(blocks)) elif (blocks[:8] not in ['SIMPLE ', 'XTENSION']):", "file instead of requiring data to all be written at once. The following", "self._ascardimage() def ascardimage(self, option='silentfix'): \"\"\"Generate a (new) card image from the attributes: key,", "output_format = _fits2rec[dtype]+`int(option)` # make sure option is integer else: _repeat = ''", "_AllHDU): raise \"Element %d in the HDUList input is not an HDU.\" %", "HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,", "mode, the pointer is at the end after the open in # Linux,", "nx) nx: number of bits \"\"\" pow2 = [128, 64, 32, 16, 8,", "one, i.e. 
# input arrays can be just list or tuple, not required", "None, None, None self.header = header self.data = data self._xtn = ' '", "= len(data_output[i]) desp_output[i,1] = _offset _offset += len(data_output[i]) * _nbytes return data_output class", "not we will need # to prepend a default PrimaryHDU to the file", "in the new table fill: if = 1, will fill all cells with", "str(card) + '\\n' return output[:-1] # ----------------------------- HDU classes ------------------------------------ class _AllHDU: \"\"\"Base", "with the data.\"\"\" old_naxis = self.header.get('NAXIS', 0) if isinstance(self.data, GroupData): self.header['BITPIX'] = _ImageBaseHDU.ImgCode[self.data.data.type()]", "_ExtensionHDU(_ValidHDU): \"\"\"An extension HDU class. This class is the base class for the", "equally, beautiful! _func = lambda x: chararray.array(x, itemsize=1) array = _VLF(map(_func, array)) except:", "= tmp out._parent = rec.RecArray.__getitem__(self._parent, key) out._convert = [None]*self._nfields for i in range(self._nfields):", "return self.array.par(fieldName)[self.row] def setpar(self, fieldName, value): \"\"\"Set the group parameter value.\"\"\" self.array[self.row:self.row+1].setpar(fieldName, value)", "will be accepted. An attempt to write more data after the stream has", "self.__dict__['_cardimage'] = output def _checkText(self, val): \"\"\"Verify val to be printable ASCII text.\"\"\"", "data portion of the HDU. :Parameters: None :Returns: size : integer The number", "table extension base HDU class.\"\"\" def __init__(self, data=None, header=None, name=None): \"\"\" header: header", "or if it has the proper value. \"\"\" hdr = self[0].header if hdr.has_key('extend'):", "XTENSION to accomodate Extension # and Corrupted cases del self['SIMPLE'] del self['XTENSION'] del", "is not contiguous.' # the offset needs to multiply the length of all", "useblanks=useblanks) elif after != None: loc = self.index_of(after) self.insert(loc+1, card, useblanks=useblanks) def insert(self,", "renamed file\", oldName # reset the resize attributes after updating self._resize = 0", "specified in the header, the stream is padded to fill a complete FITS", "= 0 else: size = len(tmp._arrays[i]) n = min(size, nrows) if fill: n", "-32, -64]\" # Verify location and value of mandatory keywords. # Do the", "f._convert = copy.deepcopy(self._convert) return f def _clone(self, shape): \"\"\"Overload this to make mask", "the case of reading from a # FITS file) self.data return new_table(self.columns, header=self.header,", "Python might evaluate them as octal values. _number_FSC_RE = re.compile(r'(?P<sign>[+-])?0*(?P<digt>' + _digits_FSC+')') _number_NFSC_RE", "XTENSION' for i in range(0, len(blocks), Card.length): _card = Card('').fromstring(blocks[i:i+Card.length]) _key = _card.key", "except ValueError: print 'Warning: Required keywords missing when trying to read HDU #%d.\\n", "meaning the keyword is undefined. The comment field will # return a match", "already in memory else: self.data = raw_data if self._bscale != 1: num.multiply(self.data, self._bscale,", "slice %s, stop < start.' 
% input _step = input.step if _step is", "(logical) column if self._coldefs._recformats[indx]._dtype is _booltype: for i in range(len(self._parent)): dummy[i] = num.equal(dummy[i],", "0, the card will be appended at the end, even if there are", "keywords to agree with the data.\"\"\" old_naxis = self.header.get('NAXIS', 0) if isinstance(self.data, GroupData):", "def __sub__(self, other): if not isinstance(other, (list, tuple)): other = [other] _other =", "each field for _card in hdr.ascardlist(): _key = _tdef_re.match(_card.key) try: keyword = _key.group('label')", "if (bscale != 1 or bzero !=0): _scale = bscale _zero = bzero", "elif isinstance(self.value, str): if self.value == '': valStr = \"''\" else: _expValStr =", "verbose messages? default = 0. \"\"\" # Get the name of the current", "% (self.header['GCOUNT'], self.header['PCOUNT']) else: _gcount = '' return \"%-10s %-11s %5d %-12s %s%s\"", "hdu.header['PCOUNT'] = _pcount # update TFORM for variable length columns for i in", "self.header['TFIELDS'] _format = '[' for j in range(_ncols): _format += self.header['TFORM'+`j+1`] + ',", "# Use lists, instead of dictionaries so the names can be displayed in", "to CONTINUE' if newkey in Card._commentaryKeys or oldkey in Card._commentaryKeys: if not (newkey", "HIERARCH cards if isinstance(self, _Hierarch): valStr = valStr.strip() # comment string if keyStr.strip()", "int(mo.group(1)) else: extver = 1 return name, extver def _getsize(self, block): \"\"\"Get the", "= tbtype if isinstance(input, ColDefs): self.data = [col.copy() for col in input.data] #", "other than FITS, the close() call can also close the mm object. try:", "' _format = _format[:-2] + ']' _dims = \"%dR x %dC\" % (_nrows,", "headstr = \"%-8s= \" % self.key else: headstr = \"CONTINUE \" valstr =", "file\", oldName # reopen the renamed new file with \"update\" mode os.rename(_name, oldName)", "if hdu._new: self.__file.writeHDU(hdu) if (verbose): print \"append HDU\", hdu.name, _extver hdu._new = 0", "no width, add one if tbtype == 'TableHDU': for i in range(len(self)): (type,", "list of all keyword-value pairs from the CardList.\"\"\" pairs = [] for card", "_str = 'a' in self._coldefs.formats[indx] _bool = self._coldefs._recformats[indx][-2:] == _booltype else: _str =", "print 'Warning: Required keywords missing when trying to read HDU #%d.\\n There may", "= self.ascard[j].comment self.ascard[j] = Card(key, value, _comment) elif before != None or after", "in _keyNames): col = eval(_key.group('num')) if col <= _nfields and col > 0:", "_val[-1] == '&': _val = _val[:-1] longstring = longstring + _val elif name", "_FormatX): if tmp._arrays[i][:n].shape[-1] == tmp._recformats[i]._nx: _wrapx(tmp._arrays[i][:n], hdu.data._parent.field(i)[:n], tmp._recformats[i]._nx) else: # from a table", "self.field(parName) else: indx = self._unique[parName.lower()] if len(indx) == 1: result = self.field(indx[0]) #", "LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING", "1]: array /= bscale self.array = array def __repr__(self): text = '' for", "useblanks=useblanks) def insert(self, pos, card, useblanks=1): \"\"\"Insert a Card to the CardList. pos:", "for i in range(len(self._parent)): _offset = self._parent.field(indx)[i,1] + self._heapoffset self._file.seek(_offset) if self._coldefs._recformats[indx]._dtype is", "'%s' already exist.\" % name # make sure the EXTEND keyword is there", "which contains the definition of one column, e.g. ttype, tform, etc. 
and the", "ColDefs): if input._tbtype == tbtype: tmp = hdu.columns = input else: raise ValueError,", "pyfits.Header() for all the cards you need in the header: header.update(key,value,comment) shdu =", "self.__dict__['_fixable'] = 1 if option == 'ignore': return elif option == 'parse': #", "setpar(self, fieldName, value): \"\"\"Set the group parameter value.\"\"\" self.array[self.row:self.row+1].setpar(fieldName, value) class _TableBaseHDU(_ExtensionHDU): \"\"\"FITS", "'END' not allowed\" self._checkKey(val) else: if val[:8].upper() == 'HIERARCH': val = val[8:].strip() self.__class__", "elif dtype+option in _rec2fits.keys(): # record format _repeat = '' if repeat !=", "filename: string @param filename: name of the new FITS file to write to", "is undefined. The comment field will # return a match if the comment", "# no equal sign for commentary cards (i.e. part of the string value)", "\"\"\"Write *one* FITS HDU. Must seek to the correct location before calling this", "in _commonNames: attr = getattr(self, cname+'s') del attr[indx] del self._arrays[indx] self._nfields -= 1", "isinstance(key, (int, long)): return x else: return ColDefs(x) def __len__(self): return len(self.data) def", "ValueError, 'Illegal format `%s` for ASCII table.' % input_format return (dtype, width) def", "= self.comment commfmt = \"%-s\" if not comm == '': nlines = len(comm)", "blank cards directly before END, it will use this space first, instead of", "not support CONTINUE for HIERARCH if len(keyStr + eqStr + valStr) > Card.length:", "If it does not exist, a new card will be created and it", "for all HDUs which are not corrupted.\"\"\" # 0.6.5.5 def size(self): \"\"\"Size (in", "while Column has .name), Each attribute in ColDefs is a list of corresponding", "so as not to confuse the indexing. _list = [] for i in", "elif not isinstance(ext[0], (int, long, str, tuple)): raise KeyError, 'Input argument has wrong", "the same CONTINUE card else: _start = _where + 1 if _keyList[_start:].count('CONTINUE') ==", "the stream has been filled will raise an IOError exception. If the dtype", "a slice, do this because Record has no __getstate__. # also more efficient.", "False) hdu.writeto(filename, clobber=clobber) def append(filename, data, header=None): \"\"\"Append the header/data to FITS file", "card and use its methods. _card = Card().fromstring(self._cardimage[i*80:(i+1)*80]) if i > 0 and", "setattr(self, '_arrays', [None]*self._nfields) def add_col(self, column): \"\"\"Append one Column to the column definition.\"\"\"", "self.__dict__['value'] = _tmp.strip() elif input.group('numr') != None: numr = Card._number_NFSC_RE.match(input.group('numr')) _valStr = numr.group('digt').translate(_fix_table,", "file '%s'.\" % name os.remove(name) else: raise IOError, \"File '%s' already exist.\" %", "no way to communicate back to the _keylist. 
self._checkKey(self.key) # verify the value,", "'ASCII table extension' ''' def format(self): strfmt, strlen = '', 0 for j", "= 0 elif indx < 0: indx += npts elif indx > npts:", "(verbose): try: _extver = `hdu.header['extver']` except: _extver = '' if hdu.header._mod or hdu.header.ascard._mod:", "str): raise ValueError, 'Value in a commentary card must be a string' else:", "Manual} available from U{http://www.stsci.edu/resources/software_hardware/pyfits/Users_Manual1.pdf} Epydoc markup used for all docstrings in this module.", "\"\"\"Exatrct the keyword value or comment from the card image.\"\"\" longstring = ''", "as in update()] \"\"\" self._add_commentary('comment', value, before=before, after=after) def add_blank(self, value='', before=None, after=None):", "the value of the keywod EXTNAME, default=None. \"\"\" # no need to run", "shdu.close() \"\"\" def __init__(self, name, header): \"\"\" Construct a StreamingHDU object given a", "to use with urlretrieve to allow IOError exceptions to be raised when a", "Card._commentaryKeys or oldkey in Card._commentaryKeys: if not (newkey in Card._commentaryKeys and oldkey in", "self._parent.field(indx)[:] = dummy del dummy # ASCII table does not have Boolean type", "\"\"\"Write FITS HDU data part.\"\"\" self.__file.flush() loc = self.__file.tell() _size = 0 if", "@type filename: string @param filename: name of the new FITS file to write", "naxis > len(key): key = key + (slice(None),) * (naxis-len(key)) offset = 0", "be numarray or table data.' else: hdu=header._hdutype(data=data, header=header) return hdu def writeto(filename, data,", "fixable=self._fixable)) return _err class _Hierarch(Card): \"\"\"Cards begins with HIERARCH which allows keyword name", "self.unit: result += _INDENT*tab+\"%s %s:\\n\" % (self.unit, element) result += _dummy element +=", "\"\"\"Cards begins with HIERARCH which allows keyword name longer than 8 characters. \"\"\"", "increase (default). When useblanks == 0, the card will be appended at the", "a Primary header, the header will be modified to an image extension header", "object.\"\"\" if name == '_cardimage': self.ascardimage() elif name == 'key': self._extractKey() elif name", "A list of Cards, default=[]. \"\"\" # decide which kind of header it", "self+column def del_col(self, col_name): \"\"\"Delete (the definition of) one Column.\"\"\" indx = _get_index(self.names,", "self.writeComplete = 1 self._ffo.getfile().flush() return self.writeComplete def size(self): \"\"\" Return the size (in", "sure option is integer else: _repeat = '' if repeat != 1: _repeat", "dummy[i] = num.equal(dummy[i], ord('T')) self._convert[indx] = dummy return self._convert[indx] if _str: return self._parent.field(indx)", "skip NAXIS1. if naxis > 1: size = 1 for j in range(1,", "1: try: del self.ascard[key] self._mod = 1 except: return # for integer key", "and the data is not written. Once sufficient data has been written to", "# FSC commentary card string which must contain printable ASCII characters. 
_ASCII_text =", "col in self.data] elif name == '_recformats': if self._tbtype == 'BinTableHDU': attr =", "mo: extver = int(mo.group(1)) else: extver = 1 return name, extver def _getsize(self,", "if issubclass(self._hdutype, _TableBaseHDU): _tfields = self['TFIELDS'] del self['NAXIS'] for i in range(_naxis): del", "header else: # construct a list of cards of minimal header _list =", "we will need # to prepend a default PrimaryHDU to the file before", "for i in range(len(self)): if self[i].data is not None: continue def update_tbhdu(self): \"\"\"Update", "self.field(indx[0])[:] = value # if more than one group parameter have the same", "two or more attribute names, they must be separated by comma(s). \"\"\" if", "methods to change # the content of header without being able to pass", "self.name = self.tfile.name self.__file = self.tfile.file self.__file.write(zfile.read()) zfile.close() elif os.path.splitext(self.name)[1] == '.zip': #", "+ _padLength(_bytes) if _bytes != hdu._datSpan: self._resize = 1 if verbose: print \"One", "required Card.\"\"\" \"\"\"If pos = None, it can be anywhere. If the card", "unit=\"Element\"): list.__init__(self, val) self.unit = unit def __str__(self, tab=0): \"\"\"Print out nested structure", "defined (in the case of reading from a # FITS file) self.data return", "output[...,i]) num.add(output[...,i], input[...,j], output[...,i]) # shift the unused bits num.lshift(output[...,i], unused, output[...,i]) def", "value %s' % str(val) self.__dict__['value'] = val def _setcomment(self, val): \"\"\"Set the comment", "_keyList.append(_key) # Deal with CONTINUE cards # if a long string has CONTINUE", "group parameter values.\"\"\" if isinstance(parName, (int, long)): self.field(parName)[:] = value else: indx =", "None: self._keylist = [k.upper() for k in self.keys()] else: self._keylist = keylist #", "with SIMPLE or XTENSION' for i in range(0, len(_blockLen), Card.length): _card = Card('').fromstring(block[i:i+Card.length])", "\"\"\" # no need to run _ExtensionHDU.__init__ since it is not doing anything.", "= lambda x: chararray.array(x, itemsize=1) array = _VLF(map(_func, array)) except: raise ValueError, \"Inconsistent", "% str(card) def _pos_insert(self, card, before, after, useblanks=1): \"\"\"Insert a Card to the", "examples: No extra arguments implies the primary header >>> getdata('in.fits') By extension number:", "_out = num.zeros(array.shape, type=recfmt) num.where(array==0, ord('F'), ord('T'), _out) array = _out # make", "checking since bool is also int elif isinstance(self.value , bool): valStr = '%20s'", "None: pass else: raise TypeError, \"table data has incorrect type\" # set extension", "go through the list twice, first time print out all top level messages", "Make a \"copy\" (not just a view) of the input header, since it", "and the provided header is not a Primary header, a default Primary HDU", "implemented for mode `%s`.\" % mode else: if os.path.splitext(self.name)[1] == '.gz': # Handle", "exec(fix) #if option != 'silentfix': _text += ' ' + fix_text return _text", "because the # result is not allowed to expand (as C/Python does). for", "try: self.mmobject.close() except: pass def info(self): \"\"\"Summarize the info of the HDU's in", "= _key[8:].strip() _keylist = self._keylist if backward: _keylist = self._keylist[:] # make a", "'data' or 'columns' attribute. 
The data of random group FITS file will be", "= hdu.columns # populate data to the new table for i in range(len(tmp)):", "code = _ImageBaseHDU.NumCode[self.header['BITPIX']] if self._ffile.memmap: _mmap = self._ffile._mm[self._datLoc:self._datLoc+self._datSpan] raw_data = num.array(_mmap, type=code, shape=dims)", "key elif isinstance(key, tuple): _key = key[0] _ver = key[1] else: _key =", "new HDU to the HDUList.\"\"\" if isinstance(hdu, _AllHDU): super(HDUList, self).append(hdu) hdu._new = 1", "copy(self): \"\"\"Make a copy of the table HDU, both header and data are", "already exist.\" % name # make sure the EXTEND keyword is there if", "= key.strip().upper() if _key[:8] == 'HIERARCH': _key = _key[8:].strip() _keylist = self._keylist if", "list(other.data) else: raise TypeError, 'Wrong type of input' if option == 'left': tmp", "for name in ['key', 'value', 'comment', '_valueModified']: if self.__dict__.has_key(name): delattr(self, name) return self", "not corrupted.\"\"\" # 0.6.5.5 def size(self): \"\"\"Size (in bytes) of the data portion", "def __init__(self, input, row=0): rec.Record.__init__(self, input, row) def par(self, fieldName): \"\"\"Get the group", "self._coldefs._recformats[indx]._dtype for i in range(len(self._parent)): _offset = self._parent.field(indx)[i,1] + self._heapoffset self._file.seek(_offset) if self._coldefs._recformats[indx]._dtype", "'data': self.__dict__[attr] = self.field('data') elif attr == '_unique': _unique = {} for i", "0 hdu.header.ascard._mod = 0 if singleThread: if keyboardInterruptSent: raise KeyboardInterrupt signal.signal(signal.SIGINT,signal.getsignal(signal.SIGINT)) def update_extend(self):", "generated by verifications at different class levels. \"\"\" def __init__(self, val, unit=\"Element\"): list.__init__(self,", "image for fixable non-standard compliance.\"\"\" _valStr = None # for the unparsable case", "compliance.\"\"\" _valStr = None # for the unparsable case if input is None:", "not rename to CONTINUE' if newkey in Card._commentaryKeys or oldkey in Card._commentaryKeys: if", "'%s'.\" % (keywd, val) fix_text = \"Fixed by setting a new value '%s'.\"", "from a table parent data, just pass it hdu.data._parent.field(i)[:n] = tmp._arrays[i][:n] elif isinstance(tmp._recformats[i],", "num.around(dummy) self._parent.field(indx)[:] = dummy del dummy # ASCII table does not have Boolean", "= self._unique[parName.lower()] if len(indx) == 1: result = self.field(indx[0]) # if more than", ": _after = 'naxis'+`j` self.header.update('naxis'+`j+1`, axes[j], after = _after) # delete extra NAXISi's", "!= '': keyword = _keyNames[_commonNames.index(cname)]+`i+1` if cname == 'format' and isinstance(self, BinTableHDU): val", "'Redundant/conflicting keyword argument(s): %s' % ext2 if isinstance(ext1[0], str): if n_ext2 == 1", "AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL AURA", "elif isinstance(self.value , bool): valStr = '%20s' % `self.value`[0] elif isinstance(self.value , (int,", "# make sure option is integer else: _repeat = '' if repeat !=", "new item has consistent data type to avoid misalignment. \"\"\" if isinstance(value, num.NumArray)", "key: the keyword name (a string) or the index (an integer). backward: search", "%s not defined.' 
% name self.__dict__[name] = attr return self.__dict__[name] \"\"\" # make", "_padLength(stringLen): \"\"\"Bytes needed to pad the input stringLen to the next FITS block.\"\"\"", "the keyword value or comment from the card image.\"\"\" longstring = '' ncards", "data required to fill the stream per the header provided in the constructor.", "'TUNIT', 'TNULL', 'TSCAL', 'TZERO', 'TDISP', 'TBCOL', 'TDIM'] # mapping from TFORM data type", "self._extractKey() elif name in ['value', 'comment']: self._extractValueComment(name) else: raise AttributeError, name return getattr(self,", "None, 0] # ensure bscale/bzero are numbers if not _scale: bscale = 1", "final offset will be calculated when the file is written. input: input object", "is None: raise IndexError, 'No data in this HDU.' if _gethdr: _hdr =", "= self.header['NAXIS'] + 3 # if the card EXTEND exists, must be after", "of the FITS file to be opened. mode: Open mode, 'readonly' (default), 'update',", "NDArray if format is not None: # check format try: # legit FITS", "= PrimaryHDU else: self._hdutype = _ValidHDU elif cards[0].key == 'XTENSION': xtension = cards[0].value.rstrip()", "output._itemsize # write out the heap of variable length array columns # this", "1 else: groups = 0 size = 1 for j in range(groups,naxis): size", "self.data and update the keywords of BSCALE and BZERO in self.header. This method", "name or index. \"\"\" if before != None: loc = self.index_of(before) self.insert(loc, card,", "self._valueModified: _tmp = '(' + _floatFormat(self.value.real) + ', ' + _floatFormat(self.value.imag) + ')'", "array array._dtype = recfmt._dtype else: raise ValueError, \"Data is inconsistent with the format", "values. bscale/bzero: user specified BSCALE and BZERO values. \"\"\" if self.data is None:", "function will return a (data, header) tuple. \"\"\" if 'header' in extkeys: _gethdr", "`key` return indx def _unwrapx(input, output, nx): \"\"\"Unwrap the X format column into", "option, _err) self.req_cards('PCOUNT', _pos, _isInt, 0, option, _err) self.req_cards('GROUPS', _pos, 'val == True',", "written to. output_verify: output verification option, default='exception'. clobber: Overwrite the output file if", "in _keyNames): _list.append(i) for i in _list: del self.header.ascard[i] del _list # populate", "dim = self.header['NAXIS'] if dim == 0: dim = '' else: dim =", "truncated.' hdu._ffile = self return hdu def writeHDU(self, hdu): \"\"\"Write *one* FITS HDU.", "not self.header.has_key('SIMPLE'): hdulist = HDUList([PrimaryHDU()]) hdulist.writeto(name, 'exception') else: if self.header.has_key('SIMPLE') and os.path.getsize(name) >", "hdr['NAXIS2'] # go through header keywords to pick out column definition keywords dict", "reading and writing Flexible Image Transport System (FITS) files. This file format was", "raise IndexError, 'Illegal slice %s, step must be positive.' % input else: raise", "1 for j in range(naxis): size = size * self.header['NAXIS'+`j+1`] bitpix = self.header['BITPIX']", "= eval(_parse[1]) # if the card does not exist if _index is None:", "writing Flexible Image Transport System (FITS) files. 
This file format was endorsed by", "val) _append(Card(keyword, val)) def copy(self): \"\"\"Make a copy of the table HDU, both", "def _checkKey(self, val): \"\"\"Verify the keyword to be FITS standard.\"\"\" # use repr", "slashLoc = _tmp.index(\"/\") self.__dict__['value'] = _tmp[:slashLoc].strip() self.__dict__['comment'] = _tmp[slashLoc+1:].strip() except: self.__dict__['value'] = _tmp.strip()", "None: # Make a \"copy\" (not just a view) of the input header,", "') *\\))' r')? *)' r'(?P<comm_field>' r'(?P<sepr>/ *)' r'(?P<comm>[!-~][ -~]*)?' r')?$') _value_NFSC_RE = re.compile(", "the HDU.\"\"\" re_simple = re.compile(r'SIMPLE =\\s*') re_bitpix = re.compile(r'BITPIX =\\s*(-?\\d+)') re_naxis = re.compile(r'NAXIS", "os.path.splitext(self.name)[1] == '.gz': # Handle gzip files if mode in ['update', 'append']: raise", "=\\s*') re_bitpix = re.compile(r'BITPIX =\\s*(-?\\d+)') re_naxis = re.compile(r'NAXIS =\\s*(\\d+)') re_naxisn = re.compile(r'NAXIS(\\d) =\\s*(\\d+)')", "at the beginning of the file and the provided header will be added", "hdu.name == '': hdu.name, hdu._extver = hdu._getname() elif hdu.name == 'PRIMARY': hdu._extver =", "= len(input) # check for one word longer than strlen, break in the", "integer, it is the index in the list. If string, (a) Field (column)", "val): \"\"\"Set the comment attribute.\"\"\" if isinstance(val,str): self._checkText(val) else: if val is not", "1 else: raise SyntaxError, \"%s is not a Card\" % str(value) def __delitem__(self,", "and if there are blank cards directly before END, it will use this", "!= _type: self.data = num.array(num.around(self.data), type=_type) #0.7.7.1 class PrimaryHDU(_ImageBaseHDU): \"\"\"FITS primary HDU class.\"\"\"", "for one field. indx is the index of the field. \"\"\" if self._coldefs._tbtype", "(max - min) / (2.**(8*_type.bytes) - 2) # Do the scaling if _zero", "re_extname.search(self._raw) if mo: name = mo.group(1).rstrip() else: name = '' mo = re_extver.search(self._raw)", "number of bits \"\"\" pow2 = [128, 64, 32, 16, 8, 4, 2,", "= _normalize(_start, naxis) else: raise IndexError, 'Illegal slice %s, start must be integer.'", "= maketrans('dD', 'eE') class Card(_Verify): # string length of a card length =", "is no guarantee # the elements in the object array are consistent. if", "array. self._parent = input self._convert = [None]*self._nfields self.names = self._names def copy(self): r", ">>> update(file, dat, 3) # update the 3rd extension >>> update(file, dat, hdr,", "CONTINUE card else: _start = _where + 1 if _keyList[_start:].count('CONTINUE') == 0: break", "(name is None) and self.header.has_key('EXTNAME'): name = self.header['EXTNAME'] self.name = name def _verify(self,", "in range(npars): (_scale, _zero) = self._get_scale_factors(i)[3:5] if _scale or _zero: self._convert[i] = pardata[i]", "insert(self, pos, card, useblanks=1): \"\"\"Insert a Card to the CardList. pos: The position", "c in _cardList[_where:_where+nc]: _longstring += c._cardimage _cardList[_where-1] = _Card_with_continue().fromstring(_longstring) del _cardList[_where:_where+nc] del _keyList[_where:_where+nc]", "an extension of a FITS file (and optionally the header). @type filename: string", "chararray.CharArray): # only swap unswapped # deal with var length table if isinstance(coldata,", "a Card from the CardList.\"\"\" _key = self.index_of(key) super(CardList, self).__delitem__(_key) del self._keylist[_key] #", "if the file already exists. 
If it does not, check to see #", "conversion for both ASCII and binary tables if _number or _str: if _number", "in self: if not isinstance(item, _ErrList): result += _INDENT*tab+\"%s\\n\" % item # second", "copy, and keep it unchanged else: self.header = header else: # construct a", "an even number of # quotes to be precise. # # Note that", "def _setup(self): \"\"\" Initialize all attributes to be a list of null strings.\"\"\"", "== 'UInt8': # UInt8 case _zero = min _scale = (max - min)", "'update':'rb+', 'append':'ab+'} # open modes _memmap_mode = {'readonly':'r', 'copyonwrite':'c', 'update':'r+'} TRUE = True", "firstval = self._xtn else: firstkey = 'SIMPLE' firstval = True self.req_cards(firstkey, '== 0',", "'Illegal slice %s, step must be integer.' % input return slice(_start, _stop, _step)", "a file. name: output FITS file name to be written to. output_verify: output", "coldata2._byteorder = 'big' # In case the FITS_rec was created in a LittleEndian", "byte location of data block in file (None) \"\"\" # mappings between FITS", "ImgCode = {'UInt8':8, 'Int16':16, 'Int32':32, 'Int64':64, 'Float32':-32, 'Float64':-64} def __init__(self, data=None, header=None): self._file,", "!=0): _scale = bscale _zero = bzero else: if option == 'old': _scale", "In future it may be possible to decipher where the last block of", "\"\"\" size = 0 naxis = self.header.get('NAXIS', 0) if naxis > 0: simple", "= 'exception'. verbose: print out verbose messages? default = 0. This simply calls", "elif isinstance(hdu, BinTableHDU): for i in range(hdu.data._nfields): coldata = hdu.data.field(i) coldata2 = hdu.data._parent.field(i)", "_comment = comment else: _comment = self.ascard[j].comment self.ascard[j] = Card(key, value, _comment) elif", "method of the _File class. It has this two-tier calls because _File has", "after != None: loc = self.index_of(after) self.insert(loc+1, card, useblanks=useblanks) def insert(self, pos, card,", "!= None: # Check for numbers with leading 0s. real = Card._number_NFSC_RE.match(valu.group('real')) _rdigt", "= r'(\\.\\d+|\\d+(\\.\\d*)?) *([deDE] *[+-]? *\\d+)?' _numr_FSC = r'[+-]?' + _digits_FSC _numr_NFSC = r'[+-]?", "block and the comment string in another. Also, it does not break at", "does not exist.\" % key else: # multiple match raise NameError, \"Ambiguous key", "to the next FITS block # self._ffo.getfile().write(_padLength(self._size)*'\\0') self.writeComplete = 1 self._ffo.getfile().flush() return self.writeComplete", "update TFORM for variable length columns for i in range(hdu.data._nfields): if isinstance(hdu.data._coldefs.formats[i], _FormatP):", "\"\"\"Append the header/data to FITS file if filename exists, create if not. 
If", "= _fits2rec[dtype]+`repeat` # to accomodate both the ASCII table and binary table column", "'sci') >>> getdata('in.fits', extname='sci') # equivalent Note EXTNAMEs are not case sensitive By", "range(_ncols): _format += self.header['TFORM'+`j+1`] + ', ' _format = _format[:-2] + ']' _dims", "of them may not exist for name in ['key', 'value', 'comment', '_valueModified']: if", "valueStr += \".0\" return valueStr class Undefined: \"\"\"Undefined value.\"\"\" pass class Delayed: \"\"\"Delayed", "file format was endorsed by the International Astronomical Union in 1999 and mandated", "'append']: raise \"Writing to zipped fits files is not supported\" zfile = zipfile.ZipFile(self.name)", "else: del self.ascard[key] self._mod = 1 def __str__(self): return self.ascard.__str__() def ascardlist(self): \"\"\"Returns", "ord('F'), ord('T'), _out) array = _out # make a copy if scaled, so", "' '*_lead + _pc + _format[1:] + _dict[_format[0]] + ' '*_trail # not", "or self._bscale != 1): if _bitpix > 0: # scale integers to Float32", "= copy.deepcopy(self._convert) return f def _clone(self, shape): \"\"\"Overload this to make mask array", "a minimal Header will be provided. name: The name of the HDU, will", "', itemsize=1) # locations of the blanks blank_loc = num.nonzero(arr == ' ')[0]", "== 'TABLE': self._hdutype = TableHDU elif xtension == 'IMAGE': self._hdutype = ImageHDU elif", "so in the example in (a), field('abc') will get the first field, and", "the header. oldkey: old keyword, can be a name or index. newkey: new", "(pcount + size) / 8 return size def _verify(self, option='warn'): _err = PrimaryHDU._verify(self,", "attr = map(lambda y: 'a'+`y`, dummy) elif name == 'spans': # make sure", "an IOError exception. If the dtype of the input data does not match", "break hdu._raw += block _size, hdu.name = hdu._getsize(hdu._raw) # get extname and extver", "HDU to the HDUList, indexed by number or name.\"\"\" _key = self.index_of(key) if", "def _convert_ASCII_format(input_format): \"\"\"Convert ASCII table format spec to record format spec. \"\"\" ascii2rec", "mappings between FITS and numarray typecodes NumCode = {8:'UInt8', 16:'Int16', 32:'Int32', 64:'Int64', -32:'Float32',", "'%-8s' % self.key else: keyStr = ' '*8 # value string # check", "bscale if _zero: _arr += bzero hdu.data._convert[i][:n] = _arr else: hdu.data._parent.field(i)[:n] = tmp._arrays[i][:n]", "trying to read HDU #%d.\\n There may be extra bytes after the last", "= 80 # String for a FITS standard compliant (FSC) keyword. _keywd_FSC =", "to fill a complete FITS block and no more data will be accepted.", "software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY AURA ``AS", "indx.step == 1: return _LineSlice(indx.stop-indx.start, indx.start) else: return _SteppedSlice((indx.stop-indx.start)/indx.step, indx.start) else: raise IndexError,", "indx[1:]: result += self.field(i) return result def setpar(self, parName, value): \"\"\"Set the group", "None: _start = 0 elif isinstance(_start, (int, long)): _start = _normalize(_start, naxis) else:", "a template), default=None. If header=None, a minimal Header will be provided. 
name: The", "input is a list of Columns tmp = hdu.columns = ColDefs(input, tbtype) #", "break # combine contiguous CONTINUE cards with its parent card if nc >", "= 0 for i in range(_max): _where = _keyList[_start:].index('CONTINUE') + _start for nc", "item else: if not isinstance(hdu, _AllHDU): raise ValueError, \"%s is not an HDU.\"", "newkey in Card._commentaryKeys or oldkey in Card._commentaryKeys: if not (newkey in Card._commentaryKeys and", "new value '%s'.\" % fix_value if fixable: fix = \"self.header['%s'] = %s\" %", "naxis > 1: size = 1 for j in range(1, naxis): size =", "in range(len(self)): val = getattr(self[i], cname) if val != None: attr[i] = val", "bytes of data required to fill the stream per the header provided in", "# for commentary cards, no need to parse further if self.key in Card._commentaryKeys:", "attr = [_convert_format(fmt) for fmt in self.formats] elif self._tbtype == 'TableHDU': self._Formats =", "write out the heap of variable length array columns # this has to", "import numarray.strings as chararray import numarray.records as rec import numarray.objects as objects import", "self.header['NAXIS'] = len(axes) # add NAXISi if it does not exist for j", "PrimaryHDU): hdulist = HDUList([self]) hdulist.writeto(name, output_verify, clobber=clobber) def _verify(self, option='warn'): _err = _ErrList([],", "ValueError, self._err_text, '\\n%s' % self._cardimage elif option in ['fix', 'silentfix']: result = self._check('parse')", "num.choose(self._convert[indx], (ord('F'),ord('T'))) class GroupData(FITS_rec): \"\"\"Random groups data object. Allows structured access to FITS", "axes.reverse() return tuple(axes) def _summary(self): \"\"\"Summarize the HDU: name, dimensions, and formats.\"\"\" class_name", "return slice(_start, _stop, _step) class _KeyType: def __init__(self, npts, offset): self.npts = npts", "bzero) = hdu.data._get_scale_factors(i)[3:] if n > 0: if isinstance(tmp._recformats[i], _FormatX): if tmp._arrays[i][:n].shape[-1] ==", "class.\"\"\" \"\"\"Attributes: header: image header data: image data _file: file associated with array", "329 2007-07-06 13:11:54Z jtaylor2 $ \"\"\" A module for reading and writing FITS", "attr == 'data': self.__dict__[attr] = None if self.header['NAXIS'] > 0: _bitpix = self.header['BITPIX']", "% (self.name, \"CorruptedHDU\") def verify(self): pass class _ValidHDU(_AllHDU, _Verify): \"\"\"Base class for all", "= self.formats if len(self) == 1: dummy = [] else: dummy = map(lambda", "else: return _SteppedSlice((indx.stop-indx.start)/indx.step, indx.start) else: raise IndexError, 'Illegal index %s' % indx def", "nrows = dim if tbtype == 'TableHDU': _formats = '' _itemsize = 0", "stream. Notes ----- Only the amount of data specified in the header provided", "for i in range(hdu.data._nfields): coldata = hdu.data.field(i) coldata2 = hdu.data._parent.field(i) if not isinstance(coldata,", "'' if hdu.header._mod or hdu.header.ascard._mod: hdu._file.seek(hdu._hdrLoc) self.__file.writeHDUheader(hdu) if (verbose): print \"update header in", "= self._cardimage[:_limit].index(\"=\") except: eqLoc = None return eqLoc def _getKeyString(self): \"\"\"Locate the equal", "'Output verification result:' print x if _option == 'exception' and x: raise VerifyError", "hdu.name == 'PRIMARY': hdu._extver = 1 hdu._file = self.__file hdu._hdrLoc = _hdrLoc #", "oldkey in Card._commentaryKeys): raise ValueError, 'Regular and commentary keys can not be renamed", "two pieces. 
But if there is one single word which is longer than", "def _verify(self, option='warn'): \"\"\"Card class verification method.\"\"\" _err = _ErrList([]) try: self._check(option) except:", "a list of Columns tmp = hdu.columns = ColDefs(input, tbtype) # read the", "_nbytes return data_output class _VLF(objects.ObjectArray): \"\"\"variable length field object.\"\"\" def __init__(self, input): \"\"\"", "(new) card image from the attributes: key, value, and comment, or from raw", "del extkeys['header'] else: _gethdr = False hdulist, _ext = _getext(filename, 'readonly', *ext, **extkeys)", "_unwrapx(input, output, nx): \"\"\"Unwrap the X format column into a Boolean array. input:", "getdata('in.fits', extname='sci', extver=2) # equivalent >>> getdata('in.fits', ('sci', 2)) # equivalent Ambiguous or", "tmp._arrays[i][:n].shape[-1] == tmp._recformats[i]._nx: _wrapx(tmp._arrays[i][:n], hdu.data._parent.field(i)[:n], tmp._recformats[i]._nx) else: # from a table parent data,", "= _tmp[slashLoc+1:].strip() except: self.__dict__['value'] = _tmp.strip() elif input.group('numr') != None: numr = Card._number_NFSC_RE.match(input.group('numr'))", "slice of HDUs from the HDUList, indexed by number only.\"\"\" super(HDUList, self).__delslice__(i, j)", "' if len(blocks)%_blockLen != 0: raise IOError self.__file.flush() loc = self.__file.tell() self.__file.write(blocks) #", "'f4' _formats = (_fmt+',') * npars data_fmt = '%s%s' % (`input.shape[1:]`, _fmt) _formats", "_zero: self._convert[npars] = input else: self._parent.field(npars)[:] = input else: self.__setstate__(input.__getstate__()) def __getattr__(self, attr):", "elif cards[0].value == True: self._hdutype = PrimaryHDU else: self._hdutype = _ValidHDU elif cards[0].key", "a copy _keylist.reverse() try: _indx = _keylist.index(_key) if backward: _indx = len(_keylist) -", "\"\"\" if isinstance(self, _ExtensionHDU): hdulist = HDUList([PrimaryHDU(), self]) elif isinstance(self, PrimaryHDU): hdulist =", "fields') ]) if header is not None: # Make a \"copy\" (not just", "of a scaled column may have wrong byteorder if coldata2._byteorder != 'big': coldata2.byteswap()", "= open(name, mode=\"append\") for hdu in self: hduList.__file.writeHDU(hdu) hduList.close(output_verify=output_verify) def close(self, output_verify='exception', verbose=0):", "elif isinstance(self.value, float): if self._valueModified: valStr = '%20s' % _floatFormat(self.value) else: valStr =", "invalid value '%s'.\" % (keywd, val) fix_text = \"Fixed by setting a new", "== 0: return input else: return input + ' ' * (Card.length-strlen) #", "range.\"\"\" def _normalize(indx, npts): if indx < -npts: indx = 0 elif indx", "case that the starting column of # a field may not be the", "_end - last_end last_end = _end self._Formats = self.formats self._arrays[i] = input[i].array \"\"\"", "_hduList = open(_name, mode=\"append\") if (verbose): print \"open a temp file\", _name for", "_shape = () _nrows = self.header['NAXIS2'] _ncols = self.header['TFIELDS'] _format = '[' for", "= _val if '_valuestring' not in self.__dict__: self.__dict__['_valuestring'] = valu.group('valu') if '_valueModified' not", "file. If the file does not already exist, it will be created and", "(i.e. 
part of the string value) _key = self._cardimage[:8].strip().upper() if _key in Card._commentaryKeys:", "delattr(self, name) return self def _ncards(self): return len(self._cardimage) / Card.length def _verify(self, option='warn'):", "match if the comment separator is found, though the # comment maybe an", "CardList([ c0, Card('BITPIX', 8, 'array data type'), Card('NAXIS', 0, 'number of array dimensions'),", "1 except: return 0 def rename_key(self, oldkey, newkey, force=0): \"\"\"Rename a card's keyword", "and extver from the header.\"\"\" re_extname = re.compile(r\"EXTNAME\\s*=\\s*'([ -&(-~]*)'\") re_extver = re.compile(r\"EXTVER\\s*=\\s*(\\d+)\") mo", "list. If string, (a) Field (column) names are case sensitive: you can have", "and commentary keys can not be renamed to each other.' elif (force ==", "attrs in dict] self.data = tmp else: raise TypeError, \"input to ColDefs must", "the location and the size of the data area return loc, _size+_padLength(_size) def", "HDU.\" % hdu try: super(HDUList, self).__setitem__(_key, hdu) except IndexError: raise IndexError, 'Extension %s", "corrupted.' % (len(hduList)+1) break # initialize/reset attributes to be used in \"update/append\" mode", "level messages for item in self: if not isinstance(item, _ErrList): result += _INDENT*tab+\"%s\\n\"", "E-009 vs. E-09) elif isinstance(self.value, float): if self._valueModified: valStr = '%20s' % _floatFormat(self.value)", "hcopy._strip() _list.extend(hcopy.ascardlist()) self.header = Header(_list) self._bzero = self.header.get('BZERO', 0) self._bscale = self.header.get('BSCALE', 1)", "int, long, float, complex, bool, Undefined)): if isinstance(val, str): self._checkText(val) self.__dict__['_valueModified'] = 1", "_start = 8 self.__class__ = _Hierarch return self._cardimage[_start:eqLoc] def _getValueCommentString(self): \"\"\"Locate the equal", "names can be displayed in a # preferred order. _commonNames = ['name', 'format',", "' = ' + `value` + '\\n' return text[:-1] def copy(self): tmp =", "_list = [] for i in range(len(self.header.ascard)-1,-1,-1): _card = self.header.ascard[i] _key = _tdef_re.match(_card.key)", "_keywd_FSC_RE = re.compile(_keywd_FSC) # A number sub-string, either an integer or a float", "= '%-' else: _pc = '%' _fmt = ' '*_lead + _pc +", "reset the modification attributes after updating for hdu in self: hdu.header._mod = 0", "= HDUList([PrimaryHDU(), self]) elif isinstance(self, PrimaryHDU): hdulist = HDUList([self]) hdulist.writeto(name, output_verify, clobber=clobber) def", "print out all attributes. It forgives plurals and blanks. If there are two", "# class to the urllibrary urllib._urlopener.tempcache = {} # Initialize tempcache with an", "of the Header.\"\"\" tmp = Header(self.ascard.copy()) # also copy the class tmp._hdutype =", "\"\"\"Verification errors list class. It has a nested list structure constructed by error", "= err_text if not fixable: option = 'unfixable' if option in ['warn', 'exception']:", "the slice's start/stop in the regular range.\"\"\" def _normalize(indx, npts): if indx <", "of header without being able to pass it to the header object hduList._resize", "will need # to prepend a default PrimaryHDU to the file before writing", "fix_value if fixable: fix = \"self.header['%s'] = %s\" % (keywd, `fix_value`) _err.append(self.run_option(option, err_text=err_text,", "after the \"regular\" data is written (above) _where = self.__file.tell() if isinstance(hdu, BinTableHDU):", "_numr_FSC + ') *\\))' r')? *)' r'(?P<comm_field>' r'(?P<sepr>/ *)' r'(?P<comm>[!-~][ -~]*)?' 
r')?$') _value_NFSC_RE", "END card self._blanks = 0 self.count_blanks() def __getitem__(self, key): \"\"\"Get a Card by", "= \"val in [8, 16, 32, 64, -32, -64]\" # Verify location and", "KeyError, 'data must be numarray or table data.' else: hdu=header._hdutype(data=data, header=header) return hdu", "else: raise ValueError, 'column definitions have a different table type' elif isinstance(input, FITS_rec):", "0) else: if indx.step == 1: return _LineSlice(indx.stop-indx.start, indx.start) else: return _SteppedSlice((indx.stop-indx.start)/indx.step, indx.start)", "to find the # end of a string rather well, but will accept", "return loc def writeHDUdata(self, hdu): \"\"\"Write FITS HDU data part.\"\"\" self.__file.flush() loc =", "# make sure the EXTEND keyword is there if there is extension if", "data=None, header=None): self._file, self._datLoc = None, None if header is not None: if", "(to avoid infinite loop), # fix it first. if self.__dict__.has_key('_cardimage'): self._check(option) self._ascardimage() return", "of shape (s, nbytes) nx: number of bits \"\"\" output[...] = 0 #", "verbose: print out verbose messages? default = 0. This simply calls the close", "'Illegal key data type %s' % type(key) def copy(self): \"\"\"Make a (deep)copy of", "hdu.data output.tofile(self.__file) _size = output.nelements() * output._itemsize # write out the heap of", "EXTEND keyword is there if there is extension if len(self) > 1: self.update_extend()", "# XXX need to consider platform dependence of the format (e.g. E-009 vs.", "self._mod = 1 def __str__(self): return self.ascard.__str__() def ascardlist(self): \"\"\"Returns a CardList.\"\"\" return", "'append': for hdu in self: if (verbose): try: _extver = `hdu.header['extver']` except: _extver", "!= (hdu._datLoc-hdu._hdrLoc): self._resize = 1 if verbose: print \"One or more header is", "or hdu.header.ascard._mod: hdu._file.seek(hdu._hdrLoc) self.__file.writeHDUheader(hdu) if (verbose): print \"update header in place: Name =\",", "in Card._commentaryKeys and self._cardimage.find('=') != 8: if option in ['exception', 'warn']: self.__dict__['_err_text'] =", "**extkeys): \"\"\"Update the specified extension with the input data/header. @type filename: string @param", "else: commentStr = '' # equal sign string eqStr = '= ' if", "the case where \"=\" is before column 9, # since there is no", "= arr._shape[0] else: dim = 0 if dim > nrows: nrows = dim", "npars data_fmt = '%s%s' % (`input.shape[1:]`, _fmt) _formats += data_fmt gcount = input.shape[0]", "place: Name =\", hdu.name, _extver # reset the modification attributes after updating for", "(self, key, value): \"\"\"Set a header keyword value.\"\"\" self.ascard[key].value = value self._mod =", "_normalize_slice(input, naxis): \"\"\"Set the slice's start/stop in the regular range.\"\"\" def _normalize(indx, npts):", "the skeleton structure of the HDU.\"\"\" end_RE = re.compile('END'+' '*77) _hdrLoc = self.__file.tell()", "value string # check if both value and _cardimage attributes are missing, #", "1 except: return # for integer key only delete once else: del self.ascard[key]", "the value elif option == 'unfixable': _text = \"Unfixable error: %s\" % _text", "card will be appended after the last non-commentary card. If =1, the card", "KeyError: raise AttributeError(attr) def _dimShape(self): \"\"\"Returns a tuple of image dimensions, reverse the", "will view the same scaled columns as # the original dummy = self.field(i)", "result is not allowed to expand (as C/Python does). 
for i in range(len(dummy)):", "EXTVER does not exist, default it to 1 _extver = self[j]._extver if _ver", "= ext2.keys() # parse the extension spec if n_ext1 > 2: raise ValueError,", "None: raise ValueError, \"No header to setup HDU.\" # if the file is", "# Check if the file already exists. If it does not, check to", "\"\"\"If pos = None, it can be anywhere. If the card does not", "the new file @type header: L{Header} object or None @param header: the header", "ncards = self._ncards() for i in range(ncards): # take each 80-char card as", "_zero = bzero else: if option == 'old': _scale = self._bscale _zero =", "= longstring.rstrip() def _breakup_strings(self): \"\"\"Break up long string value/comment into CONTINUE cards. This", "= {8:'UInt8', 16:'Int16', 32:'Int32', 64:'Int64', -32:'Float32', -64:'Float64'} ImgCode = {'UInt8':8, 'Int16':16, 'Int32':32, 'Int64':64,", "template), default=None. If header=None, a minimal Header will be provided. \"\"\" _ImageBaseHDU.__init__(self, data=data,", "0, option, _err) return _err class GroupsHDU(PrimaryHDU): \"\"\"FITS Random Groups HDU class.\"\"\" _dict", "No extra arguments implies the primary header >>> getdata('in.fits') By extension number: >>>", "raise KeyError, 'Redundant/conflicting keyword argument(s): %s' % ext2 elif n_ext1 == 0: if", "== 1): return _WholeLine(naxis, 0) else: if indx.step == 1: return _LineSlice(indx.stop-indx.start, indx.start)", "\"\"\" # instantiate a FITS file object (ffo) ffo = _File(name, mode=mode, memmap=memmap)", "is None: _stop = naxis elif isinstance(_stop, (int, long)): _stop = _normalize(_stop, naxis)", "not isinstance(_card.value, str): raise ValueError, 'Cards with CONTINUE must have string value.' if", "of a missing 'END' card, the Header may also contain the binary data(*).", "dims = [0]*naxis for i in range(naxis): mo = re_naxisn.search(block, pos) pos =", "for each field for _card in hdr.ascardlist(): _key = _tdef_re.match(_card.key) try: keyword =", "= self.index_of(key) super(CardList, self).__delitem__(_key) del self._keylist[_key] # update the keylist self.count_blanks() self._mod =", "(latest) scaled array.\"\"\" _dict = {'A':'s', 'I':'d', 'F':'f', 'E':'E', 'D':'E'} # calculate the", "= self.parnames[i] if _name in _unique: _unique[_name].append(i) else: _unique[_name] = [i] self.__dict__[attr] =", "are assumed to be the extension specification(s). Header and extension specs can also", "IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,", "\"\"\"Set the key attribute, surrogate for the __setattr__ key case.\"\"\" if isinstance(val, str):", "extver = 1 return name, extver def _getsize(self, block): \"\"\"Get the size from", "_list = CardList([ Card('XTENSION', '', ''), Card('BITPIX', 8, 'array data type'), Card('NAXIS', 2,", "type, len(self.header.ascard), _dims, _format) def get_coldefs(self): \"\"\"Returns the table's column definitions.\"\"\" return self.columns", "array = array.copy() if bzero not in ['', None, 0]: array += -bzero", "self.comment else: commentStr = '' # equal sign string eqStr = '= '", "self.header[0].rstrip() != self._xtn: self.header[0] = self._xtn self.header.ascard[0].comment = 'ASCII table extension' ''' def", "(e.g. 'UInt8', 'Int16', 'Float32' etc.). 
If is None, use the current data type.", "if isinstance(hdus, _ValidHDU): hdus = [hdus] elif not isinstance(hdus, (HDUList, list)): raise \"Invalid", "- 2) # Do the scaling if _zero != 0: self.data += -_zero", "header=header) return hdu def writeto(filename, data, header=None, **keys): \"\"\"Create a new FITS file", "b = list(other.data) else: raise TypeError, 'Wrong type of input' if option ==", "_err) self.req_cards('GROUPS', _pos, 'val == True', True, option, _err) return _err # --------------------------Table", "self.req_cards('PCOUNT', None, 'val == 0', 0, option, _err) tfields = self.header['TFIELDS'] for i", "self['EXTEND'] del self['PCOUNT'] del self['GCOUNT'] if issubclass(self._hdutype, PrimaryHDU): del self['GROUPS'] if issubclass(self._hdutype, _ImageBaseHDU):", "%s, step must be positive.' % input else: raise IndexError, 'Illegal slice %s,", "self._ffo.getfile().flush() return self.writeComplete def size(self): \"\"\" Return the size (in bytes) of the", "valu.group('comm') if isinstance(_comm, str): self.__dict__['comment'] = _comm.rstrip() def _fixValue(self, input): \"\"\"Fix the card", "result = Card._value_FSC_RE.match(self._getValueCommentString()) if result is not None or self.key in Card._commentaryKeys: return", "_FormatX(`(nbytes,)`+'u1') output_format._nx = repeat elif dtype == 'P': output_format = _FormatP('2i4') output_format._dtype =", "+= 'a%d,' % tmp.spans[i] _itemsize += tmp.spans[i] hdu.data = FITS_rec(rec.array(' '*_itemsize*nrows, formats=_formats[:-1], names=tmp.names,", "# ASCII table does not have Boolean type elif _bool: self._parent.field(indx)[:] = num.choose(self._convert[indx],", "size, including padding hdu._datSpan = _size + _padLength(_size) hdu._new = 0 self.__file.seek(hdu._datSpan, 1)", "'data must be numarray or table data.' else: hdu=header._hdutype(data=data, header=header) return hdu def", "+ repr(card) return block def __str__(self): \"\"\"Format a list of cards into a", "\"new\" if hdu._new: self.__file.writeHDU(hdu) if (verbose): print \"append HDU\", hdu.name, _extver hdu._new =", "Primary header, the header will be modified to an image extension header and", "__init__(self, npts, offset): self.npts = npts self.offset = offset class _WholeLine(_KeyType): pass class", "first with .verify('fix').\" if valu.group('bool') != None: _val = valu.group('bool')=='T' elif valu.group('strg') !=", "groups = 0 mo = re_naxis.search(block) if mo is not None: naxis =", "return # for integer key only delete once else: del self.ascard[key] self._mod =", "list of null strings.\"\"\" for cname in _commonNames: setattr(self, cname+'s', ['']*self._nfields) setattr(self, '_arrays',", "ASCII and binary tables if _number and (_scale or _zero): # only do", "self.__file.name oldMemmap = self.__file.memmap _name = _tmpName(oldName) _hduList = open(_name, mode=\"append\") if (verbose):", "+ min) / 2. # throw away -2^N _scale = (max - min)", "hdulist, _ext = _getext(filename, 'update', *ext, **extkeys) hdulist[_ext] = new_hdu hdulist.close() def info(filename):", "_dict[self._coldefs._Formats[indx][0]] # if the string = TNULL, return ASCIITNULL nullval = self._coldefs.nulls[indx].strip() dummy", "columns after this call. 
The final offset will be calculated when the file", "= self.tfile.name self.__file = self.tfile.file self.__file.write(zfile.read(namelist[0])) zfile.close() else: self.__file = __builtin__.open(self.name, _python_mode[mode]) #", "the 3rd extension >>> update(file, dat, 'sci', 2) # update the 2nd SCI", "formats.\"\"\" class_name = str(self.__class__) type = class_name[class_name.rfind('.')+1:] # if data is touched, use", "next card to search # to avoid starting at the same CONTINUE card", "'' hdu.update() return hdu class FITS_rec(rec.RecArray): \"\"\"FITS record array class. FITS record array", "for fixable non-standard compliance.\"\"\" _valStr = None # for the unparsable case if", "_arr else: hdu.data._parent.field(i)[:n] = tmp._arrays[i][:n] if n < nrows: if tbtype == 'BinTableHDU':", "fix it first. if self.__dict__.has_key('_cardimage'): self._check(option) self._ascardimage() return self.__dict__['_cardimage'] def _ascardimage(self): \"\"\"Generate a", "only for the first case. bitpix: data type as expressed in FITS BITPIX", "imag = Card._number_NFSC_RE.match(input.group('imag')) _imagStr = imag.group('digt').translate(_fix_table, ' ') if imag.group('sign') is not None:", "the real CONTINUE card, skip to the next card to search # to", "_scale = bscale _zero = bzero else: if option == 'old': _scale =", "header, it (and other positional arguments) are assumed to be the extension specification(s).", "in list: if att not in _commonNames: print \"'%s' is not an attribute", "= [col.copy() for col in input] # if the format of an ASCII", "blocks = self._raw if (len(blocks) % _blockLen) != 0: raise IOError, 'Header size", "in valueStr: valueStr += \".0\" return valueStr class Undefined: \"\"\"Undefined value.\"\"\" pass class", "dummy) elif name == 'spans': # make sure to consider the case that", "imag = Card._number_NFSC_RE.match(valu.group('imag')) _idigt = imag.group('digt').translate(_fix_table2, ' ') if imag.group('sign') == None: _val", "mode is not supported.\" % self.__file.mode return self.update_tbhdu() self.verify(option=output_verify) if self.__file.mode == 'append':", "if self.data is None: return # Determine the destination (numarray) data type if", "column if isinstance(self._coldefs._recformats[indx], _FormatP): desc = self._parent.field(indx) desc[:] = 0 # reset _npts", "the middle if offset <= xoffset: offset = xoffset + strlen # collect", "raise ValueError, \"parameter value must be a sequence with %d arrays/numbers.\" % len(indx)", "self = 0 def _getext(filename, mode, *ext1, **ext2): \"\"\"Open the input file, return", "err_text if not fixable: option = 'unfixable' if option in ['warn', 'exception']: #raise", "self.header.get('GCOUNT', 1) pcount = self.header.get('PCOUNT', 0) size = abs(bitpix) * gcount * (pcount", "self.index_of(key) return super(CardList, self).__getitem__(_key) def __getslice__(self, start, end): _cards = super(CardList, self).__getslice__(start,end) result", "TableHDU) \"\"\" # construct a table HDU hdu = eval(tbtype)(header=header) if isinstance(input, ColDefs):", "else: raise ValueError(\"BITPIX not found where expected\") mo = re_gcount.search(block) if mo is", "Columns tmp = hdu.columns = ColDefs(input, tbtype) # read the delayed data for", "'comment': self.__dict__['comment'] = '' if valu is not None: _comm = valu.group('comm') if", "or index default: if no keyword is found, the value to be returned.", "keywords nrows: number of rows in the new table fill: if = 1,", "'Intended keyword %s already exists in header.' 
% newkey _index = self.ascard.index_of(oldkey) _comment", "= self._coldefs._recformats[indx]._dtype for i in range(len(self._parent)): _offset = self._parent.field(indx)[i,1] + self._heapoffset self._file.seek(_offset) if", "keywords. Mark them first, # then delete from the end so as not", "# conserve space for HIERARCH cards if isinstance(self, _Hierarch): valStr = valStr.strip() #", "table column # format spec, i.e. A7 in ASCII table is the same", "if self.data is None: _shape, _format = (), '' else: # the shape", "a copy of the Header.\"\"\" tmp = Header(self.ascard.copy()) # also copy the class", "a header keyword value.\"\"\" return self.ascard[key].value def __setitem__ (self, key, value): \"\"\"Set a", "tempfile.NamedTemporaryFile('rb+',-1,'.fits') self.name = self.tfile.name self.__file = self.tfile.file self.__file.write(zfile.read()) zfile.close() elif os.path.splitext(self.name)[1] == '.zip':", "1 def write(self,data): \"\"\" Write the given data to the stream. :Parameters: data", "len(self._cardimage) > Card.length: self.__class__ = _Card_with_continue # remove the key/value/comment attributes, some of", "valueStr class Undefined: \"\"\"Undefined value.\"\"\" pass class Delayed: \"\"\"Delayed file-reading data.\"\"\" def __init__(self,", "else: # floating point cases if self._ffile.memmap: self.data = raw_data.copy() # if not", "away -2^N _scale = (max - min) / (2.**(8*_type.bytes) - 2) # Do", "'length of dimension 1'), Card('NAXIS2', 0, 'length of dimension 2'), Card('PCOUNT', 0, 'number", "None, option, _err) return _err class BinTableHDU(_TableBaseHDU): \"\"\"Binary table HDU class.\"\"\" def __init__(self,", "except IndexError: raise IndexError, 'Extension %s is out of bound or not found.'", "_card = Card().fromstring(self._cardimage[i*80:(i+1)*80]) if i > 0 and _card.key != 'CONTINUE': raise ValueError,", "be written to. output_verify: output verification option, default='exception'. clobber: Overwrite the output file", "of conditions and the following disclaimer in the documentation and/or other materials provided", "missing 'END' card, the Header may also contain the binary data(*). (*) In", "in ['fix', 'silentfix', 'ignore', 'warn', 'exception']: raise ValueError, 'Option %s not recognized.' %", "add_history(self, value, before=None, after=None): \"\"\"Add a HISTORY card. value: History text to be", "__getattr__(self, attr): if attr == 'data': self.__dict__[attr] = self.field('data') elif attr == '_unique':", "specified bscale/bzero values. bscale/bzero: user specified BSCALE and BZERO values. \"\"\" if self.data", "FITS Group data in a manner analogous to tables \"\"\" def __init__(self, input=None,", "raise ValueError, 'comment %s is not a string' % val self.__dict__['comment'] = val", "and there # is no comment if self.key in Card._commentaryKeys: if not isinstance(self.value,", "minimum and maximum of the data to scale. The option will be overwritten", "# to avoid infinite loops if not (self.__dict__.has_key('value') or self.__dict__.has_key('_cardimage')): valStr = ''", "view the same scaled columns as # the original dummy = self.field(i) if", "be compliant to FITS standard. key: keyword name, default=''. 
value: keyword value, default=''.", "j in range(naxis): size = size * self.header['NAXIS'+`j+1`] bitpix = self.header['BITPIX'] gcount =", "HDU to the HDUList.\"\"\" if isinstance(hdu, _AllHDU): super(HDUList, self).append(hdu) hdu._new = 1 self._resize", "__delitem__(self, key): \"\"\"Delete an HDU from the HDUList, indexed by number or name.\"\"\"", "hdu in self: (hdu._hdrLoc, hdu._datLoc, hdu._datSpan) = _hduList.__file.writeHDU(hdu) _hduList.__file.close() self.__file.close() os.remove(self.__file.name) if (verbose):", "{} # Initialize tempcache with an empty # dictionary to enable file cacheing", "'\\n' return text[:-1] def copy(self): tmp = Column(format='I') # just use a throw-away", "a table HDU hdu = eval(tbtype)(header=header) if isinstance(input, ColDefs): if input._tbtype == tbtype:", "is None: bitpix = _ImageBaseHDU.ImgCode[input.type()] fits_fmt = GroupsHDU._dict[bitpix] # -32 -> 'E' _fmt", "def __coerce__(self, other): pass # needed for __add__ def __add__(self, other, option='left'): if", "1 for n in dims: npt *= n # Now, get the data", "n = 0 (_scale, _zero, bscale, bzero) = hdu.data._get_scale_factors(i)[3:] if n > 0:", "code must retain the above copyright notice, this list of conditions and the", "(as in reading in the FITS file), # it will be constructed from", "_LineSlice(indx.stop-indx.start, indx.start) else: return _SteppedSlice((indx.stop-indx.start)/indx.step, indx.start) else: raise IndexError, 'Illegal index %s' %", "the file is read the first time, no need to copy, and keep", "% repr(val) self.__dict__['_fixable'] = 0 raise ValueError, self._err_text def _extractKey(self): \"\"\"Returns the keyword", "self.__dict__[attr] = None if self.header['NAXIS'] > 0: _bitpix = self.header['BITPIX'] self._file.seek(self._datLoc) if isinstance(self,", "type' elif isinstance(input, FITS_rec): # input is a FITS_rec tmp = hdu.columns =", "== 0, the card will be appended at the end, even if there", "elif _count == 0: raise NameError, \"Key '%s' does not exist.\" % key", "+ hdu._datLoc _data._file = hdu._file _tbsize = hdu.header['NAXIS1']*hdu.header['NAXIS2'] _data._gap = hdu._theap - _tbsize", "stop must be integer.' % input if _stop < _start: raise IndexError, 'Illegal", "def __init__(self, data=None, header=None, name=None): \"\"\" header: header to be used data: data", "later stage as CONTINUE cards may span across blocks. \"\"\" if len(block) !=", "int(mo.group(2)) datasize = reduce(operator.mul, dims[groups:]) size = abs(bitpix) * gcount * (pcount +", "for i in range(len(indx)): self.field(indx[i])[:] = value[i] else: raise ValueError, \"parameter value must", "mode='update') f.append(hdu) f.close() def update(filename, data, *ext, **extkeys): \"\"\"Update the specified extension with", "message only if there is something if _dummy.strip(): if self.unit: result += _INDENT*tab+\"%s", "# so the checking is in order, in case of required cards in", "unique): >>> getdata('in.fits', 'sci') >>> getdata('in.fits', extname='sci') # equivalent Note EXTNAMEs are not", "self.__dict__['_valueModified'] = 1 else: raise ValueError, 'Illegal value %s' % str(val) self.__dict__['value'] =", "physical file associated with the HDUList. Default = None. 
\"\"\" self.__file = file", "HDUList, indexed by number only.\"\"\" super(HDUList, self).__delslice__(i, j) self._resize = 1 def _verify", "before column 9, # since there is no way to communicate back to", "def update(self): \"\"\" Update header keywords to reflect recent changes of columns.\"\"\" _update", "offset needs to multiply the length of all remaining axes else: offset *=", "self.__dict__['comment'] = '' if valu is not None: _comm = valu.group('comm') if isinstance(_comm,", "CardList.\"\"\" cards = [None]*len(self) for i in range(len(self)): cards[i]=Card('').fromstring(str(self[i])) return CardList(cards) def __repr__(self):", "return self.__dict__[attr] except KeyError: raise AttributeError(attr) # 0.6.5.5 def size(self): \"\"\"Returns the size", "header=self.header.copy()) def writeto(self, name, output_verify='exception', clobber=False): \"\"\"Write the HDU to a new file.", "and data are copied.\"\"\" # touch the data, so it's defined (in the", "of the mktemp() output. \"\"\" dirName = os.path.dirname(input) if dirName != '': dirName", "parbscales=None, parbzeros=None): \"\"\"input: input data, either the group data itself (a numarray) or", "size = len(tmp._arrays[i]) n = min(size, nrows) if fill: n = 0 (_scale,", "if _scale or _zero: for i in range(len(self._parent)): dummy[i][:] = dummy[i]*bscale+bzero # Boolean", "the key attribute, surrogate for the __setattr__ key case.\"\"\" if isinstance(val, str): val", "% output # longstring case (CONTINUE card) else: # try not to use", "column right after the last field elif tbtype == 'TableHDU': (_format, _width) =", "self.writeComplete def size(self): \"\"\" Return the size (in bytes) of the data portion", "with the distribution. 3. The name of AURA and its representatives may not", "created. Also check the card's value by using the \"test\" argument. \"\"\" _err", "it. try: _dum = self.header['EXTEND'] #_after += 1 except: pass _pos = '>=", "\"\"\"Fix the card image for fixable non-standard compliance.\"\"\" _valStr = None # for", "map(lambda x: getattr(x,'key'), self) def index_of(self, key, backward=0): \"\"\"Get the index of a", "= eval(width)+1 strfmt = strfmt + 's'+str(size) + ',' strlen = strlen +", "OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF", "with 'data', if None, a header of the appropriate type is created for", "> 1: # do the _parent too, otherwise the _parent # of a", "<= 999\", 0, option, _err) tfields = self.header['TFIELDS'] for i in range(tfields): self.req_cards('TFORM'+`i+1`,", "created for the data object supplied. \"\"\" if not os.path.exists(filename): writeto(filename, data, header)", "data=data, header=header) self._xtn = 'IMAGE' self.header._hdutype = ImageHDU # insert the require keywords", "It returns the output \"data\" array of data type dtype. The descriptor location", "else: self.change_attrib(col_name, 'name', new_name) def change_unit(self, col_name, new_unit): \"\"\"Change a Column's unit.\"\"\" self.change_attrib(col_name,", "name, default=''. value: keyword value, default=''. comment: comment, default=''. \"\"\" if key !=", "more attribute names, they must be separated by comma(s). 
\"\"\" if attrib.strip().lower() in", "instantiate a FITS file object (ffo) ffo = _File(name, mode=mode, memmap=memmap) hduList =", "many positional arguments\" elif n_ext1 == 1: if n_ext2 == 0: ext =", "'%20s' % _floatFormat(self.value) else: valStr = '%20s' % self._valuestring elif isinstance(self.value, complex): if", "in range(0, len(_blockLen), Card.length): _card = Card('').fromstring(block[i:i+Card.length]) _key = _card.key cardList.append(_card) keyList.append(_key) if", "for storing high energy astrophysics data. For details of the FITS standard, see", "of shape (s, nx) output: output Uint8 array of shape (s, nbytes) nx:", "'']: list = _commonNames else: list = attrib.split(',') for i in range(len(list)): list[i]=list[i].strip().lower()", "constructed by error messages generated by verifications at different class levels. \"\"\" def", "== '': break else: break hdu._raw += block _size, hdu.name = hdu._getsize(hdu._raw) #", "'spans': # make sure to consider the case that the starting column of", "is out of bound or not found.' % key self._resize = 1 def", "ValueError, \"Unparsable card, fix it first with .verify('fix').\" if valu.group('bool') != None: _val", "% Card.length if strlen == 0: return input else: return input + '", "= 1 def __delitem__(self, key): \"\"\"Delete card(s) with the name 'key'.\"\"\" # delete", "_comment_FSC_RE = re.compile(_ASCII_text) # Checks for a valid value/comment string. It returns a", "of string texts.\"\"\" output = [] for _card in self.ascardlist(): if _card.key ==", "\"\"\"Update the header keywords to agree with the data.\"\"\" old_naxis = self.header.get('NAXIS', 0)", "keywords for i in range(len(_cols)): for cname in _commonNames: val = getattr(_cols, cname+'s')[i]", "end): _hdus = super(HDUList, self).__getslice__(start,end) result = HDUList(_hdus) return result def __setitem__(self, key,", "') if real.group('sign') is not None: _realStr = real.group('sign')+_realStr imag = Card._number_NFSC_RE.match(input.group('imag')) _imagStr", "right before writing to the output file, as the data will be scaled", "return self.__file def _readheader(self, cardList, keyList, blocks): \"\"\"Read blocks of header, and put", "1: raise \"Zip files with multiple members are not supported.\" self.tfile = tempfile.NamedTemporaryFile('rb+',-1,'.fits')", "of a FITS file. @param filename: input FITS file name @type: string @param", "(deep)copy of the CardList.\"\"\" cards = [None]*len(self) for i in range(len(self)): cards[i]=Card('').fromstring(str(self[i])) return", "use repr to accomodate both string and non-string types # Boolean is also", "of array dimensions'), ]) if isinstance(self, GroupsHDU): _list.append(Card('GROUPS', True, 'has groups')) if isinstance(self,", "in _format: _pc = '%-' else: _pc = '%' _fmt = ' '*_lead", "update the 3rd extension >>> update(file, dat, header=hdr, ext=5) # update the 5th", "= `val._nx` + 'X' elif isinstance(val, _FormatP): VLdata = self.data.field(i) VLdata._max = max(map(len,", "all parts together output = keyStr + eqStr + valStr + commentStr #", "by NASA as the standard format for storing high energy astrophysics data. 
For", "the output file already exists if os.path.exists(name): if clobber: print \"Overwrite existing file", "'there are %d extensions of %s' % (nfound, `key`) else: return found def", "'F':'f4', 'E':'f4', 'D':'f8'} _re = re.compile(r'(?P<dtype>[AIFED])(?P<width>[0-9]*)') # Parse the TFORM value into data", "name == 'comment': _comm = _card.comment if isinstance(_comm, str) and _comm != '':", "table HDU, 'BinTableHDU' (default) or 'TableHDU' (text table). \"\"\" ascii_fmt = {'A':'A1', 'I':'I10',", "class TableHDU(_TableBaseHDU): \"\"\"FITS ASCII table extension HDU class.\"\"\" __format_RE = re.compile( r'(?P<code>[ADEFI])(?P<width>\\d+)(?:\\.(?P<prec>\\d+))?') def", "by number or name.\"\"\" key = self.index_of(key) _item = super(HDUList, self).__getitem__(key) if isinstance(_item,", "portions are not actually read here, but the beginning locations are computed. \"\"\"", "if self._valueModified: _tmp = '(' + _floatFormat(self.value.real) + ', ' + _floatFormat(self.value.imag) +", "j in range(3, naxis+3): self.req_cards('NAXIS'+`j-2`, '== '+`j`, _isInt+\" and val>= 0\", 1, option,", "getdata('in.fits', ext=2) # the second extension By name, i.e., EXTNAME value (if unique):", "and \"+isValid, 8, option, _err) self.req_cards('NAXIS', '== 2', _isInt+\" and val >= 0", "the space already in memory else: self.data = raw_data if self._bscale != 1:", "header. # self.header.update('XTENSION','IMAGE','Image extension', after='SIMPLE') del self.header['SIMPLE'] if not self.header.has_key('PCOUNT'): dim = self.header['NAXIS']", "`hdu.header['extver']` except: _extver = '' # only append HDU's which are \"new\" if", "a certain kind of header. Strip cards like SIMPLE, BITPIX, etc. so the", "name first, so in the example in (a), field('abc') will get the first", "SIMPLE and XTENSION to accomodate Extension # and Corrupted cases del self['SIMPLE'] del", "is a TableHDU containing ASCII data. \"\"\" def __init__(self, data=None, header=None): self._file, self._offset,", "array (i.e. table), or groups data object depending on the type of the", "\"physical\" cards. _max = _keyList.count('CONTINUE') _start = 0 for i in range(_max): _where", "raise IOError, 'Block does not begin with SIMPLE or XTENSION' for i in", "name os.remove(name) else: raise IOError, \"File '%s' already exist.\" % name # make", "@param filename: input FITS file name \"\"\" f = open(filename) f.info() f.close() UNDEFINED", "new_table(self._coldefs, nrows=shape[0]) return hdu.data def __repr__(self): tmp = rec.RecArray.__repr__(self) loc = tmp.rfind('\\nnames=') tmp", "for attrs in dict] self.data = tmp else: raise TypeError, \"input to ColDefs", "`tuple(self.data)` def __coerce__(self, other): pass # needed for __add__ def __add__(self, other, option='left'):", "new_hdu=_makehdu(data, header) hdulist, _ext = _getext(filename, 'update', *ext, **extkeys) hdulist[_ext] = new_hdu hdulist.close()", "_key = _card.key cardList.append(_card) keyList.append(_key) if _key == 'END': break def _readHDU(self): \"\"\"Read", "them as octal values. _number_FSC_RE = re.compile(r'(?P<sign>[+-])?0*(?P<digt>' + _digits_FSC+')') _number_NFSC_RE = re.compile(r'(?P<sign>[+-])? 
*0*(?P<digt>'", "repr(val) self.__dict__['_fixable'] = 0 raise ValueError, self._err_text def _checkKey(self, val): \"\"\"Verify the keyword", "self._coldefs.nulls[indx].strip() dummy = num.zeros(len(self._parent), type=_type) dummy[:] = ASCIITNULL self._convert[indx] = dummy for i", "range(len(self)): if i > 0 and (not isinstance(self[i], _ExtensionHDU)): err_text = \"HDUList's element", "0 else: npars = len(pardata) if parbscales is None: parbscales = [None]*npars if", "\"\"\"Set the value attribute.\"\"\" if isinstance(val, (str, int, long, float, complex, bool, Undefined)):", "bzero = parbzeros[i])) _cols.append(Column(name='data', format = fits_fmt, bscale = bscale, bzero = bzero))", "be a string. force: if new key name already exist, force to have", "\"\"\"FITS primary HDU class.\"\"\" def __init__(self, data=None, header=None): \"\"\"Construct a primary HDU. data:", "# 0.6.5.5 def size(self): \"\"\"Size (in bytes) of the data portion of the", "input data array: %s\" % array array._dtype = recfmt._dtype else: raise ValueError, \"Data", "header and data are copied.\"\"\" if self.data is not None: _data = self.data.copy()", "if keyboardInterruptSent: raise KeyboardInterrupt signal.signal(signal.SIGINT,signal.getsignal(signal.SIGINT)) def update_extend(self): \"\"\"Make sure if the primary header", "class.\"\"\" \"\"\" This class is used when one or more mandatory Cards are", "So, in the last example, field('Abc') will cause an exception since there is", "_parent too, otherwise the _parent # of a scaled column may have wrong", "['TDISP', 'TDIM', 'THEAP']: for i in range(_tfields): del self[name+`i+1`] if issubclass(self._hdutype == TableHDU):", "of the blanks blank_loc = num.nonzero(arr == ' ')[0] offset = 0 xoffset", "IOError, 'Header size is not multiple of %d: %d' % (_blockLen, len(blocks)) elif", "stream to the file. :Returns: writeComplete : integer Flag that when true indicates", "= re.compile(r'BITPIX =\\s*(-?\\d+)') re_naxis = re.compile(r'NAXIS =\\s*(\\d+)') re_naxisn = re.compile(r'NAXIS(\\d) =\\s*(\\d+)') re_gcount =", "to an image extension header and appended to the end of the file.", "card must be a string' else: self.__dict__['_cardimage'] = ' '*80 def __repr__(self): return", "A list of Cards, default=[]. \"\"\" list.__init__(self, cards) self._cards = cards # if", "and/or other materials provided with the distribution. 3. The name of AURA and", "c0 = Card('XTENSION', 'IMAGE', 'Image extension') else: c0 = Card('SIMPLE', True, 'conforms to", "useblanks=1, bottom=0): \"\"\"Append a Card to the CardList. card: The Card to be", "None: axes = [] else: raise ValueError, \"incorrect array type\" self.header['NAXIS'] = len(axes)", "the 3rd extension >>> update(file, dat, header=hdr, ext=5) # update the 5th extension", "header to be used (as a template), default=None. 
If header=None, a minimal Header", "in the order of NAXIS's which is the # reverse of the numarray", "rec.RecArray r._coldefs = self._coldefs f = FITS_rec(r) f._convert = copy.deepcopy(self._convert) return f def", "if isinstance(self._coldefs._recformats[indx], _FormatP): desc = self._parent.field(indx) desc[:] = 0 # reset _npts =", "if isinstance(value, (list, tuple)) and len(indx) == len(value): for i in range(len(indx)): self.field(indx[i])[:]", "application threadName = threading.currentThread() singleThread = (threading.activeCount() == 1) and (threadName.getName() == 'MainThread')", "if isinstance(hdu.data._parent.field(i), num.NumArray): # make the scaled data = 0, not the stored", "\" \" DELAYED = \"delayed\" # used for lazy instantiation of data ASCIITNULL", "keyword name or index default: if no keyword is found, the value to", "in a manner analogous to tables \"\"\" def __init__(self, input=None, bitpix=None, pardata=None, parnames=[],", "else: _data = rec.array(hdu._file, formats=tmp._recformats, names=tmp.names, shape=tmp._shape) if isinstance(hdu._ffile, _File): _data._byteorder = 'big'", "the EXTNAME value \"\"\" _TableBaseHDU.__init__(self, data=data, header=header, name=name) self._xtn = 'TABLE' if self.header[0].rstrip()", "name @type: string @param ext: The rest of the arguments are for extension", "self.ascard[j] = Card(key, value, _comment) elif before != None or after != None:", "specified by a URL cannot be accessed\"\"\" def http_error_default(self, url, fp, errcode, errmsg,", "this should never happen if header is None: raise ValueError, \"No header to", "and it will be placed before or after the specified location. If no", "OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)", "scale by TSCAL and TZERO if _scale or _zero: for i in range(len(self._parent)):", "for floating value string _fix_table = maketrans('de', 'DE') _fix_table2 = maketrans('dD', 'eE') class", "END. bottom: If =0 (default) the card will be appended after the last", "is not found. In the case of a missing 'END' card, the Header", "if fixable: fix = \"self.header['%s'] = %s\" % (keywd, `fix_value`) _err.append(self.run_option(option, err_text=err_text, fix_text=fix_text,", "the cards. try: header = Header(CardList(_cardList, keylist=_keyList)) hdu = header._hdutype(data=DELAYED, header=header) # pass", "a new table from the input column definitions.\"\"\" \"\"\" input: a list of", "or _zero: self._convert[npars] = input else: self._parent.field(npars)[:] = input else: self.__setstate__(input.__getstate__()) def __getattr__(self,", "= \"CONTINUE \" valstr = valfmt % val_list[i] output = output + '%-80s'", "**extkeys): \"\"\"Get a keyword's value from a header in a FITS file. @type", "key, it is never fixable # always fix silently the case where \"=\"", "0 for j in range(self.header['TFIELDS']): bcol = self.header['TBCOL'+`j+1`] valu = self.header['TFORM'+`j+1`] fmt =", "`before' takes precedence over `after' if both specified. default=None. after: name of the", "# only append HDU's which are \"new\" if hdu._new: self.__file.writeHDU(hdu) if (verbose): print", "needs to be written to a file. 
name: output FITS file name to", "(card %d).\" % insert_pos fix = \"_cards=self.header.ascard; dummy=_cards[%d]; del _cards[%d];_cards.insert(%d, dummy)\" % (_index,", "header without being able to pass it to the header object hduList._resize =", "and isinstance(_ext, _Zero): try: hdu = hdulist[1] _data = hdu.data except IndexError: raise", "len(blocks)) elif (blocks[:8] not in ['SIMPLE ', 'XTENSION']): raise IOError, 'Block does not", "checking is in order, in case of required cards in wrong order. if", "method), it will try to match the exact name first, so in the", "card from key, value, and (optionally) comment. Any specifed arguments, except defaults, must", "object, using the cards. try: header = Header(CardList(_cardList, keylist=_keyList)) hdu = header._hdutype(data=DELAYED, header=header)", "= rec.RecArray.__getitem__(self, key) if isinstance(key, slice): out = tmp out._parent = rec.RecArray.__getitem__(self._parent, key)", "== ' ': useblanks = new_card._cardimage != ' '*80 self.ascard.append(new_card, useblanks=useblanks, bottom=1) else:", "size(self): \"\"\"Returns the size (in bytes) of the HDU's data part.\"\"\" size =", "_format = _format[:-2] + ']' _dims = \"%dR x %dC\" % (_nrows, _ncols)", "at the end. key: keyword name value: keyword value (to be used for", "None: test_pos = '_index '+ pos if not eval(test_pos): err_text = \"'%s' card", "0: ext = ext1[0] else: if isinstance(ext1[0], (int, tuple)): raise KeyError, 'Redundant/conflicting keyword", "'header' in extkeys: _gethdr = extkeys['header'] del extkeys['header'] else: _gethdr = False hdulist,", "+= self.header['TFORM'+`j+1`] + ', ' _format = _format[:-2] + ']' _dims = \"%dR", "the location offset of the heap area for each # variable length column", "\"\"\"Write the HDUList to a new file. name: output FITS file name to", "in Card._commentaryKeys: eqLoc = None else: if _key == 'HIERARCH': _limit = Card.length", "= (key,) naxis = self.hdu.header['NAXIS'] if naxis < len(key): raise IndexError, 'too many", "option, _err) return _err class TableHDU(_TableBaseHDU): \"\"\"FITS ASCII table extension HDU class.\"\"\" __format_RE", "'parse' result = Card._value_NFSC_RE.match(self._getValueCommentString()) # if not parsable (i.e. everything else) result =", "of mandatory keywords. naxis = self.header.get('NAXIS', 0) self.req_cards('PCOUNT', '== '+`naxis+3`, _isInt+\" and val", "return output[:-1] # ----------------------------- HDU classes ------------------------------------ class _AllHDU: \"\"\"Base class for all", "for item in self: if not isinstance(item, _ErrList): result += _INDENT*tab+\"%s\\n\" % item", "character if Card._keywd_FSC_RE.match(val) is None: self.__dict__['_err_text'] = 'Illegal keyword name %s' % repr(val)", "more than one group parameter have the same name else: result = self.field(indx[0]).astype('f8')", "= {'A':'a', 'I':'i4', 'F':'f4', 'E':'f4', 'D':'f8'} _re = re.compile(r'(?P<dtype>[AIFED])(?P<width>[0-9]*)') # Parse the TFORM", "for i in range(len(self)): (type, width) = _convert_ASCII_format(self.data[i].format) if width is None: self.data[i].format", "parbzeros=None): \"\"\"input: input data, either the group data itself (a numarray) or a", "self.data = data self._xtn = ' ' def __setattr__(self, attr, value): \"\"\"Set an", "make it to be one, i.e. # input arrays can be just list", "'Insufficient keyword argument: %s' % ext2 return hdulist, ext def getheader(filename, *ext, **extkeys):", "and if filename already exists, it will overwrite the file. 
Default is False.", "self.data._get_scale_factors(i)[3:5] if _scale: self.header.update('PSCAL'+`i+1`, self.data._coldefs.bscales[i]) if _zero: self.header.update('PZERO'+`i+1`, self.data._coldefs.bzeros[i]) def __getattr__(self, attr): \"\"\"Get", "use the original BSCALE and BZERO values when the data was read/created. If", "sign is not present, or it is a commentary card. \"\"\" # no", "= CardList(_cards, self._keylist[start:end]) return result def __setitem__(self, key, value): \"\"\"Set a Card by", "pcount = self.header.get('PCOUNT', 0) size = abs(bitpix) * gcount * (pcount + size)", "raise SyntaxError, 'keyword name cannot be reset.' elif name == 'value': self._setvalue(val) elif", "issubclass(self._hdutype, PrimaryHDU): del self['GROUPS'] if issubclass(self._hdutype, _ImageBaseHDU): del self['BSCALE'] del self['BZERO'] if issubclass(self._hdutype,", "image data _file: file associated with array (None) _datLoc: starting byte location of", "= r'(\\.\\d+|\\d+(\\.\\d*)?)([DE][+-]?\\d+)?' _digits_NFSC = r'(\\.\\d+|\\d+(\\.\\d*)?) *([deDE] *[+-]? *\\d+)?' _numr_FSC = r'[+-]?' + _digits_FSC", "_makep(input, desp_output, dtype): \"\"\"Construct the P format column array, both the data descriptors", "can fit in one line. # Instead, just truncate the comment if isinstance(self.value,", "if _name in _unique: _unique[_name].append(i) else: _unique[_name] = [i] self.__dict__[attr] = _unique try:", "def size(self): \"\"\"Returns the size (in bytes) of the HDU's data part.\"\"\" size", "size = 0 naxis = self.header.get('NAXIS', 0) # for random group image, NAXIS1", "one FITS HDU, data portions are not actually read here, but the beginning", "in range(len(_cols)): for cname in _commonNames: val = getattr(_cols, cname+'s')[i] if val !=", "else: dims = [0]*naxis for i in range(naxis): mo = re_naxisn.search(block, pos) pos", "_val = eval(_digt) else: _val = eval(numr.group('sign')+_digt) elif valu.group('cplx') != None: # Check", "if self._size != 0: self.writeComplete = 0 else: self.writeComplete = 1 def write(self,data):", "dummy # ASCII table does not have Boolean type elif _bool: self._parent.field(indx)[:] =", "dummy del dummy # ASCII table does not have Boolean type elif _bool:", "supported.\" self.tfile = tempfile.NamedTemporaryFile('rb+',-1,'.fits') self.name = self.tfile.name self.__file = self.tfile.file self.__file.write(zfile.read(namelist[0])) zfile.close() else:", "self.header['BITPIX'] = _ImageBaseHDU.ImgCode[self.data.data.type()] axes = list(self.data.data.getshape())[1:] axes.reverse() axes = [0] + axes elif", "__init__(self, key='', value='', comment=''): \"\"\"Construct a card from key, value, and (optionally) comment.", "no word is cut into two pieces. But if there is one single", "= re_groups.search(block) if mo and simple: groups = 1 else: groups = 0", "_padLength(_bytes) if _bytes != (hdu._datLoc-hdu._hdrLoc): self._resize = 1 if verbose: print \"One or", "input.data] # if the input is a list of Columns elif isinstance(input, (list,", "self[name+`i+1`] if issubclass(self._hdutype, BinTableHDU): for name in ['TDISP', 'TDIM', 'THEAP']: for i in", "The Card to be appended. useblanks: Use any *extra* blank cards? default=1. If", "extra NAXISi's for j in range(len(axes)+1, old_naxis+1): try: del self.header.ascard['NAXIS'+`j`] except KeyError: pass", "name list. The key can be an integer or string. 
If integer, it", "if len(indx) == 1: result = self.field(indx[0]) # if more than one group", "self._strides[0] return _Group(self, row) class _Group(rec.Record): \"\"\"One group of the random group data.\"\"\"", "= 0 size = 1 for j in range(groups,naxis): size = size *", "_scale) else: del self.header['BSCALE'] if self.data._type != _type: self.data = num.array(num.around(self.data), type=_type) #0.7.7.1", "option.\"\"\" try: (repeat, dtype, option) = _tformat_re.match(tform.strip()).groups() except: print 'Format \"%s\" is not", "1 def __delslice__(self, i, j): \"\"\"Delete a slice of HDUs from the HDUList,", "column shape as the shape of the record if nrows == 0: for", "column may have wrong byteorder if coldata2._byteorder != 'big': coldata2.byteswap() coldata2._byteorder = 'big'", "the second extension By name, i.e., EXTNAME value (if unique): >>> getdata('in.fits', 'sci')", "add NAXISi if it does not exist for j in range(len(axes)): try: self.header['NAXIS'+`j+1`]", "= _tdef_re.match(_card.key) try: keyword = _key.group('label') except: continue # skip if there is", "if mo is not None: bitpix = int(mo.group(1)) else: raise ValueError(\"BITPIX not found", "= ' / ' + self.comment else: commentStr = '' # equal sign", "data: shdu.write(data) shdu.close() \"\"\" def __init__(self, name, header): \"\"\" Construct a StreamingHDU object", "return its location. It returns None if equal sign is not present, or", "the next column\" % indx+1 if 'A' in _format: _pc = '%-' else:", "all fields to expand the original ._convert list # so the sliced FITS_rec", "if the card EXTEND exists, must be after it. try: _dum = self.header['EXTEND']", "head.strip().upper() def _extractValueComment(self, name): \"\"\"Exatrct the keyword value or comment from the card", "= False. \"\"\" if isinstance(self, _ExtensionHDU): hdulist = HDUList([PrimaryHDU(), self]) elif isinstance(self, PrimaryHDU):", "= _ValidHDU except: self._hdutype = _CorruptedHDU # populate the cardlist self.ascard = CardList(cards)", "too, otherwise the _parent # of a scaled column may have wrong byteorder", "Field (column) names are case sensitive: you can have two different columns called", "\"\"\"Unwrap the X format column into a Boolean array. input: input Uint8 array", "top level messages for item in self: if not isinstance(item, _ErrList): result +=", "self.__dict__['_err_text'] = 'Card image is not FITS standard (unparsable value string).' raise ValueError,", "needs the keyword EXTEND or if it has the proper value. \"\"\" hdr", "= 1 for n in dims: npt *= n # Now, get the", "None if self.header['NAXIS'] > 0: _bitpix = self.header['BITPIX'] self._file.seek(self._datLoc) if isinstance(self, GroupsHDU): dims", "list of cards of minimal header _list = CardList([ Card('XTENSION', '', ''), Card('BITPIX',", "is designed to use an independent # attribute of mmobject so if the", "8, 'array data type'), Card('NAXIS', 0, 'number of array dimensions'), ]) if isinstance(self,", "data is touched, use data info. 
if 'data' in dir(self): if self.data is", "keylist item def keys(self): \"\"\"Return a list of all keywords from the CardList.\"\"\"", "# For 'ab+' mode, the pointer is at the end after the open", "2) self._size = self.__file.tell() self.__file.seek(0) def __getattr__(self, attr): \"\"\"Get the _mm attribute.\"\"\" if", "By extension number: >>> getdata('in.fits', 0) # the primary header >>> getdata('in.fits', 2)", "one Column.\"\"\" indx = _get_index(self.names, col_name) for cname in _commonNames: attr = getattr(self,", "# of a scaled column may have wrong byteorder if coldata2._byteorder != 'big':", "comes pretty darn close. It appears to find the # end of a", "just # a number/string for cname in _commonNames: value = eval(cname) # get", "= 1. \"\"\" fmt = input_format (repeat, dtype, option) = _parse_tformat(fmt) if reverse", "if offset <= xoffset: offset = xoffset + strlen # collect the pieces", "option == 'parse': # check the value only, no need to check key", "card image.\"\"\" head = self._getKeyString() if isinstance(self, _Hierarch): self.__dict__['key'] = head.strip() else: self.__dict__['key']", "the Header may also contain the binary data(*). (*) In future it may", "eval(width) except: raise ValueError, 'Illegal format `%s` for ASCII table.' % input_format return", "_mmap = self._ffile._mm[self._datLoc:self._datLoc+self._datSpan] raw_data = num.array(_mmap, type=code, shape=dims) else: raw_data = num.fromfile(self._file, type=code,", "_naxis = self.hdu.header['NAXIS'+`naxis-j`] indx = _iswholeline(key[j], _naxis) dims.append(indx.npts) if not isinstance(indx, _WholeLine): raise", "(not str) in case of control character if Card._keywd_FSC_RE.match(val) is None: self.__dict__['_err_text'] =", "field('Xyz'), etc. will get this field. \"\"\" if isinstance(key, (int, long)): indx =", "# the elements in the object array are consistent. if not isinstance(array, (num.NumArray,", "expand the original ._convert list # so the sliced FITS_rec will view the", "does not exist, a new card will be created and it will be", "= 'SIMPLE' firstval = True self.req_cards(firstkey, '== 0', '', firstval, option, _err) self.req_cards('BITPIX',", "be the value of the keywod EXTNAME, default=None. \"\"\" # no need to", "is there if there is extension if len(self) > 1: self.update_extend() hduList =", "_dummy = item.__str__(tab=tab+1) # print out a message only if there is something", "self.update_extend() hduList = open(name, mode=\"append\") for hdu in self: hduList.__file.writeHDU(hdu) hduList.close(output_verify=output_verify) def close(self,", "Card): setattr(self, cname, value.value) else: setattr(self, cname, value) # if the column data", "pass else: raise TypeError, \"table data has incorrect type\" # set extension name", "a primary HDU if len(self) > 0 and (not isinstance(self[0], PrimaryHDU)): err_text =", "@param filename: input FITS file name @param ext: The rest of the arguments", "data type and width. try: (dtype, width) = _re.match(input_format.strip()).groups() dtype = ascii2rec[dtype] if", "self._convert[indx] (_str, _bool, _number, _scale, _zero, bscale, bzero) = self._get_scale_factors(indx) # for P", "= ((nx-1) / 8) + 1 for i in range(nbytes): _min = i*8", "# attribute of mmobject so if the HDUList object is created from files", "= ext2['extname'], ext2['extver'] else: ext = ext2['extname'] else: raise KeyError, 'Insufficient keyword argument:", "len(self.columns.formats) _format = self.columns.formats # if data is not touched yet, use header", "print out verbose messages? default = 0. 
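Several fragments above are the getdata/getheader doctest lines (selection by extension number, by EXTNAME, and an optional header=True keyword). A compact version of that usage, assuming 'in.fits' is an existing file with a data-bearing HDU and a 'SCI' extension:

    import pyfits

    # Convenience reads, mirroring the doctest fragments in the listing.
    primary = pyfits.getdata('in.fits')                        # first data-bearing HDU
    sci = pyfits.getdata('in.fits', 2)                         # the 2nd extension
    sci, hdr = pyfits.getdata('in.fits', 'sci', header=True)   # by EXTNAME, with header
    hdr0 = pyfits.getheader('in.fits')                         # primary header only
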
This simply calls the close method", "= _ErrList([], unit='HDU') # the first (0th) element must be a primary HDU", "resized, need to write it to a tmp file, # delete the original", "if _scale or _zero: self._convert[npars] = input else: self._parent.field(npars)[:] = input else: self.__setstate__(input.__getstate__())", "after the call. type (string): destination data type, use numarray attribute format, (e.g.", "header) if isinstance(hdu, PrimaryHDU): hdu = ImageHDU(data, header) f = open(filename, mode='update') f.append(hdu)", "else: _shape = () for j in range(self.header['NAXIS']): if isinstance(self, GroupsHDU) and j", "self._coldefs._tbtype == 'TableHDU': _dict = {'I':num.Int32, 'F':num.Float32, 'E':num.Float32, 'D':num.Float64} _type = _dict[self._coldefs._Formats[indx][0]] #", "for a string, # since a greedy match will find a single-quote after", "the input data/header. @type filename: string @param filename: name of the file to", "(code) _booltype = 'i1' _fits2rec = {'L':_booltype, 'B':'u1', 'I':'i2', 'E':'f4', 'D':'f8', 'J':'i4', 'A':'a',", "output[...,j]) def _wrapx(input, output, nx): \"\"\"Wrap the X format column Boolean array into", "the scaled (physical) array. self._parent = input self._convert = [None]*self._nfields self.names = self._names", "format column into a Boolean array. input: input Uint8 array of shape (s,", "\"\"\"FITS record array class. FITS record array is the data part of a", "_expValStr valStr = '%-20s' % valStr # must be before int checking since", "value, comment)) self._mod = 1 def add_history(self, value, before=None, after=None): \"\"\"Add a HISTORY", "if mode != 'append' and not os.path.exists(name): self.name, fileheader = urllib.urlretrieve(name) else: self.name", "dtype == 'A': output_format = _fits2rec[dtype]+`repeat` # to accomodate both the ASCII table", "the data descriptors and the data. It returns the output \"data\" array of", "at once. The following psudo code illustrates its use: header = pyfits.Header() for", "-64]\" # Verify location and value of mandatory keywords. # Do the first", "self.header.update('PCOUNT', len(self.data.parnames), after='GROUPS') self.header.update('GCOUNT', len(self.data), after='PCOUNT') npars = len(self.data.parnames) (_scale, _zero) = self.data._get_scale_factors(npars)[3:5]", "(for now) hcopy = header.copy() hcopy._strip() _list.extend(hcopy.ascardlist()) self.header = Header(_list) if (data is", "self._size = self.size() if self._size != 0: self.writeComplete = 0 else: self.writeComplete =", "cards: A list of Cards, default=[]. 
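One of the docstrings shredded into these shingles is the StreamingHDU pseudo-code ("header = pyfits.Header() ... shdu = pyfits.StreamingHDU('filename.fits', header) ... shdu.write(data)"). A runnable sketch of that workflow, assuming a NumPy-based build and invented card values and file name:

    import numpy as np
    import pyfits

    # Build the image header card by card, then stream the pixel data to
    # disk in two pieces, as in the pseudo-code quoted in the docstring.
    header = pyfits.Header()
    header.update('SIMPLE', True)
    header.update('BITPIX', 32)
    header.update('NAXIS', 2)
    header.update('NAXIS1', 100)
    header.update('NAXIS2', 100)

    shdu = pyfits.StreamingHDU('stream.fits', header)
    chunk = np.zeros((50, 100), dtype=np.int32)
    shdu.write(chunk)        # first half of the 100x100 image
    shdu.write(chunk)        # write() returns 1 once the data area is complete
    shdu.close()
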
\"\"\" # decide which kind of header", "HDU.\"\"\" re_simple = re.compile(r'SIMPLE =\\s*') re_bitpix = re.compile(r'BITPIX =\\s*(-?\\d+)') re_naxis = re.compile(r'NAXIS =\\s*(\\d+)')", "j): \"\"\"Delete a slice of HDUs from the HDUList, indexed by number only.\"\"\"", "_locateEq(self): \"\"\"Locate the equal sign in the card image before column 10 and", "PrimaryHDU(data, header=header) clobber = keys.get('clobber', False) hdu.writeto(filename, clobber=clobber) def append(filename, data, header=None): \"\"\"Append", "key, value): \"\"\"Set a header keyword value.\"\"\" self.ascard[key].value = value self._mod = 1", "possible to decipher where the last block of the Header ends, but this", "in dir(hdu): if hdu.data is not None: hdu._file.seek(hdu._datLoc) self.__file.writeHDUdata(hdu) if (verbose): print \"update", "_cols.append(Column(name='c'+`i+1`, format = _format, bscale = _bscale, bzero = _bzero)) data_shape = self._dimShape()[:-1]", "fill a complete FITS block and no more data will be accepted. An", "name of the keyword, or index of the Card after which the new", "pass class _ErrList(list): \"\"\"Verification errors list class. It has a nested list structure", "64:'K', -32:'E', -64:'D'} def __init__(self, data=None, header=None, name=None): PrimaryHDU.__init__(self, data=data, header=header) self.header._hdutype =", "data_output[i] = num.array(input[i], type=dtype) desp_output[i,0] = len(data_output[i]) desp_output[i,1] = _offset _offset += len(data_output[i])", "chararray.CharArray): if i._type.bytes > 1: if i._byteorder != 'big': i.byteswap() i._byteorder = 'big'", "= key[1] else: _key = key _ver = None if not isinstance(_key, str):", "self['BITPIX'] _naxis = self['NAXIS'] if issubclass(self._hdutype, _TableBaseHDU): _tfields = self['TFIELDS'] del self['NAXIS'] for", "' + fix_text return _text def verify (self, option='warn'): \"\"\"Wrapper for _verify.\"\"\" _option", "sufficient data has been written to the stream to satisfy the amount specified", "% val self.__dict__['key'] = val def _setvalue(self, val): \"\"\"Set the value attribute.\"\"\" if", "way to communicate back to the _keylist. self._checkKey(self.key) # verify the value, it", "offset return list class Header: \"\"\"FITS header class.\"\"\" def __init__(self, cards=[]): \"\"\"Construct a", "elif (force == 0) and (newkey in self.ascard._keylist): raise ValueError, 'Intended keyword %s", "the extension being referenced If the optional keyword 'header' is set to True,", "= data self._xtn = ' ' def __setattr__(self, attr, value): \"\"\"Set an HDU", "None: loc = self.index_of(before) self.insert(loc, card, useblanks=useblanks) elif after != None: loc =", "=\\s*(T)') simple = re_simple.search(block[:80]) mo = re_bitpix.search(block) if mo is not None: bitpix", "= 'HIERARCH %s ' % self.key else: keyStr = '%-8s' % self.key else:", "all HDUs which are not corrupted.\"\"\" # 0.6.5.5 def size(self): \"\"\"Size (in bytes)", "None: continue _bytes = hdu.data._itemsize*hdu.data.nelements() _bytes = _bytes + _padLength(_bytes) if _bytes !=", "GroupsHDU): _gcount = ' %d Groups %d Parameters' % (self.header['GCOUNT'], self.header['PCOUNT']) else: _gcount", "Card._commentaryKeys: if not (newkey in Card._commentaryKeys and oldkey in Card._commentaryKeys): raise ValueError, 'Regular", "class.\"\"\" def __init__(self, data=None, header=None): \"\"\"Construct a primary HDU. 
data: the data in", "dummy = self.field(i) if self._convert[i] is not None: out._convert[i] = ndarray.NDArray.__getitem__(self._convert[i], key) del", "dimensions'), ]) if isinstance(self, GroupsHDU): _list.append(Card('GROUPS', True, 'has groups')) if isinstance(self, (_ExtensionHDU, GroupsHDU)):", "`%s` starting point overlaps to the previous column\" % indx+1 _trail = _loc[indx+1]", "'Keyword %s not found.' % `key` else: raise KeyError, 'Illegal key data type", "the header will be modified to an image extension header and appended to", "Card.length else: _limit = 10 try: eqLoc = self._cardimage[:_limit].index(\"=\") except: eqLoc = None", "% newkey _index = self.ascard.index_of(oldkey) _comment = self.ascard[_index].comment _value = self.ascard[_index].value self.ascard[_index] =", "_pad('END') blocks = blocks + _padLength(len(blocks))*' ' if len(blocks)%_blockLen != 0: raise IOError", "= [{} for i in range(_nfields)] # definition dictionaries for each field for", "to the previous column\" % indx+1 _trail = _loc[indx+1] - _width[indx] - self._coldefs.starts[indx]", "class Delayed: \"\"\"Delayed file-reading data.\"\"\" def __init__(self, hdu=None, field=None): self.hdu = hdu self.field", "\"%s is not a Card\" % str(card) def _pos_insert(self, card, before, after, useblanks=1):", "\"\"\"Parse the TFORM value into repeat, data type, and option.\"\"\" try: (repeat, dtype,", "= self.columns._pnames else: data = None self.__dict__[attr] = data elif attr == 'columns':", "work properly.\"\"\" hdu = new_table(self._coldefs, nrows=shape[0]) return hdu.data def __repr__(self): tmp = rec.RecArray.__repr__(self)", "if issubclass(self._hdutype, PrimaryHDU): del self['GROUPS'] if issubclass(self._hdutype, _ImageBaseHDU): del self['BSCALE'] del self['BZERO'] if", "'TDIM'] # mapping from TFORM data type to numarray data type (code) _booltype", "else: dummy[i] = num.array(self._file, type=self._coldefs._recformats[indx]._dtype, shape=self._parent.field(indx)[i,0]) dummy[i]._byteorder = 'big' # scale by TSCAL", "par(self, fieldName): \"\"\"Get the group parameter value.\"\"\" return self.array.par(fieldName)[self.row] def setpar(self, fieldName, value):", "UInt8 array. input: input Boolean array of shape (s, nx) output: output Uint8", "raise KeyError, 'Redundant/conflicting keyword argument(s): %s' % ext2 else: if 'extname' in keys:", "following disclaimer in the documentation and/or other materials provided with the distribution. 3.", "def _readheader(self, cardList, keyList, blocks): \"\"\"Read blocks of header, and put each card", "the card image for fixable non-standard compliance.\"\"\" _valStr = None # for the", "considered # to be more than one 80-char \"physical\" cards. 
_max = _keyList.count('CONTINUE')", "ext = _Zero() elif 'ext' in keys: if n_ext2 == 1: ext =", "in binary tables.\"\"\" pass class _FormatP(str): \"\"\"For P format in variable length table.\"\"\"", "sign position if self.key not in Card._commentaryKeys and self._cardimage.find('=') != 8: if option", "variable length array except: if isinstance(recfmt, _FormatP): try: _func = lambda x: num.array(x,", "self).__delitem__(_key) del self._keylist[_key] # update the keylist self.count_blanks() self._mod = 1 def count_blanks(self):", "print out all top level messages for item in self: if not isinstance(item,", "1 def __delitem__(self, key): \"\"\"Delete an HDU from the HDUList, indexed by number", "width == '': width = None else: width = eval(width) except: raise ValueError,", "1 return result class _Verify: \"\"\"Shared methods for verification.\"\"\" def run_option(self, option=\"warn\", err_text=\"\",", "methods for verification.\"\"\" def run_option(self, option=\"warn\", err_text=\"\", fix_text=\"Fixed.\", fix = \"pass\", fixable=1): \"\"\"Execute", "image.\"\"\" longstring = '' ncards = self._ncards() for i in range(ncards): # take", "out._parent = rec.RecArray.__getitem__(self._parent, key) out._convert = [None]*self._nfields for i in range(self._nfields): # touch", "out # if not a slice, do this because Record has no __getstate__.", "else: return tmp def _get_scale_factors(self, indx): \"\"\" Get the scaling flags and factors", "avoid out of range error for BZERO = +32768 self.header.update('BZERO', _zero) else: del", "keywords from the CardList.\"\"\" return map(lambda x: getattr(x,'key'), self) def index_of(self, key, backward=0):", "\"\"\"FITS header class.\"\"\" def __init__(self, cards=[]): \"\"\"Construct a Header from a CardList. cards:", "data._shape[0] self.header['TFIELDS'] = data._nfields self.data = data self.columns = data._coldefs self.update() elif data", "PrimaryHDU to the file before writing the # given header. # if not", "header/data to FITS file if filename exists, create if not. If only data", "of the variable array \"\"\" _offset = 0 data_output = _VLF([None]*len(input)) data_output._dtype =", "New_SIGINT(*args): print \"KeyboardInterrupt ignored until flush is complete!\" keyboardInterruptSent = True # Install", "= dummy for i in range(len(self._parent)): if self._parent.field(indx)[i].strip() != nullval: dummy[i] = float(self._parent.field(indx)[i].replace('D',", "for hdu in self: hduList.__file.writeHDU(hdu) hduList.close(output_verify=output_verify) def close(self, output_verify='exception', verbose=0): \"\"\"Close the associated", "coldata._byteorder = 'big' if coldata2._type.bytes > 1: # do the _parent too, otherwise", "with an odd number of single quotes, # instead of issuing an error.", "permitted provided that the following conditions are met: 1. Redistributions of source code", "!= 'CONTINUE': raise ValueError, 'Long card image must have CONTINUE cards after the", "data descriptors and the data. It returns the output \"data\" array of data", "'array data type'), Card('NAXIS', 2, 'number of array dimensions'), Card('NAXIS1', 0, 'length of", "col_name and new_name in self.names: raise ValueError, 'New name %s already exists.' %", "= Delayed(input, col) # now build the columns tmp = [Column(**attrs) for attrs", "or 'END' cards. 
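The module-level convenience functions are also quoted among these fragments: writeto builds a PrimaryHDU and honors a clobber keyword, and append opens an existing file in update mode (or creates it) and adds the data as an image extension. A minimal sketch, assuming a NumPy-based build and illustrative file names:

    import numpy as np
    import pyfits

    data = np.ones((64, 64), dtype=np.float32)
    pyfits.writeto('out.fits', data, clobber=True)   # new file with a minimal header
    pyfits.append('out.fits', data * 2.0)            # added as an image extension
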
A corrupted HDU usually means that the data size cannot", "is the same as 7A in # binary table, so both will produce", "[col.copy() for col in input] # if the format of an ASCII column", "def copy(self): \"\"\"Make a (deep)copy of the CardList.\"\"\" cards = [None]*len(self) for i", "= num.equal(dummy[i], ord('T')) self._convert[indx] = dummy return self._convert[indx] if _str: return self._parent.field(indx) #", "') if numr.group('sign') is not None: _valStr = numr.group('sign')+_valStr elif input.group('cplx') != None:", "make # sure to preserve the one-to-one correspondence when updating the list(s). #", "+ indx.offset # all elements after the first WholeLine must be WholeLine or", "= self.header['TFIELDS'] _format = '[' for j in range(_ncols): _format += self.header['TFORM'+`j+1`] +", "size _python_mode = {'readonly':'rb', 'copyonwrite':'rb', 'update':'rb+', 'append':'ab+'} # open modes _memmap_mode = {'readonly':'r',", "\"\"\" try: # have both SIMPLE and XTENSION to accomodate Extension # and", "strlen and no word is cut into two pieces. But if there is", "array desp_output: output \"descriptor\" array of data type 2Int32 dtype: data type of", "updating) comment: keyword comment (to be used for updating), default=None. before: name of", "reverse of the numarray shape if isinstance(self, GroupsHDU): _shape = list(self.data.data.getshape())[1:] _format =", "shape as the shape of the record if nrows == 0: for arr", "the string value) _key = self._cardimage[:8].strip().upper() if _key in Card._commentaryKeys: eqLoc = None", "first try: indx = nameList.index(key.rstrip()) except ValueError: # try to match case-insentively, _key", "# get the right shape for the data part of the random group,", "objects.ObjectArray.__init__(self, input) self._max = 0 def __setitem__(self, key, value): \"\"\"To make sure the", "the end after the open in # Linux, but is at the beginning", "== '': hdu.name, hdu._extver = hdu._getname() elif hdu.name == 'PRIMARY': hdu._extver = 1", "place (card %d).\" % insert_pos fix = \"_cards=self.header.ascard; dummy=_cards[%d]; del _cards[%d];_cards.insert(%d, dummy)\" %", "f.close() def update(filename, data, *ext, **extkeys): \"\"\"Update the specified extension with the input", "pyfits.StreamingHDU('filename.fits',header) for each piece of data: shdu.write(data) shdu.close() \"\"\" def __init__(self, name, header):", "\"The stream is closed and can no longer be written\" curDataSize = self._ffo.getfile().tell()", "[hdus] elif not isinstance(hdus, (HDUList, list)): raise \"Invalid input for HDUList.\" for hdu", "_Hierarch): valStr = valStr.strip() # comment string if keyStr.strip() in Card._commentaryKeys: # do", "self.header['BSCALE'] if self.data._type != _type: self.data = num.array(num.around(self.data), type=_type) #0.7.7.1 class PrimaryHDU(_ImageBaseHDU): \"\"\"FITS", "header: the header associated with 'data', if None, a header of the appropriate", "string texts.\"\"\" output = [] for _card in self.ascardlist(): if _card.key == 'HISTORY':", "%s is not a string' % val self.__dict__['key'] = val def _setvalue(self, val):", "mo is not None: bitpix = int(mo.group(1)) else: raise ValueError(\"BITPIX not found where", "after the last HDU or the file is corrupted.' 
% (len(hduList)+1) break #", "list of string texts.\"\"\" output = [] for _card in self.ascardlist(): if _card.key", "attr == '_theap': self.__dict__[attr] = self.header.get('THEAP', self.header['NAXIS1']*self.header['NAXIS2']) elif attr == '_pcount': self.__dict__[attr] =", "the CardList.\"\"\" return map(lambda x: getattr(x,'key'), self) def index_of(self, key, backward=0): \"\"\"Get the", "None: numr = Card._number_NFSC_RE.match(input.group('numr')) _valStr = numr.group('digt').translate(_fix_table, ' ') if numr.group('sign') is not", "certain kind of header. Strip cards like SIMPLE, BITPIX, etc. so the rest", "8) + 1 for i in range(nbytes): _min = i*8 _max = min((i+1)*8,", "'_recformats': if self._tbtype == 'BinTableHDU': attr = [_convert_format(fmt) for fmt in self.formats] elif", "if self.__file.mode in ['append', 'update']: self.flush(output_verify=output_verify, verbose=verbose) self.__file.close() # close the memmap object,", "support ND yet if isinstance(hdu, GroupsHDU): tmp._recformats[-1] = `hdu._dimShape()[:-1]` + tmp._dat_format if hdu._ffile.memmap:", "key = key + (slice(None),) * (naxis-len(key)) offset = 0 for i in", "length of header, data shape and type for each extension. @type filename: string", "key, value, and comment. Core code for ascardimage. \"\"\" # keyword string if", "than strlen and no word is cut into two pieces. But if there", "option.lower() if _option not in ['fix', 'silentfix', 'ignore', 'warn', 'exception']: raise ValueError, 'Option", "else: return ColDefs(x) def __len__(self): return len(self.data) def __repr__(self): return 'ColDefs'+ `tuple(self.data)` def", "else: val = _convert_format(val, reverse=1) #_update(keyword, val) _append(Card(keyword, val)) def copy(self): \"\"\"Make a", "@param header: the header associated with 'data', if None, a header of the", "= CardList([ c0, Card('BITPIX', 8, 'array data type'), Card('NAXIS', 0, 'number of array", "8, 4, 2, 1] nbytes = ((nx-1) / 8) + 1 for i", "not allowed\" self._checkKey(val) else: if val[:8].upper() == 'HIERARCH': val = val[8:].strip() self.__class__ =", "'BinTableHDU' (default) or 'TableHDU' (text table). \"\"\" ascii_fmt = {'A':'A1', 'I':'I10', 'E':'E14.6', 'F':'F16.7',", "psudo code illustrates its use: header = pyfits.Header() for all the cards you", "raise AttributeError, name return getattr(self, name) def _setkey(self, val): \"\"\"Set the key attribute,", "first WholeLine must be WholeLine or # OnePointAxis if isinstance(indx, (_WholeLine, _LineSlice)): dims.append(indx.npts)", "_ASCII_text = r'[ -~]*$' _comment_FSC_RE = re.compile(_ASCII_text) # Checks for a valid value/comment", "ValueError, 'Value in a commentary card must be a string' else: self.__dict__['_cardimage'] =", "row) def par(self, fieldName): \"\"\"Get the group parameter value.\"\"\" return self.array.par(fieldName)[self.row] def setpar(self,", "def update_extend(self): \"\"\"Make sure if the primary header needs the keyword EXTEND or", "useblanks=1): \"\"\"Insert a Card to the location specified by before or after. The", "primitive implementation, it will put the value string in one block and the", "both value and _cardimage attributes are missing, # to avoid infinite loops if", "eqLoc = self._locateEq() if eqLoc is None: eqLoc = 7 return self._cardimage[eqLoc+1:] def", "_index is None: err_text = \"'%s' card does not exist.\" % keywd fix_text", "name to be written to. output_verify: output verification option, default = 'exception'. 
clobber:", "simple = self.header.get('SIMPLE','F') randomGroups = self.header.get('GROUPS','F') if simple == 'T' and randomGroups ==", "own private attribute __file. \"\"\" if self.__file != None: if self.__file.memmap == 1:", "= len(self) - self._blanks i = nc - 1 if not bottom: for", "input.step if _step is None: _step = 1 elif isinstance(_step, (int, long)): if", "NAXISi if it does not exist for j in range(len(axes)): try: self.header['NAXIS'+`j+1`] =", "case sensitive: you can have two different columns called 'abc' and 'ABC' respectively.", "80 else: strlen = _len % Card.length return input + ' ' *", "may get modified. the data is still a \"view\" (for now) hcopy =", "input, row=0): rec.Record.__init__(self, input, row) def par(self, fieldName): \"\"\"Get the group parameter value.\"\"\"", "= self.header.get('PCOUNT', 0) size = abs(bitpix) * gcount * (pcount + size) /", "also int elif isinstance(self.value , bool): valStr = '%20s' % `self.value`[0] elif isinstance(self.value", "# keyword string if self.__dict__.has_key('key') or self.__dict__.has_key('_cardimage'): if isinstance(self, _Hierarch): keyStr = 'HIERARCH", "strlen == 0: return input else: return input + ' ' * (Card.length-strlen)", "the size of the data area return loc, _size+_padLength(_size) def close(self): \"\"\"Close the", "len(input) if _len == Card.length: return input elif _len > Card.length: strlen =", "words. So it may not look pretty. \"\"\" val_len = 67 comm_len =", "= _end else: raise KeyError, 'Attribute %s not defined.' % name self.__dict__[name] =", "$ \"\"\" A module for reading and writing FITS files and manipulating their", "key list is not supplied (as in reading in the FITS file), #", "Column has .name), Each attribute in ColDefs is a list of corresponding attribute", "list(self.data) return ColDefs(tmp) def __radd__(self, other): return self.__add__(other, 'right') def __sub__(self, other): if", "not be allowed) to insert. The new card will be inserted before it.", "attr): \"\"\"Get the 'data' or 'columns' attribute. The data of random group FITS", "[same as in update()] \"\"\" self._add_commentary('history', value, before=before, after=after) def add_comment(self, value, before=None,", "self._use_blanks(card._ncards()) self.count_blanks() self._mod = 1 else: raise SyntaxError, \"%s is not a Card\"", "def append(self, card, useblanks=1, bottom=0): \"\"\"Append a Card to the CardList. card: The", "== 'a': output_format = option+_rec2fits[dtype] elif isinstance(dtype, _FormatX): print 'X format' elif dtype+option", "= hdr['NAXIS2'] # go through header keywords to pick out column definition keywords", "_ValidHDU): hdus = [hdus] elif not isinstance(hdus, (HDUList, list)): raise \"Invalid input for", "the attributes: key, value, and comment, or from raw string. option: verification option,", "header keywords to agree with the data.\"\"\" old_naxis = self.header.get('NAXIS', 0) if isinstance(self.data,", "len(self.data) def __repr__(self): return 'ColDefs'+ `tuple(self.data)` def __coerce__(self, other): pass # needed for", "the data. The rest of the arguments are used only for the first", "(int, long)): if indx >= 0 and indx < naxis: if naxis >", "# FITS file) self.data return new_table(self.columns, header=self.header, tbtype=self.columns._tbtype) def _verify(self, option='warn'): \"\"\"_TableBaseHDU verify", "HDU.\" fix_text = 'Fixed by inserting one as 0th HDU.' 
fix = \"self.insert(0,", "input stringLen to the next FITS block.\"\"\" return (_blockLen - stringLen%_blockLen) % _blockLen", "found where expected\") mo = re_gcount.search(block) if mo is not None: gcount =", "both specified. default=None. after: name of the keyword, or index of the Card", "None: # Check for numbers with leading 0s. real = Card._number_NFSC_RE.match(valu.group('real')) _rdigt =", "True, 'has groups')) if isinstance(self, (_ExtensionHDU, GroupsHDU)): _list.append(Card('PCOUNT', 0, 'number of parameters')) _list.append(Card('GCOUNT',", "random group image, NAXIS1 should be 0, so we skip NAXIS1. if naxis", "self.ascard[_index].comment _value = self.ascard[_index].value self.ascard[_index] = Card(newkey, _value, _comment) # self.ascard[_index].__dict__['key']=newkey # self.ascard[_index].ascardimage()", "(insert_pos, _card) _err.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix, fixable=fixable)) else: # if the supposed location", "headers): raise IOError, (errcode, errmsg, url) urllib._urlopener = ErrorURLopener() # Assign the locally", "BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,", "FITS file.\"\"\" self.__file.close() class HDUList(list, _Verify): \"\"\"HDU list class. This is the top-level", "1, 'number of groups', after='PCOUNT') self._ffo = _File(name, 'append') self._ffo.getfile().seek(0,2) self._hdrLoc = self._ffo.writeHDUheader(self)", "This includes the name, type, length of header, data shape and type for", "info. else: _shape = () for j in range(self.header['NAXIS']): if isinstance(self, GroupsHDU) and", "real CONTINUE card, skip to the next card to search # to avoid", "# multiple match raise NameError, \"Ambiguous key name '%s'.\" % key else: raise", "name == '_recformats': if self._tbtype == 'BinTableHDU': attr = [_convert_format(fmt) for fmt in", "output_format def _convert_ASCII_format(input_format): \"\"\"Convert ASCII table format spec to record format spec. \"\"\"", "value by using the \"test\" argument. \"\"\" _err = errlist fix = ''", "= input.step if _step is None: _step = 1 elif isinstance(_step, (int, long)):", "_card = Card('').fromstring(block[i:i+Card.length]) _key = _card.key cardList.append(_card) keyList.append(_key) if _key == 'END': break", "to the class constructor may be written to the stream. If the provided", "= len(pardata) if parbscales is None: parbscales = [None]*npars if parbzeros is None:", "header=header, name=name) self._xtn = 'BINTABLE' hdr = self.header if hdr[0] != self._xtn: hdr[0]", "FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT", "a CardList.\"\"\" return self.ascard def items(self): \"\"\"Return a list of all keyword-value pairs", "hdu._ffile.memmap: _mmap = hdu._ffile._mm[hdu._datLoc:hdu._datLoc+hdu._datSpan] _data = rec.RecArray(_mmap, formats=tmp._recformats, names=tmp.names, shape=tmp._shape) else: _data =", "before: [same as in update()] after: [same as in update()] \"\"\" self._add_commentary(' ',", "then try to conver it to a strings array array = chararray.array(array, itemsize=eval(recfmt[1:]))", "if _zero != 0: self.data += -_zero # 0.9.6.3 to avoid out of", "value = TNULL # this can be reset by user. 
_isInt = \"isinstance(val,", "does not exist, the new card will have the fix_value as its value", "= Header(CardList(_cardList, keylist=_keyList)) hdu = header._hdutype(data=DELAYED, header=header) # pass these attributes hdu._file =", "string.\"\"\" kard = self._cardimage output = '' for i in range(len(kard)/80): output +=", "of bits \"\"\" pow2 = [128, 64, 32, 16, 8, 4, 2, 1]", "EXTNAME, default=None. \"\"\" # no need to run _ExtensionHDU.__init__ since it is not", "= _Hierarch else: raise ValueError, 'keyword name %s is too long (> 8),", "self._file, self._datLoc = None, None if header is not None: if not isinstance(header,", "the header self.update_header() self._bitpix = self.header['BITPIX'] # delete the keywords BSCALE and BZERO", "and no other field name is a case variant of \"XYZ\", then field('xyz'),", "of the file to be updated data: the new data used for updating", "returns 0. \"\"\" try: key = key.strip().upper() if key[:8] == 'HIERARCH': key =", "_err = _ExtensionHDU._verify(self, option=option) self.req_cards('NAXIS', None, 'val == 2', 2, option, _err) self.req_cards('BITPIX',", "name of the new FITS file to write to @type data: array, record", "for _verify.\"\"\" _option = option.lower() if _option not in ['fix', 'silentfix', 'ignore', 'warn',", "_end = self.starts[i] + _width - 1 attr[i] = _end - last_end last_end", "'F': output_format = 'f8' else: raise ValueError, \"Illegal format %s\" % fmt else:", "'END': break def _readHDU(self): \"\"\"Read the skeleton structure of the HDU.\"\"\" end_RE =", "%s' % repr(val) self.__dict__['_fixable'] = 0 raise ValueError, self._err_text def _extractKey(self): \"\"\"Returns the", "number: >>> getdata('in.fits', 0) # the primary header >>> getdata('in.fits', 2) # the", "the locally subclassed opener # class to the urllibrary urllib._urlopener.tempcache = {} #", "reference of _pcount # pass the attributes for attr in ['formats', 'names']: setattr(_data,", "Header object, using the cards. try: header = Header(CardList(_cardList, keylist=_keyList)) hdu = header._hdutype(data=DELAYED,", "nx: number of bits \"\"\" output[...] = 0 # reset the output nbytes", "desc[1:,1] = num.add.accumulate(desc[:-1,0])*_dtype.bytes desc[:,1][:] += self._heapsize self._heapsize += desc[:,0].sum()*_dtype.bytes # conversion for both", "par(self, parName): \"\"\"Get the group parameter values.\"\"\" if isinstance(parName, (int, long)): result =", "kind of header. \"\"\" try: # have both SIMPLE and XTENSION to accomodate", "the beginning of the file. If the file does not exist and the", "\"\"\"Set the slice's start/stop in the regular range.\"\"\" def _normalize(indx, npts): if indx", "'key': self._extractKey() elif name in ['value', 'comment']: self._extractValueComment(name) else: raise AttributeError, name return", "with multiple members are not supported.\" self.tfile = tempfile.NamedTemporaryFile('rb+',-1,'.fits') self.name = self.tfile.name self.__file", "an odd number of single quotes, # instead of issuing an error. The", "!= 'silentfix': _text += ' ' + fix_text return _text def verify (self,", "__init__(self, input, tbtype='BinTableHDU'): \"\"\"input: a list of Columns, an (table) HDU tbtype: which", "self.header.update _append = self.header.ascard.append _cols = self.columns _update('naxis1', self.data._itemsize, after='naxis') _update('naxis2', self.data._shape[0], after='naxis1')", "the CardList. If no keyword is found, return the default value. 
key: keyword", "Header, _Hierarch @group HDU Classes: _AllHDU, BinTableHDU, _CorruptedHDU, _ExtensionHDU, GroupsHDU, ImageHDU, _ImageBaseHDU, PrimaryHDU,", "= self.header['BITPIX'] gcount = self.header.get('GCOUNT', 1) pcount = self.header.get('PCOUNT', 0) size = abs(bitpix)", "column if self._coldefs._recformats[indx]._dtype is _booltype: for i in range(len(self._parent)): dummy[i] = num.equal(dummy[i], ord('T'))", "for i in range(len(_cols)): for cname in _commonNames: val = getattr(_cols, cname+'s')[i] if", "HIERARCH if len(keyStr + eqStr + valStr) > Card.length: raise ValueError, \"The keyword", "'Redundant/conflicting keyword argument(s): %s' % ext2 elif n_ext1 == 0: if n_ext2 ==", "(an integer). backward: search the index from the END, i.e. backward? default=0. If", "not. If only data is supplied, a minimal header is created @type filename:", "fixable non-standard compliance.\"\"\" _valStr = None # for the unparsable case if input", "% att print ' ', getattr(self, att+'s') #def change_format(self, col_name, new_format): #new_format =", "= _tmp.strip() elif input.group('numr') != None: numr = Card._number_NFSC_RE.match(input.group('numr')) _valStr = numr.group('digt').translate(_fix_table, '", "the argument's value keyword = _keyNames[_commonNames.index(cname)] if isinstance(value, Card): setattr(self, cname, value.value) else:", "PrimaryHDU): del self['GROUPS'] if issubclass(self._hdutype, _ImageBaseHDU): del self['BSCALE'] del self['BZERO'] if issubclass(self._hdutype, _TableBaseHDU):", "of control character if Card._keywd_FSC_RE.match(val) is None: self.__dict__['_err_text'] = 'Illegal keyword name %s'", "ALL cards with the same keyword name if isinstance(key, str): while 1: try:", "for BZERO = +32768 self.header.update('BZERO', _zero) else: del self.header['BZERO'] if _scale != 1:", "elif valu.group('numr') != None: # Check for numbers with leading 0s. numr =", "list of conditions and the following disclaimer in the documentation and/or other materials", "= output + '%-80s' % (headstr + valstr) # do the comment string", "_LineSlice)): dims.append(indx.npts) break elif isinstance(indx, _SteppedSlice): raise IndexError, 'Subsection data must be contiguous.'", "combine contiguous CONTINUE cards with its parent card if nc > 0: _longstring", "# do NOT use self.key commentStr = '' elif self.__dict__.has_key('comment') or self.__dict__.has_key('_cardimage'): if", "name self.mode = mode self.memmap = memmap if memmap and mode not in", "same CONTINUE card else: _start = _where + 1 if _keyList[_start:].count('CONTINUE') == 0:", "',' strlen = strlen + size else: strfmt = '>' + strfmt[:-1] return", "be contiguous.' for j in range(i+1,naxis): _naxis = self.hdu.header['NAXIS'+`naxis-j`] indx = _iswholeline(key[j], _naxis)", "before writing the # given header. # if not os.path.exists(name): if not self.header.has_key('SIMPLE'):", "'' if self.__dict__.has_key('value'): valStr = str(self.value) # put all parts together output =", "0, 'number of parameters')) _list.append(Card('GCOUNT', 1, 'number of groups')) if header is not", "if cards[0].key == 'SIMPLE': if 'GROUPS' in cards._keylist and cards['GROUPS'].value == True: self._hdutype", "lists, instead of dictionaries so the names can be displayed in a #", "_getext(filename, 'readonly', *ext, **extkeys) hdu = hdulist[_ext] _data = hdu.data if _data is", "self._checkText(_str) def fromstring(self, input): \"\"\"Construct a Card object from a (raw) string. 
It", "\"\"\" self._ffo.close() class ErrorURLopener(urllib.FancyURLopener): \"\"\"A class to use with urlretrieve to allow IOError", "'columns': class_name = str(self.__class__) class_name = class_name[class_name.rfind('.')+1:] self.__dict__[attr] = ColDefs(self, tbtype=class_name) elif attr", "same name (except blank card). If there is no card (or blank card),", "float(self._parent.field(indx)[i].replace('D', 'E')) else: dummy = self._parent.field(indx) # further conversion for both ASCII and", "this module. @group Header-related Classes: Card, CardList, _Card_with_continue, Header, _Hierarch @group HDU Classes:", "element = 0 # go through the list twice, first time print out", "strlen + size else: strfmt = '>' + strfmt[:-1] return strfmt ''' def", "= hdu.data # Binary table byteswap elif isinstance(hdu, BinTableHDU): for i in range(hdu.data._nfields):", "singleThread: if keyboardInterruptSent: raise KeyboardInterrupt signal.signal(signal.SIGINT,signal.getsignal(signal.SIGINT)) def update_extend(self): \"\"\"Make sure if the primary", "{'A':'s', 'I':'d', 'F':'f', 'E':'E', 'D':'E'} # calculate the starting point and width of", "num.NumArray): # boolean needs to be scaled too if recfmt == _booltype: _out", "= {} # Initialize tempcache with an empty # dictionary to enable file", "L{getdata} for explanations/examples. @return: keyword value @rtype: string, integer, or float \"\"\" _hdr", "* len(self) for i in range(len(self)): (_format, _width) = _convert_ASCII_format(self.formats[i]) if self.starts[i] is", "+ _format[1:] + _dict[_format[0]] + ' '*_trail # not using numarray.strings's num2char because", "extkeys: _gethdr = extkeys['header'] del extkeys['header'] else: _gethdr = False hdulist, _ext =", "+ size) / 8 return size def close(self): \"\"\" Close the 'physical' FITS", "the keyword name (a string) or the index (an integer). backward: search the", "_scale: self.header.update('BSCALE', self.data._coldefs.bscales[npars]) if _zero: self.header.update('BZERO', self.data._coldefs.bzeros[npars]) for i in range(npars): self.header.update('PTYPE'+`i+1`, self.data.parnames[i])", "etc. so the rest of the header can be used to reconstruct another", "also copy the class tmp._hdutype = self._hdutype return tmp def _strip(self): \"\"\"Strip cards", "header to be used data: data to be used name: name to be", "is also int elif isinstance(self.value , bool): valStr = '%20s' % `self.value`[0] elif", "ASCII table.' % input_format return (dtype, width) def _get_index(nameList, key): \"\"\" Get the", "card will be placed. The argument `before' takes precedence over `after' if both", "be a string' else: self.__dict__['_cardimage'] = ' '*80 def __repr__(self): return self._cardimage def", "= _coldefs elif attr == '_theap': self.__dict__[attr] = 0 try: return self.__dict__[attr] except", "_pad(input): \"\"\"Pad balnk space to the input string to be multiple of 80.\"\"\"", "block # self._ffo.getfile().write(_padLength(self._size)*'\\0') self.writeComplete = 1 self._ffo.getfile().flush() return self.writeComplete def size(self): \"\"\" Return", "the names can be displayed in a # preferred order. _commonNames = ['name',", "return _err # 0.8.8 def _iswholeline(indx, naxis): if isinstance(indx, (int, long)): if indx", "_Zero(int): def __init__(self): self = 0 def _getext(filename, mode, *ext1, **ext2): \"\"\"Open the", "dtype == 'P': output_format = _FormatP('2i4') output_format._dtype = _fits2rec[option[0]] elif dtype == 'F':", "BSCALE and BZERO in self.header. 
This method should only be used right before", "a string, # since a greedy match will find a single-quote after #", "the end of the file. If the file does not already exist, it", "argument is not a header, it (and other positional arguments) are assumed to", "\"\"\"For P format in variable length table.\"\"\" pass # TFORM regular expression _tformat_re", "the last field if self._tbtype == 'TableHDU': last_end = 0 attr = [0]", "for j in range(1, naxis): size = size * self.header['NAXIS'+`j+1`] bitpix = self.header['BITPIX']", "index of the key in the name list. The key can be an", "not FITS standard (equal sign not at column 8).' raise ValueError, self._err_text, '\\n%s'", "if isinstance(key, (int, long)): return key elif isinstance(key, str): _key = key.strip().upper() if", "ext = ext1[0] else: if isinstance(ext1[0], (int, tuple)): raise KeyError, 'Redundant/conflicting keyword argument(s):", "self.data = data self.columns = data._coldefs self.update() elif data is None: pass else:", "self.__file is None: _name = '(No file associated with this HDUList)' else: _name", "_formats += data_fmt gcount = input.shape[0] for i in range(npars): _cols.append(Column(name='c'+`i+1`, format =", "self.ascardlist(): if _card.key == 'HISTORY': output.append(_card.value) return output def get_comment(self): \"\"\"Get all comments", "raise ValueError, self._err_text def _extractKey(self): \"\"\"Returns the keyword name parsed from the card", "else: groups = 0 mo = re_naxis.search(block) if mo is not None: naxis", "_VLF([None]*len(input)) data_output._dtype = dtype if dtype == 'a': _nbytes = 1 else: _nbytes", "take each 80-char card as a regular card and use its methods. _card", "_err) return _err class BinTableHDU(_TableBaseHDU): \"\"\"Binary table HDU class.\"\"\" def __init__(self, data=None, header=None,", "range(0, len(blocks), Card.length): _card = Card('').fromstring(blocks[i:i+Card.length]) _key = _card.key if _key == 'END':", "name '%s'.\" % key else: raise NameError, \"Illegal key '%s'.\" % `key` return", "not None: naxis = int(mo.group(1)) pos = mo.end(0) else: raise ValueError(\"NAXIS not found", "or comment) is changed, will reconstructe # the card image. self._ascardimage() def ascardimage(self,", "of minimal header if isinstance(self, _ExtensionHDU): c0 = Card('XTENSION', 'IMAGE', 'Image extension') else:", "self._xtn = 'TABLE' if self.header[0].rstrip() != self._xtn: self.header[0] = self._xtn self.header.ascard[0].comment = 'ASCII", "+= eval(_idigt)*1j else: _val += eval(imag.group('sign') + _idigt)*1j else: _val = UNDEFINED self.__dict__['value']", "the last block of the Header ends, but this task may be difficult", "valueStr = \"%.16G\" % value if \".\" not in valueStr and \"E\" not", "urllib._urlopener = ErrorURLopener() # Assign the locally subclassed opener # class to the", "of usage, see the I{PyFITS User's Manual} available from U{http://www.stsci.edu/resources/software_hardware/pyfits/Users_Manual1.pdf} Epydoc markup used", "a field may not be the column right after the last field if", "in range(naxis): _naxis = self.hdu.header['NAXIS'+`naxis-i`] indx = _iswholeline(key[i], _naxis) offset = offset *", "ext1[0] else: if isinstance(ext1[0], (int, tuple)): raise KeyError, 'Redundant/conflicting keyword argument(s): %s' %", "= 1 def __delslice__(self, i, j): \"\"\"Delete a slice of HDUs from the", "indx += npts elif indx > npts: indx = npts return indx _start", "associated with the data. If the 3rd argument is not a header, it", "'exception'. verbose: print out verbose messages? 
default = 0. \"\"\" # Get the", "dir(self): if self.data is None: _shape, _format = (), '' _nrows = 0", "% ext2 elif n_ext1 == 2: if n_ext2 == 0: ext = ext1", "\"\"\" input: a list of Columns or a ColDefs object. header: header to", "if self._coldefs._recformats[indx]._dtype is _booltype: for i in range(len(self._parent)): dummy[i] = num.equal(dummy[i], ord('T')) self._convert[indx]", "key.lower().rstrip() _list = map(lambda x: x.lower().rstrip(), nameList) _count = operator.countOf(_list, _key) # occurrence", "ASCII table extension HDU class.\"\"\" __format_RE = re.compile( r'(?P<code>[ADEFI])(?P<width>\\d+)(?:\\.(?P<prec>\\d+))?') def __init__(self, data=None, header=None,", "type, length of header, data shape and type for each extension. @type filename:", "The data of random group FITS file will be like a binary table's", "for extension specification. See L{getdata} for explanations/examples. @return: keyword value @rtype: string, integer,", "evaluate them as octal values. _number_FSC_RE = re.compile(r'(?P<sign>[+-])?0*(?P<digt>' + _digits_FSC+')') _number_NFSC_RE = re.compile(r'(?P<sign>[+-])?", "def __getitem__(self, key): tmp = rec.RecArray.__getitem__(self, key) if isinstance(key, slice): out = tmp", "in file (None) \"\"\" # mappings between FITS and numarray typecodes NumCode =", "tmp = rec.RecArray.__repr__(self) loc = tmp.rfind('\\nnames=') tmp = tmp[:loc+7] + `self._coldefs.names` + ')'", "time, no need to copy, and keep it unchanged else: self.header = header", "!= '': output_format = _fits2rec[dtype]+`int(option)` # make sure option is integer else: _repeat", "or after the specified location. If no \"before\" or \"after\" is specified, it", "since it is not doing anything. _ImageBaseHDU.__init__(self, data=data, header=header) self._xtn = 'IMAGE' self.header._hdutype", "so the sliced FITS_rec will view the same scaled columns as # the", "of the string value) _key = self._cardimage[:8].strip().upper() if _key in Card._commentaryKeys: eqLoc =", "print 'Format \"%s\" is not recognized.' % tform if repeat == '': repeat", "before int checking since bool is also int elif isinstance(self.value , bool): valStr", "len(output) <= Card.length: output = \"%-80s\" % output # longstring case (CONTINUE card)", "'sci', 2) # EXTNAME='SCI' & EXTVER=2 >>> getdata('in.fits', extname='sci', extver=2) # equivalent >>>", "dim=None, array=None): \"\"\"Construct a Column by specifying attributes. All attributes except format can", "= 1 def _verify (self, option='warn'): _text = '' _err = _ErrList([], unit='HDU')", "still be filled with zeros/blanks. tbtype: table type to be created (BinTableHDU or", "== 0) and (indx.stop == naxis) and (indx.step == 1): return _WholeLine(naxis, 0)", "self.__dict__.has_key('_cardimage'): if isinstance(self, _Hierarch): keyStr = 'HIERARCH %s ' % self.key else: keyStr", "# is no comment if self.key in Card._commentaryKeys: if not isinstance(self.value, str): raise", "and keyword names, make # sure to preserve the one-to-one correspondence when updating", "using self.key eqStr = '' if self.__dict__.has_key('value'): valStr = str(self.value) # put all", "populate data to the new table for i in range(len(tmp)): if tmp._arrays[i] is", "if not self.header.has_key('PCOUNT'): dim = self.header['NAXIS'] if dim == 0: dim = ''", "for lazy instantiation of data ASCIITNULL = 0 # value for ASCII table", "(data is DELAYED): return self.data = data # update the header self.update_header() self._bitpix", "with HIERARCH which allows keyword name longer than 8 characters. 
\"\"\" def _verify(self,", "ascii2rec = {'A':'a', 'I':'i4', 'F':'f4', 'E':'f4', 'D':'f8'} _re = re.compile(r'(?P<dtype>[AIFED])(?P<width>[0-9]*)') # Parse the", "_INDENT*tab+\"%s %s:\\n\" % (self.unit, element) result += _dummy element += 1 return result", "repr to accomodate both string and non-string types # Boolean is also OK", "self.columns data.parnames = self.columns._pnames else: data = None self.__dict__[attr] = data elif attr", "null strings.\"\"\" for cname in _commonNames: setattr(self, cname+'s', ['']*self._nfields) setattr(self, '_arrays', [None]*self._nfields) def", "wrong data type.' if 'header' in extkeys: header = extkeys['header'] del extkeys['header'] new_hdu=_makehdu(data,", "def size(self): \"\"\"Size (in bytes) of the data portion of the HDU.\"\"\" size", "' '*Card.length: self._blanks = i - 1 break def append(self, card, useblanks=1, bottom=0):", "_bzero = self.header.get('PZERO'+`i+1`, 0) _pnames.append(self.header['PTYPE'+`i+1`].lower()) _cols.append(Column(name='c'+`i+1`, format = _format, bscale = _bscale, bzero", "Association of Universities for Research in Astronomy (AURA) Redistribution and use in source", "the new table definition keywords for i in range(len(_cols)): for cname in _commonNames:", "% self.key if len(output) <= Card.length: output = \"%-80s\" % output # longstring", "list(self.data.data.getshape())[1:] _format = `self.data._parent.field(0).type()` else: _shape = list(self.data.getshape()) _format = `self.data.type()` _shape.reverse() _shape", "the attributes for attr in ['formats', 'names']: setattr(_data, attr, getattr(tmp, attr)) for i", "for hdu in self: if 'data' in dir(hdu): if isinstance(hdu, (GroupsHDU, _TableBaseHDU)) and", "self.header['TFORM'+`j+1`] fmt = self.__format_RE.match(valu) if fmt: code, width, prec = fmt.group('code', 'width', 'prec')", "second field. If there is no exact name matched, it will try to", "pcount = 0 mo = re_groups.search(block) if mo and simple: groups = 1", "the header from an extension of a FITS file. @param filename: input FITS", "reverse = 1. \"\"\" fmt = input_format (repeat, dtype, option) = _parse_tformat(fmt) if", "raise IOError, \"Supplied data will overflow the stream\" if _ImageBaseHDU.NumCode[self.header['BITPIX']] != data.type(): raise", "sensitive: you can have two different columns called 'abc' and 'ABC' respectively. (b)", "must be compliant to FITS standard. key: keyword name, default=''. value: keyword value,", "if (indx.start == 0) and (indx.stop == naxis) and (indx.step == 1): return", "class _Hierarch(Card): \"\"\"Cards begins with HIERARCH which allows keyword name longer than 8", "option will be overwritten by any user specified bscale/bzero values. 
bscale/bzero: user specified", "in self.formats] elif self._tbtype == 'TableHDU': self._Formats = self.formats if len(self) == 1:", "elif cards[0].key == 'XTENSION': xtension = cards[0].value.rstrip() if xtension == 'TABLE': self._hdutype =", "\"\"\" try: key = key.strip().upper() if key[:8] == 'HIERARCH': key = key[8:].strip() _index", "= 0 raise ValueError, self._err_text def _checkKey(self, val): \"\"\"Verify the keyword to be", "list(self.data.data.getshape())[1:] axes.reverse() axes = [0] + axes elif isinstance(self.data, num.NumArray): self.header['BITPIX'] = _ImageBaseHDU.ImgCode[self.data.type()]", "hdr.ascardlist(): _key = _tdef_re.match(_card.key) try: keyword = _key.group('label') except: continue # skip if", "' ') if numr.group('sign') == None: _val = eval(_digt) else: _val = eval(numr.group('sign')+_digt)", "isinstance(tmp._recformats[i], _FormatX): if tmp._arrays[i][:n].shape[-1] == tmp._recformats[i]._nx: _wrapx(tmp._arrays[i][:n], hdu.data._parent.field(i)[:n], tmp._recformats[i]._nx) else: # from a", "0 def _getext(filename, mode, *ext1, **ext2): \"\"\"Open the input file, return the HDUList", "use the current data type. option: how to scale the data: if \"old\",", "extension >>> update(file, dat, 3) # update the 3rd extension >>> update(file, dat,", "the 3rd extension >>> update(file, dat, hdr, 3) # update the 3rd extension", "%s\" % array array._dtype = recfmt._dtype else: raise ValueError, \"Data is inconsistent with", "\"\"\" Get the scaling flags and factors for one field. indx is the", "MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL", "undefined cells will still be filled with zeros/blanks. tbtype: table type to be", "= hdu.columns # get the right shape for the data part of the", "in self.__dict__: self.__dict__['_valuestring'] = valu.group('valu') if '_valueModified' not in self.__dict__: self.__dict__['_valueModified'] = 0", "object array are consistent. if not isinstance(array, (num.NumArray, chararray.CharArray, Delayed)): try: # try", "flush is complete!\" keyboardInterruptSent = True # Install new handler signal.signal(signal.SIGINT,New_SIGINT) if self.__file.mode", "formats=_formats[:-1], names=tmp.names, shape=nrows)) else: hdu.data = FITS_rec(rec.array(None, formats=tmp._recformats, names=tmp.names, shape=nrows)) hdu.data._coldefs = hdu.columns", "0 (_scale, _zero, bscale, bzero) = hdu.data._get_scale_factors(i)[3:] if n > 0: if isinstance(tmp._recformats[i],", "\"\"\" if header is not None: if not isinstance(header, Header): raise ValueError, \"header", "\"\"\" self.__file = file if hdus is None: hdus = [] # can", "= 'f8' else: raise ValueError, \"Illegal format %s\" % fmt else: if dtype", "the header/data to FITS file if filename exists, create if not. If only", "something if _dummy.strip(): if self.unit: result += _INDENT*tab+\"%s %s:\\n\" % (self.unit, element) result", "hdu._extver = self._extver hdu._new = 0 hdu.header._mod = 0 hdu.header.ascard._mod = 0 except:", "a field (presumably with the field method), it will try to match the", "self._hdutype = _ExtensionHDU else: self._hdutype = _ValidHDU except: self._hdutype = _CorruptedHDU # populate", "match the name with case insensitivity. 
So, in the last example, field('Abc') will", "data reading will be delayed for col in range(_nfields): dict[col]['array'] = Delayed(input, col)", "bottom: for i in range(nc-1, -1, -1): # locate last non-commentary card if", "if self._bscale != 1: num.multiply(self.data, self._bscale, self.data) if self._bzero != 0: self.data +=", "usable after the call. type (string): destination data type, use numarray attribute format,", "file if filename exists, create if not. If only data is supplied, a", "tuple)): for col in input: if not isinstance(col, Column): raise \"Element %d in", "the string value can fit in one line. # Instead, just truncate the", "in _rec2fits.keys(): # record format _repeat = '' if repeat != 1: _repeat", "to be appended. useblanks: Use any *extra* blank cards? default=1. If useblanks !=", "re_naxisn.search(block, pos) pos = mo.end(0) dims[int(mo.group(1))-1] = int(mo.group(2)) datasize = reduce(operator.mul, dims[groups:]) size", "# try to match case-insentively, _key = key.lower().rstrip() _list = map(lambda x: x.lower().rstrip(),", "\"\"\"To make sure the new item has consistent data type to avoid misalignment.", "in range(len(input)): if dtype == 'a': data_output[i] = chararray.array(input[i], itemsize=1) else: data_output[i] =", "header=None, name=None): \"\"\"Construct an image HDU. data: the data in the HDU, default=None.", "present, or it is a commentary card. \"\"\" # no equal sign for", "/ \" + commfmt % i output = output + '%-80s' % commstr", "del self._arrays[indx] self._nfields -= 1 def change_attrib(self, col_name, attrib, new_value): \"\"\"Change an attribute", "variable length column if isinstance(self._coldefs._recformats[indx], _FormatP): desc = self._parent.field(indx) desc[:] = 0 #", "shdu = pyfits.StreamingHDU('filename.fits',header) for each piece of data: shdu.write(data) shdu.close() \"\"\" def __init__(self,", "the file does not already exist, it will be created and if the", "isinstance(self._parent.field(indx)._type, num.IntegralType): dummy = num.around(dummy) self._parent.field(indx)[:] = dummy del dummy # ASCII table", "raise \"Writing to zipped fits files is not supported\" zfile = zipfile.ZipFile(self.name) namelist", "before the END card.\"\"\" for i in range(1, len(self)): if str(self[-i]) != '", "dummy._dtype = self._coldefs._recformats[indx]._dtype for i in range(len(self._parent)): _offset = self._parent.field(indx)[i,1] + self._heapoffset self._file.seek(_offset)", "% (len(hduList)+1) break # initialize/reset attributes to be used in \"update/append\" mode #", "ColDefs, Column, FITS_rec, _FormatP, _FormatX, _VLF \"\"\" \"\"\" Do you mean: \"Profits\"? -", "_err) self.req_cards('PCOUNT', _pos, _isInt, 0, option, _err) self.req_cards('GROUPS', _pos, 'val == True', True,", "same code as in _TableBaseHDU size = self.size() if size: self._file.seek(self._datLoc) data =", "isinstance(ext1[0], str): if n_ext2 == 1 and 'extver' in keys: ext = ext1[0],", "mo is not None: pcount = int(mo.group(1)) else: pcount = 0 mo =", "so we can deal with scaled columns. \"\"\" def __init__(self, input): \"\"\"Construct a", "self.header['TFIELDS'] = data._nfields self.data = data self.columns = data._coldefs self.update() elif data is", "the FITS block size _python_mode = {'readonly':'rb', 'copyonwrite':'rb', 'update':'rb+', 'append':'ab+'} # open modes", "+ _format _bscale = self.header.get('BSCALE', 1) _bzero = self.header.get('BZERO', 0) _cols.append(Column(name='data', format =", "# comment maybe an empty string. 
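The update() convenience function and its doctest lines (">>> update(file, dat, 3)", ">>> update(file, dat, hdr, 3)") are quoted a few shingles back. Rewritten as a hedged sketch with illustrative names, assuming 'out.fits' already has at least three extensions:

    import numpy as np
    import pyfits

    dat = np.zeros((10, 10), dtype=np.float32)
    pyfits.update('out.fits', dat, 3)               # replace the 3rd extension's data
    hdr = pyfits.getheader('out.fits', 3)
    pyfits.update('out.fits', dat, hdr, 3)          # supply a header as well
    pyfits.update('out.fits', dat, extname='sci')   # or address the HDU by EXTNAME
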
_value_FSC_RE = re.compile( r'(?P<valu_field> *' r'(?P<valu>' #", "val[:8].upper() == 'HIERARCH': val = val[8:].strip() self.__class__ = _Hierarch else: raise ValueError, 'keyword", "elif xtension in ('BINTABLE', 'A3DTABLE'): self._hdutype = BinTableHDU else: self._hdutype = _ExtensionHDU else:", "= fix_value is not None # if pos is a string, it must", "= {} for key in _fits2rec.keys(): _rec2fits[_fits2rec[key]]=key class _FormatX(str): \"\"\"For X format in", "'Illegal slice %s, step must be positive.' % input else: raise IndexError, 'Illegal", "_ExtensionHDU): c0 = Card('XTENSION', 'IMAGE', 'Image extension') else: c0 = Card('SIMPLE', True, 'conforms", "_verify(self, option='warn'): \"\"\"_TableBaseHDU verify method.\"\"\" _err = _ExtensionHDU._verify(self, option=option) self.req_cards('NAXIS', None, 'val ==", "= 'big' hdu.data._parent._byteorder = 'big' output = hdu.data else: output = hdu.data output.tofile(self.__file)", "opposite if reverse = 1. \"\"\" fmt = input_format (repeat, dtype, option) =", "% item # second time go through the next level items, each of", "isinstance(self, PrimaryHDU): hdulist = HDUList([self]) hdulist.writeto(name, output_verify, clobber=clobber) def _verify(self, option='warn'): _err =", "= _where + 1 if _keyList[_start:].count('CONTINUE') == 0: break # construct the Header", "'key'.\"\"\" # delete ALL cards with the same keyword name if isinstance(key, str):", "class _CorruptedHDU(_AllHDU): \"\"\"A Corrupted HDU class.\"\"\" \"\"\" This class is used when one", "be a list of null strings.\"\"\" for cname in _commonNames: setattr(self, cname+'s', ['']*self._nfields)", "self.hdu.header['NAXIS'+`naxis-j`] indx = _iswholeline(key[j], _naxis) dims.append(indx.npts) if not isinstance(indx, _WholeLine): raise IndexError, 'Subsection", "is raised and the data is not written. Once sufficient data has been", "+= eval(imag.group('sign') + _idigt)*1j else: _val = UNDEFINED self.__dict__['value'] = _val if '_valuestring'", "self.__class__ = Card else: # does not support CONTINUE for HIERARCH if len(keyStr", "= nc - 1 if not bottom: for i in range(nc-1, -1, -1):", "_number and (_scale or _zero): dummy = self._convert[indx].copy() if _zero: dummy -= bzero", "data for i in range(len(tmp)): _arr = tmp._arrays[i] if isinstance(_arr, Delayed): tmp._arrays[i] =", "which will contain both group parameter info and the data. The rest of", "for existence of a keyword. Returns 1 if found, otherwise, 0. key: keyword", "column Boolean array into an UInt8 array. input: input Boolean array of shape", "TSCAL keyword bzero: bzero value, corresponding to TZERO keyword disp: display format, corresponding", "# find the END card mo = end_RE.search(block) if mo is None: hdu._raw", "value, 'extension name')) self.__dict__[attr] = value def _verify(self, option='warn'): _err = _ValidHDU._verify(self, option=option)", "_naxis = self['NAXIS'] if issubclass(self._hdutype, _TableBaseHDU): _tfields = self['TFIELDS'] del self['NAXIS'] for i", "output = output + '%-80s' % commstr return output def _words_group(self, input, strlen):", "_get_index(self.names, col_name) getattr(self, attrib+'s')[indx] = new_value def change_name(self, col_name, new_name): \"\"\"Change a Column's", "is None: self.__dict__['_err_text'] = 'Illegal keyword name %s' % repr(val) self.__dict__['_fixable'] = 0", "and BZERO in self.header. 
This method should only be used right before writing", "= getattr(_cols, cname+'s')[i] if val != '': keyword = _keyNames[_commonNames.index(cname)]+`i+1` if cname ==", "r'(?P<comm>.*)' r')?$') # keys of commentary cards _commentaryKeys = ['', 'COMMENT', 'HISTORY'] def", "opened physical file associated with the HDUList. Default = None. \"\"\" self.__file =", "import threading # Module variables _blockLen = 2880 # the FITS block size", "as the standard format for storing high energy astrophysics data. For details of", "will be created and it will be placed before or after the specified", "to satisfy the amount specified in the header, the stream is padded to", "var length table if isinstance(coldata, _VLF): for i in coldata: if not isinstance(i,", "integer Flag that when true indicates that all of the required data has", "argument: %s' % ext2 return hdulist, ext def getheader(filename, *ext, **extkeys): \"\"\"Get the", "else: strfmt = '>' + strfmt[:-1] return strfmt ''' def _verify(self, option='warn'): \"\"\"TableHDU", "r'[+-]? *' + _digits_NFSC # This regex helps delete leading zeros from numbers,", "int(mo.group(1)) pos = mo.end(0) else: raise ValueError(\"NAXIS not found where expected\") if naxis", "hdu.data is not None: hdu._file.seek(hdu._datLoc) self.__file.writeHDUdata(hdu) if (verbose): print \"update data in place:", "keywords EXTEND if header is None: dim = `self.header['NAXIS']` if dim == '0':", "in range(groups,naxis): size = size * self.header['NAXIS'+`j+1`] bitpix = self.header['BITPIX'] gcount = self.header.get('GCOUNT',", "+ _floatFormat(self.value.imag) + ')' valStr = '%20s' % _tmp else: valStr = '%20s'", "except: pass _pos = '>= '+`_after` self.req_cards('GCOUNT', _pos, _isInt, 1, option, _err) self.req_cards('PCOUNT',", "as the shape of the record if nrows == 0: for arr in", "option='warn'): \"\"\"Card class verification method.\"\"\" _err = _ErrList([]) try: self._check(option) except: pass _err.append(self.run_option(option,", "renamed to each other.' elif (force == 0) and (newkey in self.ascard._keylist): raise", "= \"'%s' card does not exist.\" % keywd fix_text = \"Fixed by inserting", "the length of all remaining axes else: offset *= _naxis if dims ==", "self.writeComplete = 0 else: self.writeComplete = 1 def write(self,data): \"\"\" Write the given", "found, otherwise it will return # None, meaning the keyword is undefined. The", "object from a (raw) string. It will pad the string if it is", "can only have one extension with # that name if _ver == None:", "will be created for the data object supplied. 
\"\"\" if not os.path.exists(filename): writeto(filename,", "None: _realStr = real.group('sign')+_realStr imag = Card._number_NFSC_RE.match(input.group('imag')) _imagStr = imag.group('digt').translate(_fix_table, ' ') if", "# do the comment string if self.comment is None: comm = '' else:", "'header' is set to True, this function will return a (data, header) tuple.", "self.ascard.append(new_card, useblanks=useblanks, bottom=1) else: try: _last = self.ascard.index_of(key, backward=1) self.ascard.insert(_last+1, new_card) except: self.ascard.append(new_card,", "self._heapsize = 0 for indx in range(self._nfields): if (self._convert[indx] is not None): if", "decimal point.\"\"\" valueStr = \"%.16G\" % value if \".\" not in valueStr and", "make mask array indexing work properly.\"\"\" hdu = new_table(self._coldefs, nrows=shape[0]) return hdu.data def", "the record if nrows == 0: for arr in tmp._arrays: if arr is", "self.__dict__['_err_text'] = 'Unprintable string %s' % repr(val) self.__dict__['_fixable'] = 0 raise ValueError, self._err_text", "1) _bzero = self.header.get('BZERO', 0) _cols.append(Column(name='data', format = dat_format, bscale = _bscale, bzero", "is written. input: input object array desp_output: output \"descriptor\" array of data type", "a primary HDU.\" fix_text = 'Fixed by inserting one as 0th HDU.' fix", "chararray.array(input+' ', itemsize=1) # locations of the blanks blank_loc = num.nonzero(arr == '", "r'(?P<comm_field>' r'(?P<sepr>/ *)' r'(?P<comm>[!-~][ -~]*)?' r')?$') _value_NFSC_RE = re.compile( r'(?P<valu_field> *' r'(?P<valu>' r'\\'(?P<strg>([", "columns. \"\"\" def __init__(self, input): \"\"\"Construct a FITS record array from a RecArray.\"\"\"", "_extver = self[j]._extver if _ver == _extver: found = j nfound += 1", "IndexError, 'No data in this HDU.' if _gethdr: _hdr = hdu.header hdulist.close() if", "= Card._number_NFSC_RE.match(input.group('real')) _realStr = real.group('digt').translate(_fix_table, ' ') if real.group('sign') is not None: _realStr", "and oldkey in Card._commentaryKeys): raise ValueError, 'Regular and commentary keys can not be", "= format # does not include Object array because there is no guarantee", "= key.lower().rstrip() _list = map(lambda x: x.lower().rstrip(), nameList) _count = operator.countOf(_list, _key) #", "_coldefs._pnames = _pnames self.__dict__[attr] = _coldefs elif attr == '_theap': self.__dict__[attr] = 0", "a (data, header) tuple. \"\"\" if 'header' in extkeys: _gethdr = extkeys['header'] del", "class to the urllibrary urllib._urlopener.tempcache = {} # Initialize tempcache with an empty", "the list. If string, (a) Field (column) names are case sensitive: you can", "option, default = 'exception'. verbose: print out verbose messages? default = 0. This", "reset by user. 
_isInt = \"isinstance(val, (int, long))\" # Functions def _padLength(stringLen): \"\"\"Bytes", "tbtype: table type to be created (BinTableHDU or TableHDU) \"\"\" # construct a", "self._err_text, '\\n%s' % self._cardimage elif option in ['fix', 'silentfix']: result = self._check('parse') self._fixValue(result)", "if len(self) > 0 and (not isinstance(self[0], PrimaryHDU)): err_text = \"HDUList's 0th element", "HDU Classes: _AllHDU, BinTableHDU, _CorruptedHDU, _ExtensionHDU, GroupsHDU, ImageHDU, _ImageBaseHDU, PrimaryHDU, TableHDU, _TableBaseHDU, _TempHDU,", "if hdu.name == '': hdu.name, hdu._extver = hdu._getname() elif hdu.name == 'PRIMARY': hdu._extver", "str) and len(valStr) > (Card.length-10): self.__class__ = _Card_with_continue output = self._breakup_strings() else: print", "associated with array (None) _datLoc: starting byte location of data block in file", "default=None. If header=None, a minimal Header will be provided. name: The name of", "card image longer than 80, assume it contains CONTINUE card(s). elif len(self._cardimage) >", "= super(CardList, self).__getslice__(start,end) result = CardList(_cards, self._keylist[start:end]) return result def __setitem__(self, key, value):", "if not eval(test): err_text = \"'%s' card has invalid value '%s'.\" % (keywd,", "_bzero = self.header.get('BZERO', 0) _cols.append(Column(name='data', format = dat_format, bscale = _bscale, bzero =", "dimension 1'), Card('NAXIS2', 0, 'length of dimension 2'), Card('PCOUNT', 0, 'number of group", "number to make sure it gets the decimal point.\"\"\" valueStr = \"%.16G\" %", "save memory dims = self.data.getshape() self.data.setshape(self.data.nelements()) min = num.minimum.reduce(self.data) max = num.maximum.reduce(self.data) self.data.setshape(dims)", "'append':'ab+'} # open modes _memmap_mode = {'readonly':'r', 'copyonwrite':'c', 'update':'r+'} TRUE = True #", "VLdata)) val = 'P' + _convert_format(val._dtype, reverse=1) + '(%d)' % VLdata._max else: val", "provided in the constructor. \"\"\" size = 0 naxis = self.header.get('NAXIS', 0) if", "_list.append(Card('PCOUNT', 0, 'number of parameters')) _list.append(Card('GCOUNT', 1, 'number of groups')) if header is", "HDU, will be the value of the keywod EXTNAME, default=None. \"\"\" # no", "self._parent.field(indx) # further conversion for both ASCII and binary tables if _number and", "isinstance(_key, int): raise ValueError, \"An element in the HDUList must be an HDU.\"", "to multiply the length of all remaining axes else: offset *= _naxis if", "= valu.group('bool')=='T' elif valu.group('strg') != None: _val = re.sub(\"''\", \"'\", valu.group('strg')) elif valu.group('numr')", "dummy[:] = ASCIITNULL self._convert[indx] = dummy for i in range(len(self._parent)): if self._parent.field(indx)[i].strip() !=", "_mod attribute since it has methods to change # the content of header", "if self._cardimage[:8].upper() == 'HIERARCH': _start = 8 self.__class__ = _Hierarch return self._cardimage[_start:eqLoc] def", "the blanks blank_loc = num.nonzero(arr == ' ')[0] offset = 0 xoffset =", "string _fix_table = maketrans('de', 'DE') _fix_table2 = maketrans('dD', 'eE') class Card(_Verify): # string", "_bool, _number, _scale, _zero, bscale, bzero) def field(self, key): \"\"\"A view of a", "= self.header if hdr[0] != self._xtn: hdr[0] = self._xtn hdr.ascard[0].comment = 'binary table", "keyword in the header. 
oldkey: old keyword, can be a name or index.", "chararray.array(array, itemsize=eval(recfmt[1:])) # then try variable length array except: if isinstance(recfmt, _FormatP): try:", "key[:8] == 'HIERARCH': key = key[8:].strip() _index = self.ascard._keylist.index(key) return 1 except: return", "FITS_rec(_data) def new_table (input, header=None, nrows=0, fill=0, tbtype='BinTableHDU'): \"\"\"Create a new table from", "\"\"\" def __init__(self, val, unit=\"Element\"): list.__init__(self, val) self.unit = unit def __str__(self, tab=0):", "# delete the keywords BSCALE and BZERO after scaling del self.header['BSCALE'] del self.header['BZERO']", "tbtype if isinstance(input, ColDefs): self.data = [col.copy() for col in input.data] # if", "to the CardList. card: The Card to be appended. useblanks: Use any *extra*", "self.__dict__['_fix_text'] = 'Fixed card to be FITS standard.: %s' % self.key else: self.__dict__['_err_text']", "include bscale/bzero for now XXX) _bitpix = self.hdu.header['BITPIX'] code = _ImageBaseHDU.NumCode[_bitpix] self.hdu._file.seek(self.hdu._datLoc+offset*abs(_bitpix)/8) raw_data", "be displayed in a # preferred order. _commonNames = ['name', 'format', 'unit', 'null',", "= _ExtensionHDU else: self._hdutype = _ValidHDU except: self._hdutype = _CorruptedHDU # populate the", "the keyword name.\"\"\" if isinstance (value, Card): _key = self.index_of(key) # only set", "accomodate both the ASCII table and binary table column # format spec, i.e.", "and x: print 'Output verification result:' print x if _option == 'exception' and", "the default value. key: keyword name or index default: if no keyword is", "range(1, naxis): size = size * self.header['NAXIS'+`j+1`] bitpix = self.header['BITPIX'] gcount = self.header.get('GCOUNT',", "self.data.parnames[i]) (_scale, _zero) = self.data._get_scale_factors(i)[3:5] if _scale: self.header.update('PSCAL'+`i+1`, self.data._coldefs.bscales[i]) if _zero: self.header.update('PZERO'+`i+1`, self.data._coldefs.bzeros[i])", "integer.' % input _stop = input.stop if _stop is None: _stop = naxis", "_fits2rec[_format] _coldefs._pnames = _pnames self.__dict__[attr] = _coldefs elif attr == '_theap': self.__dict__[attr] =", "_offset _offset += len(data_output[i]) * _nbytes return data_output class _VLF(objects.ObjectArray): \"\"\"variable length field", "group will return a match if a FITS string, boolean, # number, or", "except: eqLoc = None return eqLoc def _getKeyString(self): \"\"\"Locate the equal sign in", "format is not None: # check format try: # legit FITS format? convert", "`%s`.\" % format self.format = format # does not include Object array because", "0) _pnames.append(self.header['PTYPE'+`i+1`].lower()) _cols.append(Column(name='c'+`i+1`, format = _format, bscale = _bscale, bzero = _bzero)) data_shape", "it will be constructed from the card list. if keylist is None: self._keylist", "integer else: _repeat = '' if repeat != 1: _repeat = `repeat` output_format", "self.columns else: data = None self.__dict__[attr] = data elif attr == 'columns': class_name", "data portions are not actually read here, but the beginning locations are computed.", "is a field named \"XYZ\" and no other field name is a case", "HDU, 'BinTableHDU' (default) or 'TableHDU' (text table). \"\"\" ascii_fmt = {'A':'A1', 'I':'I10', 'E':'E14.6',", "if mo: extver = int(mo.group(1)) else: extver = 1 return name, extver def", "object. Allows structured access to FITS Group data in a manner analogous to", "the summary information on a FITS file. 
This includes the name, type, length", "range(len(self)): cards[i]=Card('').fromstring(str(self[i])) return CardList(cards) def __repr__(self): \"\"\"Format a list of cards into a", "if _bitpix > 0: # scale integers to Float32 self.data = num.array(raw_data, type=num.Float32)", "= `self.data.type()` _shape.reverse() _shape = tuple(_shape) _format = _format[_format.rfind('.')+1:] # if data is", "mo.group(1).rstrip() else: name = '' mo = re_extver.search(self._raw) if mo: extver = int(mo.group(1))", "in range(len(self))].__iter__() def __getitem__(self, key): \"\"\"Get an HDU from the HDUList, indexed by", "be used right before writing to the output file, as the data will", "i in range(0, len(_blockLen), Card.length): _card = Card('').fromstring(block[i:i+Card.length]) _key = _card.key cardList.append(_card) keyList.append(_key)", "['key', 'value', 'comment', '_valueModified']: if self.__dict__.has_key(name): delattr(self, name) return self def _ncards(self): return", "column starting position (ASCII table only), corresponding to TBCOL keyword dim: column dimension", "_bscale, bzero = _bzero)) data_shape = self._dimShape()[:-1] dat_format = `int(num.array(data_shape).sum())` + _format _bscale", "= _getext(filename, 'readonly', *ext, **extkeys) hdu = hdulist[_ext] hdr = hdu.header hdulist.close() return", "filename: string @param filename: name of the file to append to @type data:", "return input elif _len > Card.length: strlen = _len % Card.length if strlen", "# _parent is the original (storage) array, # _convert is the scaled (physical)", "it to be one, i.e. # input arrays can be just list or", "= self.__file.read(_blockLen) if block == '': raise EOFError hdu = _TempHDU() hdu._raw =", "'Value in a commentary card must be a string' else: self.__dict__['_cardimage'] = '", "if useblanks: self._use_blanks(card._ncards()) self.count_blanks() self._mod = 1 else: raise SyntaxError, \"%s is not", "_indx except: raise KeyError, 'Keyword %s not found.' % `key` else: raise KeyError,", "# make sure to consider the case that the starting column of #", "(or blank card), append at the end. \"\"\" new_card = Card(key, value) if", "+ _realStr + ', ' + _imagStr + ')' self.__dict__['_valuestring'] = _valStr self._ascardimage()", "write(self,data): \"\"\" Write the given data to the stream. :Parameters: data : NumArray", "_cards[%d];_cards.insert(%d, dummy)\" % (_index, _index, insert_pos) _err.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix)) # if value", "after=None): \"\"\"Add a COMMENT card. value: Comment text to be added. before: [same", "hdr = self.header if hdr[0] != self._xtn: hdr[0] = self._xtn hdr.ascard[0].comment = 'binary", "self.__file.close() class HDUList(list, _Verify): \"\"\"HDU list class. This is the top-level FITS object.", "self.__dict__: self.__dict__['_valueModified'] = 0 elif name == 'comment': self.__dict__['comment'] = '' if valu", "shape=tmp._shape) else: _data = rec.array(hdu._file, formats=tmp._recformats, names=tmp.names, shape=tmp._shape) if isinstance(hdu._ffile, _File): _data._byteorder =", "_arr = tmp._arrays[i] if isinstance(_arr, Delayed): tmp._arrays[i] = _arr.hdu.data._parent.field(_arr.field) # use the largest", "val = self.value.replace(\"'\", \"''\") val_list = self._words_group(val, val_len) for i in range(len(val_list)): if", "all HDU's while 1: try: hduList.append(ffo._readHDU()) except EOFError: break # check in the", "present, even it has nothing. 
for item in self: if isinstance(item, _ErrList): _dummy", "_pnames.append(self.header['PTYPE'+`i+1`].lower()) _cols.append(Column(name='c'+`i+1`, format = _format, bscale = _bscale, bzero = _bzero)) data_shape =", "the key in the name list. The key can be an integer or", "valueStr: valueStr += \".0\" return valueStr class Undefined: \"\"\"Undefined value.\"\"\" pass class Delayed:", "# try not to use CONTINUE if the string value can fit in", "in range(_min, _max): num.bitwise_and(input[...,i], pow2[j-i*8], output[...,j]) def _wrapx(input, output, nx): \"\"\"Wrap the X", "{'I':num.Int32, 'F':num.Float32, 'E':num.Float32, 'D':num.Float64} _type = _dict[self._coldefs._Formats[indx][0]] # if the string = TNULL,", "Card('PCOUNT', 0, 'number of group parameters'), Card('GCOUNT', 1, 'number of groups'), Card('TFIELDS', 0,", "the value is different from the old one if str(self[_key]) != str(value): super(CardList,", "more efficient. else: return tmp def _get_scale_factors(self, indx): \"\"\" Get the scaling flags", "# then try variable length array except: if isinstance(recfmt, _FormatP): try: _func =", "def writeto(filename, data, header=None, **keys): \"\"\"Create a new FITS file using the supplied", "'Int32':32, 'Int64':64, 'Float32':-32, 'Float64':-64} def __init__(self, data=None, header=None): self._file, self._datLoc = None, None", "to be written to the file. :Returns: None Notes ----- The file will", "conflicting specifications will raise an exception, e.g., >>> getdata('in.fits', ext=('sci',1), extname='err', extver=2) @return:", "object depending on the type of the extension being referenced If the optional", "(storage) array, # _convert is the scaled (physical) array. self._parent = input self._convert", "a header, it (and other positional arguments) are assumed to be the extension", "place (card %d).\" % (keywd, _index) fix_text = \"Fixed by moving it to", "spec. \"\"\" ascii2rec = {'A':'a', 'I':'i4', 'F':'f4', 'E':'f4', 'D':'f8'} _re = re.compile(r'(?P<dtype>[AIFED])(?P<width>[0-9]*)') #", "scaled columns. \"\"\" def __init__(self, input): \"\"\"Construct a FITS record array from a", "last_end last_end = _end self._width = _end else: raise KeyError, 'Attribute %s not", "if we were provided with a Primary Header. If not we will need", "_where = self.__file.tell() if isinstance(hdu, BinTableHDU): self.__file.write(hdu.data._gap*'\\0') for i in range(hdu.data._nfields): if isinstance(hdu.data._coldefs._recformats[i],", "pass class _CorruptedHDU(_AllHDU): \"\"\"A Corrupted HDU class.\"\"\" \"\"\" This class is used when", "'fix': self.__dict__['_fix_text'] = 'Fixed card to be FITS standard.: %s' % self.key else:", "accessed. \"\"\" def _getname(self): \"\"\"Get the extname and extver from the header.\"\"\" re_extname", "HDU from the HDUList, indexed by number or name.\"\"\" key = self.index_of(key) _item", "created for the supplied data. This argument is optional. @keyword clobber: (optional) if", "return size, name def setupHDU(self): \"\"\"Read one FITS HDU, data portions are not", "\"self.header.ascard.insert(%d, %s)\" % (insert_pos, _card) _err.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix, fixable=fixable)) else: # if", "r'(?P<sepr>/ *)' r'(?P<comm>[!-~][ -~]*)?' 
r')?$') _value_NFSC_RE = re.compile( r'(?P<valu_field> *' r'(?P<valu>' r'\\'(?P<strg>([ -~]+?|\\'\\'|))", "ext = ext2['ext'], ext2['extver'] else: raise KeyError, 'Redundant/conflicting keyword argument(s): %s' % ext2", "type=dtype) desp_output[i,0] = len(data_output[i]) desp_output[i,1] = _offset _offset += len(data_output[i]) * _nbytes return", "1, option, _err) # verify each card for _card in self.header.ascard: _err.append(_card._verify(option)) return", "as not to corrupt the original array if bzero not in ['', None,", "card image and return the string before the equal sign. If there is", "is None: comm = '' else: comm = self.comment commfmt = \"%-s\" if", "may not be the column right after the last field if self._tbtype ==", "'== 7', _isInt+\" and val >= 0 and val <= 999\", 0, option,", "!= 0: self.writeComplete = 0 else: self.writeComplete = 1 def write(self,data): \"\"\" Write", "create if not. If only data is supplied, a minimal header is created", "output_verify == 'warn': output_verify = 'exception' self.verify(option=output_verify) # check if the output file", "subclassed opener # class to the urllibrary urllib._urlopener.tempcache = {} # Initialize tempcache", "if there is one single word which is longer than strlen, then it", "They can be either a keyword name or index. \"\"\" if before !=", "_TempHDU() hdu._raw = '' # continue reading header blocks until END card is", "= self._check('parse') self._fixValue(result) if option == 'fix': self.__dict__['_fix_text'] = 'Fixed card to be", "return (dtype, width) def _get_index(nameList, key): \"\"\" Get the index of the key", "and a header. :Parameters: name : string The name of the file to", "table definition keywords. Mark them first, # then delete from the end so", "_keyNames): col = eval(_key.group('num')) if col <= _nfields and col > 0: cname", "_key in Card._commentaryKeys: eqLoc = None else: if _key == 'HIERARCH': _limit =", "kind of header. Strip cards like SIMPLE, BITPIX, etc. so the rest of", "# Handle zip files if mode in ['update', 'append']: raise \"Writing to zipped", "need to check key and comment for 'parse' result = Card._value_NFSC_RE.match(self._getValueCommentString()) # if", "the base name of the mktemp() output. \"\"\" dirName = os.path.dirname(input) if dirName", "self._xtn = 'BINTABLE' hdr = self.header if hdr[0] != self._xtn: hdr[0] = self._xtn", "open # Convenience functions class _Zero(int): def __init__(self): self = 0 def _getext(filename,", "to FITS file if filename exists, create if not. If only data is", "input data/header. @type filename: string @param filename: name of the file to be", "parse further if self.key in Card._commentaryKeys: self.__dict__['value'] = self._cardimage[8:].rstrip() self.__dict__['comment'] = '' return", "= _arr else: hdu.data._parent.field(i)[:n] = tmp._arrays[i][:n] if n < nrows: if tbtype ==", "_min: num.lshift(output[...,i], 1, output[...,i]) num.add(output[...,i], input[...,j], output[...,i]) # shift the unused bits num.lshift(output[...,i],", "'3J'->'3i4') recfmt = _convert_format(format) except: try: # legit RecArray format? 
recfmt = format", "a FITS string, boolean, # number, or complex value is found, otherwise it", "# the offset needs to multiply the length of all remaining axes else:", "!= '' or comment != '': self._setkey(key) self._setvalue(value) self._setcomment(comment) # for commentary cards,", "self.__file.writeHDUheader(hdu) if (verbose): print \"update header in place: Name =\", hdu.name, _extver if", "# determine if any of the HDU is resized for hdu in self:", "backward: _keylist = self._keylist[:] # make a copy _keylist.reverse() try: _indx = _keylist.index(_key)", "field will # return a match if the comment separator is found, though", "eval(_parse[1]) # if the card does not exist if _index is None: err_text", "_other = [_get_index(self.names, key) for key in other] indx=range(len(self)) for x in _other:", "to record format spec. \"\"\" ascii2rec = {'A':'a', 'I':'i4', 'F':'f4', 'E':'f4', 'D':'f8'} _re", "= val elif name == '_arrays': attr = [col.array for col in self.data]", "' ' + fix_text return _text def verify (self, option='warn'): \"\"\"Wrapper for _verify.\"\"\"", "not None: # check format try: # legit FITS format? convert to record", "`%s` for ASCII table.' % input_format return (dtype, width) def _get_index(nameList, key): \"\"\"", "else: raise ValueError, 'keyword name %s is not a string' % val self.__dict__['key']", "del self['BZERO'] if issubclass(self._hdutype, _TableBaseHDU): del self['TFIELDS'] for name in ['TFORM', 'TSCAL', 'TZERO',", "be updated data: the new data used for updating The rest of the", "'%s'.\" % name os.remove(name) else: raise IOError, \"File '%s' already exist.\" % name", "# Functions def _padLength(stringLen): \"\"\"Bytes needed to pad the input stringLen to the", "0. This simply calls the close method of the _File class. It has", "column definitions.\"\"\" \"\"\" input: a list of Columns or a ColDefs object. header:", "not in ['SIMPLE ', 'XTENSION']): raise IOError, 'Block does not begin with SIMPLE", "0) _cols.append(Column(name='data', format = dat_format, bscale = _bscale, bzero = _bzero)) _coldefs =", "- min) / (2.**(8*_type.bytes) - 2) # Do the scaling if _zero !=", "in _commonNames: setattr(self, cname+'s', ['']*self._nfields) setattr(self, '_arrays', [None]*self._nfields) def add_col(self, column): \"\"\"Append one", "be optional. name: column name, corresponding to TTYPE keyword format: column format, corresponding", "optional keyword 'header' is set to True, this function will return a (data,", "exponents _digits_FSC = r'(\\.\\d+|\\d+(\\.\\d*)?)([DE][+-]?\\d+)?' _digits_NFSC = r'(\\.\\d+|\\d+(\\.\\d*)?) *([deDE] *[+-]? *\\d+)?' _numr_FSC = r'[+-]?'", "'big': output = hdu.data.byteswapped() else: output = hdu.data # Binary table byteswap elif", "Classes: _AllHDU, BinTableHDU, _CorruptedHDU, _ExtensionHDU, GroupsHDU, ImageHDU, _ImageBaseHDU, PrimaryHDU, TableHDU, _TableBaseHDU, _TempHDU, _ValidHDU", "hdr['naxis'] hdr.update('extend', True, after='naxis'+`n`) def writeto(self, name, output_verify='exception', clobber=False): \"\"\"Write the HDUList to", "== 'data': self.__dict__[attr] = None if self.header['NAXIS'] > 0: _bitpix = self.header['BITPIX'] self._file.seek(self._datLoc)", "= self.NumCode[self.header['BITPIX']] if isinstance(self, GroupsHDU): _gcount = ' %d Groups %d Parameters' %", "self._datSpan hdu._ffile = self._ffile hdu.name = self.name hdu._extver = self._extver hdu._new = 0", "isinstance(hdu, PrimaryHDU): hdu = ImageHDU(data, header) f = open(filename, mode='update') f.append(hdu) f.close() def", "data type.' 
if 'header' in extkeys: header = extkeys['header'] del extkeys['header'] new_hdu=_makehdu(data, header)", "_option = option.lower() if _option not in ['fix', 'silentfix', 'ignore', 'warn', 'exception']: raise", "- self._datLoc def _summary(self): return \"%-10s %-11s\" % (self.name, \"CorruptedHDU\") def verify(self): pass", "promote products derived from this software without specific prior written permission. THIS SOFTWARE", "_key = self.index_of(key) if isinstance(hdu, (slice, list)): if isinstance(_key, int): raise ValueError, \"An", "and the data. It returns the output \"data\" array of data type dtype.", "if _pcount > 0: hdu.header['PCOUNT'] = _pcount # update TFORM for variable length", ">>> getdata('in.fits', ext=('sci',1), extname='err', extver=2) @return: an array, record array (i.e. table), or", "not in ['fix', 'silentfix', 'ignore', 'warn', 'exception']: raise ValueError, 'Option %s not recognized.'", "the variable array \"\"\" _offset = 0 data_output = _VLF([None]*len(input)) data_output._dtype = dtype", "= data._coldefs self.update() elif data is None: pass else: raise TypeError, \"table data", "= 0 for hdu in self: hdu.header._mod = 0 hdu.header.ascard._mod = 0 hdu._new", "FITS standard (equal sign not at column 8).' raise ValueError, self._err_text, '\\n%s' %", "(_blockLen - stringLen%_blockLen) % _blockLen def _tmpName(input): \"\"\"Create a temporary file name which", "self.__dict__['_err_text'] = 'Illegal keyword name %s' % repr(val) self.__dict__['_fixable'] = 0 raise ValueError,", "by number only.\"\"\" super(HDUList, self).__delslice__(i, j) self._resize = 1 def _verify (self, option='warn'):", "used only for the first case. bitpix: data type as expressed in FITS", "self.__dict__['comment'] = _tmp[slashLoc+1:].strip() except: self.__dict__['value'] = _tmp.strip() elif input.group('numr') != None: numr =", "it will be appended at the end. key: keyword name value: keyword value", "num.equal(dummy[i], ord('T')) self._convert[indx] = dummy return self._convert[indx] if _str: return self._parent.field(indx) # ASCII", "and are best illustrated by examples: No extra arguments implies the primary header", "imag.group('digt').translate(_fix_table2, ' ') if imag.group('sign') == None: _val += eval(_idigt)*1j else: _val +=", "= header.copy() hcopy._strip() _list.extend(hcopy.ascardlist()) self.header = Header(_list) if (data is not DELAYED): if", "file name \"\"\" f = open(filename) f.info() f.close() UNDEFINED = Undefined() __credits__=\"\"\" Copyright", "if isinstance(self._coldefs._recformats[indx], _FormatP): dummy = _VLF([None]*len(self._parent)) dummy._dtype = self._coldefs._recformats[indx]._dtype for i in range(len(self._parent)):", "display format, corresponding to TDISP keyword start: column starting position (ASCII table only),", "= maketrans('de', 'DE') _fix_table2 = maketrans('dD', 'eE') class Card(_Verify): # string length of", "_floatFormat(self.value.imag) + ')' valStr = '%20s' % _tmp else: valStr = '%20s' %", "else) result = None return result else: # verify the equal sign position", "DELAYED): if isinstance(data, rec.RecArray): self.header['NAXIS1'] = data._itemsize self.header['NAXIS2'] = data._shape[0] self.header['TFIELDS'] = data._nfields", "None, 1]: array /= bscale self.array = array def __repr__(self): text = ''", "self.__dict__['_cardimage'] = ' '*80 def __repr__(self): return self._cardimage def __getattr__(self, name): \"\"\" instanciate", "elif isinstance(key, tuple): _key = key[0] _ver = key[1] else: _key = key", "HDUList object is returned. 
\"\"\" def __init__(self, hdus=[], file=None): \"\"\"Construct a HDUList object.", "= i - 1 break def append(self, card, useblanks=1, bottom=0): \"\"\"Append a Card", "so we # must change the Primary header provided into an image #", "'' _err = _ErrList([], unit='HDU') # the first (0th) element must be a", "ASCII table is the same as 7A in # binary table, so both", "'%-' else: _pc = '%' _fmt = ' '*_lead + _pc + _format[1:]", "if 'data' in dir(hdu): if isinstance(hdu, (GroupsHDU, _TableBaseHDU)) and hdu.data is not None:", "name which should not already exist. Use the directory of the input file", "(BinTableHDU or TableHDU) \"\"\" # construct a table HDU hdu = eval(tbtype)(header=header) if", "Memmap from string import maketrans import copy import signal import threading # Module", "!= nullval: dummy[i] = float(self._parent.field(indx)[i].replace('D', 'E')) else: dummy = self._parent.field(indx) # further conversion", "URL cannot be accessed\"\"\" def http_error_default(self, url, fp, errcode, errmsg, headers): raise IOError,", "block + repr(card) return block def __str__(self): \"\"\"Format a list of cards into", "is not None: hcopy = header.copy() hcopy._strip() _list.extend(hcopy.ascardlist()) self.header = Header(_list) self._bzero =", "isinstance(data, rec.RecArray): self.header['NAXIS1'] = data._itemsize self.header['NAXIS2'] = data._shape[0] self.header['TFIELDS'] = data._nfields self.data =", "HDUList. file: The opened physical file associated with the HDUList. Default = None.", "extkeys['header'] else: _gethdr = False hdulist, _ext = _getext(filename, 'readonly', *ext, **extkeys) hdu", "the data to scale. The option will be overwritten by any user specified", ":Returns: None Notes ----- The file will be opened and the header appended", "' + _imagStr + ')' self.__dict__['_valuestring'] = _valStr self._ascardimage() def _locateEq(self): \"\"\"Locate the", "def _verify(self, option='warn'): \"\"\"_TableBaseHDU verify method.\"\"\" _err = _ExtensionHDU._verify(self, option=option) self.req_cards('NAXIS', None, 'val", "# verify the value, it may be fixable result = Card._value_FSC_RE.match(self._getValueCommentString()) if result", "issubclass(self._hdutype == TableHDU): for i in range(_tfields): del self['TBCOL'+`i+1`] except: pass class CardList(list):", "re_naxis.search(block) if mo is not None: naxis = int(mo.group(1)) pos = mo.end(0) else:", "'exception' self.verify(option=output_verify) # check if the output file already exists if os.path.exists(name): if", "is truncated.' output = output[:Card.length] self.__dict__['_cardimage'] = output def _checkText(self, val): \"\"\"Verify val", "self.header['BZERO'] if _scale != 1: self.data /= _scale self.header.update('BSCALE', _scale) else: del self.header['BSCALE']", "with an empty # dictionary to enable file cacheing class _File: \"\"\"A file", "hdu.header hdulist.close() if _gethdr: return _data, _hdr else: return _data def getval(filename, key,", "isinstance(key, (int, long)): return key elif isinstance(key, str): _key = key.strip().upper() if _key[:8]", "the CardList.\"\"\" pairs = [] for card in self.ascard: pairs.append((card.key, card.value)) return pairs", "the call. type (string): destination data type, use numarray attribute format, (e.g. 
'UInt8',", "self.index_of(after) self.insert(loc+1, card, useblanks=useblanks) def insert(self, pos, card, useblanks=1): \"\"\"Insert a Card to", "raise NameError, \"Ambiguous key name '%s'.\" % key else: raise NameError, \"Illegal key", "_ExtensionHDU)): err_text = \"HDUList's element %s is not an extension HDU.\" % `i`", "_key = self._cardimage[:8].strip().upper() if _key in Card._commentaryKeys: eqLoc = None else: if _key", "in self.header.ascard: _err.append(_card._verify(option)) return _err def req_cards(self, keywd, pos, test, fix_value, option, errlist):", "_verify(self, option='warn'): \"\"\"TableHDU verify method.\"\"\" _err = _TableBaseHDU._verify(self, option=option) self.req_cards('PCOUNT', None, 'val ==", "8. \"\"\" eqLoc = self._locateEq() if eqLoc is None: eqLoc = 7 return", "handler signal.signal(signal.SIGINT,New_SIGINT) if self.__file.mode not in ('append', 'update'): print \"flush for '%s' mode", "cards into a string.\"\"\" block = '' for card in self: block =", "attribute format, (e.g. 'UInt8', 'Int16', 'Float32' etc.). If is None, use the current", "is the base class for the TableHDU, ImageHDU, and BinTableHDU classes. \"\"\" def", "(2.**8 - 1) else: _zero = (max + min) / 2. # throw", "if (hdr['extend'] == False): hdr['extend'] = True else: if hdr['naxis'] == 0: hdr.update('extend',", "fmt.group('code', 'width', 'prec') else: raise ValueError, valu size = eval(width)+1 strfmt = strfmt", "tricky use of __str__, since normally __str__ has only one argument. \"\"\" result", "the header object hduList._resize = 0 return hduList fitsopen = open # Convenience", "ext = ext1 else: raise KeyError, 'Redundant/conflicting keyword argument(s): %s' % ext2 elif", "make a copy if scaled, so as not to corrupt the original array", "if isinstance(tmp._arrays[i], chararray.CharArray): hdu.data._parent.field(i)[:n] = tmp._arrays[i][:n] else: hdu.data._convert[i] = num.zeros(nrows, type=tmp._arrays[i].type()) if _scale", "Delayed: \"\"\"Delayed file-reading data.\"\"\" def __init__(self, hdu=None, field=None): self.hdu = hdu self.field =", "'']: commentStr = '' else: commentStr = ' / ' + self.comment else:", "= self._parent.field(indx)[i,1] + self._heapoffset self._file.seek(_offset) if self._coldefs._recformats[indx]._dtype is 'a': dummy[i] = chararray.array(self._file, itemsize=self._parent.field(indx)[i,0],", "of cards of minimal header _list = CardList([ Card('XTENSION', '', ''), Card('BITPIX', 8,", "_size + _shift # pad the FITS data block if _size > 0:", "None @param header: the header associated with 'data', if None, a header of", "1 attr[i] = _end - last_end last_end = _end self._width = _end else:", "correspondence when updating the list(s). # Use lists, instead of dictionaries so the", "strings to numbers if self._coldefs._tbtype == 'TableHDU': _dict = {'I':num.Int32, 'F':num.Float32, 'E':num.Float32, 'D':num.Float64}", "The rest of the arguments are for extension specification. 
See L{getdata} for explanations/examples.", "the rest of the header can be used to reconstruct another kind of", "= tmp._arrays[i] if isinstance(_arr, Delayed): tmp._arrays[i] = _arr.hdu.data._parent.field(_arr.field) # use the largest column", "card here, instead of in the respective HDU classes, # so the checking", "re_naxisn = re.compile(r'NAXIS(\\d) =\\s*(\\d+)') re_gcount = re.compile(r'GCOUNT =\\s*(-?\\d+)') re_pcount = re.compile(r'PCOUNT =\\s*(-?\\d+)') re_groups", "hdu.header.get('THEAP', _tbsize) hdu.data._gap = _heapstart - _tbsize _pcount = hdu.data._heapsize + hdu.data._gap if", "if hdr.has_key('extend'): if (hdr['extend'] == False): hdr['extend'] = True else: if hdr['naxis'] ==", "\" + commfmt % i output = output + '%-80s' % commstr return", "key, value, comment=None, before=None, after=None): \"\"\"Update one header card.\"\"\" \"\"\" If the keyword", "the first time, no need to copy, and keep it unchanged else: self.header", "isinstance(val, _FormatP): VLdata = self.data.field(i) VLdata._max = max(map(len, VLdata)) val = 'P' +", "output[...,i]) # shift the unused bits num.lshift(output[...,i], unused, output[...,i]) def _makep(input, desp_output, dtype):", "_format[_format.rfind('.')+1:] # if data is not touched yet, use header info. else: _shape", "indicates that all of the required data has been written to the stream.", "KeyError, 'Input argument has wrong data type.' if 'header' in extkeys: header =", "to the correct location before calling this method. \"\"\" if isinstance(hdu, _ImageBaseHDU): hdu.update_header()", "(optional) if True and if filename already exists, it will overwrite the file.", "FITS standard (unparsable value string).' raise ValueError, self._err_text + '\\n%s' % self._cardimage #", "keyword, must be a string. force: if new key name already exist, force", "x if _option == 'exception' and x: raise VerifyError def _pad(input): \"\"\"Pad balnk", "start, end): _cards = super(CardList, self).__getslice__(start,end) result = CardList(_cards, self._keylist[start:end]) return result def", "a FITS file instead of requiring data to all be written at once.", "raise IndexError, 'No data in this HDU.' if _gethdr: _hdr = hdu.header hdulist.close()", "'extension name')) self.__dict__[attr] = value def _verify(self, option='warn'): _err = _ValidHDU._verify(self, option=option) #", "there is no guarantee # the elements in the object array are consistent.", "else: self.__dict__['_err_text'] = 'Card image is not FITS standard (unparsable value string).' raise", "n\", # where n is an int if isinstance(pos, str): _parse = pos.split()", "hdu.columns = ColDefs(input, tbtype) # read the delayed data for i in range(len(tmp)):", "this function will return a (data, header) tuple. \"\"\" if 'header' in extkeys:", "'+`naxis+4`, _isInt+\" and val == 1\", 1, option, _err) return _err # 0.8.8", "return the string before the equal sign. If there is no equal sign,", "`key`) else: return found def readall(self): \"\"\"Read data of all HDU's into memory.\"\"\"", "index of an HDU from the HDUList. 
The key can be an integer,", "= self._get_scale_factors(npars)[3:5] if _scale or _zero: self._convert[npars] = input else: self._parent.field(npars)[:] = input", "raise TypeError, \"input to ColDefs must be a table HDU or a list", "field=None): self.hdu = hdu self.field = field # translation table for floating value", "extension if len(self) > 1: self.update_extend() def index_of(self, key): \"\"\"Get the index of", "_ImageBaseHDU): if hdu.data._byteorder != 'big': output = hdu.data.byteswapped() else: output = hdu.data #", "keyboardInterruptSent = False def New_SIGINT(*args): print \"KeyboardInterrupt ignored until flush is complete!\" keyboardInterruptSent", "input string to be multiple of 80.\"\"\" _len = len(input) if _len ==", "an extension of a FITS file. @param filename: input FITS file name @type:", "input file and the base name of the mktemp() output. \"\"\" dirName =", "If integer, it is the index in the list. If string, (a) Field", "the data parbscales: list of bscales for the parameters parbzeros: list of bzeros", "file. If the file does not exist and the provided header is not", "string @param ext: The rest of the arguments are for extension specification. See", "% (self.unit, element) result += _dummy element += 1 return result class _Verify:", "reset _npts = map(len, self._convert[indx]) desc[:len(_npts),0] = _npts _dtype = num.getType(self._coldefs._recformats[indx]._dtype) desc[1:,1] =", "updating self._resize = 0 for hdu in self: hdu.header._mod = 0 hdu.header.ascard._mod =", "commentStr = ' / ' + self.comment else: commentStr = '' # equal", "class _ImageBaseHDU(_ValidHDU): \"\"\"FITS image HDU base class.\"\"\" \"\"\"Attributes: header: image header data: image", "of the HDU, will be the value of the keywod EXTNAME, default=None. \"\"\"", "num.add(output[...,i], input[...,j], output[...,i]) # shift the unused bits num.lshift(output[...,i], unused, output[...,i]) def _makep(input,", "(to be used for updating), default=None. before: name of the keyword, or index", "j in range(groups,naxis): size = size * self.header['NAXIS'+`j+1`] bitpix = self.header['BITPIX'] gcount =", "data part.\"\"\" self.__file.flush() loc = self.__file.tell() _size = 0 if hdu.data is not", "or group data object @param data: the new data used for appending @type", "bscale = _bscale, bzero = _bzero)) data_shape = self._dimShape()[:-1] dat_format = `int(num.array(data_shape).sum())` +", "is not multiple of %d: %d' % (_blockLen, len(blocks)) elif (blocks[:8] not in", "there is no unique mapping. If there is a field named \"XYZ\" and", "self.hdu = hdu self.field = field # translation table for floating value string", "= _repeat+_rec2fits[dtype+option] else: raise ValueError, \"Illegal format %s\" % fmt return output_format def", "== _booltype: _out = num.zeros(array.shape, type=recfmt) num.where(array==0, ord('F'), ord('T'), _out) array = _out", "string has CONTINUE cards, the \"Card\" is considered # to be more than", "signal.signal(signal.SIGINT,signal.getsignal(signal.SIGINT)) def update_extend(self): \"\"\"Make sure if the primary header needs the keyword EXTEND", "ext = ext1[0], ext2['extver'] raise KeyError, 'Redundant/conflicting keyword argument(s): %s' % ext2 elif", "bits num.lshift(output[...,i], unused, output[...,i]) def _makep(input, desp_output, dtype): \"\"\"Construct the P format column", "= False. 
\"\"\" if (len(self) == 0): print \"There is nothing to write.\"", "= option.lower() if _option not in ['fix', 'silentfix', 'ignore', 'warn', 'exception']: raise ValueError,", "(indx.stop == naxis) and (indx.step == 1): return _WholeLine(naxis, 0) else: if indx.step", "8', 8, option, _err) self.req_cards('TFIELDS', '== 7', _isInt+\" and val >= 0 and", "card's keyword in the header. oldkey: old keyword, can be a name or", "= hdu._theap + hdu._datLoc _data._file = hdu._file _tbsize = hdu.header['NAXIS1']*hdu.header['NAXIS2'] _data._gap = hdu._theap", "TNULL, return ASCIITNULL nullval = self._coldefs.nulls[indx].strip() dummy = num.zeros(len(self._parent), type=_type) dummy[:] = ASCIITNULL", "'' mo = re_extver.search(self._raw) if mo: extver = int(mo.group(1)) else: extver = 1", "*ext, **extkeys): \"\"\"Get the header from an extension of a FITS file. @param", "non-standard compliance.\"\"\" _valStr = None # for the unparsable case if input is", "the unused bits num.lshift(output[...,i], unused, output[...,i]) def _makep(input, desp_output, dtype): \"\"\"Construct the P", "array. input: input Boolean array of shape (s, nx) output: output Uint8 array", "rec.RecArray(_mmap, formats=tmp._recformats, names=tmp.names, shape=tmp._shape) else: _data = rec.array(hdu._file, formats=tmp._recformats, names=tmp.names, shape=tmp._shape) if isinstance(hdu._ffile,", "i in range(len(list)): list[i]=list[i].strip().lower() if list[i][-1] == 's': list[i]=list[i][:-1] for att in list:", "if 'data' in dir(hdu): if hdu.data is not None: hdu._file.seek(hdu._datLoc) self.__file.writeHDUdata(hdu) if (verbose):", "= 1 self._resize = 1 else: raise \"HDUList can only append an HDU\"", "else: del self.header['BSCALE'] if self.data._type != _type: self.data = num.array(num.around(self.data), type=_type) #0.7.7.1 class", "'extver' in keys: ext = ext2['extname'], ext2['extver'] else: ext = ext2['extname'] else: raise", "_makep(tmp._arrays[i][:n], hdu.data._parent.field(i)[:n], tmp._recformats[i]._dtype) else: if tbtype == 'TableHDU': # string no need to", "raise VerifyError def _pad(input): \"\"\"Pad balnk space to the input string to be", "keyStr.strip() in Card._commentaryKeys: # not using self.key eqStr = '' if self.__dict__.has_key('value'): valStr", "tmp._arrays: if arr is not None: dim = arr._shape[0] else: dim = 0", "for key in other] indx=range(len(self)) for x in _other: indx.remove(x) tmp = [self[i]", "self._arrays[indx] self._nfields -= 1 def change_attrib(self, col_name, attrib, new_value): \"\"\"Change an attribute (in", "the END card _nch80 = reduce(operator.add, map(Card._ncards, hdu.header.ascard)) _bytes = (_nch80+1) * Card.length", "% input _step = input.step if _step is None: _step = 1 elif", "self.header.update('BZERO', self.data._coldefs.bzeros[npars]) for i in range(npars): self.header.update('PTYPE'+`i+1`, self.data.parnames[i]) (_scale, _zero) = self.data._get_scale_factors(i)[3:5] if", "or 'columns' attribute.\"\"\" if attr == 'data': size = self.size() if size: self._file.seek(self._datLoc)", "`fix_value`) _err.append(self.run_option(option, err_text=err_text, fix_text=fix_text, fix=fix, fixable=fixable)) return _err class _TempHDU(_ValidHDU): \"\"\"Temporary HDU, used", "the close method of the _File class. It has this two-tier calls because", "raise an IOError exception. If the dtype of the input data does not", "will produce 'a7'. 
if fmt.lstrip()[0] == 'A' and option != '': output_format =", "of) one Column.\"\"\" indx = _get_index(self.names, col_name) for cname in _commonNames: attr =", "if _option not in ['fix', 'silentfix', 'ignore', 'warn', 'exception']: raise ValueError, 'Option %s", "or XTENSION' for i in range(0, len(_blockLen), Card.length): _card = Card('').fromstring(block[i:i+Card.length]) _key =", ", (int, long)): valStr = '%20d' % self.value # XXX need to consider", "CardList.\"\"\" _key = self.index_of(key) super(CardList, self).__delitem__(_key) del self._keylist[_key] # update the keylist self.count_blanks()", "cards (i.e. part of the string value) _key = self._cardimage[:8].strip().upper() if _key in", "if isinstance(self, GroupsHDU): _list.append(Card('GROUPS', True, 'has groups')) if isinstance(self, (_ExtensionHDU, GroupsHDU)): _list.append(Card('PCOUNT', 0,", "_commonNames else: list = attrib.split(',') for i in range(len(list)): list[i]=list[i].strip().lower() if list[i][-1] ==", "data in a manner analogous to tables \"\"\" def __init__(self, input=None, bitpix=None, pardata=None,", "space between words. So it may not look pretty. \"\"\" val_len = 67", "self.value == '': valStr = \"''\" else: _expValStr = self.value.replace(\"'\",\"''\") valStr = \"'%-8s'\"", "header keyword value.\"\"\" return self.ascard[key].value def __setitem__ (self, key, value): \"\"\"Set a header", "@type data: array, table, or group data object @param data: the new data", "a null string elif isinstance(self.value, str): if self.value == '': valStr = \"''\"", "self.key commentStr = '' elif self.__dict__.has_key('comment') or self.__dict__.has_key('_cardimage'): if self.comment in [None, '']:", "minimum length is 80 else: strlen = _len % Card.length return input +", "% self.key else: keyStr = '%-8s' % self.key else: keyStr = ' '*8", "return hdu class _ExtensionHDU(_ValidHDU): \"\"\"An extension HDU class. This class is the base", "value, it may be fixable result = Card._value_FSC_RE.match(self._getValueCommentString()) if result is not None", "_ExtensionHDU): firstkey = 'XTENSION' firstval = self._xtn else: firstkey = 'SIMPLE' firstval =", "keyword, can be a name or index. newkey: new keyword, must be a", "return _text def verify (self, option='warn'): \"\"\"Wrapper for _verify.\"\"\" _option = option.lower() if", "len(key): raise IndexError, 'too many indices.' elif naxis > len(key): key = key", "value must be a sequence with %d arrays/numbers.\" % len(indx) def _getitem(self, offset):", "_valStr = numr.group('sign')+_valStr elif input.group('cplx') != None: real = Card._number_NFSC_RE.match(input.group('real')) _realStr = real.group('digt').translate(_fix_table,", "1 else: raise SyntaxError, \"%s is not a Card\" % str(card) def _pos_insert(self,", "fixable # always fix silently the case where \"=\" is before column 9,", "returns a match object # for a valid value/comment string. # The valu", "append(self, card, useblanks=1, bottom=0): \"\"\"Append a Card to the CardList. card: The Card", "_shape, _format = (), '' _nrows = 0 else: _nrows = len(self.data) _ncols", "_readHDU(self): \"\"\"Read the skeleton structure of the HDU.\"\"\" end_RE = re.compile('END'+' '*77) _hdrLoc", "not at column 8).' raise ValueError, self._err_text, '\\n%s' % self._cardimage elif option in", "indx = _get_index(self.names, col_name) for cname in _commonNames: attr = getattr(self, cname+'s') del", "be created for the data object supplied. 
\"\"\" if not os.path.exists(filename): writeto(filename, data,", "3rd extension >>> update(file, dat, hdr, 3) # update the 3rd extension >>>", "already exist, force to have duplicate name. \"\"\" oldkey = oldkey.strip().upper() newkey =", ": Header The header object associated with the data to be written to", "been written to the stream. Notes ----- Only the amount of data specified", "parts together output = keyStr + eqStr + valStr + commentStr # need", "def __init__(self, data=None, header=None): self._file, self._offset, self._datLoc = None, None, None self.header =", "in range(len(tmp)): _arr = tmp._arrays[i] if isinstance(_arr, Delayed): tmp._arrays[i] = _arr.hdu.data._parent.field(_arr.field) # use", "# for P format if isinstance(self._coldefs._recformats[indx], _FormatP): dummy = _VLF([None]*len(self._parent)) dummy._dtype = self._coldefs._recformats[indx]._dtype", "parbscales = [None]*npars if parbzeros is None: parbzeros = [None]*npars if bitpix is", "elif isinstance(input, (list, tuple)): for col in input: if not isinstance(col, Column): raise", "in Solaris. self.__file.seek(0, 2) self._size = self.__file.tell() self.__file.seek(0) def __getattr__(self, attr): \"\"\"Get the", "max(_nblanks, len(input)/strlen+1) arr = chararray.array(input+' ', itemsize=1) # locations of the blanks blank_loc", "placed. default=None. \"\"\" if self.has_key(key): j = self.ascard.index_of(key) if comment is not None:", "can be optional. name: column name, corresponding to TTYPE keyword format: column format,", "if the card image already exist (to avoid infinite loop), # fix it", "elements. \"\"\" objects.ObjectArray.__init__(self, input) self._max = 0 def __setitem__(self, key, value): \"\"\"To make", "xoffset = 0 for i in range(nmax): try: loc = num.nonzero(blank_loc >= strlen+offset)[0][0]", "not None: continue def update_tbhdu(self): \"\"\"Update all table HDU's for scaled fields.\"\"\" for", "return tuple(axes) def _summary(self): \"\"\"Summarize the HDU: name, dimensions, and formats.\"\"\" class_name =", "input column definitions.\"\"\" \"\"\" input: a list of Columns or a ColDefs object.", "field. If there is no exact name matched, it will try to match", "column\" % indx+1 _trail = _loc[indx+1] - _width[indx] - self._coldefs.starts[indx] if _trail <", "not isinstance(self.value, str): raise ValueError, 'Value in a commentary card must be a", "for FSC and one for non-FSC (NFSC) format: # NFSC allows lower case", "val def _setcomment(self, val): \"\"\"Set the comment attribute.\"\"\" if isinstance(val,str): self._checkText(val) else: if", "ASCII table does not have Boolean type elif _bool: self._parent.field(indx)[:] = num.choose(self._convert[indx], (ord('F'),ord('T')))", "type = class_name[class_name.rfind('.')+1:] # if data is touched, use data info. if 'data'", "be a name or index. newkey: new keyword, must be a string. force:", "BinTableHDU): val = _cols._recformats[i] if isinstance(val, _FormatX): val = `val._nx` + 'X' elif", "else: gcount = 1 mo = re_pcount.search(block) if mo is not None: pcount", "may be difficult when the extension is a TableHDU containing ASCII data. 
\"\"\"", "setting a new value '%s'.\" % fix_value if fixable: fix = \"self.header['%s'] =", "it hdu.data._parent.field(i)[:n] = tmp._arrays[i][:n] elif isinstance(tmp._recformats[i], _FormatP): hdu.data._convert[i] = _makep(tmp._arrays[i][:n], hdu.data._parent.field(i)[:n], tmp._recformats[i]._dtype) else:", "AttributeError(attr) def getfile(self): return self.__file def _readheader(self, cardList, keyList, blocks): \"\"\"Read blocks of", "sensitive By combination of EXTNAME and EXTVER, as separate arguments or as a", "# EXTNAME='SCI' & EXTVER=2 >>> getdata('in.fits', extname='sci', extver=2) # equivalent >>> getdata('in.fits', ('sci',", "self._checkKey(val) else: if val[:8].upper() == 'HIERARCH': val = val[8:].strip() self.__class__ = _Hierarch else:", "the header associated with the data. If the 3rd argument is not a", "%5d %-12s %s%s\" % \\ (self.name, type, len(self.header.ascard), _shape, _format, _gcount) def scale(self,", "__getslice__(self, start, end): _cards = super(CardList, self).__getslice__(start,end) result = CardList(_cards, self._keylist[start:end]) return result", "\"\"\"Delayed file-reading data.\"\"\" def __init__(self, hdu=None, field=None): self.hdu = hdu self.field = field", "def insert(self, pos, card, useblanks=1): \"\"\"Insert a Card to the CardList. pos: The", "attrib='all'): \"\"\"Get attribute(s) information of the column definition.\"\"\" \"\"\"The attrib can be one", "out verbose messages? default = 0. \"\"\" # Get the name of the", "of Columns elif isinstance(input, (list, tuple)): for col in input: if not isinstance(col,", "del _keyList[_where:_where+nc] _start = _where # if not the real CONTINUE card, skip", "end of the file. If the file does not already exist, it will", "the file already exists. If it does not, check to see # if", "from files # other than FITS, the close() call can also close the", "extension specification. See L{getdata} for explanations/examples. @rtype: L{Header} object @return: header \"\"\" hdulist,", "'too many indices.' elif naxis > len(key): key = key + (slice(None),) *", "% `key` return indx def _unwrapx(input, output, nx): \"\"\"Unwrap the X format column", "super(HDUList, self).__setitem__(key, _item.setupHDU()) return super(HDUList, self).__getitem__(key) def __getslice__(self, start, end): _hdus = super(HDUList,", "the primary header >>> getdata('in.fits') By extension number: >>> getdata('in.fits', 0) # the", "input else: raise IndexError, 'Illegal slice %s, step must be integer.' % input", "the keywords EXTEND if header is None: dim = `self.header['NAXIS']` if dim ==", "hdu.data if _data is None and isinstance(_ext, _Zero): try: hdu = hdulist[1] _data", "Use the directory of the input file and the base name of the", "append an HDU\" # make sure the EXTEND keyword is in primary HDU", "of the random group, # since binary table does not support ND yet", "the keyword, or index of the Card after which the new card will", "file, return the HDUList and the extension.\"\"\" hdulist = open(filename, mode=mode) n_ext1 =", "opened. This is to speed up the open. Any header will not be", "and BZERO values when the data was read/created. If \"minmax\", use the minimum", "cards. 
Will deal with CONTINUE cards in a later stage as CONTINUE cards", "header = pyfits.Header() for all the cards you need in the header: header.update(key,value,comment)", "option, _err) return _err class GroupsHDU(PrimaryHDU): \"\"\"FITS Random Groups HDU class.\"\"\" _dict =", "repeat != 1: _repeat = `repeat` output_format = _repeat+_rec2fits[dtype+option] else: raise ValueError, \"Illegal", "# reset the output nbytes = ((nx-1) / 8) + 1 unused =", "= new_table(self._coldefs, nrows=shape[0]) return hdu.data def __repr__(self): tmp = rec.RecArray.__repr__(self) loc = tmp.rfind('\\nnames=')", "= size * self.header['NAXIS'+`j+1`] bitpix = self.header['BITPIX'] gcount = self.header.get('GCOUNT', 1) pcount =", "OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT", "of HDU's or a single HDU. Default = None, i.e. an empty HDUList.", "axes = list(self.data.getshape()) axes.reverse() elif self.data is None: axes = [] else: raise", "only have one extension with # that name if _ver == None: found", "key can be an integer or string. If integer, it is the index", "an attribute (in the commonName list) of a Column.\"\"\" indx = _get_index(self.names, col_name)", "attribute since it has methods to change # the content of header without", "'_index '+ pos if not eval(test_pos): err_text = \"'%s' card at the wrong", "_fits2rec.keys(): _rec2fits[_fits2rec[key]]=key class _FormatX(str): \"\"\"For X format in binary tables.\"\"\" pass class _FormatP(str):", "= Card._number_NFSC_RE.match(valu.group('real')) _rdigt = real.group('digt').translate(_fix_table2, ' ') if real.group('sign') == None: _val =", "card: The Card to be inserted. useblanks: Use any *extra* blank cards? default=1.", "_err) tfields = self.header['TFIELDS'] for i in range(tfields): self.req_cards('TBCOL'+`i+1`, None, _isInt, None, option,", "oldkey: old keyword, can be a name or index. newkey: new keyword, must", "hdu.columns # populate data to the new table for i in range(len(tmp)): if", "err_text = \"'%s' card at the wrong place (card %d).\" % (keywd, _index)" ]
[ "can be # found in the LICENSE file. \"\"\"NSFW urls in the Alexa", "\"http://tube8.com/\", \"http://youjizz.com/\", \"http://adultfriendfinder.com/\", \"http://hardsextube.com/\", \"http://yourlust.com/\", \"http://drtuber.com/\", \"http://beeg.com/\", \"http://largeporntube.com/\", \"http://nuvid.com/\", \"http://bravotube.net/\", \"http://spankwire.com/\", \"http://discreethearts.com/\", \"http://keezmovies.com/\",", "source code is governed by a BSD-style license that can be # found", "of this source code is governed by a BSD-style license that can be", "\"http://sexitnow.com/\", \"http://pornsharia.com/\", \"http://freeones.com/\", \"http://tubegalore.com/\", \"http://xvideos.jp/\", \"http://brazzers.com/\", \"http://fapdu.com/\", \"http://pornoxo.com/\", \"http://extremetube.com/\", \"http://hot-sex-tube.com/\", \"http://xhamsterhq.com/\", \"http://18andabused.com/\", \"http://tubepleasure.com/\",", "\"http://tubepleasure.com/\", \"http://18schoolgirlz.com/\", \"http://chaturbate.com/\", \"http://motherless.com/\", \"http://yobt.com/\", \"http://empflix.com/\", \"http://hellporno.com/\", \"http://ashemaletube.com/\", \"http://watchmygf.com/\", \"http://redtubelive.com/\", \"http://met-art.com/\", \"http://gonzoxxxmovies.com/\", \"http://shufuni.com/\",", "2014 The Chromium Authors. All rights reserved. # Use of this source code", "Alexa top 2000 sites.\"\"\" nsfw_urls = set([ \"http://xhamster.com/\", \"http://xvideos.com/\", \"http://livejasmin.com/\", \"http://pornhub.com/\", \"http://redtube.com/\", \"http://youporn.com/\",", "= set([ \"http://xhamster.com/\", \"http://xvideos.com/\", \"http://livejasmin.com/\", \"http://pornhub.com/\", \"http://redtube.com/\", \"http://youporn.com/\", \"http://xnxx.com/\", \"http://tube8.com/\", \"http://youjizz.com/\", \"http://adultfriendfinder.com/\", \"http://hardsextube.com/\",", "that can be # found in the LICENSE file. \"\"\"NSFW urls in the", "this source code is governed by a BSD-style license that can be #", "urls in the Alexa top 2000 sites.\"\"\" nsfw_urls = set([ \"http://xhamster.com/\", \"http://xvideos.com/\", \"http://livejasmin.com/\",", "be # found in the LICENSE file. 
\"\"\"NSFW urls in the Alexa top", "\"http://alphaporno.com/\", \"http://4tube.com/\", \"http://nudevista.com/\", \"http://porntube.com/\", \"http://xhamstercams.com/\", \"http://porn.com/\", \"http://video-one.com/\", \"http://perfectgirls.net/\", \"http://slutload.com/\", \"http://sunporno.com/\", \"http://tnaflix.com/\", \"http://pornerbros.com/\", \"http://h2porn.com/\",", "\"http://xvideos.com/\", \"http://livejasmin.com/\", \"http://pornhub.com/\", \"http://redtube.com/\", \"http://youporn.com/\", \"http://xnxx.com/\", \"http://tube8.com/\", \"http://youjizz.com/\", \"http://adultfriendfinder.com/\", \"http://hardsextube.com/\", \"http://yourlust.com/\", \"http://drtuber.com/\", \"http://beeg.com/\",", "\"http://h2porn.com/\", \"http://adult-empire.com/\", \"http://pornhublive.com/\", \"http://sexitnow.com/\", \"http://pornsharia.com/\", \"http://freeones.com/\", \"http://tubegalore.com/\", \"http://xvideos.jp/\", \"http://brazzers.com/\", \"http://fapdu.com/\", \"http://pornoxo.com/\", \"http://extremetube.com/\", \"http://hot-sex-tube.com/\",", "\"http://extremetube.com/\", \"http://hot-sex-tube.com/\", \"http://xhamsterhq.com/\", \"http://18andabused.com/\", \"http://tubepleasure.com/\", \"http://18schoolgirlz.com/\", \"http://chaturbate.com/\", \"http://motherless.com/\", \"http://yobt.com/\", \"http://empflix.com/\", \"http://hellporno.com/\", \"http://ashemaletube.com/\", \"http://watchmygf.com/\",", "\"http://yobt.com/\", \"http://empflix.com/\", \"http://hellporno.com/\", \"http://ashemaletube.com/\", \"http://watchmygf.com/\", \"http://redtubelive.com/\", \"http://met-art.com/\", \"http://gonzoxxxmovies.com/\", \"http://shufuni.com/\", \"http://vid2c.com/\", \"http://dojki.com/\", \"http://cerdas.com/\", \"http://overthumbs.com/\",", "\"http://met-art.com/\", \"http://gonzoxxxmovies.com/\", \"http://shufuni.com/\", \"http://vid2c.com/\", \"http://dojki.com/\", \"http://cerdas.com/\", \"http://overthumbs.com/\", \"http://xvideoslive.com/\", \"http://playboy.com/\", \"http://caribbeancom.com/\", \"http://tubewolf.com/\", \"http://xmatch.com/\", \"http://ixxx.com/\",", "\"http://pornoxo.com/\", \"http://extremetube.com/\", \"http://hot-sex-tube.com/\", \"http://xhamsterhq.com/\", \"http://18andabused.com/\", \"http://tubepleasure.com/\", \"http://18schoolgirlz.com/\", \"http://chaturbate.com/\", \"http://motherless.com/\", \"http://yobt.com/\", \"http://empflix.com/\", \"http://hellporno.com/\", \"http://ashemaletube.com/\",", "found in the LICENSE file. 
\"\"\"NSFW urls in the Alexa top 2000 sites.\"\"\"", "\"http://ashemaletube.com/\", \"http://watchmygf.com/\", \"http://redtubelive.com/\", \"http://met-art.com/\", \"http://gonzoxxxmovies.com/\", \"http://shufuni.com/\", \"http://vid2c.com/\", \"http://dojki.com/\", \"http://cerdas.com/\", \"http://overthumbs.com/\", \"http://xvideoslive.com/\", \"http://playboy.com/\", \"http://caribbeancom.com/\",", "\"http://xhamstercams.com/\", \"http://porn.com/\", \"http://video-one.com/\", \"http://perfectgirls.net/\", \"http://slutload.com/\", \"http://sunporno.com/\", \"http://tnaflix.com/\", \"http://pornerbros.com/\", \"http://h2porn.com/\", \"http://adult-empire.com/\", \"http://pornhublive.com/\", \"http://sexitnow.com/\", \"http://pornsharia.com/\",", "\"http://18schoolgirlz.com/\", \"http://chaturbate.com/\", \"http://motherless.com/\", \"http://yobt.com/\", \"http://empflix.com/\", \"http://hellporno.com/\", \"http://ashemaletube.com/\", \"http://watchmygf.com/\", \"http://redtubelive.com/\", \"http://met-art.com/\", \"http://gonzoxxxmovies.com/\", \"http://shufuni.com/\", \"http://vid2c.com/\",", "\"http://beeg.com/\", \"http://largeporntube.com/\", \"http://nuvid.com/\", \"http://bravotube.net/\", \"http://spankwire.com/\", \"http://discreethearts.com/\", \"http://keezmovies.com/\", \"http://xtube.com/\", \"http://alphaporno.com/\", \"http://4tube.com/\", \"http://nudevista.com/\", \"http://porntube.com/\", \"http://xhamstercams.com/\",", "\"http://xvideos.jp/\", \"http://brazzers.com/\", \"http://fapdu.com/\", \"http://pornoxo.com/\", \"http://extremetube.com/\", \"http://hot-sex-tube.com/\", \"http://xhamsterhq.com/\", \"http://18andabused.com/\", \"http://tubepleasure.com/\", \"http://18schoolgirlz.com/\", \"http://chaturbate.com/\", \"http://motherless.com/\", \"http://yobt.com/\",", "a BSD-style license that can be # found in the LICENSE file. \"\"\"NSFW", "\"http://perfectgirls.net/\", \"http://slutload.com/\", \"http://sunporno.com/\", \"http://tnaflix.com/\", \"http://pornerbros.com/\", \"http://h2porn.com/\", \"http://adult-empire.com/\", \"http://pornhublive.com/\", \"http://sexitnow.com/\", \"http://pornsharia.com/\", \"http://freeones.com/\", \"http://tubegalore.com/\", \"http://xvideos.jp/\",", "\"http://18andabused.com/\", \"http://tubepleasure.com/\", \"http://18schoolgirlz.com/\", \"http://chaturbate.com/\", \"http://motherless.com/\", \"http://yobt.com/\", \"http://empflix.com/\", \"http://hellporno.com/\", \"http://ashemaletube.com/\", \"http://watchmygf.com/\", \"http://redtubelive.com/\", \"http://met-art.com/\", \"http://gonzoxxxmovies.com/\",", "file. 
\"\"\"NSFW urls in the Alexa top 2000 sites.\"\"\" nsfw_urls = set([ \"http://xhamster.com/\",", "\"http://nuvid.com/\", \"http://bravotube.net/\", \"http://spankwire.com/\", \"http://discreethearts.com/\", \"http://keezmovies.com/\", \"http://xtube.com/\", \"http://alphaporno.com/\", \"http://4tube.com/\", \"http://nudevista.com/\", \"http://porntube.com/\", \"http://xhamstercams.com/\", \"http://porn.com/\", \"http://video-one.com/\",", "\"http://sunporno.com/\", \"http://tnaflix.com/\", \"http://pornerbros.com/\", \"http://h2porn.com/\", \"http://adult-empire.com/\", \"http://pornhublive.com/\", \"http://sexitnow.com/\", \"http://pornsharia.com/\", \"http://freeones.com/\", \"http://tubegalore.com/\", \"http://xvideos.jp/\", \"http://brazzers.com/\", \"http://fapdu.com/\",", "\"http://tubegalore.com/\", \"http://xvideos.jp/\", \"http://brazzers.com/\", \"http://fapdu.com/\", \"http://pornoxo.com/\", \"http://extremetube.com/\", \"http://hot-sex-tube.com/\", \"http://xhamsterhq.com/\", \"http://18andabused.com/\", \"http://tubepleasure.com/\", \"http://18schoolgirlz.com/\", \"http://chaturbate.com/\", \"http://motherless.com/\",", "\"http://bravotube.net/\", \"http://spankwire.com/\", \"http://discreethearts.com/\", \"http://keezmovies.com/\", \"http://xtube.com/\", \"http://alphaporno.com/\", \"http://4tube.com/\", \"http://nudevista.com/\", \"http://porntube.com/\", \"http://xhamstercams.com/\", \"http://porn.com/\", \"http://video-one.com/\", \"http://perfectgirls.net/\",", "in the LICENSE file. \"\"\"NSFW urls in the Alexa top 2000 sites.\"\"\" nsfw_urls", "\"http://porntube.com/\", \"http://xhamstercams.com/\", \"http://porn.com/\", \"http://video-one.com/\", \"http://perfectgirls.net/\", \"http://slutload.com/\", \"http://sunporno.com/\", \"http://tnaflix.com/\", \"http://pornerbros.com/\", \"http://h2porn.com/\", \"http://adult-empire.com/\", \"http://pornhublive.com/\", \"http://sexitnow.com/\",", "Authors. All rights reserved. # Use of this source code is governed by", "\"http://redtubelive.com/\", \"http://met-art.com/\", \"http://gonzoxxxmovies.com/\", \"http://shufuni.com/\", \"http://vid2c.com/\", \"http://dojki.com/\", \"http://cerdas.com/\", \"http://overthumbs.com/\", \"http://xvideoslive.com/\", \"http://playboy.com/\", \"http://caribbeancom.com/\", \"http://tubewolf.com/\", \"http://xmatch.com/\",", "2000 sites.\"\"\" nsfw_urls = set([ \"http://xhamster.com/\", \"http://xvideos.com/\", \"http://livejasmin.com/\", \"http://pornhub.com/\", \"http://redtube.com/\", \"http://youporn.com/\", \"http://xnxx.com/\", \"http://tube8.com/\",", "set([ \"http://xhamster.com/\", \"http://xvideos.com/\", \"http://livejasmin.com/\", \"http://pornhub.com/\", \"http://redtube.com/\", \"http://youporn.com/\", \"http://xnxx.com/\", \"http://tube8.com/\", \"http://youjizz.com/\", \"http://adultfriendfinder.com/\", \"http://hardsextube.com/\", \"http://yourlust.com/\",", "\"\"\"NSFW urls in the Alexa top 2000 sites.\"\"\" nsfw_urls = set([ \"http://xhamster.com/\", \"http://xvideos.com/\",", "The Chromium Authors. All rights reserved. 
# Use of this source code is", "Use of this source code is governed by a BSD-style license that can", "sites.\"\"\" nsfw_urls = set([ \"http://xhamster.com/\", \"http://xvideos.com/\", \"http://livejasmin.com/\", \"http://pornhub.com/\", \"http://redtube.com/\", \"http://youporn.com/\", \"http://xnxx.com/\", \"http://tube8.com/\", \"http://youjizz.com/\",", "\"http://pornsharia.com/\", \"http://freeones.com/\", \"http://tubegalore.com/\", \"http://xvideos.jp/\", \"http://brazzers.com/\", \"http://fapdu.com/\", \"http://pornoxo.com/\", \"http://extremetube.com/\", \"http://hot-sex-tube.com/\", \"http://xhamsterhq.com/\", \"http://18andabused.com/\", \"http://tubepleasure.com/\", \"http://18schoolgirlz.com/\",", "\"http://slutload.com/\", \"http://sunporno.com/\", \"http://tnaflix.com/\", \"http://pornerbros.com/\", \"http://h2porn.com/\", \"http://adult-empire.com/\", \"http://pornhublive.com/\", \"http://sexitnow.com/\", \"http://pornsharia.com/\", \"http://freeones.com/\", \"http://tubegalore.com/\", \"http://xvideos.jp/\", \"http://brazzers.com/\",", "\"http://4tube.com/\", \"http://nudevista.com/\", \"http://porntube.com/\", \"http://xhamstercams.com/\", \"http://porn.com/\", \"http://video-one.com/\", \"http://perfectgirls.net/\", \"http://slutload.com/\", \"http://sunporno.com/\", \"http://tnaflix.com/\", \"http://pornerbros.com/\", \"http://h2porn.com/\", \"http://adult-empire.com/\",", "code is governed by a BSD-style license that can be # found in", "\"http://porn.com/\", \"http://video-one.com/\", \"http://perfectgirls.net/\", \"http://slutload.com/\", \"http://sunporno.com/\", \"http://tnaflix.com/\", \"http://pornerbros.com/\", \"http://h2porn.com/\", \"http://adult-empire.com/\", \"http://pornhublive.com/\", \"http://sexitnow.com/\", \"http://pornsharia.com/\", \"http://freeones.com/\",", "\"http://youporn.com/\", \"http://xnxx.com/\", \"http://tube8.com/\", \"http://youjizz.com/\", \"http://adultfriendfinder.com/\", \"http://hardsextube.com/\", \"http://yourlust.com/\", \"http://drtuber.com/\", \"http://beeg.com/\", \"http://largeporntube.com/\", \"http://nuvid.com/\", \"http://bravotube.net/\", \"http://spankwire.com/\",", "\"http://motherless.com/\", \"http://yobt.com/\", \"http://empflix.com/\", \"http://hellporno.com/\", \"http://ashemaletube.com/\", \"http://watchmygf.com/\", \"http://redtubelive.com/\", \"http://met-art.com/\", \"http://gonzoxxxmovies.com/\", \"http://shufuni.com/\", \"http://vid2c.com/\", \"http://dojki.com/\", \"http://cerdas.com/\",", "rights reserved. 
# Use of this source code is governed by a BSD-style", "\"http://xnxx.com/\", \"http://tube8.com/\", \"http://youjizz.com/\", \"http://adultfriendfinder.com/\", \"http://hardsextube.com/\", \"http://yourlust.com/\", \"http://drtuber.com/\", \"http://beeg.com/\", \"http://largeporntube.com/\", \"http://nuvid.com/\", \"http://bravotube.net/\", \"http://spankwire.com/\", \"http://discreethearts.com/\",", "\"http://xhamsterhq.com/\", \"http://18andabused.com/\", \"http://tubepleasure.com/\", \"http://18schoolgirlz.com/\", \"http://chaturbate.com/\", \"http://motherless.com/\", \"http://yobt.com/\", \"http://empflix.com/\", \"http://hellporno.com/\", \"http://ashemaletube.com/\", \"http://watchmygf.com/\", \"http://redtubelive.com/\", \"http://met-art.com/\",", "\"http://nudevista.com/\", \"http://porntube.com/\", \"http://xhamstercams.com/\", \"http://porn.com/\", \"http://video-one.com/\", \"http://perfectgirls.net/\", \"http://slutload.com/\", \"http://sunporno.com/\", \"http://tnaflix.com/\", \"http://pornerbros.com/\", \"http://h2porn.com/\", \"http://adult-empire.com/\", \"http://pornhublive.com/\",", "\"http://livejasmin.com/\", \"http://pornhub.com/\", \"http://redtube.com/\", \"http://youporn.com/\", \"http://xnxx.com/\", \"http://tube8.com/\", \"http://youjizz.com/\", \"http://adultfriendfinder.com/\", \"http://hardsextube.com/\", \"http://yourlust.com/\", \"http://drtuber.com/\", \"http://beeg.com/\", \"http://largeporntube.com/\",", "in the Alexa top 2000 sites.\"\"\" nsfw_urls = set([ \"http://xhamster.com/\", \"http://xvideos.com/\", \"http://livejasmin.com/\", \"http://pornhub.com/\",", "\"http://pornerbros.com/\", \"http://h2porn.com/\", \"http://adult-empire.com/\", \"http://pornhublive.com/\", \"http://sexitnow.com/\", \"http://pornsharia.com/\", \"http://freeones.com/\", \"http://tubegalore.com/\", \"http://xvideos.jp/\", \"http://brazzers.com/\", \"http://fapdu.com/\", \"http://pornoxo.com/\", \"http://extremetube.com/\",", "All rights reserved. 
# Use of this source code is governed by a", "\"http://freeones.com/\", \"http://tubegalore.com/\", \"http://xvideos.jp/\", \"http://brazzers.com/\", \"http://fapdu.com/\", \"http://pornoxo.com/\", \"http://extremetube.com/\", \"http://hot-sex-tube.com/\", \"http://xhamsterhq.com/\", \"http://18andabused.com/\", \"http://tubepleasure.com/\", \"http://18schoolgirlz.com/\", \"http://chaturbate.com/\",", "by a BSD-style license that can be # found in the LICENSE file.", "\"http://brazzers.com/\", \"http://fapdu.com/\", \"http://pornoxo.com/\", \"http://extremetube.com/\", \"http://hot-sex-tube.com/\", \"http://xhamsterhq.com/\", \"http://18andabused.com/\", \"http://tubepleasure.com/\", \"http://18schoolgirlz.com/\", \"http://chaturbate.com/\", \"http://motherless.com/\", \"http://yobt.com/\", \"http://empflix.com/\",", "\"http://video-one.com/\", \"http://perfectgirls.net/\", \"http://slutload.com/\", \"http://sunporno.com/\", \"http://tnaflix.com/\", \"http://pornerbros.com/\", \"http://h2porn.com/\", \"http://adult-empire.com/\", \"http://pornhublive.com/\", \"http://sexitnow.com/\", \"http://pornsharia.com/\", \"http://freeones.com/\", \"http://tubegalore.com/\",", "\"http://keezmovies.com/\", \"http://xtube.com/\", \"http://alphaporno.com/\", \"http://4tube.com/\", \"http://nudevista.com/\", \"http://porntube.com/\", \"http://xhamstercams.com/\", \"http://porn.com/\", \"http://video-one.com/\", \"http://perfectgirls.net/\", \"http://slutload.com/\", \"http://sunporno.com/\", \"http://tnaflix.com/\",", "\"http://xtube.com/\", \"http://alphaporno.com/\", \"http://4tube.com/\", \"http://nudevista.com/\", \"http://porntube.com/\", \"http://xhamstercams.com/\", \"http://porn.com/\", \"http://video-one.com/\", \"http://perfectgirls.net/\", \"http://slutload.com/\", \"http://sunporno.com/\", \"http://tnaflix.com/\", \"http://pornerbros.com/\",", "\"http://pornhublive.com/\", \"http://sexitnow.com/\", \"http://pornsharia.com/\", \"http://freeones.com/\", \"http://tubegalore.com/\", \"http://xvideos.jp/\", \"http://brazzers.com/\", \"http://fapdu.com/\", \"http://pornoxo.com/\", \"http://extremetube.com/\", \"http://hot-sex-tube.com/\", \"http://xhamsterhq.com/\", \"http://18andabused.com/\",", "is governed by a BSD-style license that can be # found in the", "# found in the LICENSE file. \"\"\"NSFW urls in the Alexa top 2000", "\"http://redtube.com/\", \"http://youporn.com/\", \"http://xnxx.com/\", \"http://tube8.com/\", \"http://youjizz.com/\", \"http://adultfriendfinder.com/\", \"http://hardsextube.com/\", \"http://yourlust.com/\", \"http://drtuber.com/\", \"http://beeg.com/\", \"http://largeporntube.com/\", \"http://nuvid.com/\", \"http://bravotube.net/\",", "governed by a BSD-style license that can be # found in the LICENSE", "\"http://watchmygf.com/\", \"http://redtubelive.com/\", \"http://met-art.com/\", \"http://gonzoxxxmovies.com/\", \"http://shufuni.com/\", \"http://vid2c.com/\", \"http://dojki.com/\", \"http://cerdas.com/\", \"http://overthumbs.com/\", \"http://xvideoslive.com/\", \"http://playboy.com/\", \"http://caribbeancom.com/\", \"http://tubewolf.com/\",", "# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this", "\"http://tnaflix.com/\", \"http://pornerbros.com/\", \"http://h2porn.com/\", \"http://adult-empire.com/\", \"http://pornhublive.com/\", \"http://sexitnow.com/\", \"http://pornsharia.com/\", \"http://freeones.com/\", \"http://tubegalore.com/\", \"http://xvideos.jp/\", \"http://brazzers.com/\", \"http://fapdu.com/\", \"http://pornoxo.com/\",", "reserved. 
# Use of this source code is governed by a BSD-style license", "\"http://discreethearts.com/\", \"http://keezmovies.com/\", \"http://xtube.com/\", \"http://alphaporno.com/\", \"http://4tube.com/\", \"http://nudevista.com/\", \"http://porntube.com/\", \"http://xhamstercams.com/\", \"http://porn.com/\", \"http://video-one.com/\", \"http://perfectgirls.net/\", \"http://slutload.com/\", \"http://sunporno.com/\",", "# Use of this source code is governed by a BSD-style license that", "\"http://adultfriendfinder.com/\", \"http://hardsextube.com/\", \"http://yourlust.com/\", \"http://drtuber.com/\", \"http://beeg.com/\", \"http://largeporntube.com/\", \"http://nuvid.com/\", \"http://bravotube.net/\", \"http://spankwire.com/\", \"http://discreethearts.com/\", \"http://keezmovies.com/\", \"http://xtube.com/\", \"http://alphaporno.com/\",", "\"http://pornhub.com/\", \"http://redtube.com/\", \"http://youporn.com/\", \"http://xnxx.com/\", \"http://tube8.com/\", \"http://youjizz.com/\", \"http://adultfriendfinder.com/\", \"http://hardsextube.com/\", \"http://yourlust.com/\", \"http://drtuber.com/\", \"http://beeg.com/\", \"http://largeporntube.com/\", \"http://nuvid.com/\",", "\"http://hellporno.com/\", \"http://ashemaletube.com/\", \"http://watchmygf.com/\", \"http://redtubelive.com/\", \"http://met-art.com/\", \"http://gonzoxxxmovies.com/\", \"http://shufuni.com/\", \"http://vid2c.com/\", \"http://dojki.com/\", \"http://cerdas.com/\", \"http://overthumbs.com/\", \"http://xvideoslive.com/\", \"http://playboy.com/\",", "\"http://hardsextube.com/\", \"http://yourlust.com/\", \"http://drtuber.com/\", \"http://beeg.com/\", \"http://largeporntube.com/\", \"http://nuvid.com/\", \"http://bravotube.net/\", \"http://spankwire.com/\", \"http://discreethearts.com/\", \"http://keezmovies.com/\", \"http://xtube.com/\", \"http://alphaporno.com/\", \"http://4tube.com/\",", "\"http://yourlust.com/\", \"http://drtuber.com/\", \"http://beeg.com/\", \"http://largeporntube.com/\", \"http://nuvid.com/\", \"http://bravotube.net/\", \"http://spankwire.com/\", \"http://discreethearts.com/\", \"http://keezmovies.com/\", \"http://xtube.com/\", \"http://alphaporno.com/\", \"http://4tube.com/\", \"http://nudevista.com/\",", "\"http://spankwire.com/\", \"http://discreethearts.com/\", \"http://keezmovies.com/\", \"http://xtube.com/\", \"http://alphaporno.com/\", \"http://4tube.com/\", \"http://nudevista.com/\", \"http://porntube.com/\", \"http://xhamstercams.com/\", \"http://porn.com/\", \"http://video-one.com/\", \"http://perfectgirls.net/\", \"http://slutload.com/\",", "nsfw_urls = set([ \"http://xhamster.com/\", \"http://xvideos.com/\", \"http://livejasmin.com/\", \"http://pornhub.com/\", \"http://redtube.com/\", \"http://youporn.com/\", \"http://xnxx.com/\", \"http://tube8.com/\", \"http://youjizz.com/\", \"http://adultfriendfinder.com/\",", "\"http://gonzoxxxmovies.com/\", \"http://shufuni.com/\", \"http://vid2c.com/\", \"http://dojki.com/\", \"http://cerdas.com/\", \"http://overthumbs.com/\", \"http://xvideoslive.com/\", \"http://playboy.com/\", \"http://caribbeancom.com/\", \"http://tubewolf.com/\", \"http://xmatch.com/\", \"http://ixxx.com/\", \"http://nymphdate.com/\",", "\"http://shufuni.com/\", \"http://vid2c.com/\", \"http://dojki.com/\", \"http://cerdas.com/\", \"http://overthumbs.com/\", \"http://xvideoslive.com/\", \"http://playboy.com/\", \"http://caribbeancom.com/\", \"http://tubewolf.com/\", \"http://xmatch.com/\", \"http://ixxx.com/\", \"http://nymphdate.com/\", ])", "\"http://youjizz.com/\", 
\"http://adultfriendfinder.com/\", \"http://hardsextube.com/\", \"http://yourlust.com/\", \"http://drtuber.com/\", \"http://beeg.com/\", \"http://largeporntube.com/\", \"http://nuvid.com/\", \"http://bravotube.net/\", \"http://spankwire.com/\", \"http://discreethearts.com/\", \"http://keezmovies.com/\", \"http://xtube.com/\",", "Chromium Authors. All rights reserved. # Use of this source code is governed", "\"http://xhamster.com/\", \"http://xvideos.com/\", \"http://livejasmin.com/\", \"http://pornhub.com/\", \"http://redtube.com/\", \"http://youporn.com/\", \"http://xnxx.com/\", \"http://tube8.com/\", \"http://youjizz.com/\", \"http://adultfriendfinder.com/\", \"http://hardsextube.com/\", \"http://yourlust.com/\", \"http://drtuber.com/\",", "\"http://empflix.com/\", \"http://hellporno.com/\", \"http://ashemaletube.com/\", \"http://watchmygf.com/\", \"http://redtubelive.com/\", \"http://met-art.com/\", \"http://gonzoxxxmovies.com/\", \"http://shufuni.com/\", \"http://vid2c.com/\", \"http://dojki.com/\", \"http://cerdas.com/\", \"http://overthumbs.com/\", \"http://xvideoslive.com/\",", "\"http://chaturbate.com/\", \"http://motherless.com/\", \"http://yobt.com/\", \"http://empflix.com/\", \"http://hellporno.com/\", \"http://ashemaletube.com/\", \"http://watchmygf.com/\", \"http://redtubelive.com/\", \"http://met-art.com/\", \"http://gonzoxxxmovies.com/\", \"http://shufuni.com/\", \"http://vid2c.com/\", \"http://dojki.com/\",", "\"http://hot-sex-tube.com/\", \"http://xhamsterhq.com/\", \"http://18andabused.com/\", \"http://tubepleasure.com/\", \"http://18schoolgirlz.com/\", \"http://chaturbate.com/\", \"http://motherless.com/\", \"http://yobt.com/\", \"http://empflix.com/\", \"http://hellporno.com/\", \"http://ashemaletube.com/\", \"http://watchmygf.com/\", \"http://redtubelive.com/\",", "LICENSE file. \"\"\"NSFW urls in the Alexa top 2000 sites.\"\"\" nsfw_urls = set([", "\"http://drtuber.com/\", \"http://beeg.com/\", \"http://largeporntube.com/\", \"http://nuvid.com/\", \"http://bravotube.net/\", \"http://spankwire.com/\", \"http://discreethearts.com/\", \"http://keezmovies.com/\", \"http://xtube.com/\", \"http://alphaporno.com/\", \"http://4tube.com/\", \"http://nudevista.com/\", \"http://porntube.com/\",", "\"http://fapdu.com/\", \"http://pornoxo.com/\", \"http://extremetube.com/\", \"http://hot-sex-tube.com/\", \"http://xhamsterhq.com/\", \"http://18andabused.com/\", \"http://tubepleasure.com/\", \"http://18schoolgirlz.com/\", \"http://chaturbate.com/\", \"http://motherless.com/\", \"http://yobt.com/\", \"http://empflix.com/\", \"http://hellporno.com/\",", "the LICENSE file. \"\"\"NSFW urls in the Alexa top 2000 sites.\"\"\" nsfw_urls =", "BSD-style license that can be # found in the LICENSE file. \"\"\"NSFW urls", "Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source", "top 2000 sites.\"\"\" nsfw_urls = set([ \"http://xhamster.com/\", \"http://xvideos.com/\", \"http://livejasmin.com/\", \"http://pornhub.com/\", \"http://redtube.com/\", \"http://youporn.com/\", \"http://xnxx.com/\",", "license that can be # found in the LICENSE file. 
\"\"\"NSFW urls in", "\"http://largeporntube.com/\", \"http://nuvid.com/\", \"http://bravotube.net/\", \"http://spankwire.com/\", \"http://discreethearts.com/\", \"http://keezmovies.com/\", \"http://xtube.com/\", \"http://alphaporno.com/\", \"http://4tube.com/\", \"http://nudevista.com/\", \"http://porntube.com/\", \"http://xhamstercams.com/\", \"http://porn.com/\",", "\"http://adult-empire.com/\", \"http://pornhublive.com/\", \"http://sexitnow.com/\", \"http://pornsharia.com/\", \"http://freeones.com/\", \"http://tubegalore.com/\", \"http://xvideos.jp/\", \"http://brazzers.com/\", \"http://fapdu.com/\", \"http://pornoxo.com/\", \"http://extremetube.com/\", \"http://hot-sex-tube.com/\", \"http://xhamsterhq.com/\",", "the Alexa top 2000 sites.\"\"\" nsfw_urls = set([ \"http://xhamster.com/\", \"http://xvideos.com/\", \"http://livejasmin.com/\", \"http://pornhub.com/\", \"http://redtube.com/\"," ]
[ "= '<EMAIL>', maintainer = 'EhwaZoom', maintainer_email = '<EMAIL>', packages = setuptools.find_packages(), entry_points =", "'EhwaZoom', author_email = '<EMAIL>', maintainer = 'EhwaZoom', maintainer_email = '<EMAIL>', packages = setuptools.find_packages(),", "'https://github.com/EhwaZoom/bpgen', author = 'EhwaZoom', author_email = '<EMAIL>', maintainer = 'EhwaZoom', maintainer_email = '<EMAIL>',", "setuptools setuptools.setup( name = 'bpgen', version = '0.1.0', description = 'Boilerplate generator.', url", "<reponame>EhwaZoom/bpgen import setuptools setuptools.setup( name = 'bpgen', version = '0.1.0', description = 'Boilerplate", "= 'https://github.com/EhwaZoom/bpgen', author = 'EhwaZoom', author_email = '<EMAIL>', maintainer = 'EhwaZoom', maintainer_email =", "= 'bpgen', version = '0.1.0', description = 'Boilerplate generator.', url = 'https://github.com/EhwaZoom/bpgen', author", "'Boilerplate generator.', url = 'https://github.com/EhwaZoom/bpgen', author = 'EhwaZoom', author_email = '<EMAIL>', maintainer =", "import setuptools setuptools.setup( name = 'bpgen', version = '0.1.0', description = 'Boilerplate generator.',", "'bpgen', version = '0.1.0', description = 'Boilerplate generator.', url = 'https://github.com/EhwaZoom/bpgen', author =", "author = 'EhwaZoom', author_email = '<EMAIL>', maintainer = 'EhwaZoom', maintainer_email = '<EMAIL>', packages", "= 'EhwaZoom', author_email = '<EMAIL>', maintainer = 'EhwaZoom', maintainer_email = '<EMAIL>', packages =", "maintainer = 'EhwaZoom', maintainer_email = '<EMAIL>', packages = setuptools.find_packages(), entry_points = { 'console_scripts':", "= '0.1.0', description = 'Boilerplate generator.', url = 'https://github.com/EhwaZoom/bpgen', author = 'EhwaZoom', author_email", "= 'Boilerplate generator.', url = 'https://github.com/EhwaZoom/bpgen', author = 'EhwaZoom', author_email = '<EMAIL>', maintainer", "'<EMAIL>', maintainer = 'EhwaZoom', maintainer_email = '<EMAIL>', packages = setuptools.find_packages(), entry_points = {", "maintainer_email = '<EMAIL>', packages = setuptools.find_packages(), entry_points = { 'console_scripts': ['bpgen=bpgen.main:main'] } )", "version = '0.1.0', description = 'Boilerplate generator.', url = 'https://github.com/EhwaZoom/bpgen', author = 'EhwaZoom',", "generator.', url = 'https://github.com/EhwaZoom/bpgen', author = 'EhwaZoom', author_email = '<EMAIL>', maintainer = 'EhwaZoom',", "author_email = '<EMAIL>', maintainer = 'EhwaZoom', maintainer_email = '<EMAIL>', packages = setuptools.find_packages(), entry_points", "= 'EhwaZoom', maintainer_email = '<EMAIL>', packages = setuptools.find_packages(), entry_points = { 'console_scripts': ['bpgen=bpgen.main:main']", "url = 'https://github.com/EhwaZoom/bpgen', author = 'EhwaZoom', author_email = '<EMAIL>', maintainer = 'EhwaZoom', maintainer_email", "'0.1.0', description = 'Boilerplate generator.', url = 'https://github.com/EhwaZoom/bpgen', author = 'EhwaZoom', author_email =", "description = 'Boilerplate generator.', url = 'https://github.com/EhwaZoom/bpgen', author = 'EhwaZoom', author_email = '<EMAIL>',", "name = 'bpgen', version = '0.1.0', description = 'Boilerplate generator.', url = 'https://github.com/EhwaZoom/bpgen',", "'EhwaZoom', maintainer_email = '<EMAIL>', packages = setuptools.find_packages(), entry_points = { 'console_scripts': ['bpgen=bpgen.main:main'] }", "setuptools.setup( name = 'bpgen', version = '0.1.0', description = 'Boilerplate generator.', url =" ]
[ "better_number, \"times\") print (\"Word IS used\", is_number, \"times\") print (\"Word NEVER used\", never_number,", "at first unless you're Dutch. Now is better than never. Although never is", "Flat is better than nested. Sparse is better than dense. Readability counts. Special", "is often better than *right* now. If the implementation is hard to explain,", "those!\"\"\" better_number = (zen_of_P.count(\"better\")) is_number = (zen_of_P.count(\"is\")) never_number = (zen_of_P.count(\"never\")) print (\"Word BETTER", "(\"Word IS used\", is_number, \"times\") print (\"Word NEVER used\", never_number, \"times\") upper_case =", "never_number = (zen_of_P.count(\"never\")) print (\"Word BETTER used\", better_number, \"times\") print (\"Word IS used\",", "better than nested. Sparse is better than dense. Readability counts. Special cases aren't", "one-- and preferably only one --obvious way to do it. Although that way", "better than never. Although never is often better than *right* now. If the", "Now is better than never. Although never is often better than *right* now.", "not be obvious at first unless you're Dutch. Now is better than never.", "great idea -- let's do more of those!\"\"\" better_number = (zen_of_P.count(\"better\")) is_number =", "it's a bad idea. If the implementation is easy to explain, it may", "Simple is better than complex. Complex is better than complicated. Flat is better", "and preferably only one --obvious way to do it. Although that way may", "\"times\") print (\"Word IS used\", is_number, \"times\") print (\"Word NEVER used\", never_number, \"times\")", "than dense. Readability counts. Special cases aren't special enough to break the rules.", "used\", is_number, \"times\") print (\"Word NEVER used\", never_number, \"times\") upper_case = (zen_of_P.upper()) print(upper_case)", "implementation is hard to explain, it's a bad idea. If the implementation is", "is_number, \"times\") print (\"Word NEVER used\", never_number, \"times\") upper_case = (zen_of_P.upper()) print(upper_case) replacing_i", "idea. If the implementation is easy to explain, it may be a good", "implementation is easy to explain, it may be a good idea. Namespaces are", "used\", better_number, \"times\") print (\"Word IS used\", is_number, \"times\") print (\"Word NEVER used\",", "pass silently. Unless explicitly silenced. In the face of ambiguity, refuse the temptation", "bad idea. If the implementation is easy to explain, it may be a", "= (zen_of_P.count(\"never\")) print (\"Word BETTER used\", better_number, \"times\") print (\"Word IS used\", is_number,", "nested. Sparse is better than dense. Readability counts. Special cases aren't special enough", "way to do it. Although that way may not be obvious at first", "explicitly silenced. In the face of ambiguity, refuse the temptation to guess. There", "one honking great idea -- let's do more of those!\"\"\" better_number = (zen_of_P.count(\"better\"))", "only one --obvious way to do it. Although that way may not be", "beats purity. Errors should never pass silently. Unless explicitly silenced. In the face", "print (\"Word IS used\", is_number, \"times\") print (\"Word NEVER used\", never_number, \"times\") upper_case", "may not be obvious at first unless you're Dutch. Now is better than", "is better than nested. Sparse is better than dense. Readability counts. Special cases", "be a good idea. Namespaces are one honking great idea -- let's do", "do it. Although that way may not be obvious at first unless you're", "Explicit is better than implicit. 
Simple is better than complex. Complex is better", "may be a good idea. Namespaces are one honking great idea -- let's", "to explain, it may be a good idea. Namespaces are one honking great", "better than implicit. Simple is better than complex. Complex is better than complicated.", "BETTER used\", better_number, \"times\") print (\"Word IS used\", is_number, \"times\") print (\"Word NEVER", "than ugly. Explicit is better than implicit. Simple is better than complex. Complex", "print (\"Word NEVER used\", never_number, \"times\") upper_case = (zen_of_P.upper()) print(upper_case) replacing_i = (zen_of_P.replace('i','&'))", "= (zen_of_P.count(\"is\")) never_number = (zen_of_P.count(\"never\")) print (\"Word BETTER used\", better_number, \"times\") print (\"Word", "unless you're Dutch. Now is better than never. Although never is often better", "In the face of ambiguity, refuse the temptation to guess. There should be", "Sparse is better than dense. Readability counts. Special cases aren't special enough to", "you're Dutch. Now is better than never. Although never is often better than", "to do it. Although that way may not be obvious at first unless", "refuse the temptation to guess. There should be one-- and preferably only one", "more of those!\"\"\" better_number = (zen_of_P.count(\"better\")) is_number = (zen_of_P.count(\"is\")) never_number = (zen_of_P.count(\"never\")) print", "Special cases aren't special enough to break the rules. Although practicality beats purity.", "break the rules. Although practicality beats purity. Errors should never pass silently. Unless", "it. Although that way may not be obvious at first unless you're Dutch.", "is easy to explain, it may be a good idea. Namespaces are one", "than nested. Sparse is better than dense. Readability counts. Special cases aren't special", "is better than implicit. Simple is better than complex. Complex is better than", "face of ambiguity, refuse the temptation to guess. There should be one-- and", "implicit. Simple is better than complex. Complex is better than complicated. Flat is", "preferably only one --obvious way to do it. Although that way may not", "There should be one-- and preferably only one --obvious way to do it.", "better than *right* now. If the implementation is hard to explain, it's a", "temptation to guess. There should be one-- and preferably only one --obvious way", "(\"Word BETTER used\", better_number, \"times\") print (\"Word IS used\", is_number, \"times\") print (\"Word", "Readability counts. Special cases aren't special enough to break the rules. Although practicality", "If the implementation is hard to explain, it's a bad idea. If the", "one --obvious way to do it. Although that way may not be obvious", "that way may not be obvious at first unless you're Dutch. Now is", "hard to explain, it's a bad idea. If the implementation is easy to", "Namespaces are one honking great idea -- let's do more of those!\"\"\" better_number", "is hard to explain, it's a bad idea. If the implementation is easy", "are one honking great idea -- let's do more of those!\"\"\" better_number =", "is_number = (zen_of_P.count(\"is\")) never_number = (zen_of_P.count(\"never\")) print (\"Word BETTER used\", better_number, \"times\") print", "the rules. Although practicality beats purity. Errors should never pass silently. Unless explicitly", "ugly. Explicit is better than implicit. Simple is better than complex. Complex is", "now. If the implementation is hard to explain, it's a bad idea. If", "never pass silently. 
Unless explicitly silenced. In the face of ambiguity, refuse the", "guess. There should be one-- and preferably only one --obvious way to do", "-- let's do more of those!\"\"\" better_number = (zen_of_P.count(\"better\")) is_number = (zen_of_P.count(\"is\")) never_number", "(zen_of_P.count(\"never\")) print (\"Word BETTER used\", better_number, \"times\") print (\"Word IS used\", is_number, \"times\")", "it may be a good idea. Namespaces are one honking great idea --", "purity. Errors should never pass silently. Unless explicitly silenced. In the face of", "better than complicated. Flat is better than nested. Sparse is better than dense.", "a bad idea. If the implementation is easy to explain, it may be", "Although practicality beats purity. Errors should never pass silently. Unless explicitly silenced. In", "Although never is often better than *right* now. If the implementation is hard", "complicated. Flat is better than nested. Sparse is better than dense. Readability counts.", "of those!\"\"\" better_number = (zen_of_P.count(\"better\")) is_number = (zen_of_P.count(\"is\")) never_number = (zen_of_P.count(\"never\")) print (\"Word", "*right* now. If the implementation is hard to explain, it's a bad idea.", "explain, it's a bad idea. If the implementation is easy to explain, it", "better_number = (zen_of_P.count(\"better\")) is_number = (zen_of_P.count(\"is\")) never_number = (zen_of_P.count(\"never\")) print (\"Word BETTER used\",", "enough to break the rules. Although practicality beats purity. Errors should never pass", "practicality beats purity. Errors should never pass silently. Unless explicitly silenced. In the", "IS used\", is_number, \"times\") print (\"Word NEVER used\", never_number, \"times\") upper_case = (zen_of_P.upper())", "ambiguity, refuse the temptation to guess. There should be one-- and preferably only", "print (\"Word BETTER used\", better_number, \"times\") print (\"Word IS used\", is_number, \"times\") print", "better than dense. Readability counts. Special cases aren't special enough to break the", "be obvious at first unless you're Dutch. Now is better than never. Although", "to guess. There should be one-- and preferably only one --obvious way to", "way may not be obvious at first unless you're Dutch. Now is better", "is better than never. Although never is often better than *right* now. If", "the implementation is hard to explain, it's a bad idea. If the implementation", "Complex is better than complicated. Flat is better than nested. Sparse is better", "Although that way may not be obvious at first unless you're Dutch. Now", "complex. Complex is better than complicated. Flat is better than nested. Sparse is", "of ambiguity, refuse the temptation to guess. There should be one-- and preferably", "easy to explain, it may be a good idea. Namespaces are one honking", "than *right* now. If the implementation is hard to explain, it's a bad", "idea -- let's do more of those!\"\"\" better_number = (zen_of_P.count(\"better\")) is_number = (zen_of_P.count(\"is\"))", "the face of ambiguity, refuse the temptation to guess. There should be one--", "good idea. Namespaces are one honking great idea -- let's do more of", "= \"\"\"Beautiful is better than ugly. Explicit is better than implicit. Simple is", "the temptation to guess. There should be one-- and preferably only one --obvious", "to explain, it's a bad idea. If the implementation is easy to explain,", "should never pass silently. Unless explicitly silenced. 
In the face of ambiguity, refuse", "= (zen_of_P.count(\"better\")) is_number = (zen_of_P.count(\"is\")) never_number = (zen_of_P.count(\"never\")) print (\"Word BETTER used\", better_number,", "a good idea. Namespaces are one honking great idea -- let's do more", "\"times\") print (\"Word NEVER used\", never_number, \"times\") upper_case = (zen_of_P.upper()) print(upper_case) replacing_i =", "silently. Unless explicitly silenced. In the face of ambiguity, refuse the temptation to", "than never. Although never is often better than *right* now. If the implementation", "explain, it may be a good idea. Namespaces are one honking great idea", "to break the rules. Although practicality beats purity. Errors should never pass silently.", "silenced. In the face of ambiguity, refuse the temptation to guess. There should", "obvious at first unless you're Dutch. Now is better than never. Although never", "better than ugly. Explicit is better than implicit. Simple is better than complex.", "be one-- and preferably only one --obvious way to do it. Although that", "--obvious way to do it. Although that way may not be obvious at", "cases aren't special enough to break the rules. Although practicality beats purity. Errors", "is better than ugly. Explicit is better than implicit. Simple is better than", "should be one-- and preferably only one --obvious way to do it. Although", "first unless you're Dutch. Now is better than never. Although never is often", "let's do more of those!\"\"\" better_number = (zen_of_P.count(\"better\")) is_number = (zen_of_P.count(\"is\")) never_number =", "special enough to break the rules. Although practicality beats purity. Errors should never", "counts. Special cases aren't special enough to break the rules. Although practicality beats", "If the implementation is easy to explain, it may be a good idea.", "than complex. Complex is better than complicated. Flat is better than nested. Sparse", "Errors should never pass silently. Unless explicitly silenced. In the face of ambiguity,", "honking great idea -- let's do more of those!\"\"\" better_number = (zen_of_P.count(\"better\")) is_number", "than complicated. Flat is better than nested. Sparse is better than dense. Readability", "idea. Namespaces are one honking great idea -- let's do more of those!\"\"\"", "better than complex. Complex is better than complicated. Flat is better than nested.", "(zen_of_P.count(\"is\")) never_number = (zen_of_P.count(\"never\")) print (\"Word BETTER used\", better_number, \"times\") print (\"Word IS", "never is often better than *right* now. If the implementation is hard to", "Dutch. Now is better than never. Although never is often better than *right*", "Unless explicitly silenced. In the face of ambiguity, refuse the temptation to guess.", "(zen_of_P.count(\"better\")) is_number = (zen_of_P.count(\"is\")) never_number = (zen_of_P.count(\"never\")) print (\"Word BETTER used\", better_number, \"times\")", "rules. Although practicality beats purity. Errors should never pass silently. Unless explicitly silenced.", "(\"Word NEVER used\", never_number, \"times\") upper_case = (zen_of_P.upper()) print(upper_case) replacing_i = (zen_of_P.replace('i','&')) print(replacing_i)", "\"\"\"Beautiful is better than ugly. Explicit is better than implicit. Simple is better", "do more of those!\"\"\" better_number = (zen_of_P.count(\"better\")) is_number = (zen_of_P.count(\"is\")) never_number = (zen_of_P.count(\"never\"))", "the implementation is easy to explain, it may be a good idea. 
Namespaces", "often better than *right* now. If the implementation is hard to explain, it's", "is better than complicated. Flat is better than nested. Sparse is better than", "dense. Readability counts. Special cases aren't special enough to break the rules. Although", "is better than complex. Complex is better than complicated. Flat is better than", "zen_of_P = \"\"\"Beautiful is better than ugly. Explicit is better than implicit. Simple", "is better than dense. Readability counts. Special cases aren't special enough to break", "than implicit. Simple is better than complex. Complex is better than complicated. Flat", "never. Although never is often better than *right* now. If the implementation is", "aren't special enough to break the rules. Although practicality beats purity. Errors should" ]
[ "class_browser(request, session_browser, browser_context) -> BrowserRecorder: \"\"\" Creates a class-scoped tab context and binds", "@pytest.fixture(scope=\"session\") def session_browser_disabled() -> bool: return True @pytest.fixture def browser(build_browser) -> BrowserRecorder: \"\"\"Creates", "pngs=BrowserRecorder.pngs, test_name=test_name, test_description=doc, outcome=outcome, start_time=timer.start_time, end_time=timer.end_time, traceback=tb, console_errors=console_logs, ) BrowserRecorder.pngs = [] test_report.results.append(result)", "browser_class(**browser_args) return inner @pytest.fixture(scope=\"session\") def session_browser(build_browser) -> BrowserRecorder: \"\"\" A browser instance that", "You may also provide a list of urls to visit to clear cookies", "traceback=tb, console_errors=console_logs, ) BrowserRecorder.pngs = [] test_report.results.append(result) @pytest.hookimpl(tryfirst=True, hookwrapper=True) def pytest_runtest_makereport(item, call): \"\"\"", "group.addoption( \"--report-dir\", action=\"store\", dest=\"report_dir\", default=os.environ.get(\"REPORT_DIR\", os.path.join(os.getcwd(), \"webdriver-report\")), help=\"The path to the directory where", "exception.orig: tb = f\"{exception_msg}\\n{exception.orig=}\" console_logs = [log.get(\"message\", \"\") for log in exception.logs] if", "yield report = outcome.get_result() if report.when == \"call\": doc = getattr(getattr(item, \"function\", None),", "test.\" ) doc = None test_name = f\"{request.node.name}\" outcome = Outcome.never_started # TODO:", "Only instantiated if it is used, but by default will be used in", "outcome = Outcome.failure if call_summary.report.failed else Outcome.success if call_summary and call_summary.excinfo and not", "= [] timer: Timed with Timed() as timer: yield call_summary = getattr(request.node, \"report_result\",", "chrome_options fixture.\"\"\" browser = build_browser() try: yield browser finally: browser.quit() @pytest.fixture(scope=\"class\") def class_browser(build_browser,", "the configured chrome_options fixture.\"\"\" browser = build_browser() try: yield browser finally: browser.quit() @pytest.fixture(scope=\"class\")", "Aggregate worker reports into this \"root\" report. for result_file in [os.path.join(report_dir, f) for", "need a fresh instance each test, you can set `disable_session_browser=1` in your environment.", "class as 'self.browser'; this tab will close once all tests in the class", "List, Optional, Type, Union import pytest from pydantic import BaseSettings, validator from selenium", "if _SETTINGS.disable_session_browser: logger.warning(\"Disabling auto-use of 'session_browser', this may significantly decrease test performance.\") @pytest.fixture(scope=\"session\")", "it is used, but by default will be used in both the 'browser'", "browser yield browser @pytest.fixture(scope=\"session\") def session_browser_disabled() -> bool: return False @pytest.fixture(scope=\"session\") def report_dir(request):", "browser.delete_all_cookies() for url in cookie_urls: browser.get(url) browser.delete_all_cookies() browser.close_tab() return inner if _SETTINGS.disable_session_browser: logger.warning(\"Disabling", "you can set `disable_session_browser=1` in your environment. 
\"\"\" with browser_context(session_browser) as browser: request.cls.browser", "import BaseSettings, validator from selenium import webdriver from .browser import BrowserError, BrowserRecorder, Chrome,", "\"call\": doc = getattr(getattr(item, \"function\", None), \"__doc__\", None) item.report_result = ReportResult(report=report, excinfo=call.excinfo, doc=doc)", "the requesting class as 'self.browser'; this tab will close once all tests in", "the session scope, so you only need to use this if you are", "but by default will be used in both the 'browser' and 'class_browser' fixtures,", "others should be part of 'pytest_addoption()' \"\"\" # If set to True, will", "= Report.parse_file(result_file) test_report.results.extend(worker_report.results) os.remove(result_file) exporter.export_all(test_report, report_dir) else: # If there are other workers,", "exist_ok=True) return dir_ @pytest.fixture(scope=\"session\", autouse=True) def report_generator(report_dir, test_report): with tempfile.NamedTemporaryFile(prefix=\"worker.\", dir=report_dir) as worker_file:", "urls to visit to clear cookies at the end of your session, if", "title=report_title, ) @pytest.fixture(scope=\"session\") def selenium_server(request) -> Optional[str]: \"\"\"Returns a non-empty string or None\"\"\"", "browser finally: browser.quit() @pytest.fixture(scope=\"session\") def browser_context() -> Callable[..., Chrome]: \"\"\" This fixture allows", "args @pytest.fixture(scope=\"session\") def browser_class(browser_args) -> Type[BrowserRecorder]: if browser_args.get(\"command_executor\"): return Remote return Chrome @pytest.fixture(scope=\"session\")", "browser_context() -> Callable[..., Chrome]: \"\"\" This fixture allows you to create a fresh", "BrowserRecorder, cookie_urls: Optional[List[str]] = None) -> BrowserRecorder: browser.open_tab() cookie_urls = cookie_urls or []", "[] try: yield browser finally: browser.delete_all_cookies() for url in cookie_urls: browser.get(url) browser.delete_all_cookies() browser.close_tab()", "_SETTINGS.disable_session_browser: logger.warning(\"Disabling auto-use of 'session_browser', this may significantly decrease test performance.\") @pytest.fixture(scope=\"session\") def", "return browser_class(**browser_args) return inner @pytest.fixture(scope=\"session\") def session_browser(build_browser) -> BrowserRecorder: \"\"\" A browser instance", "f\"Test {request.node} reported no outcomes; \" f\"this usually indicates a fixture caused an", "dir=report_dir) as worker_file: suffix = \".\".join(worker_file.name.split(\".\")[1:]) yield test_report.stop_timer() exporter = ReportExporter() workers =", "extend this: @pytest.fixture(scope='session') def chrome_options(chrome_options) -> ChromeOptions: chrome_options.add_argument(\"--option-name\") return chrome_options or override it", "outcome = Outcome.failure exception: BaseException = call_summary.excinfo.value exception_msg = f\"{exception.__class__.__name__}: {str(exception)}\" if isinstance(exception,", "\"\"\" An extensible instance of ChromeOptions with default options configured for a balance", "selenium_server(request) -> Optional[str]: \"\"\"Returns a non-empty string or None\"\"\" value = request.config.getoption(\"selenium_server\") if", "create a fresh context for a given browser instance. 
import logging
import os
import sys
import tempfile
from contextlib import contextmanager
from typing import Callable, Dict, List, Optional, Type, Union

import pytest
from pydantic import BaseSettings, validator
from selenium import webdriver

from .browser import BrowserError, BrowserRecorder, Chrome, Remote
from .models import Outcome, Report, ReportResult, TestResult, Timed
from .report_exporter import ReportExporter

_here = os.path.abspath(os.path.dirname(__file__))
logger = logging.getLogger(__name__)


class EnvSettings(BaseSettings):
    """
    Automatically derives from environment variables and translates truthy/falsey strings into bools.
    Only required for code that must be conditionally loaded; all others should be part of 'pytest_addoption()'
    """

    # If set to True, will generate a new browser instance within every request
    # for a given scope, instead of only creating a single instance and generating
    # contexts for each test.
    # This has a significant performance impact, but sometimes cannot be avoided.
    disable_session_browser: Optional[bool] = False

    @validator("*", pre=True, always=True)
    def handle_empty_string(cls, v):
        if not v:
            return None
        return v


_SETTINGS = EnvSettings()
\"\"\" tb", "docs if they exist # class TestFooBar: # \"\"\" # When Foo is", "code that must be conditionally loaded; all others should be part of 'pytest_addoption()'", "test-run. \"\"\" outcome = yield report = outcome.get_result() if report.when == \"call\": doc", "running will be responsible for aggregating and reporting results. exporter.export_json(test_report, report_dir, dest_filename=f\"{suffix}.result.json\") @pytest.fixture(autouse=True)", "BrowserRecorder: return browser_class(**browser_args) return inner @pytest.fixture(scope=\"session\") def session_browser(build_browser) -> BrowserRecorder: \"\"\" A browser", "def session_browser_disabled() -> bool: return False @pytest.fixture(scope=\"session\") def report_dir(request): dir_ = request.config.getoption(\"report_dir\") os.makedirs(dir_,", "and not tb: outcome = Outcome.failure exception: BaseException = call_summary.excinfo.value exception_msg = f\"{exception.__class__.__name__}:", "your environment.\" ) @pytest.fixture def browser(session_browser, browser_context) -> BrowserRecorder: \"\"\" Creates a function-scoped", "browser instance. The default behavior of the `browser` fixture is to always run", "\"\"\" with browser_context(session_browser) as browser: yield browser @pytest.fixture(scope=\"class\") def class_browser(request, session_browser, browser_context) ->", "default=os.environ.get(\"REPORT_DIR\", os.path.join(os.getcwd(), \"webdriver-report\")), help=\"The path to the directory where artifacts should be stored.\",", "-> BrowserRecorder: browser.open_tab() cookie_urls = cookie_urls or [] try: yield browser finally: browser.delete_all_cookies()", "report status post test-run. \"\"\" outcome = yield report = outcome.get_result() if report.when", "None return v _SETTINGS = EnvSettings() def pytest_addoption(parser): group = parser.getgroup(\"webdriver_recorder\") group.addoption( \"--selenium-server\",", "will be used in both the 'browser' and 'class_browser' fixtures, unless \"disable_session_browser=1\" is", "to the requesting class as 'self.browser'; this tab will close once all tests", "of the test will not be saved. \"\"\" tb = None console_logs =", "the entire test run. Only instantiated if it is used, but by default", "group.addoption( \"--jinja-template\", action=\"store\", dest=\"report_template\", default=os.path.join(_here, \"report.template.html\"), ) group.addoption( \"--report-title\", action=\"store\", dest=\"report_title\", default=\"Webdriver Recorder", "fixture itself simply passes the context manager, so you can use it like", "import contextmanager from typing import Callable, Dict, List, Optional, Type, Union import pytest", "help=\"The path to the directory where artifacts should be stored.\", ) group.addoption( \"--jinja-template\",", "return None @pytest.fixture(scope=\"session\") def chrome_options() -> webdriver.ChromeOptions: \"\"\" An extensible instance of ChromeOptions", "= request.config.getoption(\"report_dir\") os.makedirs(dir_, exist_ok=True) return dir_ @pytest.fixture(scope=\"session\", autouse=True) def report_generator(report_dir, test_report): with tempfile.NamedTemporaryFile(prefix=\"worker.\",", "`browser` fixture. 
The fixture itself simply passes the context manager, so you can", "constant default by overriding the report_title fixture.\", ) @pytest.fixture(scope=\"session\", autouse=True) def clean_screenshots(report_dir): screenshots_dir", "a list of urls to visit to clear cookies at the end of", "to visit to clear cookies at the end of your session, if the", "to report_file after a test run. Without this, the results of the test", "a test run. Without this, the results of the test will not be", "f\"{request.node.name}\" outcome = Outcome.never_started # TODO: Figure out a way to include class", "string or None\"\"\" value = request.config.getoption(\"selenium_server\") if value: return value.strip() return None @pytest.fixture(scope=\"session\")", "os.listdir(report_dir) if f.endswith(\".result.json\")) if not workers: test_report.outcome = Outcome.success # Aggregate worker reports", ".models import Outcome, Report, ReportResult, TestResult, Timed from .report_exporter import ReportExporter _here =", "current worker. The last worker running will be responsible for aggregating and reporting", "else: logging.error( f\"Test {request.node} reported no outcomes; \" f\"this usually indicates a fixture", "instances using the following settings:\\n\" f\" Browser class: {browser_class.__name__}\\n\" f\" Browser args: {dict(browser_args)}\"", "as worker_file: suffix = \".\".join(worker_file.name.split(\".\")[1:]) yield test_report.stop_timer() exporter = ReportExporter() workers = list(f", "from .report_exporter import ReportExporter _here = os.path.abspath(os.path.dirname(__file__)) logger = logging.getLogger(__name__) class EnvSettings(BaseSettings): \"\"\"", "test run. Only instantiated if it is used, but by default will be", "a fresh instance of the browser for use in the requesting class, using", "fresh instance each test, you can set `disable_session_browser=1` in your environment. \"\"\" with", "If there are other workers, only export the report json of the #", "-> BrowserRecorder: \"\"\" Creates a class-scoped tab context and binds it to the", "test will not be saved. \"\"\" tb = None console_logs = [] timer:", "required for code that must be conditionally loaded; all others should be part", "BrowserRecorder: \"\"\" Creates a fresh instance of the browser for use in the", "@pytest.fixture(scope=\"session\") def browser_args(selenium_server, chrome_options) -> Dict[str, Optional[Union[webdriver.ChromeOptions, str]]]: args = {\"options\": chrome_options} if", "try disabling \" \"by setting 'disable_session_browser=1' in your environment.\" ) @pytest.fixture def browser(session_browser,", "the # current worker. The last worker running will be responsible for aggregating", "browser finally: browser.quit() else: logger.info( \"Enabling auto-use of 'session_browser'; if your tests appear", "to report status post test-run. \"\"\" outcome = yield report = outcome.get_result() if", "behavior is not enough to cover your use case. \"\"\" @contextmanager def inner(browser:", "non-empty string or None\"\"\" value = request.config.getoption(\"selenium_server\") if value: return value.strip() return None", "test_report(report_title) -> Report: args = [] if len(sys.argv) > 1: args.extend(sys.argv[1:]) return Report(", "= call_summary.report.nodeid outcome = Outcome.failure if call_summary.report.failed else Outcome.success if call_summary and call_summary.excinfo", "BrowserRecorder: \"\"\" A browser instance that is kept open for the entire test", "your environment. 
\"\"\" with browser_context(session_browser) as browser: request.cls.browser = browser yield browser @pytest.fixture(scope=\"session\")", "default by overriding the report_title fixture.\", ) @pytest.fixture(scope=\"session\", autouse=True) def clean_screenshots(report_dir): screenshots_dir =", "len(sys.argv) > 1: args.extend(sys.argv[1:]) return Report( arguments=\" \".join(args), outcome=Outcome.never_started, title=report_title, ) @pytest.fixture(scope=\"session\") def", "if f.startswith(\"worker.\")) worker_results = list(f for f in os.listdir(report_dir) if f.endswith(\".result.json\")) if not", "try: yield browser finally: browser.delete_all_cookies() for url in cookie_urls: browser.get(url) browser.delete_all_cookies() browser.close_tab() return", "@pytest.fixture def browser(build_browser) -> BrowserRecorder: \"\"\"Creates a fresh instance of the browser using", "to connect to (eg localhost:4444)\", ) group.addoption( \"--report-dir\", action=\"store\", dest=\"report_dir\", default=os.environ.get(\"REPORT_DIR\", os.path.join(os.getcwd(), \"webdriver-report\")),", "use it like so: def test_something(browser_context): with browser_context() as browser: browser.get('https://www.uw.edu') You may", "test_report.results.append(result) @pytest.hookimpl(tryfirst=True, hookwrapper=True) def pytest_runtest_makereport(item, call): \"\"\" This gives us hooks from which", "\"--selenium-server\", action=\"store\", dest=\"selenium_server\", default=os.environ.get(\"REMOTE_SELENIUM\"), help=\"Remote selenium webdriver to connect to (eg localhost:4444)\", )", "@pytest.hookimpl(tryfirst=True, hookwrapper=True) def pytest_runtest_makereport(item, call): \"\"\" This gives us hooks from which to", "and translates truthy/falsey strings into bools. Only required for code that must be", "to use this if you are not using (or are overriding) the `browser`", "browser_context(session_browser) as browser: request.cls.browser = browser yield browser @pytest.fixture(scope=\"session\") def session_browser_disabled() -> bool:", "def test_something(browser_context): with browser_context() as browser: browser.get('https://www.uw.edu') You may also provide a list", "request) -> BrowserRecorder: \"\"\" Creates a fresh instance of the browser for use", "This has a significant performance impact, # but sometimes cannot be avoided. disable_session_browser:", "a balance between performance and test isolation. You can extend this: @pytest.fixture(scope='session') def", "{\"options\": chrome_options} if selenium_server: args[\"command_executor\"] = f\"http://{selenium_server}/wd/hub\" return args @pytest.fixture(scope=\"session\") def browser_class(browser_args) ->", "\"\"\" browser = build_browser() try: yield browser finally: browser.quit() @pytest.fixture(scope=\"session\") def browser_context() ->", "= yield report = outcome.get_result() if report.when == \"call\": doc = getattr(getattr(item, \"function\",", "function-scoped tab context for the session_browser which cleans up after itself (to the", "class docs if they exist # class TestFooBar: # \"\"\" # When Foo", "exception_msg = f\"{exception.__class__.__name__}: {str(exception)}\" if isinstance(exception, BrowserError): if exception.orig: tb = f\"{exception_msg}\\n{exception.orig=}\" console_logs", "in your environment.\" ) @pytest.fixture def browser(session_browser, browser_context) -> BrowserRecorder: \"\"\" Creates a", "'delete_all_cookies' behavior is not enough to cover your use case. 
\"\"\" @contextmanager def", "yield browser @pytest.fixture(scope=\"class\") def class_browser(request, session_browser, browser_context) -> BrowserRecorder: \"\"\" Creates a class-scoped", "args = {\"options\": chrome_options} if selenium_server: args[\"command_executor\"] = f\"http://{selenium_server}/wd/hub\" return args @pytest.fixture(scope=\"session\") def", "logger.info( \"Enabling auto-use of 'session_browser'; if your tests appear stuck, try disabling \"", "Optional, Type, Union import pytest from pydantic import BaseSettings, validator from selenium import", "using the configure chrome_options fixture. \"\"\" browser = build_browser() request.cls.browser = browser try:", "\"root\" report. for result_file in [os.path.join(report_dir, f) for f in worker_results]: worker_report =", "console_logs = [] timer: Timed with Timed() as timer: yield call_summary = getattr(request.node,", "can set `disable_session_browser=1` in your environment. \"\"\" with browser_context(session_browser) as browser: request.cls.browser =", "webdriver.ChromeOptions() # Our default options promote a balance between # performance and test", "logging.error( f\"Test {request.node} reported no outcomes; \" f\"this usually indicates a fixture caused", "# \"\"\" # def test_a_baz(self): # \"\"\"and baz is bop\"\"\" # do_work('bop') #", "disable_session_browser: Optional[bool] = False @validator(\"*\", pre=True, always=True) def handle_empty_string(cls, v): if not v:", "@pytest.fixture(scope=\"session\") def build_browser(browser_args, browser_class) -> Callable[..., BrowserRecorder]: logger.info( \"Browser generator will build instances", "import pytest from pydantic import BaseSettings, validator from selenium import webdriver from .browser", "\" \"You may also provide a constant default by overriding the report_title fixture.\",", "if not workers: test_report.outcome = Outcome.success # Aggregate worker reports into this \"root\"", "pytest_runtest_makereport(item, call): \"\"\" This gives us hooks from which to report status post", "and will clean up after itself (to the best of its ability). If", "options.add_argument(\"--incognito\") options.add_argument(\"--disable-application-cache\") options.add_argument(\"--no-sandbox\") options.add_argument(\"--disable-dev-shm-usage\") return options @pytest.fixture(scope=\"session\") def browser_args(selenium_server, chrome_options) -> Dict[str, Optional[Union[webdriver.ChromeOptions,", "if it is used, but by default will be used in both the", "clean up after itself (to the best of its ability). If you need", "# but sometimes cannot be avoided. 
@pytest.fixture(scope="session", autouse=True)
def clean_screenshots(report_dir):
    screenshots_dir = os.path.join(report_dir, "screenshots")
    if os.path.exists(screenshots_dir):
        old_screenshots = os.listdir(screenshots_dir)
        for png in old_screenshots:
            os.remove(os.path.join(screenshots_dir, png))


@pytest.fixture(scope="session", autouse=True)
def test_report(report_title) -> Report:
    args = []
    if len(sys.argv) > 1:
        args.extend(sys.argv[1:])
    return Report(
        arguments=" ".join(args),
        outcome=Outcome.never_started,
        title=report_title,
    )


@pytest.fixture(scope="session")
def selenium_server(request) -> Optional[str]:
    """Returns a non-empty string or None"""
    value = request.config.getoption("selenium_server")
    if value:
        return value.strip()
    return None
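
# Sketch of the override mentioned in the --report-title help text above; this would live in
# a test suite's conftest.py (the fixture name comes from that help text, the title string is
# a placeholder):
#
#     import pytest
#
#     @pytest.fixture(scope="session")
#     def report_title() -> str:
#         return "My service smoke tests"
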
@pytest.fixture(scope="session")
def chrome_options() -> webdriver.ChromeOptions:
    """
    An extensible instance of ChromeOptions with default options configured for a balance
    between performance and test isolation.

    You can extend this:

        @pytest.fixture(scope='session')
        def chrome_options(chrome_options) -> ChromeOptions:
            chrome_options.add_argument("--option-name")
            return chrome_options

    or override it entirely:

        @pytest.fixture(scope='session')
        def chrome_options() -> ChromeOptions:
            return ChromeOptions()
    """
    options = webdriver.ChromeOptions()

    # Our default options promote a balance between
    # performance and test isolation.
    options.add_argument("--headless")
    options.add_argument("--incognito")
    options.add_argument("--disable-application-cache")
    options.add_argument("--no-sandbox")
    options.add_argument("--disable-dev-shm-usage")
    return options


@pytest.fixture(scope="session")
def browser_args(selenium_server, chrome_options) -> Dict[str, Optional[Union[webdriver.ChromeOptions, str]]]:
    args = {"options": chrome_options}
    if selenium_server:
        args["command_executor"] = f"http://{selenium_server}/wd/hub"
    return args


@pytest.fixture(scope="session")
def browser_class(browser_args) -> Type[BrowserRecorder]:
    if browser_args.get("command_executor"):
        return Remote
    return Chrome
@pytest.fixture(scope="session")
def build_browser(browser_args, browser_class) -> Callable[..., BrowserRecorder]:
    logger.info(
        "Browser generator will build instances using the following settings:\n"
        f"    Browser class: {browser_class.__name__}\n"
        f"    Browser args: {dict(browser_args)}"
    )

    def inner() -> BrowserRecorder:
        return browser_class(**browser_args)

    return inner


@pytest.fixture(scope="session")
def session_browser(build_browser) -> BrowserRecorder:
    """
    A browser instance that is kept open for the entire test run.
    Only instantiated if it is used, but by default will be used in both the 'browser' and
    'class_browser' fixtures, unless "disable_session_browser=1" is set in the environment.
    """
    browser = build_browser()
    try:
        yield browser
    finally:
        browser.quit()
\"\"\" tb = None console_logs = []", "call_summary.report.failed else Outcome.success if call_summary and call_summary.excinfo and not tb: outcome = Outcome.failure", "Type, Union import pytest from pydantic import BaseSettings, validator from selenium import webdriver", "= os.path.join(report_dir, \"screenshots\") if os.path.exists(screenshots_dir): old_screenshots = os.listdir(screenshots_dir) for png in old_screenshots: os.remove(os.path.join(screenshots_dir,", "exporter = ReportExporter() workers = list(f for f in os.listdir(report_dir) if f.startswith(\"worker.\")) worker_results", "autouse=True) def test_report(report_title) -> Report: args = [] if len(sys.argv) > 1: args.extend(sys.argv[1:])", "with default options configured for a balance between performance and test isolation. You", "TestResult, Timed from .report_exporter import ReportExporter _here = os.path.abspath(os.path.dirname(__file__)) logger = logging.getLogger(__name__) class", "tab context and binds it to the requesting class as 'self.browser'; this tab", "setting up the test.\" ) doc = None test_name = f\"{request.node.name}\" outcome =", "# for a given scope, instead of only creating a single instance and", "exist # class TestFooBar: # \"\"\" # When Foo is bar # \"\"\"", "bar # \"\"\" # def test_a_baz(self): # \"\"\"and baz is bop\"\"\" # do_work('bop')", "is bop\" result = TestResult( pngs=BrowserRecorder.pngs, test_name=test_name, test_description=doc, outcome=outcome, start_time=timer.start_time, end_time=timer.end_time, traceback=tb, console_errors=console_logs,", ".report_exporter import ReportExporter _here = os.path.abspath(os.path.dirname(__file__)) logger = logging.getLogger(__name__) class EnvSettings(BaseSettings): \"\"\" Automatically", "browser_context) -> BrowserRecorder: \"\"\" Creates a function-scoped tab context for the session_browser which", "typing import Callable, Dict, List, Optional, Type, Union import pytest from pydantic import", "@pytest.fixture(scope=\"session\", autouse=True) def test_report(report_title) -> Report: args = [] if len(sys.argv) > 1:", "\"When foo is bar and baz is bop\" result = TestResult( pngs=BrowserRecorder.pngs, test_name=test_name,", "is bop\"\"\" # do_work('bop') # The report output should then read \"When foo", "case. \"\"\" @contextmanager def inner(browser: BrowserRecorder, cookie_urls: Optional[List[str]] = None) -> BrowserRecorder: browser.open_tab()", "test, you can set `disable_session_browser=1` in your environment. \"\"\" with browser_context(session_browser) as browser:", "\"Browser generator will build instances using the following settings:\\n\" f\" Browser class: {browser_class.__name__}\\n\"", "configured chrome_options fixture.\"\"\" browser = build_browser() try: yield browser finally: browser.quit() @pytest.fixture(scope=\"class\") def", "from .browser import BrowserError, BrowserRecorder, Chrome, Remote from .models import Outcome, Report, ReportResult,", "between performance and test isolation. You can extend this: @pytest.fixture(scope='session') def chrome_options(chrome_options) ->", "for each class, you can set `disable_session_browser=1` in your environment. 
\"\"\" with browser_context(session_browser)", "url in cookie_urls: browser.get(url) browser.delete_all_cookies() browser.close_tab() return inner if _SETTINGS.disable_session_browser: logger.warning(\"Disabling auto-use of", "options @pytest.fixture(scope=\"session\") def browser_args(selenium_server, chrome_options) -> Dict[str, Optional[Union[webdriver.ChromeOptions, str]]]: args = {\"options\": chrome_options}", "directory where artifacts should be stored.\", ) group.addoption( \"--jinja-template\", action=\"store\", dest=\"report_template\", default=os.path.join(_here, \"report.template.html\"),", "chrome_options fixture. \"\"\" browser = build_browser() request.cls.browser = browser try: yield browser finally:", "@pytest.fixture(scope=\"session\") def session_browser_disabled() -> bool: return False @pytest.fixture(scope=\"session\") def report_dir(request): dir_ = request.config.getoption(\"report_dir\")", "`browser` fixture is to always run in a context of the session scope,", "EnvSettings(BaseSettings): \"\"\" Automatically derives from environment variables and translates truthy/falsey strings into bools.", "the context manager, so you can use it like so: def test_something(browser_context): with", "bools. Only required for code that must be conditionally loaded; all others should", "you can use it like so: def test_something(browser_context): with browser_context() as browser: browser.get('https://www.uw.edu')", "browser.quit() else: logger.info( \"Enabling auto-use of 'session_browser'; if your tests appear stuck, try", "a fresh instance each test, you can set `disable_session_browser=1` in your environment. \"\"\"", "of its ability). If you need a fresh browser instance for each class,", "by overriding the report_title fixture.\", ) @pytest.fixture(scope=\"session\", autouse=True) def clean_screenshots(report_dir): screenshots_dir = os.path.join(report_dir,", "in your environment. \"\"\" with browser_context(session_browser) as browser: request.cls.browser = browser yield browser", "= None) -> BrowserRecorder: browser.open_tab() cookie_urls = cookie_urls or [] try: yield browser", "the results of the test will not be saved. 
\"\"\" tb = None", "of 'session_browser', this may significantly decrease test performance.\") @pytest.fixture(scope=\"session\") def session_browser_disabled() -> bool:", "console_logs = [log.get(\"message\", \"\") for log in exception.logs] if not tb: tb =", "\" f\"this usually indicates a fixture caused an error when setting up the", "selenium_server: args[\"command_executor\"] = f\"http://{selenium_server}/wd/hub\" return args @pytest.fixture(scope=\"session\") def browser_class(browser_args) -> Type[BrowserRecorder]: if browser_args.get(\"command_executor\"):", "test_description=doc, outcome=outcome, start_time=timer.start_time, end_time=timer.end_time, traceback=tb, console_errors=console_logs, ) BrowserRecorder.pngs = [] test_report.results.append(result) @pytest.hookimpl(tryfirst=True, hookwrapper=True)", "def inner(browser: BrowserRecorder, cookie_urls: Optional[List[str]] = None) -> BrowserRecorder: browser.open_tab() cookie_urls = cookie_urls", "BrowserRecorder: \"\"\" Creates a function-scoped tab context for the session_browser which cleans up", "[] if len(sys.argv) > 1: args.extend(sys.argv[1:]) return Report( arguments=\" \".join(args), outcome=Outcome.never_started, title=report_title, )", "def build_browser(browser_args, browser_class) -> Callable[..., BrowserRecorder]: logger.info( \"Browser generator will build instances using", "the browser using the configured chrome_options fixture.\"\"\" browser = build_browser() try: yield browser", "\"screenshots\") if os.path.exists(screenshots_dir): old_screenshots = os.listdir(screenshots_dir) for png in old_screenshots: os.remove(os.path.join(screenshots_dir, png)) @pytest.fixture(scope=\"session\",", "a significant performance impact, # but sometimes cannot be avoided. disable_session_browser: Optional[bool] =", "fresh context for a given browser instance. 
if _SETTINGS.disable_session_browser:
    logger.warning("Disabling auto-use of 'session_browser', this may significantly decrease test performance.")

    @pytest.fixture(scope="session")
    def session_browser_disabled() -> bool:
        return True

    @pytest.fixture
    def browser(build_browser) -> BrowserRecorder:
        """Creates a fresh instance of the browser using the configured chrome_options fixture."""
        browser = build_browser()
        try:
            yield browser
        finally:
            browser.quit()

    @pytest.fixture(scope="class")
    def class_browser(build_browser, request) -> BrowserRecorder:
        """
        Creates a fresh instance of the browser for use in the requesting class,
        using the configured chrome_options fixture.
        """
        browser = build_browser()
        request.cls.browser = browser
        try:
            yield browser
        finally:
            browser.quit()

else:
    logger.info(
        "Enabling auto-use of 'session_browser'; if your tests appear stuck, try disabling "
        "by setting 'disable_session_browser=1' in your environment."
    )

    @pytest.fixture
    def browser(session_browser, browser_context) -> BrowserRecorder:
        """
        Creates a function-scoped tab context for the session_browser which cleans up after
        itself (to the best of its ability).
        If you need a fresh instance each test, you can set `disable_session_browser=1` in your environment.
        """
        with browser_context(session_browser) as browser:
            yield browser

    @pytest.fixture(scope="class")
    def class_browser(request, session_browser, browser_context) -> BrowserRecorder:
        """
        Creates a class-scoped tab context and binds it to the requesting class as 'self.browser';
        this tab will close once all tests in the class have run, and will clean up after itself
        (to the best of its ability).
        If you need a fresh browser instance for each class, you can set `disable_session_browser=1`
        in your environment.
        """
        with browser_context(session_browser) as browser:
            request.cls.browser = browser
            yield browser

    @pytest.fixture(scope="session")
    def session_browser_disabled() -> bool:
        return False
@pytest.fixture(scope="session")
def report_dir(request):
    dir_ = request.config.getoption("report_dir")
    os.makedirs(dir_, exist_ok=True)
    return dir_


@pytest.fixture(scope="session", autouse=True)
def report_generator(report_dir, test_report):
    with tempfile.NamedTemporaryFile(prefix="worker.", dir=report_dir) as worker_file:
        suffix = ".".join(worker_file.name.split(".")[1:])
        yield
    test_report.stop_timer()
    exporter = ReportExporter()
    workers = list(f for f in os.listdir(report_dir) if f.startswith("worker."))
    worker_results = list(f for f in os.listdir(report_dir) if f.endswith(".result.json"))
    if not workers:
        test_report.outcome = Outcome.success
        # Aggregate worker reports into this "root" report.
        for result_file in [os.path.join(report_dir, f) for f in worker_results]:
            worker_report = Report.parse_file(result_file)
            test_report.results.extend(worker_report.results)
            os.remove(result_file)
        exporter.export_all(test_report, report_dir)
    else:
        # If there are other workers, only export the report json of the current worker.
        # The last worker running will be responsible for aggregating and reporting results.
        exporter.export_json(test_report, report_dir, dest_filename=f"{suffix}.result.json")
\"\"\" browser = build_browser() try: yield browser finally: browser.quit() @pytest.fixture(scope=\"session\") def browser_context()", "'session_browser'; if your tests appear stuck, try disabling \" \"by setting 'disable_session_browser=1' in", "report. for result_file in [os.path.join(report_dir, f) for f in worker_results]: worker_report = Report.parse_file(result_file)", "class EnvSettings(BaseSettings): \"\"\" Automatically derives from environment variables and translates truthy/falsey strings into", "= Outcome.failure if call_summary.report.failed else Outcome.success if call_summary and call_summary.excinfo and not tb:", "Browser class: {browser_class.__name__}\\n\" f\" Browser args: {dict(browser_args)}\" ) def inner() -> BrowserRecorder: return", "start_time=timer.start_time, end_time=timer.end_time, traceback=tb, console_errors=console_logs, ) BrowserRecorder.pngs = [] test_report.results.append(result) @pytest.hookimpl(tryfirst=True, hookwrapper=True) def pytest_runtest_makereport(item,", "your use case. \"\"\" @contextmanager def inner(browser: BrowserRecorder, cookie_urls: Optional[List[str]] = None) ->", "# class TestFooBar: # \"\"\" # When Foo is bar # \"\"\" #", "\"\"\"Returns a non-empty string or None\"\"\" value = request.config.getoption(\"selenium_server\") if value: return value.strip()", "may also provide a list of urls to visit to clear cookies at", "the end of your session, if the default 'delete_all_cookies' behavior is not enough", "environment. \"\"\" with browser_context(session_browser) as browser: request.cls.browser = browser yield browser @pytest.fixture(scope=\"session\") def", "this: @pytest.fixture(scope='session') def chrome_options(chrome_options) -> ChromeOptions: chrome_options.add_argument(\"--option-name\") return chrome_options or override it entirely:", "{request.node} reported no outcomes; \" f\"this usually indicates a fixture caused an error", "def handle_empty_string(cls, v): if not v: return None return v _SETTINGS = EnvSettings()", "options.add_argument(\"--disable-dev-shm-usage\") return options @pytest.fixture(scope=\"session\") def browser_args(selenium_server, chrome_options) -> Dict[str, Optional[Union[webdriver.ChromeOptions, str]]]: args =", "a fresh context for a given browser instance. The default behavior of the", "(or are overriding) the `browser` fixture. The fixture itself simply passes the context", "v): if not v: return None return v _SETTINGS = EnvSettings() def pytest_addoption(parser):", "itself (to the best of its ability). If you need a fresh instance", "If you need a fresh browser instance for each class, you can set", "autouse=True) def report_generator(report_dir, test_report): with tempfile.NamedTemporaryFile(prefix=\"worker.\", dir=report_dir) as worker_file: suffix = \".\".join(worker_file.name.split(\".\")[1:]) yield", "# \"\"\" # When Foo is bar # \"\"\" # def test_a_baz(self): #", "not be saved. 
\"\"\" tb = None console_logs = [] timer: Timed with", "\"\") for log in exception.logs] if not tb: tb = f\"{exception_msg}\\n(No traceback is", "report_test(report_generator, request, test_report): \"\"\" Print the results to report_file after a test run.", "browser finally: browser.quit() @pytest.fixture(scope=\"class\") def class_browser(build_browser, request) -> BrowserRecorder: \"\"\" Creates a fresh", "build_browser(browser_args, browser_class) -> Callable[..., BrowserRecorder]: logger.info( \"Browser generator will build instances using the", "Chrome @pytest.fixture(scope=\"session\") def build_browser(browser_args, browser_class) -> Callable[..., BrowserRecorder]: logger.info( \"Browser generator will build", "= list(f for f in os.listdir(report_dir) if f.endswith(\".result.json\")) if not workers: test_report.outcome =", "test. # This has a significant performance impact, # but sometimes cannot be", "try: yield browser finally: browser.quit() @pytest.fixture(scope=\"session\") def browser_context() -> Callable[..., Chrome]: \"\"\" This", "settings:\\n\" f\" Browser class: {browser_class.__name__}\\n\" f\" Browser args: {dict(browser_args)}\" ) def inner() ->", "class, you can set `disable_session_browser=1` in your environment. \"\"\" with browser_context(session_browser) as browser:", "browser.close_tab() return inner if _SETTINGS.disable_session_browser: logger.warning(\"Disabling auto-use of 'session_browser', this may significantly decrease", "may significantly decrease test performance.\") @pytest.fixture(scope=\"session\") def session_browser_disabled() -> bool: return True @pytest.fixture", "return v _SETTINGS = EnvSettings() def pytest_addoption(parser): group = parser.getgroup(\"webdriver_recorder\") group.addoption( \"--selenium-server\", action=\"store\",", "Outcome.success # Aggregate worker reports into this \"root\" report. for result_file in [os.path.join(report_dir,", "the 'browser' and 'class_browser' fixtures, unless \"disable_session_browser=1\" is set in the environment. \"\"\"", "\"disable_session_browser=1\" is set in the environment. \"\"\" browser = build_browser() try: yield browser", "None console_logs = [] timer: Timed with Timed() as timer: yield call_summary =", "timer: yield call_summary = getattr(request.node, \"report_result\", None) if call_summary: doc = call_summary.doc test_name", "\"\"\" # def test_a_baz(self): # \"\"\"and baz is bop\"\"\" # do_work('bop') # The", "so you only need to use this if you are not using (or", "dir_ = request.config.getoption(\"report_dir\") os.makedirs(dir_, exist_ok=True) return dir_ @pytest.fixture(scope=\"session\", autouse=True) def report_generator(report_dir, test_report): with", "context for the session_browser which cleans up after itself (to the best of", "# \"\"\"and baz is bop\"\"\" # do_work('bop') # The report output should then", "provided, a default will be used. \" \"You may also provide a constant", "a given browser instance. The default behavior of the `browser` fixture is to", "@pytest.fixture(autouse=True) def report_test(report_generator, request, test_report): \"\"\" Print the results to report_file after a", "import sys import tempfile from contextlib import contextmanager from typing import Callable, Dict,", "simply passes the context manager, so you can use it like so: def", "workers, only export the report json of the # current worker. 
The last", "BrowserRecorder: \"\"\"Creates a fresh instance of the browser using the configured chrome_options fixture.\"\"\"", "chrome_options} if selenium_server: args[\"command_executor\"] = f\"http://{selenium_server}/wd/hub\" return args @pytest.fixture(scope=\"session\") def browser_class(browser_args) -> Type[BrowserRecorder]:", "\"\"\" # If set to True, will generate a new browser instance within", "fixture allows you to create a fresh context for a given browser instance.", "balance between # performance and test isolation. options.add_argument(\"--headless\") options.add_argument(\"--incognito\") options.add_argument(\"--disable-application-cache\") options.add_argument(\"--no-sandbox\") options.add_argument(\"--disable-dev-shm-usage\") return", "given browser instance. The default behavior of the `browser` fixture is to always", "-> bool: return True @pytest.fixture def browser(build_browser) -> BrowserRecorder: \"\"\"Creates a fresh instance", "-> BrowserRecorder: \"\"\" Creates a fresh instance of the browser for use in", "the class have run, and will clean up after itself (to the best", "class have run, and will clean up after itself (to the best of", "timer: Timed with Timed() as timer: yield call_summary = getattr(request.node, \"report_result\", None) if", "cover your use case. \"\"\" @contextmanager def inner(browser: BrowserRecorder, cookie_urls: Optional[List[str]] = None)", "Creates a function-scoped tab context for the session_browser which cleans up after itself", "# performance and test isolation. options.add_argument(\"--headless\") options.add_argument(\"--incognito\") options.add_argument(\"--disable-application-cache\") options.add_argument(\"--no-sandbox\") options.add_argument(\"--disable-dev-shm-usage\") return options @pytest.fixture(scope=\"session\")", "When Foo is bar # \"\"\" # def test_a_baz(self): # \"\"\"and baz is", "override it entirely: @pytest.fixture(scope='session') def chrome_options() -> ChromeOptions: return ChromeOptions() \"\"\" options =", "fixture caused an error when setting up the test.\" ) doc = None", "for f in worker_results]: worker_report = Report.parse_file(result_file) test_report.results.extend(worker_report.results) os.remove(result_file) exporter.export_all(test_report, report_dir) else: #", "else: # If there are other workers, only export the report json of", "Foo is bar # \"\"\" # def test_a_baz(self): # \"\"\"and baz is bop\"\"\"", "contexts for each test. # This has a significant performance impact, # but", "outcome = yield report = outcome.get_result() if report.when == \"call\": doc = getattr(getattr(item,", "derives from environment variables and translates truthy/falsey strings into bools. 
Only required for", "getattr(request.node, \"report_result\", None) if call_summary: doc = call_summary.doc test_name = call_summary.report.nodeid outcome =", "report = outcome.get_result() if report.when == \"call\": doc = getattr(getattr(item, \"function\", None), \"__doc__\",", "can use it like so: def test_something(browser_context): with browser_context() as browser: browser.get('https://www.uw.edu') You", "os.path.abspath(os.path.dirname(__file__)) logger = logging.getLogger(__name__) class EnvSettings(BaseSettings): \"\"\" Automatically derives from environment variables and", "test_something(browser_context): with browser_context() as browser: browser.get('https://www.uw.edu') You may also provide a list of", "browser @pytest.fixture(scope=\"class\") def class_browser(request, session_browser, browser_context) -> BrowserRecorder: \"\"\" Creates a class-scoped tab", "itself (to the best of its ability). If you need a fresh browser", "bool: return True @pytest.fixture def browser(build_browser) -> BrowserRecorder: \"\"\"Creates a fresh instance of", "\"\"\"and baz is bop\"\"\" # do_work('bop') # The report output should then read", "if os.path.exists(screenshots_dir): old_screenshots = os.listdir(screenshots_dir) for png in old_screenshots: os.remove(os.path.join(screenshots_dir, png)) @pytest.fixture(scope=\"session\", autouse=True)", "def report_test(report_generator, request, test_report): \"\"\" Print the results to report_file after a test", "test_report): \"\"\" Print the results to report_file after a test run. Without this,", "default options promote a balance between # performance and test isolation. options.add_argument(\"--headless\") options.add_argument(\"--incognito\")", "\"\"\" with browser_context(session_browser) as browser: request.cls.browser = browser yield browser @pytest.fixture(scope=\"session\") def session_browser_disabled()", "include class docs if they exist # class TestFooBar: # \"\"\" # When", "read \"When foo is bar and baz is bop\" result = TestResult( pngs=BrowserRecorder.pngs,", "request.config.getoption(\"report_dir\") os.makedirs(dir_, exist_ok=True) return dir_ @pytest.fixture(scope=\"session\", autouse=True) def report_generator(report_dir, test_report): with tempfile.NamedTemporaryFile(prefix=\"worker.\", dir=report_dir)", "using the following settings:\\n\" f\" Browser class: {browser_class.__name__}\\n\" f\" Browser args: {dict(browser_args)}\" )", "the directory where artifacts should be stored.\", ) group.addoption( \"--jinja-template\", action=\"store\", dest=\"report_template\", default=os.path.join(_here,", "options.add_argument(\"--headless\") options.add_argument(\"--incognito\") options.add_argument(\"--disable-application-cache\") options.add_argument(\"--no-sandbox\") options.add_argument(\"--disable-dev-shm-usage\") return options @pytest.fixture(scope=\"session\") def browser_args(selenium_server, chrome_options) -> Dict[str,", "manager, so you can use it like so: def test_something(browser_context): with browser_context() as", "png)) @pytest.fixture(scope=\"session\", autouse=True) def test_report(report_title) -> Report: args = [] if len(sys.argv) >", "= Outcome.success # Aggregate worker reports into this \"root\" report. for result_file in", "BrowserRecorder: browser.open_tab() cookie_urls = cookie_urls or [] try: yield browser finally: browser.delete_all_cookies() for", "environment variables and translates truthy/falsey strings into bools. 
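# Illustrative invocation (sketch, not part of the plugin): the options registered above can be
# combined with the environment toggles read by EnvSettings. Hostnames, paths, and the title
# below are placeholders.
#
#   REPORT_DIR=./webdriver-report disable_session_browser=1 pytest \
#       --selenium-server localhost:4444 \
#       --report-title "Nightly UI run"
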
@pytest.fixture(scope="session", autouse=True)
def clean_screenshots(report_dir):
    screenshots_dir = os.path.join(report_dir, "screenshots")
    if os.path.exists(screenshots_dir):
        old_screenshots = os.listdir(screenshots_dir)
        for png in old_screenshots:
            os.remove(os.path.join(screenshots_dir, png))


@pytest.fixture(scope="session", autouse=True)
def test_report(report_title) -> Report:
    args = []
    if len(sys.argv) > 1:
        args.extend(sys.argv[1:])
    return Report(
        arguments=" ".join(args),
        outcome=Outcome.never_started,
        title=report_title,
    )


@pytest.fixture(scope="session")
def selenium_server(request) -> Optional[str]:
    """Returns a non-empty string or None."""
    value = request.config.getoption("selenium_server")
    if value:
        return value.strip()
    return None


@pytest.fixture(scope="session")
def chrome_options() -> webdriver.ChromeOptions:
    """
    An extensible instance of ChromeOptions with default options configured for a balance
    between performance and test isolation.

    You can extend this:

        @pytest.fixture(scope='session')
        def chrome_options(chrome_options) -> ChromeOptions:
            chrome_options.add_argument("--option-name")
            return chrome_options

    or override it entirely:

        @pytest.fixture(scope='session')
        def chrome_options() -> ChromeOptions:
            return ChromeOptions()
    """
    options = webdriver.ChromeOptions()

    # Our default options promote a balance between performance and test isolation.
    options.add_argument("--headless")
    options.add_argument("--incognito")
    options.add_argument("--disable-application-cache")
    options.add_argument("--no-sandbox")
    options.add_argument("--disable-dev-shm-usage")

    return options

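# Downstream override sketch (hypothetical flag value): a consuming suite can extend the
# defaults above from its own conftest.py, for example to pin the window size used for
# screenshots.
#
#   @pytest.fixture(scope="session")
#   def chrome_options(chrome_options) -> ChromeOptions:
#       chrome_options.add_argument("--window-size=1920,1080")
#       return chrome_options
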
@pytest.fixture(scope="session")
def browser_args(selenium_server, chrome_options) -> Dict[str, Optional[Union[webdriver.ChromeOptions, str]]]:
    args = {"options": chrome_options}
    if selenium_server:
        args["command_executor"] = f"http://{selenium_server}/wd/hub"
    return args


@pytest.fixture(scope="session")
def browser_class(browser_args) -> Type[BrowserRecorder]:
    if browser_args.get("command_executor"):
        return Remote
    return Chrome


@pytest.fixture(scope="session")
def build_browser(browser_args, browser_class) -> Callable[..., BrowserRecorder]:
    logger.info(
        "Browser generator will build instances using the following settings:\n"
        f"    Browser class: {browser_class.__name__}\n"
        f"    Browser args: {dict(browser_args)}"
    )

    def inner() -> BrowserRecorder:
        return browser_class(**browser_args)

    return inner


@pytest.fixture(scope="session")
def session_browser(build_browser) -> BrowserRecorder:
    """
    A browser instance that is kept open for the entire test run.
    Only instantiated if it is used, but by default will be used in both the 'browser' and
    'class_browser' fixtures, unless "disable_session_browser=1" is set in the environment.
    """
    browser = build_browser()
    try:
        yield browser
    finally:
        browser.quit()


@pytest.fixture(scope="session")
def browser_context() -> Callable[..., Chrome]:
    """
    This fixture allows you to create a fresh context for a given browser instance.
    The default behavior of the `browser` fixture is to always run in a context of the session
    scope, so you only need to use this if you are not using (or are overriding) the `browser`
    fixture.

    The fixture itself simply passes the context manager, so you can use it like so:

        def test_something(browser_context):
            with browser_context() as browser:
                browser.get('https://www.uw.edu')

    You may also provide a list of urls to visit to clear cookies at the end of your session,
    if the default 'delete_all_cookies' behavior is not enough to cover your use case.
    """

    @contextmanager
    def inner(browser: BrowserRecorder, cookie_urls: Optional[List[str]] = None) -> BrowserRecorder:
        browser.open_tab()
        cookie_urls = cookie_urls or []
        try:
            yield browser
        finally:
            browser.delete_all_cookies()
            for url in cookie_urls:
                browser.get(url)
                browser.delete_all_cookies()
            browser.close_tab()

    return inner

if _SETTINGS.disable_session_browser:
    logger.warning("Disabling auto-use of 'session_browser', this may significantly decrease test performance.")

    @pytest.fixture(scope="session")
    def session_browser_disabled() -> bool:
        return True

    @pytest.fixture
    def browser(build_browser) -> BrowserRecorder:
        """Creates a fresh instance of the browser using the configured chrome_options fixture."""
        browser = build_browser()
        try:
            yield browser
        finally:
            browser.quit()

    @pytest.fixture(scope="class")
    def class_browser(build_browser, request) -> BrowserRecorder:
        """
        Creates a fresh instance of the browser for use in the requesting class,
        using the configured chrome_options fixture.
        """
        browser = build_browser()
        request.cls.browser = browser
        try:
            yield browser
        finally:
            browser.quit()

else:
    logger.info(
        "Enabling auto-use of 'session_browser'; if your tests appear stuck, try disabling "
        "by setting 'disable_session_browser=1' in your environment."
    )

    @pytest.fixture
    def browser(session_browser, browser_context) -> BrowserRecorder:
        """
        Creates a function-scoped tab context for the session_browser which cleans up after itself
        (to the best of its ability). If you need a fresh browser instance for each test,
        you can set `disable_session_browser=1` in your environment.
        """
        with browser_context(session_browser) as browser:
            yield browser

    @pytest.fixture(scope="class")
    def class_browser(request, session_browser, browser_context) -> BrowserRecorder:
        """
        Creates a class-scoped tab context and binds it to the requesting class as 'self.browser';
        this tab will close once all tests in the class have run, and will clean up after itself
        (to the best of its ability). If you need a fresh browser instance for each class,
        you can set `disable_session_browser=1` in your environment.
        """
        with browser_context(session_browser) as browser:
            request.cls.browser = browser
            yield browser

    @pytest.fixture(scope="session")
    def session_browser_disabled() -> bool:
        return False

The default behavior of the `browser` fixture is to always run in", "your session, if the default 'delete_all_cookies' behavior is not enough to cover your", "within every request # for a given scope, instead of only creating a", "browser = build_browser() try: yield browser finally: browser.quit() @pytest.fixture(scope=\"session\") def browser_context() -> Callable[...,", "ReportExporter() workers = list(f for f in os.listdir(report_dir) if f.startswith(\"worker.\")) worker_results = list(f", "if call_summary: doc = call_summary.doc test_name = call_summary.report.nodeid outcome = Outcome.failure if call_summary.report.failed", "in exception.logs] if not tb: tb = f\"{exception_msg}\\n(No traceback is available)\" else: logging.error(", "\"function\", None), \"__doc__\", None) item.report_result = ReportResult(report=report, excinfo=call.excinfo, doc=doc) @pytest.fixture(scope=\"session\") def report_title(request) ->", "True @pytest.fixture def browser(build_browser) -> BrowserRecorder: \"\"\"Creates a fresh instance of the browser", "run, and will clean up after itself (to the best of its ability).", "Automatically derives from environment variables and translates truthy/falsey strings into bools. Only required", "for f in os.listdir(report_dir) if f.startswith(\"worker.\")) worker_results = list(f for f in os.listdir(report_dir)", "BaseException = call_summary.excinfo.value exception_msg = f\"{exception.__class__.__name__}: {str(exception)}\" if isinstance(exception, BrowserError): if exception.orig: tb", "False @validator(\"*\", pre=True, always=True) def handle_empty_string(cls, v): if not v: return None return", "every request # for a given scope, instead of only creating a single", "decrease test performance.\") @pytest.fixture(scope=\"session\") def session_browser_disabled() -> bool: return True @pytest.fixture def browser(build_browser)", "a fixture caused an error when setting up the test.\" ) doc =", "can extend this: @pytest.fixture(scope='session') def chrome_options(chrome_options) -> ChromeOptions: chrome_options.add_argument(\"--option-name\") return chrome_options or override", "you to create a fresh context for a given browser instance. The default", "only export the report json of the # current worker. The last worker", "def chrome_options(chrome_options) -> ChromeOptions: chrome_options.add_argument(\"--option-name\") return chrome_options or override it entirely: @pytest.fixture(scope='session') def", "from .models import Outcome, Report, ReportResult, TestResult, Timed from .report_exporter import ReportExporter _here", "\".\".join(worker_file.name.split(\".\")[1:]) yield test_report.stop_timer() exporter = ReportExporter() workers = list(f for f in os.listdir(report_dir)", "BaseSettings, validator from selenium import webdriver from .browser import BrowserError, BrowserRecorder, Chrome, Remote", "# TODO: Figure out a way to include class docs if they exist", "report output should then read \"When foo is bar and baz is bop\"", "Outcome.failure exception: BaseException = call_summary.excinfo.value exception_msg = f\"{exception.__class__.__name__}: {str(exception)}\" if isinstance(exception, BrowserError): if", "results to report_file after a test run. Without this, the results of the", "else Outcome.success if call_summary and call_summary.excinfo and not tb: outcome = Outcome.failure exception:", "for each test. 
# This has a significant performance impact, # but sometimes", "if your tests appear stuck, try disabling \" \"by setting 'disable_session_browser=1' in your", "action=\"store\", dest=\"report_dir\", default=os.environ.get(\"REPORT_DIR\", os.path.join(os.getcwd(), \"webdriver-report\")), help=\"The path to the directory where artifacts should", "def chrome_options() -> ChromeOptions: return ChromeOptions() \"\"\" options = webdriver.ChromeOptions() # Our default", "# do_work('bop') # The report output should then read \"When foo is bar", "f) for f in worker_results]: worker_report = Report.parse_file(result_file) test_report.results.extend(worker_report.results) os.remove(result_file) exporter.export_all(test_report, report_dir) else:", "or [] try: yield browser finally: browser.delete_all_cookies() for url in cookie_urls: browser.get(url) browser.delete_all_cookies()", "TestFooBar: # \"\"\" # When Foo is bar # \"\"\" # def test_a_baz(self):", "= [] test_report.results.append(result) @pytest.hookimpl(tryfirst=True, hookwrapper=True) def pytest_runtest_makereport(item, call): \"\"\" This gives us hooks", "clean_screenshots(report_dir): screenshots_dir = os.path.join(report_dir, \"screenshots\") if os.path.exists(screenshots_dir): old_screenshots = os.listdir(screenshots_dir) for png in", "ChromeOptions() \"\"\" options = webdriver.ChromeOptions() # Our default options promote a balance between", "not workers: test_report.outcome = Outcome.success # Aggregate worker reports into this \"root\" report.", "browser_context() as browser: browser.get('https://www.uw.edu') You may also provide a list of urls to", "you only need to use this if you are not using (or are", "\"by setting 'disable_session_browser=1' in your environment.\" ) @pytest.fixture def browser(session_browser, browser_context) -> BrowserRecorder:", "and test isolation. options.add_argument(\"--headless\") options.add_argument(\"--incognito\") options.add_argument(\"--disable-application-cache\") options.add_argument(\"--no-sandbox\") options.add_argument(\"--disable-dev-shm-usage\") return options @pytest.fixture(scope=\"session\") def browser_args(selenium_server,", "is bar # \"\"\" # def test_a_baz(self): # \"\"\"and baz is bop\"\"\" #", "args.extend(sys.argv[1:]) return Report( arguments=\" \".join(args), outcome=Outcome.never_started, title=report_title, ) @pytest.fixture(scope=\"session\") def selenium_server(request) -> Optional[str]:", "cannot be avoided. disable_session_browser: Optional[bool] = False @validator(\"*\", pre=True, always=True) def handle_empty_string(cls, v):", "return Remote return Chrome @pytest.fixture(scope=\"session\") def build_browser(browser_args, browser_class) -> Callable[..., BrowserRecorder]: logger.info( \"Browser", "export the report json of the # current worker. The last worker running", "creating a single instance and generating # contexts for each test. # This", "end of your session, if the default 'delete_all_cookies' behavior is not enough to", "in the environment. \"\"\" browser = build_browser() try: yield browser finally: browser.quit() @pytest.fixture(scope=\"session\")", "that is kept open for the entire test run. 
Only instantiated if it", "@pytest.fixture(scope=\"session\") def chrome_options() -> webdriver.ChromeOptions: \"\"\" An extensible instance of ChromeOptions with default", "Timed from .report_exporter import ReportExporter _here = os.path.abspath(os.path.dirname(__file__)) logger = logging.getLogger(__name__) class EnvSettings(BaseSettings):", "promote a balance between # performance and test isolation. options.add_argument(\"--headless\") options.add_argument(\"--incognito\") options.add_argument(\"--disable-application-cache\") options.add_argument(\"--no-sandbox\")", "avoided. disable_session_browser: Optional[bool] = False @validator(\"*\", pre=True, always=True) def handle_empty_string(cls, v): if not", "performance.\") @pytest.fixture(scope=\"session\") def session_browser_disabled() -> bool: return True @pytest.fixture def browser(build_browser) -> BrowserRecorder:", "used in both the 'browser' and 'class_browser' fixtures, unless \"disable_session_browser=1\" is set in", "so you can use it like so: def test_something(browser_context): with browser_context() as browser:", "-> BrowserRecorder: \"\"\" Creates a function-scoped tab context for the session_browser which cleans", "stored.\", ) group.addoption( \"--jinja-template\", action=\"store\", dest=\"report_template\", default=os.path.join(_here, \"report.template.html\"), ) group.addoption( \"--report-title\", action=\"store\", dest=\"report_title\",", "in worker_results]: worker_report = Report.parse_file(result_file) test_report.results.extend(worker_report.results) os.remove(result_file) exporter.export_all(test_report, report_dir) else: # If there", "workers = list(f for f in os.listdir(report_dir) if f.startswith(\"worker.\")) worker_results = list(f for", "a balance between # performance and test isolation. options.add_argument(\"--headless\") options.add_argument(\"--incognito\") options.add_argument(\"--disable-application-cache\") options.add_argument(\"--no-sandbox\") options.add_argument(\"--disable-dev-shm-usage\")", "entire test run. Only instantiated if it is used, but by default will", "default options configured for a balance between performance and test isolation. You can", "finally: browser.quit() @pytest.fixture(scope=\"class\") def class_browser(build_browser, request) -> BrowserRecorder: \"\"\" Creates a fresh instance", "finally: browser.quit() else: logger.info( \"Enabling auto-use of 'session_browser'; if your tests appear stuck,", "run. Only instantiated if it is used, but by default will be used", "browser using the configured chrome_options fixture.\"\"\" browser = build_browser() try: yield browser finally:", "help=\"An optional title for your report; if not provided, a default will be", "os.makedirs(dir_, exist_ok=True) return dir_ @pytest.fixture(scope=\"session\", autouse=True) def report_generator(report_dir, test_report): with tempfile.NamedTemporaryFile(prefix=\"worker.\", dir=report_dir) as", "truthy/falsey strings into bools. Only required for code that must be conditionally loaded;", "from selenium import webdriver from .browser import BrowserError, BrowserRecorder, Chrome, Remote from .models", ") @pytest.fixture(scope=\"session\") def selenium_server(request) -> Optional[str]: \"\"\"Returns a non-empty string or None\"\"\" value", "best of its ability). 
If you need a fresh browser instance for each", "The report output should then read \"When foo is bar and baz is", "be conditionally loaded; all others should be part of 'pytest_addoption()' \"\"\" # If", "in old_screenshots: os.remove(os.path.join(screenshots_dir, png)) @pytest.fixture(scope=\"session\", autouse=True) def test_report(report_title) -> Report: args = []", "yield browser finally: browser.delete_all_cookies() for url in cookie_urls: browser.get(url) browser.delete_all_cookies() browser.close_tab() return inner", "os import sys import tempfile from contextlib import contextmanager from typing import Callable,", "of the session scope, so you only need to use this if you", "a way to include class docs if they exist # class TestFooBar: #", "\"__doc__\", None) item.report_result = ReportResult(report=report, excinfo=call.excinfo, doc=doc) @pytest.fixture(scope=\"session\") def report_title(request) -> str: return", "# If set to True, will generate a new browser instance within every", "screenshots_dir = os.path.join(report_dir, \"screenshots\") if os.path.exists(screenshots_dir): old_screenshots = os.listdir(screenshots_dir) for png in old_screenshots:", "browser = build_browser() try: yield browser finally: browser.quit() @pytest.fixture(scope=\"class\") def class_browser(build_browser, request) ->", "Recorder Summary\", help=\"An optional title for your report; if not provided, a default", "dest_filename=f\"{suffix}.result.json\") @pytest.fixture(autouse=True) def report_test(report_generator, request, test_report): \"\"\" Print the results to report_file after", "last worker running will be responsible for aggregating and reporting results. exporter.export_json(test_report, report_dir,", "= f\"{request.node.name}\" outcome = Outcome.never_started # TODO: Figure out a way to include", "f\" Browser args: {dict(browser_args)}\" ) def inner() -> BrowserRecorder: return browser_class(**browser_args) return inner", "both the 'browser' and 'class_browser' fixtures, unless \"disable_session_browser=1\" is set in the environment.", "all others should be part of 'pytest_addoption()' \"\"\" # If set to True,", "dest=\"report_template\", default=os.path.join(_here, \"report.template.html\"), ) group.addoption( \"--report-title\", action=\"store\", dest=\"report_title\", default=\"Webdriver Recorder Summary\", help=\"An optional", "@pytest.fixture(scope=\"session\", autouse=True) def clean_screenshots(report_dir): screenshots_dir = os.path.join(report_dir, \"screenshots\") if os.path.exists(screenshots_dir): old_screenshots = os.listdir(screenshots_dir)", "as browser: request.cls.browser = browser yield browser @pytest.fixture(scope=\"session\") def session_browser_disabled() -> bool: return", "gives us hooks from which to report status post test-run. \"\"\" outcome =", "_SETTINGS = EnvSettings() def pytest_addoption(parser): group = parser.getgroup(\"webdriver_recorder\") group.addoption( \"--selenium-server\", action=\"store\", dest=\"selenium_server\", default=os.environ.get(\"REMOTE_SELENIUM\"),", "dir_ @pytest.fixture(scope=\"session\", autouse=True) def report_generator(report_dir, test_report): with tempfile.NamedTemporaryFile(prefix=\"worker.\", dir=report_dir) as worker_file: suffix =", "use in the requesting class, using the configure chrome_options fixture. 
\"\"\" browser =", "_here = os.path.abspath(os.path.dirname(__file__)) logger = logging.getLogger(__name__) class EnvSettings(BaseSettings): \"\"\" Automatically derives from environment", "= f\"{exception_msg}\\n{exception.orig=}\" console_logs = [log.get(\"message\", \"\") for log in exception.logs] if not tb:", "None) item.report_result = ReportResult(report=report, excinfo=call.excinfo, doc=doc) @pytest.fixture(scope=\"session\") def report_title(request) -> str: return request.config.getoption(\"report_title\")", "Chrome]: \"\"\" This fixture allows you to create a fresh context for a", "doc = call_summary.doc test_name = call_summary.report.nodeid outcome = Outcome.failure if call_summary.report.failed else Outcome.success", "Remote return Chrome @pytest.fixture(scope=\"session\") def build_browser(browser_args, browser_class) -> Callable[..., BrowserRecorder]: logger.info( \"Browser generator", "us hooks from which to report status post test-run. \"\"\" outcome = yield", "use case. \"\"\" @contextmanager def inner(browser: BrowserRecorder, cookie_urls: Optional[List[str]] = None) -> BrowserRecorder:", "the `browser` fixture. The fixture itself simply passes the context manager, so you", "be part of 'pytest_addoption()' \"\"\" # If set to True, will generate a", "session_browser_disabled() -> bool: return False @pytest.fixture(scope=\"session\") def report_dir(request): dir_ = request.config.getoption(\"report_dir\") os.makedirs(dir_, exist_ok=True)", "responsible for aggregating and reporting results. exporter.export_json(test_report, report_dir, dest_filename=f\"{suffix}.result.json\") @pytest.fixture(autouse=True) def report_test(report_generator, request,", "{str(exception)}\" if isinstance(exception, BrowserError): if exception.orig: tb = f\"{exception_msg}\\n{exception.orig=}\" console_logs = [log.get(\"message\", \"\")", "if browser_args.get(\"command_executor\"): return Remote return Chrome @pytest.fixture(scope=\"session\") def build_browser(browser_args, browser_class) -> Callable[..., BrowserRecorder]:", "bop\" result = TestResult( pngs=BrowserRecorder.pngs, test_name=test_name, test_description=doc, outcome=outcome, start_time=timer.start_time, end_time=timer.end_time, traceback=tb, console_errors=console_logs, )", "exception.logs] if not tb: tb = f\"{exception_msg}\\n(No traceback is available)\" else: logging.error( f\"Test", "this, the results of the test will not be saved. \"\"\" tb =", "BrowserError, BrowserRecorder, Chrome, Remote from .models import Outcome, Report, ReportResult, TestResult, Timed from", "using the configured chrome_options fixture.\"\"\" browser = build_browser() try: yield browser finally: browser.quit()", "build_browser() try: yield browser finally: browser.quit() @pytest.fixture(scope=\"class\") def class_browser(build_browser, request) -> BrowserRecorder: \"\"\"", "class: {browser_class.__name__}\\n\" f\" Browser args: {dict(browser_args)}\" ) def inner() -> BrowserRecorder: return browser_class(**browser_args)", "\"\"\" This gives us hooks from which to report status post test-run. \"\"\"", "tab will close once all tests in the class have run, and will", "clear cookies at the end of your session, if the default 'delete_all_cookies' behavior", "report_dir, dest_filename=f\"{suffix}.result.json\") @pytest.fixture(autouse=True) def report_test(report_generator, request, test_report): \"\"\" Print the results to report_file", "isolation. 
options.add_argument(\"--headless\") options.add_argument(\"--incognito\") options.add_argument(\"--disable-application-cache\") options.add_argument(\"--no-sandbox\") options.add_argument(\"--disable-dev-shm-usage\") return options @pytest.fixture(scope=\"session\") def browser_args(selenium_server, chrome_options) ->" ]
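A usage illustration follows (an editor's sketch, not part of the plugin source above): a consuming test suite can extend the chrome_options fixture and use the browser fixture roughly as the docstrings describe. The extra Chrome argument and the test body are assumptions; only the https://www.uw.edu URL is taken from the browser_context docstring.

import pytest
from selenium import webdriver


@pytest.fixture(scope="session")
def chrome_options(chrome_options) -> webdriver.ChromeOptions:
    # Extend the plugin defaults instead of replacing them (hypothetical extra argument).
    chrome_options.add_argument("--window-size=1280,1024")
    return chrome_options


def test_homepage_loads(browser):
    # `browser` is the per-test context supplied by the plugin; the assertion is illustrative only.
    browser.get("https://www.uw.edu")
    assert browser.title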
<gh_stars>0
a = [1, 2, 3]
b = [4, 5]

r = ((x, y) for x in a for y in b)
print(r)    # <generator object <genexpr> at 0x00000182580A0B88>
print(*r)   # (1, 4) (1, 5) (2, 4) (2, 5) (3, 4) (3, 5)
print(*r)   # Prints Nothing (the generator is already exhausted)

r = [(x, y) for x in a for y in b]
print(r)    # [(1, 4), (1, 5), (2, 4), (2, 5), (3, 4), (3, 5)]
print(*r)   # (1, 4) (1, 5) (2, 4) (2, 5) (3, 4) (3, 5)

l = []
l.append([1, 2, 3])
l.append([4, 5])
print(*zip(*l))  # (1, 4) (2, 5)

print(*zip((1, 2, 3), (10, 20, 30), (100, 200, 300)), sep="\n")
# (1, 10, 100)
# (2, 20, 200)
# (3, 30, 300)

print(*zip((1,), (10, 20), (100, 200, 300)), sep="\n")
# (1, 10, 100)

print(*[(1, 2, 3), (10, 20, 30), (100, 200, 300)], sep="\n")
# (1, 2, 3)
# (10, 20, 30)
# (100, 200, 300)

print([*range(1, 4)], [*range(10, 40, 10)], *range(100, 400, 100), sep="\n")
# [1, 2, 3]
# [10, 20, 30]
# 100
# 200
# 300
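A brief follow-on sketch (editor's addition, not from the original snippet): the nested comprehension above yields the same pairs as itertools.product, and zip(*rows) is the usual idiom for the transposition shown with zip(*l).

import itertools

a = [1, 2, 3]
b = [4, 5]
# The generator/list comprehension over two loops is equivalent to the Cartesian product.
assert list(itertools.product(a, b)) == [(x, y) for x in a for y in b]

# zip(*rows) transposes a sequence of equal-length rows into columns.
rows = [(1, 2, 3), (10, 20, 30), (100, 200, 300)]
assert list(zip(*rows)) == [(1, 10, 100), (2, 20, 200), (3, 30, 300)]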
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) University of St Andrews 2020-2021
# (c) University of Strathclyde 2020-2021
# (c) James Hutton Institute 2020-2021
#
# Author:
# <NAME>
#
# Contact
# <EMAIL>
#
# <NAME>,
# Biomolecular Sciences Building,
# University of St Andrews,
# <NAME>,
# St Andrews,
# KY16 9ST
# Scotland,
# UK
#
# The MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Script containing functions to retrieve paths to files and directories"""

from pathlib import Path


def get_file_paths(directory, prefixes=None, suffixes=None):
    """Retrieve paths to all files in input dir.

    :param directory: Path, path to directory from which files are to be retrieved
    :param prefixes: List of Str, prefixes of the file names to be retrieved
    :param suffixes: List of Str, suffixes of the file names to be retrieved

    Returns list of paths to fasta files.
    """
    # create empty list to store the file entries, to allow checking if no files returned
    file_paths = []

    # retrieve all files from input directory
    files_in_entries = (entry for entry in Path(directory).iterdir() if entry.is_file())

    if prefixes is None and suffixes is None:
        for item in files_in_entries:
            file_paths.append(item)

    elif prefixes is not None and suffixes is None:
        for item in files_in_entries:
            for prefix in prefixes:
                if item.name.startswith(prefix):
                    file_paths.append(item)

    elif prefixes is None and suffixes is not None:
        for item in files_in_entries:
            for suffix in suffixes:
                if item.name.endswith(suffix):
                    file_paths.append(item)

    else:
        for item in files_in_entries:
            for suffix in suffixes:
                for prefix in prefixes:
                    if item.name.startswith(prefix) and item.name.endswith(suffix):
                        file_paths.append(item)

    return file_paths


def get_dir_paths(directory, prefixes=None, suffixes=None):
    """Retrieve paths to all directories in input dir.

    :param directory: Path, path to directory from which directories are to be retrieved
    :param prefixes: List of Str, prefixes of the directory names to be retrieved
    :param suffixes: List of Str, suffixes of the directory names to be retrieved

    Returns list of paths to directories.
    """
    # create empty list to store the directory entries, to allow checking if none returned
    dir_paths = []

    # retrieve all directories from the input directory
    files_in_entries = (entry for entry in Path(directory).iterdir() if entry.is_dir())

    if prefixes is None and suffixes is None:
        for item in files_in_entries:
            dir_paths.append(item)

    elif prefixes is not None and suffixes is None:
        for item in files_in_entries:
            for prefix in prefixes:
                if item.name.startswith(prefix):
                    dir_paths.append(item)

    elif prefixes is None and suffixes is not None:
        for item in files_in_entries:
            for suffix in suffixes:
                if item.name.endswith(suffix):
                    dir_paths.append(item)

    else:
        for item in files_in_entries:
            for suffix in suffixes:
                for prefix in prefixes:
                    if item.name.startswith(prefix) and item.name.endswith(suffix):
                        dir_paths.append(item)

    return dir_paths
\"\"\"Script containing functions", "retrieved :param suffixes: List of Str, suffixes of the file names to be", "and item.name.endswith(suffix): file_paths.append(item) return file_paths def get_dir_paths(directory, prefixes=None, suffixes=None): \"\"\"Retrieve paths to all", "-*- coding: utf-8 -*- # (c) University of St Andrews 2020-2021 # (c)", "THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "the file entries, to allow checking if no files returned dir_paths = []", "if entry.is_file()) if prefixes is None and suffixes is None: for item in", "files returned dir_paths = [] # retrieve all files from input directory files_in_entries", "permit persons to whom the Software is # furnished to do so, subject", "Path, path to directory from which files are to be retrieved :param prefixes:", "# <NAME>, # Biomolecular Sciences Building, # University of St Andrews, # <NAME>,", "# Biomolecular Sciences Building, # University of St Andrews, # <NAME>, # St", "Permission is hereby granted, free of charge, to any person obtaining a copy", "# The MIT License # # Permission is hereby granted, free of charge,", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR", "None and suffixes is None: for item in files_in_entries: dir_paths.append(item) elif prefixes is", "IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT", "prefix in prefixes: if item.name.startswith(prefix): file_paths.append(item) elif prefixes is None and suffixes is", "EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,", "Software without restriction, including without limitation the rights # to use, copy, modify,", "file names to be retrieved Returns list of paths to fasta files. \"\"\"", "pathlib import Path def get_file_paths(directory, prefixes=None, suffixes=None): \"\"\"Retrieve paths to all files in", "the file names to be retrieved Returns list of paths to fasta files.", "retrieved :param prefixes: List of Str, prefixes of the file names to be", "# The above copyright notice and this permission notice shall be included in", "suffix in suffixes: if item.name.endswith(suffix): dir_paths.append(item) else: for item in files_in_entries: for suffix", "# of this software and associated documentation files (the \"Software\"), to deal #", "OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "above copyright notice and this permission notice shall be included in all #", "and directories\"\"\" from pathlib import Path def get_file_paths(directory, prefixes=None, suffixes=None): \"\"\"Retrieve paths to", "to allow checking if no files returned file_paths = [] # retrieve all", "sell # copies of the Software, and to permit persons to whom the", "WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE.", "substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED \"AS IS\",", "[] # retrieve all files from input directory files_in_entries = (entry for entry", "for item in files_in_entries: for suffix in suffixes: if item.name.endswith(suffix): dir_paths.append(item) else: for", "suffix in suffixes: for prefix in prefixes: if item.name.startswith(prefix) and item.name.endswith(suffix): dir_paths.append(item) return", "files_in_entries: for prefix in prefixes: if item.name.startswith(prefix): file_paths.append(item) elif prefixes is None and", "input directory files_in_entries = (entry for entry in Path(directory).iterdir() if entry.is_dir()) if prefixes", "python3 # -*- coding: utf-8 -*- # (c) University of St Andrews 2020-2021", "9ST # Scotland, # UK # # The MIT License # # Permission", "retrieve paths to files and directories\"\"\" from pathlib import Path def get_file_paths(directory, prefixes=None,", "not None and suffixes is None: for item in files_in_entries: for prefix in", "restriction, including without limitation the rights # to use, copy, modify, merge, publish,", "directories\"\"\" from pathlib import Path def get_file_paths(directory, prefixes=None, suffixes=None): \"\"\"Retrieve paths to all", "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS", "directory files_in_entries = (entry for entry in Path(directory).iterdir() if entry.is_file()) if prefixes is", "# # Permission is hereby granted, free of charge, to any person obtaining", "St Andrews, # <NAME>, # St Andrews, # KY16 9ST # Scotland, #", "entries, to allow checking if no files returned file_paths = [] # retrieve", "\"\"\"Retrieve paths to all files in input dir. :param directory: Path, path to", "files are to be retrieved :param prefixes: List of Str, prefixes of the", "in suffixes: if item.name.endswith(suffix): file_paths.append(item) else: for item in files_in_entries: for suffix in", "<NAME>, # Biomolecular Sciences Building, # University of St Andrews, # <NAME>, #", "BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR", "Sciences Building, # University of St Andrews, # <NAME>, # St Andrews, #", "this permission notice shall be included in all # copies or substantial portions", "be retrieved :param suffixes: List of Str, suffixes of the file names to", "THE # SOFTWARE. \"\"\"Script containing functions to retrieve paths to files and directories\"\"\"", "FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE", "OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT", "# copies or substantial portions of the Software. # # THE SOFTWARE IS", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING", "files (the \"Software\"), to deal # in the Software without restriction, including without", "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS", "to fasta files. 
\"\"\" # create empty list to store the file entries,", "# (c) University of Strathclyde 2020-2021 # (c) James Hutton Institute 2020-2021 #", "None: for item in files_in_entries: file_paths.append(item) elif prefixes is not None and suffixes", "IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE", ":param directory: Path, path to directory from which files are to be retrieved", "the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell", "prefixes is None and suffixes is not None: for item in files_in_entries: for", "Path(directory).iterdir() if entry.is_dir()) if prefixes is None and suffixes is None: for item", "following conditions: # # The above copyright notice and this permission notice shall", "of the Software, and to permit persons to whom the Software is #", "None and suffixes is None: for item in files_in_entries: file_paths.append(item) elif prefixes is", "entry in Path(directory).iterdir() if entry.is_dir()) if prefixes is None and suffixes is None:", "Andrews 2020-2021 # (c) University of Strathclyde 2020-2021 # (c) James Hutton Institute", "None: for item in files_in_entries: dir_paths.append(item) elif prefixes is not None and suffixes", "file_paths def get_dir_paths(directory, prefixes=None, suffixes=None): \"\"\"Retrieve paths to all directories in input dir.", "# (c) University of St Andrews 2020-2021 # (c) University of Strathclyde 2020-2021", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR", "from which files are to be retrieved :param prefixes: List of Str, prefixes", "item in files_in_entries: for prefix in prefixes: if item.name.startswith(prefix): dir_paths.append(item) elif prefixes is", "names to be retrieved :param suffixes: List of Str, suffixes of the file", "in files_in_entries: for prefix in prefixes: if item.name.startswith(prefix): dir_paths.append(item) elif prefixes is None", "is None: for item in files_in_entries: for prefix in prefixes: if item.name.startswith(prefix): dir_paths.append(item)", "\"\"\" # create empty list to store the file entries, to allow checking", "item in files_in_entries: for prefix in prefixes: if item.name.startswith(prefix): file_paths.append(item) elif prefixes is", "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER", "item.name.endswith(suffix): file_paths.append(item) else: for item in files_in_entries: for suffix in suffixes: for prefix", "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN", "including without limitation the rights # to use, copy, modify, merge, publish, distribute,", "to all files in input dir. 
:param directory: Path, path to directory from", "2020-2021 # (c) University of Strathclyde 2020-2021 # (c) James Hutton Institute 2020-2021", "dir_paths.append(item) else: for item in files_in_entries: for suffix in suffixes: for prefix in", "# -*- coding: utf-8 -*- # (c) University of St Andrews 2020-2021 #", "copyright notice and this permission notice shall be included in all # copies", "item in files_in_entries: for suffix in suffixes: if item.name.endswith(suffix): dir_paths.append(item) else: for item", "(c) University of Strathclyde 2020-2021 # (c) James Hutton Institute 2020-2021 # #", "in prefixes: if item.name.startswith(prefix): dir_paths.append(item) elif prefixes is None and suffixes is not", "# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "prefixes: if item.name.startswith(prefix): file_paths.append(item) elif prefixes is None and suffixes is not None:", "associated documentation files (the \"Software\"), to deal # in the Software without restriction,", "files_in_entries = (entry for entry in Path(directory).iterdir() if entry.is_dir()) if prefixes is None", "for item in files_in_entries: for suffix in suffixes: if item.name.endswith(suffix): file_paths.append(item) else: for", "prefixes is not None and suffixes is None: for item in files_in_entries: for", "suffixes=None): \"\"\"Retrieve paths to all files in input dir. :param directory: Path, path", "hereby granted, free of charge, to any person obtaining a copy # of", "of this software and associated documentation files (the \"Software\"), to deal # in", "OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS", "# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,", "is None: for item in files_in_entries: file_paths.append(item) elif prefixes is not None and", "all files in input dir. :param directory: Path, path to directory from which", "if item.name.startswith(prefix): dir_paths.append(item) elif prefixes is None and suffixes is not None: for", "# <NAME>, # St Andrews, # KY16 9ST # Scotland, # UK #", "names to be retrieved Returns list of paths to fasta files. \"\"\" #", "NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY", "not None: for item in files_in_entries: for suffix in suffixes: if item.name.endswith(suffix): dir_paths.append(item)", "<NAME>, # St Andrews, # KY16 9ST # Scotland, # UK # #", "checking if no files returned dir_paths = [] # retrieve all files from", "files_in_entries: file_paths.append(item) elif prefixes is not None and suffixes is None: for item", "to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of", "allow checking if no files returned dir_paths = [] # retrieve all files", "item.name.startswith(prefix): dir_paths.append(item) elif prefixes is None and suffixes is not None: for item", "the Software is # furnished to do so, subject to the following conditions:", "subject to the following conditions: # # The above copyright notice and this", "suffixes: if item.name.endswith(suffix): dir_paths.append(item) else: for item in files_in_entries: for suffix in suffixes:" ]
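The row above is a set of overlapping word windows taken from a small pathlib helper module (file/directory discovery with optional prefix/suffix filters, under an MIT licence header). The sketch below is assembled from those fragments for readability only: the original's explicit nested prefix/suffix loops are condensed into any() checks, and the mirror function get_dir_paths (identical except that it keeps entry.is_dir() entries) is not repeated.

from pathlib import Path

def get_file_paths(directory, prefixes=None, suffixes=None):
    """Retrieve paths to all files in the input dir.

    :param directory: Path, directory from which files are to be retrieved
    :param prefixes: list of str, file name prefixes to keep (optional)
    :param suffixes: list of str, file name suffixes to keep (optional)
    Returns a list of paths.
    """
    file_paths = []
    # Only regular files; the sibling get_dir_paths() filters on entry.is_dir() instead.
    entries = (entry for entry in Path(directory).iterdir() if entry.is_file())
    for item in entries:
        prefix_ok = prefixes is None or any(item.name.startswith(p) for p in prefixes)
        suffix_ok = suffixes is None or any(item.name.endswith(s) for s in suffixes)
        if prefix_ok and suffix_ok:
            file_paths.append(item)
    return file_paths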
[ "= loss_object(real, pred) mask = tf.cast(mask, dtype=loss_.dtype) loss_ *= mask return tf.reduce_sum(loss_)/tf.reduce_sum(mask) def", "tf loss_object = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True) def loss_function(real, pred): mask = tf.math.logical_not(tf.math.equal(real, 0)) loss_", "mask = tf.math.logical_not(tf.math.equal(real, 0)) loss_ = loss_object(real, pred) mask = tf.cast(mask, dtype=loss_.dtype) loss_", "mask return tf.reduce_sum(loss_)/tf.reduce_sum(mask) def accuracy_function(real, pred): accuracies = tf.equal(real, tf.argmax(pred, axis=1)) mask =", "= tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True) def loss_function(real, pred): mask = tf.math.logical_not(tf.math.equal(real, 0)) loss_ = loss_object(real,", "tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True) def loss_function(real, pred): mask = tf.math.logical_not(tf.math.equal(real, 0)) loss_ = loss_object(real, pred)", "accuracies) accuracies = tf.cast(accuracies, dtype=tf.float32) mask = tf.cast(mask, dtype=tf.float32) return tf.reduce_sum(accuracies) / tf.reduce_sum(mask)", "def accuracy_function(real, pred): accuracies = tf.equal(real, tf.argmax(pred, axis=1)) mask = tf.math.logical_not(tf.math.equal(real, 0)) accuracies", "pred): mask = tf.math.logical_not(tf.math.equal(real, 0)) loss_ = loss_object(real, pred) mask = tf.cast(mask, dtype=loss_.dtype)", "mask = tf.cast(mask, dtype=loss_.dtype) loss_ *= mask return tf.reduce_sum(loss_)/tf.reduce_sum(mask) def accuracy_function(real, pred): accuracies", "tensorflow as tf loss_object = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True) def loss_function(real, pred): mask = tf.math.logical_not(tf.math.equal(real,", "accuracy_function(real, pred): accuracies = tf.equal(real, tf.argmax(pred, axis=1)) mask = tf.math.logical_not(tf.math.equal(real, 0)) accuracies =", "= tf.cast(mask, dtype=loss_.dtype) loss_ *= mask return tf.reduce_sum(loss_)/tf.reduce_sum(mask) def accuracy_function(real, pred): accuracies =", "= tf.math.logical_not(tf.math.equal(real, 0)) loss_ = loss_object(real, pred) mask = tf.cast(mask, dtype=loss_.dtype) loss_ *=", "tf.math.logical_not(tf.math.equal(real, 0)) loss_ = loss_object(real, pred) mask = tf.cast(mask, dtype=loss_.dtype) loss_ *= mask", "= tf.math.logical_not(tf.math.equal(real, 0)) accuracies = tf.math.logical_and(mask, accuracies) accuracies = tf.cast(accuracies, dtype=tf.float32) mask =", "tf.math.logical_and(mask, accuracies) accuracies = tf.cast(accuracies, dtype=tf.float32) mask = tf.cast(mask, dtype=tf.float32) return tf.reduce_sum(accuracies) /", "tf.reduce_sum(loss_)/tf.reduce_sum(mask) def accuracy_function(real, pred): accuracies = tf.equal(real, tf.argmax(pred, axis=1)) mask = tf.math.logical_not(tf.math.equal(real, 0))", "= tf.math.logical_and(mask, accuracies) accuracies = tf.cast(accuracies, dtype=tf.float32) mask = tf.cast(mask, dtype=tf.float32) return tf.reduce_sum(accuracies)", "import tensorflow as tf loss_object = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True) def loss_function(real, pred): mask =", "pred): accuracies = tf.equal(real, tf.argmax(pred, axis=1)) mask = tf.math.logical_not(tf.math.equal(real, 0)) accuracies = tf.math.logical_and(mask,", "dtype=loss_.dtype) loss_ *= mask return tf.reduce_sum(loss_)/tf.reduce_sum(mask) def accuracy_function(real, pred): accuracies = tf.equal(real, tf.argmax(pred,", "return tf.reduce_sum(loss_)/tf.reduce_sum(mask) def accuracy_function(real, pred): accuracies = tf.equal(real, tf.argmax(pred, 
axis=1)) mask = tf.math.logical_not(tf.math.equal(real,", "axis=1)) mask = tf.math.logical_not(tf.math.equal(real, 0)) accuracies = tf.math.logical_and(mask, accuracies) accuracies = tf.cast(accuracies, dtype=tf.float32)", "from_logits=True) def loss_function(real, pred): mask = tf.math.logical_not(tf.math.equal(real, 0)) loss_ = loss_object(real, pred) mask", "= tf.equal(real, tf.argmax(pred, axis=1)) mask = tf.math.logical_not(tf.math.equal(real, 0)) accuracies = tf.math.logical_and(mask, accuracies) accuracies", "loss_function(real, pred): mask = tf.math.logical_not(tf.math.equal(real, 0)) loss_ = loss_object(real, pred) mask = tf.cast(mask,", "def loss_function(real, pred): mask = tf.math.logical_not(tf.math.equal(real, 0)) loss_ = loss_object(real, pred) mask =", "0)) accuracies = tf.math.logical_and(mask, accuracies) accuracies = tf.cast(accuracies, dtype=tf.float32) mask = tf.cast(mask, dtype=tf.float32)", "as tf loss_object = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True) def loss_function(real, pred): mask = tf.math.logical_not(tf.math.equal(real, 0))", "mask = tf.math.logical_not(tf.math.equal(real, 0)) accuracies = tf.math.logical_and(mask, accuracies) accuracies = tf.cast(accuracies, dtype=tf.float32) mask", "pred) mask = tf.cast(mask, dtype=loss_.dtype) loss_ *= mask return tf.reduce_sum(loss_)/tf.reduce_sum(mask) def accuracy_function(real, pred):", "loss_object = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True) def loss_function(real, pred): mask = tf.math.logical_not(tf.math.equal(real, 0)) loss_ =", "tf.math.logical_not(tf.math.equal(real, 0)) accuracies = tf.math.logical_and(mask, accuracies) accuracies = tf.cast(accuracies, dtype=tf.float32) mask = tf.cast(mask,", "tf.equal(real, tf.argmax(pred, axis=1)) mask = tf.math.logical_not(tf.math.equal(real, 0)) accuracies = tf.math.logical_and(mask, accuracies) accuracies =", "accuracies = tf.math.logical_and(mask, accuracies) accuracies = tf.cast(accuracies, dtype=tf.float32) mask = tf.cast(mask, dtype=tf.float32) return", "accuracies = tf.equal(real, tf.argmax(pred, axis=1)) mask = tf.math.logical_not(tf.math.equal(real, 0)) accuracies = tf.math.logical_and(mask, accuracies)", "loss_ *= mask return tf.reduce_sum(loss_)/tf.reduce_sum(mask) def accuracy_function(real, pred): accuracies = tf.equal(real, tf.argmax(pred, axis=1))", "tf.cast(mask, dtype=loss_.dtype) loss_ *= mask return tf.reduce_sum(loss_)/tf.reduce_sum(mask) def accuracy_function(real, pred): accuracies = tf.equal(real,", "*= mask return tf.reduce_sum(loss_)/tf.reduce_sum(mask) def accuracy_function(real, pred): accuracies = tf.equal(real, tf.argmax(pred, axis=1)) mask", "loss_object(real, pred) mask = tf.cast(mask, dtype=loss_.dtype) loss_ *= mask return tf.reduce_sum(loss_)/tf.reduce_sum(mask) def accuracy_function(real,", "loss_ = loss_object(real, pred) mask = tf.cast(mask, dtype=loss_.dtype) loss_ *= mask return tf.reduce_sum(loss_)/tf.reduce_sum(mask)", "tf.argmax(pred, axis=1)) mask = tf.math.logical_not(tf.math.equal(real, 0)) accuracies = tf.math.logical_and(mask, accuracies) accuracies = tf.cast(accuracies,", "0)) loss_ = loss_object(real, pred) mask = tf.cast(mask, dtype=loss_.dtype) loss_ *= mask return" ]
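The fragments in this row come from a padded-sequence loss/accuracy pair (the usual masked SparseCategoricalCrossentropy setup). Re-assembled in one place it reads as below; the only assumption needed to run it is that real holds int64 token ids with 0 used as the padding id.

import tensorflow as tf

loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

def loss_function(real, pred):
    # Ignore padding positions (token id 0) when averaging the per-token loss.
    mask = tf.math.logical_not(tf.math.equal(real, 0))
    loss_ = loss_object(real, pred)
    mask = tf.cast(mask, dtype=loss_.dtype)
    loss_ *= mask
    return tf.reduce_sum(loss_) / tf.reduce_sum(mask)

def accuracy_function(real, pred):
    # A position counts as correct only if it is non-padding and the argmax matches.
    accuracies = tf.equal(real, tf.argmax(pred, axis=1))  # real assumed int64 to match argmax
    mask = tf.math.logical_not(tf.math.equal(real, 0))
    accuracies = tf.math.logical_and(mask, accuracies)
    accuracies = tf.cast(accuracies, dtype=tf.float32)
    mask = tf.cast(mask, dtype=tf.float32)
    return tf.reduce_sum(accuracies) / tf.reduce_sum(mask)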
[]
[ "models.CharField(max_length=200) status = models.CharField(max_length=50) # Choices: Created, Running, Ended, Archived # Event settings", "= models.CharField(max_length=200) status = models.CharField(max_length=50) # Choices: Created, Running, Ended, Archived # Event", "here. class Event(models.Model): host = models.ForeignKey(User, on_delete=models.SET_NULL, null=True) name = models.CharField(max_length=200) status =", "Running, Ended, Archived # Event settings parameters x_label_min = models.CharField(max_length=200, default=\"\", null=True) x_label_max", "null=True) name = models.CharField(max_length=200) status = models.CharField(max_length=50) # Choices: Created, Running, Ended, Archived", "default=\"\", null=True) x_label_max = models.CharField(max_length=200, default=\"\", null=True) class Question(models.Model): event = models.ForeignKey(Event, on_delete=models.CASCADE)", "name = models.CharField(max_length=200) status = models.CharField(max_length=50) # Choices: Created, Running, Ended, Archived #", "class Event(models.Model): host = models.ForeignKey(User, on_delete=models.SET_NULL, null=True) name = models.CharField(max_length=200) status = models.CharField(max_length=50)", "<reponame>Sindhuja-SRL/back-end from django.db import models from django.contrib.auth.models import User # Create your models", "models.CharField(max_length=50) # Choices: Created, Running, Ended, Archived # Event settings parameters x_label_min =", "models from django.contrib.auth.models import User # Create your models here. class Event(models.Model): host", "Created, Running, Ended, Archived # Event settings parameters x_label_min = models.CharField(max_length=200, default=\"\", null=True)", "from django.contrib.auth.models import User # Create your models here. class Event(models.Model): host =", "# Create your models here. class Event(models.Model): host = models.ForeignKey(User, on_delete=models.SET_NULL, null=True) name", "class Question(models.Model): event = models.ForeignKey(Event, on_delete=models.CASCADE) description = models.CharField(max_length=200) class AnswerChoice(models.Model): question =", "event = models.ForeignKey(Event, on_delete=models.CASCADE) description = models.CharField(max_length=200) class AnswerChoice(models.Model): question = models.ForeignKey(Question, on_delete=models.CASCADE)", "= models.CharField(max_length=200) class AnswerChoice(models.Model): question = models.ForeignKey(Question, on_delete=models.CASCADE) description = models.CharField(max_length=200) value =", "x_label_max = models.CharField(max_length=200, default=\"\", null=True) class Question(models.Model): event = models.ForeignKey(Event, on_delete=models.CASCADE) description =", "models.CharField(max_length=200, default=\"\", null=True) x_label_max = models.CharField(max_length=200, default=\"\", null=True) class Question(models.Model): event = models.ForeignKey(Event,", "your models here. class Event(models.Model): host = models.ForeignKey(User, on_delete=models.SET_NULL, null=True) name = models.CharField(max_length=200)", "models.CharField(max_length=200, default=\"\", null=True) class Question(models.Model): event = models.ForeignKey(Event, on_delete=models.CASCADE) description = models.CharField(max_length=200) class", "import models from django.contrib.auth.models import User # Create your models here. 
class Event(models.Model):", "models.ForeignKey(User, on_delete=models.SET_NULL, null=True) name = models.CharField(max_length=200) status = models.CharField(max_length=50) # Choices: Created, Running,", "models.ForeignKey(Event, on_delete=models.CASCADE) description = models.CharField(max_length=200) class AnswerChoice(models.Model): question = models.ForeignKey(Question, on_delete=models.CASCADE) description =", "# Event settings parameters x_label_min = models.CharField(max_length=200, default=\"\", null=True) x_label_max = models.CharField(max_length=200, default=\"\",", "= models.CharField(max_length=200, default=\"\", null=True) class Question(models.Model): event = models.ForeignKey(Event, on_delete=models.CASCADE) description = models.CharField(max_length=200)", "Archived # Event settings parameters x_label_min = models.CharField(max_length=200, default=\"\", null=True) x_label_max = models.CharField(max_length=200,", "= models.ForeignKey(User, on_delete=models.SET_NULL, null=True) name = models.CharField(max_length=200) status = models.CharField(max_length=50) # Choices: Created,", "null=True) x_label_max = models.CharField(max_length=200, default=\"\", null=True) class Question(models.Model): event = models.ForeignKey(Event, on_delete=models.CASCADE) description", "from django.db import models from django.contrib.auth.models import User # Create your models here.", "x_label_min = models.CharField(max_length=200, default=\"\", null=True) x_label_max = models.CharField(max_length=200, default=\"\", null=True) class Question(models.Model): event", "Create your models here. class Event(models.Model): host = models.ForeignKey(User, on_delete=models.SET_NULL, null=True) name =", "Ended, Archived # Event settings parameters x_label_min = models.CharField(max_length=200, default=\"\", null=True) x_label_max =", "django.db import models from django.contrib.auth.models import User # Create your models here. class", "parameters x_label_min = models.CharField(max_length=200, default=\"\", null=True) x_label_max = models.CharField(max_length=200, default=\"\", null=True) class Question(models.Model):", "on_delete=models.CASCADE) description = models.CharField(max_length=200) class AnswerChoice(models.Model): question = models.ForeignKey(Question, on_delete=models.CASCADE) description = models.CharField(max_length=200)", "= models.CharField(max_length=50) # Choices: Created, Running, Ended, Archived # Event settings parameters x_label_min", "models.CharField(max_length=200) class AnswerChoice(models.Model): question = models.ForeignKey(Question, on_delete=models.CASCADE) description = models.CharField(max_length=200) value = models.IntegerField()", "django.contrib.auth.models import User # Create your models here. class Event(models.Model): host = models.ForeignKey(User,", "on_delete=models.SET_NULL, null=True) name = models.CharField(max_length=200) status = models.CharField(max_length=50) # Choices: Created, Running, Ended,", "Question(models.Model): event = models.ForeignKey(Event, on_delete=models.CASCADE) description = models.CharField(max_length=200) class AnswerChoice(models.Model): question = models.ForeignKey(Question,", "User # Create your models here. class Event(models.Model): host = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)", "= models.CharField(max_length=200, default=\"\", null=True) x_label_max = models.CharField(max_length=200, default=\"\", null=True) class Question(models.Model): event =", "import User # Create your models here. 
class Event(models.Model): host = models.ForeignKey(User, on_delete=models.SET_NULL,", "= models.ForeignKey(Event, on_delete=models.CASCADE) description = models.CharField(max_length=200) class AnswerChoice(models.Model): question = models.ForeignKey(Question, on_delete=models.CASCADE) description", "default=\"\", null=True) class Question(models.Model): event = models.ForeignKey(Event, on_delete=models.CASCADE) description = models.CharField(max_length=200) class AnswerChoice(models.Model):", "settings parameters x_label_min = models.CharField(max_length=200, default=\"\", null=True) x_label_max = models.CharField(max_length=200, default=\"\", null=True) class", "Choices: Created, Running, Ended, Archived # Event settings parameters x_label_min = models.CharField(max_length=200, default=\"\",", "models here. class Event(models.Model): host = models.ForeignKey(User, on_delete=models.SET_NULL, null=True) name = models.CharField(max_length=200) status", "status = models.CharField(max_length=50) # Choices: Created, Running, Ended, Archived # Event settings parameters", "Event(models.Model): host = models.ForeignKey(User, on_delete=models.SET_NULL, null=True) name = models.CharField(max_length=200) status = models.CharField(max_length=50) #", "# Choices: Created, Running, Ended, Archived # Event settings parameters x_label_min = models.CharField(max_length=200,", "Event settings parameters x_label_min = models.CharField(max_length=200, default=\"\", null=True) x_label_max = models.CharField(max_length=200, default=\"\", null=True)", "description = models.CharField(max_length=200) class AnswerChoice(models.Model): question = models.ForeignKey(Question, on_delete=models.CASCADE) description = models.CharField(max_length=200) value", "host = models.ForeignKey(User, on_delete=models.SET_NULL, null=True) name = models.CharField(max_length=200) status = models.CharField(max_length=50) # Choices:", "null=True) class Question(models.Model): event = models.ForeignKey(Event, on_delete=models.CASCADE) description = models.CharField(max_length=200) class AnswerChoice(models.Model): question" ]
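This row's fragments are windows over a Django models.py (the first fragment names the Sindhuja-SRL/back-end repository). Put back together, the models visible in this row are the following; any fields of AnswerChoice beyond value do not appear here and are therefore not shown.

from django.db import models
from django.contrib.auth.models import User

# Create your models here.
class Event(models.Model):
    host = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
    name = models.CharField(max_length=200)
    status = models.CharField(max_length=50)  # Choices: Created, Running, Ended, Archived
    # Event settings parameters
    x_label_min = models.CharField(max_length=200, default="", null=True)
    x_label_max = models.CharField(max_length=200, default="", null=True)

class Question(models.Model):
    event = models.ForeignKey(Event, on_delete=models.CASCADE)
    description = models.CharField(max_length=200)

class AnswerChoice(models.Model):
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    description = models.CharField(max_length=200)
    value = models.IntegerField()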
[ "= cuda.grid(2) if i < C.shape[0] and j < C.shape[1]: tmp = 0.", "j] = tmp import time start=time.time() A, B, C np matmul(A, B, C)", "B, C): \"\"\"Perform square matrix multiplication of C = A * B \"\"\"", "0. for k in range(A.shape[1]): tmp += A[i, k] * B[k, j] C[i,", "numpy as np from numba import cuda, float32 import time @cuda.jit def matmul(A,", "import cuda, float32 import time @cuda.jit def matmul(A, B, C): \"\"\"Perform square matrix", "B, C np matmul(A, B, C) end=time.time() print(f\"Runtime of the program is {end", "of C = A * B \"\"\" i, j = cuda.grid(2) if i", "and j < C.shape[1]: tmp = 0. for k in range(A.shape[1]): tmp +=", "C[i, j] = tmp import time start=time.time() A, B, C np matmul(A, B,", "tmp import time start=time.time() A, B, C np matmul(A, B, C) end=time.time() print(f\"Runtime", "float32 import time @cuda.jit def matmul(A, B, C): \"\"\"Perform square matrix multiplication of", "matmul(A, B, C): \"\"\"Perform square matrix multiplication of C = A * B", "matrix multiplication of C = A * B \"\"\" i, j = cuda.grid(2)", "< C.shape[0] and j < C.shape[1]: tmp = 0. for k in range(A.shape[1]):", "A[i, k] * B[k, j] C[i, j] = tmp import time start=time.time() A,", "B[k, j] C[i, j] = tmp import time start=time.time() A, B, C np", "j < C.shape[1]: tmp = 0. for k in range(A.shape[1]): tmp += A[i,", "import time start=time.time() A, B, C np matmul(A, B, C) end=time.time() print(f\"Runtime of", "matmul(A, B, C) end=time.time() print(f\"Runtime of the program is {end - start} s\")", "np matmul(A, B, C) end=time.time() print(f\"Runtime of the program is {end - start}", "for k in range(A.shape[1]): tmp += A[i, k] * B[k, j] C[i, j]", "* B[k, j] C[i, j] = tmp import time start=time.time() A, B, C", "np from numba import cuda, float32 import time @cuda.jit def matmul(A, B, C):", "i, j = cuda.grid(2) if i < C.shape[0] and j < C.shape[1]: tmp", "i < C.shape[0] and j < C.shape[1]: tmp = 0. for k in", "= 0. for k in range(A.shape[1]): tmp += A[i, k] * B[k, j]", "as np from numba import cuda, float32 import time @cuda.jit def matmul(A, B,", "j = cuda.grid(2) if i < C.shape[0] and j < C.shape[1]: tmp =", "C np matmul(A, B, C) end=time.time() print(f\"Runtime of the program is {end -", "B \"\"\" i, j = cuda.grid(2) if i < C.shape[0] and j <", "start=time.time() A, B, C np matmul(A, B, C) end=time.time() print(f\"Runtime of the program", "C.shape[0] and j < C.shape[1]: tmp = 0. for k in range(A.shape[1]): tmp", "tmp = 0. for k in range(A.shape[1]): tmp += A[i, k] * B[k,", "A * B \"\"\" i, j = cuda.grid(2) if i < C.shape[0] and", "< C.shape[1]: tmp = 0. 
for k in range(A.shape[1]): tmp += A[i, k]", "@cuda.jit def matmul(A, B, C): \"\"\"Perform square matrix multiplication of C = A", "def matmul(A, B, C): \"\"\"Perform square matrix multiplication of C = A *", "C): \"\"\"Perform square matrix multiplication of C = A * B \"\"\" i,", "square matrix multiplication of C = A * B \"\"\" i, j =", "\"\"\" i, j = cuda.grid(2) if i < C.shape[0] and j < C.shape[1]:", "tmp += A[i, k] * B[k, j] C[i, j] = tmp import time", "time start=time.time() A, B, C np matmul(A, B, C) end=time.time() print(f\"Runtime of the", "import numpy as np from numba import cuda, float32 import time @cuda.jit def", "multiplication of C = A * B \"\"\" i, j = cuda.grid(2) if", "A, B, C np matmul(A, B, C) end=time.time() print(f\"Runtime of the program is", "C = A * B \"\"\" i, j = cuda.grid(2) if i <", "from numba import cuda, float32 import time @cuda.jit def matmul(A, B, C): \"\"\"Perform", "cuda, float32 import time @cuda.jit def matmul(A, B, C): \"\"\"Perform square matrix multiplication", "+= A[i, k] * B[k, j] C[i, j] = tmp import time start=time.time()", "cuda.grid(2) if i < C.shape[0] and j < C.shape[1]: tmp = 0. for", "import time @cuda.jit def matmul(A, B, C): \"\"\"Perform square matrix multiplication of C", "k] * B[k, j] C[i, j] = tmp import time start=time.time() A, B,", "time @cuda.jit def matmul(A, B, C): \"\"\"Perform square matrix multiplication of C =", "range(A.shape[1]): tmp += A[i, k] * B[k, j] C[i, j] = tmp import", "= tmp import time start=time.time() A, B, C np matmul(A, B, C) end=time.time()", "in range(A.shape[1]): tmp += A[i, k] * B[k, j] C[i, j] = tmp", "\"\"\"Perform square matrix multiplication of C = A * B \"\"\" i, j", "C.shape[1]: tmp = 0. for k in range(A.shape[1]): tmp += A[i, k] *", "j] C[i, j] = tmp import time start=time.time() A, B, C np matmul(A,", "* B \"\"\" i, j = cuda.grid(2) if i < C.shape[0] and j", "if i < C.shape[0] and j < C.shape[1]: tmp = 0. for k", "= A * B \"\"\" i, j = cuda.grid(2) if i < C.shape[0]", "k in range(A.shape[1]): tmp += A[i, k] * B[k, j] C[i, j] =", "numba import cuda, float32 import time @cuda.jit def matmul(A, B, C): \"\"\"Perform square" ]
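The fragments above spell out a Numba CUDA matrix-multiplication kernel plus a wall-clock timing wrapper. The kernel below is a direct re-assembly; the driver underneath is only a sketch, since the row does not show the array sizes or the launch configuration: n, the random inputs, and the 16x16 block shape are assumptions, and a CUDA-capable GPU is required.

import time
import numpy as np
from numba import cuda  # the original also imports float32

@cuda.jit
def matmul(A, B, C):
    """Perform square matrix multiplication of C = A * B."""
    i, j = cuda.grid(2)                      # global 2-D thread indices
    if i < C.shape[0] and j < C.shape[1]:    # guard against out-of-range threads
        tmp = 0.
        for k in range(A.shape[1]):
            tmp += A[i, k] * B[k, j]
        C[i, j] = tmp

if __name__ == "__main__":
    n = 1024                                  # assumed size; not visible in the row
    A = np.random.rand(n, n).astype(np.float32)
    B = np.random.rand(n, n).astype(np.float32)
    C = np.zeros((n, n), dtype=np.float32)
    threadsperblock = (16, 16)                # assumed launch configuration
    blockspergrid = ((n + 15) // 16, (n + 15) // 16)
    start = time.time()
    # Numba transfers the NumPy arrays to the device and copies C back afterwards.
    matmul[blockspergrid, threadsperblock](A, B, C)
    cuda.synchronize()
    end = time.time()
    print(f"Runtime of the program is {end - start} s")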
[ "datetime # Console arguments parser = argparse.ArgumentParser( description=\"Estimate number of cars at given", "Sample: \" + \"2019-12-01:2020-02-28\", required=True) parser.add_argument(\"-v\", \"--verbose\", help=\"Output more debug information\", action='store_true') parser.add_argument(\"--no_store\",", "exit() pt[0] = float(pt[0]) pt[1] = float(pt[1]) converted_poly.append(pt) prepared_poly = [ [converted_poly[0][1], converted_poly[0][0]],", "create_workflow(self, name, description): return self._post_query(\"/projects/\" + self.project_id + \"/workflows\", data={ 'name': name, 'description':", "+ job_id).json() def get_job_output(self, job_id): return self._get_query(\"/projects/\" + self.project_id + \"/jobs/\" + job_id", "+ url, auth=self.bearer_auth) def get_blocks(self): return self._get_query(\"/blocks\").json() def create_workflow(self, name, description): return self._post_query(\"/projects/\"", "for automatic search. data_block_name = 'oneatlas-pleiades-aoiclipped' processing_block_name = 'sm_veh-detection' # API INTEGRATION #", "self.project_id + \"/jobs/\" + job_id + \"/tasks\").json() def get_task_signed_url(self, job_id, task_id): return self._get_query(", "argparse import datetime # Console arguments parser = argparse.ArgumentParser( description=\"Estimate number of cars", "api_key): self.base_url = base_url self.project_id = project_id self.api_key = api_key self.bearer_auth = BearerAuth(project_id,", "processing_block_name: {}, data_block_name: { \"ids\": None, \"time\": time_period, \"limit\": 1, \"intersects\": { \"type\":", "with this name. pass output_location = os.path.join(target_folder, target_name) content = requests.get(url).content with open(output_location,", "job_params) # await job completion for up to ~tries * 5 seconds. Defaults", "to ~25 minutes. def await_job_completion(job, tries=300): try_counter = 0 if DEBUG_LOGGING: print(\"[+] Awaiting", "url, auth=self.bearer_auth, json=data) def _delete_query(self, url): return requests.delete(self.base_url + url, auth=self.bearer_auth) def get_blocks(self):", "TIME_LIMITS.append( tf[0]+\"T00:00:00+00:00/\"+tf[1]+\"T23:59:59+00:00\" ) # Random name for workflow to help determine which one", "as source satellite data # processing_block_name will be used for vehicle detection def", "workflow_id) def get_job(self, job_id): return self._get_query(\"/projects/\" + self.project_id + \"/jobs/\" + job_id).json() def", "print(\"[+] Running test query first\") job = run_job(workflow_id, polygon, time_period, True) is_success =", "api_client = ApiClient(BASE_URL, PROJECT_ID, API_KEY) # Specific method which will extract target blocks", "chain of blocks, one by one, and create it. 
def set_workflow_tasks(self, workflow_id, task_list):", "\" + workflow_id) polygon_num = 0 for polygon in POLYGONS: polygon_num += 1", "exit() converted_poly = [] for point in poly: pt = point.split(\",\") if len(pt)", "client_secret def _get_token(self): return requests.post(BASE_URL + \"/oauth/token\", auth=(self.client_id, self.client_secret), headers={'Content-Type': 'application/x-www-form-urlencoded'}, data={'grant_type': 'client_credentials'}", "\" + coordinate_pair) exit() pt[0] = float(pt[0]) pt[1] = float(pt[1]) converted_poly.append(pt) prepared_poly =", "auth=(self.client_id, self.client_secret), headers={'Content-Type': 'application/x-www-form-urlencoded'}, data={'grant_type': 'client_credentials'} ).json()['access_token'] def __call__(self, r): if time.time() -", "self.token return r # API Client abstraction class ApiClient(object): def __init__(self, base_url, project_id,", "+ url, auth=self.bearer_auth) def _post_query(self, url, data): return requests.post(self.base_url + url, auth=self.bearer_auth, json=data)", "break return ret # Create workflow and initialize the tasks based on target", "self.api_key = api_key self.bearer_auth = BearerAuth(project_id, api_key) def _get_query(self, url): return requests.get(self.base_url +", "2: print(\"Bad timeframe: \"+timeframe) exit() if not _validate_date(tf[0]): print(\"Bad date: \"+tf[0]) exit() if", "print(\"[+] Creating workflow...\") workflow_id = initialize_workflow() if DEBUG_LOGGING: print(\"[+] Created workflow: \" +", "amount of external dependencies. class BearerAuth(requests.auth.AuthBase): def __init__(self, client_id, client_secret, timeout=60): self.ts =", "\"limit\": 1, \"intersects\": { \"type\": \"Polygon\", \"coordinates\": [ polygon ] }, \"zoom_level\": 18,", "date: \" + test_job_output[\"features\"][0][\"properties\"][\"acquisitionDate\"]) print(\"[+] Estimated credits: \" + str(test_job_output[\"features\"][0][\"estimatedCredits\"])) print(\"[+] Now running", "tasks: task_num += 1 url = api_client.get_task_signed_url(job['data']['id'], task['id'])['data']['url'] api_client.dump_task_url(url, folder, \"polygon_\" + str(polygon_num)", "target_folder, target_name): try: os.mkdir(target_folder) except: # We already have a preexisting directory with", "Sample: \" + \"37.327035,-121.941054:37.323451,-121.940485\", required=True) parser.add_argument(\"-t\", \"--timeframes\", nargs=\"+\", help=\"List of latitude-longitude pairs, each", "We already have a preexisting directory with this name. pass output_location = os.path.join(target_folder,", "\"--project_id\", help=\"Project ID from UP42 Console\", type=str, required=True) parser.add_argument(\"-k\", \"--api_key\", help=\"API Key from", "- if not is_success: if DEBUG_LOGGING: print(\"[-] Job failed\") return \"-\" if DEBUG_LOGGING:", "coordinate_pair in args.coordinates: poly = coordinate_pair.split(\":\") if len(poly) != 2: print(\"Bad coordinate pair:", "in task_list: tasks.append({ \"name\": task[\"name\"], \"parentName\": previous_name, \"blockId\": task[\"id\"] }) previous_name = task[\"name\"]", "content = requests.get(url).content with open(output_location, \"wb\") as f: f.write(content) api_client = ApiClient(BASE_URL, PROJECT_ID,", "time_limit in TIME_LIMITS: time_limit_num += 1 print( \"Polygon \" + str(polygon_num) + \"", "seconds. Defaults to ~25 minutes. def await_job_completion(job, tries=300): try_counter = 0 if DEBUG_LOGGING:", "ret # Create workflow and initialize the tasks based on target parameters. 
def", "default=\"covid19_car_estimate_\") parser.add_argument(\"--no_cleanup\", help=\"Keep workflow in UP42 project after script is done\", action='store_false') args", "We can't get this time period. Output a - if not is_success: if", "in workflow. # data_block_name will be used as source satellite data # processing_block_name", "of blocks, one by one, and create it. def set_workflow_tasks(self, workflow_id, task_list): tasks", "argparse.ArgumentParser( description=\"Estimate number of cars at given rectangles (latitude-longitude) on given timeframes\" )", "+ url, auth=self.bearer_auth, json=data) def _delete_query(self, url): return requests.delete(self.base_url + url, auth=self.bearer_auth) def", "if DEBUG_LOGGING: print(\"[+] Creating workflow...\") workflow_id = initialize_workflow() if DEBUG_LOGGING: print(\"[+] Created workflow:", "information\", action='store_true') parser.add_argument(\"--no_store\", help=\"Disables saving of raw archives from UP42\", action='store_false') parser.add_argument(\"--workflow_name_prefix\", help=\"Workflow", "def delete_workflow(self, workflow_id): return self._delete_query(\"/projects/\" + self.project_id + \"/workflows/\" + workflow_id) def get_job(self,", "workflow_id, polygon, time_period): if DEBUG_LOGGING: print(\"[+] Running test query first\") job = run_job(workflow_id,", "def _delete_query(self, url): return requests.delete(self.base_url + url, auth=self.bearer_auth) def get_blocks(self): return self._get_query(\"/blocks\").json() def", "api_key) def _get_query(self, url): return requests.get(self.base_url + url, auth=self.bearer_auth) def _post_query(self, url, data):", "if DEBUG_LOGGING: print(\"[+] Awaiting job completion\") while try_counter < tries: job_status = api_client.get_job(job['data']['id'])", "\" + str(test_job_output[\"features\"][0][\"estimatedCredits\"])) print(\"[+] Now running actual job\") job = run_job(workflow_id, polygon, time_period,", "polygon in POLYGONS: polygon_num += 1 time_limit_num = 0 for time_limit in TIME_LIMITS:", "+ \"/tasks/\" + task_id + \"/downloads/results\").json() def dump_task_url(self, url, target_folder, target_name): try: os.mkdir(target_folder)", "requests import random import time import calendar import os import argparse import datetime", "CLEANUP_WORKFLOW = args.no_cleanup # Process input polygon - validate and convert into UP42", "exit() if not _validate_date(tf[0]): print(\"Bad date: \"+tf[0]) exit() if not _validate_date(tf[1]): print(\"Bad date:", "automatic search. 
data_block_name = 'oneatlas-pleiades-aoiclipped' processing_block_name = 'sm_veh-detection' # API INTEGRATION # #", "name and templated parameters def run_job(workflow_id, polygon, time_period, is_dry): job_params = { processing_block_name:", "1 time.sleep(5) return False # Process 1 polygon/time period pair def get_one_polygon(polygon_num, time_num,", "run_job(self, workflow_id, name, is_dry, job_parameters): data = job_parameters.copy() if is_dry: data['config'] = {\"mode\":", "= api_client.get_job(job['data']['id']) extracted_status = job_status['data']['status'] if extracted_status == 'FAILED': return False if extracted_status", "random import time import calendar import os import argparse import datetime # Console", "date: \"+tf[0]) exit() if not _validate_date(tf[1]): print(\"Bad date: \"+tf[1]) exit() TIME_LIMITS.append( tf[0]+\"T00:00:00+00:00/\"+tf[1]+\"T23:59:59+00:00\" )", "cars at given rectangles (latitude-longitude) on given timeframes\" ) parser.add_argument(\"-p\", \"--project_id\", help=\"Project ID", "and convert into UP42 input format. POLYGONS = [] for coordinate_pair in args.coordinates:", "api_client.get_blocks()[\"data\"] ret = [] for block in block_listing: if block[\"name\"] == data_block_name: ret.append(block)", "API_KEY = args.api_key SAVE_ALL_JOB_RESULTS = args.no_store BASE_URL = \"https://api.up42.com\" BASE_WORKFLOW_NAME = args.workflow_name_prefix CLEANUP_WORKFLOW", "\" + self.token return r # API Client abstraction class ApiClient(object): def __init__(self,", "randomized name and templated parameters def run_job(workflow_id, polygon, time_period, is_dry): job_params = {", "# processing_block_name will be used for vehicle detection def extract_target_blocks(): block_listing = api_client.get_blocks()[\"data\"]", "import argparse import datetime # Console arguments parser = argparse.ArgumentParser( description=\"Estimate number of", "in UP42 project after script is done\", action='store_false') args = parser.parse_args() DEBUG_LOGGING =", "Defaults to ~25 minutes. def await_job_completion(job, tries=300): try_counter = 0 if DEBUG_LOGGING: print(\"[+]", "UP42 Console\", type=str, required=True) parser.add_argument(\"-k\", \"--api_key\", help=\"API Key from UP42 Console\", type=str, required=True)", "f.write(content) api_client = ApiClient(BASE_URL, PROJECT_ID, API_KEY) # Specific method which will extract target", "0 for task in tasks: task_num += 1 url = api_client.get_task_signed_url(job['data']['id'], task['id'])['data']['url'] api_client.dump_task_url(url,", "task_num) + \".tar.gz\") return str(len(actual_output[\"features\"][0][\"properties\"][\"det_details\"])) if __name__ == '__main__': if DEBUG_LOGGING: print(\"[+] Creating", "not is_success: if DEBUG_LOGGING: print(\"[-] Job failed\") return \"-\" actual_output = api_client.get_job_output(job['data']['id']) if", "+ str(polygon_num) + \"_timestamp_\" + str(time_num) + \"_task_\" + str( task_num) + \".tar.gz\")", "workflow_id, name, is_dry, job_parameters): data = job_parameters.copy() if is_dry: data['config'] = {\"mode\": \"DRY_RUN\"}", "+ workflow_id + \"/tasks\", data=tasks).json() def delete_workflow(self, workflow_id): return self._delete_query(\"/projects/\" + self.project_id +", "block names for automatic search. data_block_name = 'oneatlas-pleiades-aoiclipped' processing_block_name = 'sm_veh-detection' # API", "# We can't get this time period. 
Output a - if not is_success:", "folder, \"polygon_\" + str(polygon_num) + \"_timestamp_\" + str(time_num) + \"_task_\" + str( task_num)", "= timeout self.client_id = client_id self.client_secret = client_secret def _get_token(self): return requests.post(BASE_URL +", "saving of raw archives from UP42\", action='store_false') parser.add_argument(\"--workflow_name_prefix\", help=\"Workflow name prefix to be", "after script is done\", action='store_false') args = parser.parse_args() DEBUG_LOGGING = args.verbose PROJECT_ID =", "timeframe in args.timeframes: tf = timeframe.split(\":\") if len(tf) != 2: print(\"Bad timeframe: \"+timeframe)", "folder name for all tarballs. current_timestamp = calendar.timegm(time.gmtime()) folder = \"raw_job_\" + str(current_timestamp)", "api_key self.bearer_auth = BearerAuth(project_id, api_key) def _get_query(self, url): return requests.get(self.base_url + url, auth=self.bearer_auth)", "fill a chain of blocks, one by one, and create it. def set_workflow_tasks(self,", "format. TIME_LIMITS = [] for timeframe in args.timeframes: tf = timeframe.split(\":\") if len(tf)", "job_id): return self._get_query(\"/projects/\" + self.project_id + \"/jobs/\" + job_id).json() def get_job_output(self, job_id): return", "= float(pt[1]) converted_poly.append(pt) prepared_poly = [ [converted_poly[0][1], converted_poly[0][0]], [converted_poly[1][1], converted_poly[0][0]], [converted_poly[1][1], converted_poly[1][0]], [converted_poly[0][1],", "\"/tasks/\" + task_id + \"/downloads/results\").json() def dump_task_url(self, url, target_folder, target_name): try: os.mkdir(target_folder) except:", "input format. POLYGONS = [] for coordinate_pair in args.coordinates: poly = coordinate_pair.split(\":\") if", "\"--coordinates\", nargs=\"+\", help=\"List of latitude-longitude pairs, each representing 2 corners of a square.", "done\", action='store_false') args = parser.parse_args() DEBUG_LOGGING = args.verbose PROJECT_ID = args.project_id API_KEY =", "extract_target_blocks() workflow = api_client.create_workflow(randomized_name, 'Temp workflow for covid19 script') api_client.set_workflow_tasks(workflow['data']['id'], targets) return workflow['data']['id']", "up to ~tries * 5 seconds. Defaults to ~25 minutes. def await_job_completion(job, tries=300):", "timeframes\" ) parser.add_argument(\"-p\", \"--project_id\", help=\"Project ID from UP42 Console\", type=str, required=True) parser.add_argument(\"-k\", \"--api_key\",", "Running test query first\") job = run_job(workflow_id, polygon, time_period, True) is_success = await_job_completion(job)", "for polygon in POLYGONS: polygon_num += 1 time_limit_num = 0 for time_limit in", "class ApiClient(object): def __init__(self, base_url, project_id, api_key): self.base_url = base_url self.project_id = project_id", "This will fill a chain of blocks, one by one, and create it.", "requests.put(self.base_url + url, auth=self.bearer_auth, json=data) def _delete_query(self, url): return requests.delete(self.base_url + url, auth=self.bearer_auth)", "def run_job(workflow_id, polygon, time_period, is_dry): job_params = { processing_block_name: {}, data_block_name: { \"ids\":", "time_period, is_dry): job_params = { processing_block_name: {}, data_block_name: { \"ids\": None, \"time\": time_period,", "may be useful if the user wants to manually download or view detection", "passed to UP42 console. 
Reassembled, the overlapping fragments correspond to the following script (jagfu/Qanalysis):

#!/usr/bin/python3
import requests
import random
import time
import calendar
import os
import argparse
import datetime

# Console arguments
parser = argparse.ArgumentParser(
    description="Estimate number of cars at given rectangles (latitude-longitude) on given timeframes"
)
parser.add_argument("-p", "--project_id", help="Project ID from UP42 Console", type=str, required=True)
parser.add_argument("-k", "--api_key", help="API Key from UP42 Console", type=str, required=True)
parser.add_argument("-c", "--coordinates", nargs="+",
                    help="List of latitude-longitude pairs, each representing 2 corners of a square. "
                         "Sample: 37.327035,-121.941054:37.323451,-121.940485",
                    required=True)
parser.add_argument("-t", "--timeframes", nargs="+",
                    help="List of timeframes, each as a start:end pair of YYYY-MM-DD dates. "
                         "Sample: 2019-12-01:2020-02-28",
                    required=True)
parser.add_argument("-v", "--verbose", help="Output more debug information", action='store_true')
parser.add_argument("--no_store", help="Disables saving of raw archives from UP42", action='store_false')
parser.add_argument("--workflow_name_prefix",
                    help="Workflow name prefix to be passed to UP42 console. Default: covid19_car_estimate_",
                    default="covid19_car_estimate_")
parser.add_argument("--no_cleanup", help="Keep workflow in UP42 project after script is done", action='store_false')
args = parser.parse_args()

DEBUG_LOGGING = args.verbose
PROJECT_ID = args.project_id
API_KEY = args.api_key
SAVE_ALL_JOB_RESULTS = args.no_store
BASE_URL = "https://api.up42.com"
BASE_WORKFLOW_NAME = args.workflow_name_prefix
CLEANUP_WORKFLOW = args.no_cleanup

# Process input polygons - validate and convert into UP42 input format.
POLYGONS = []
for coordinate_pair in args.coordinates:
    poly = coordinate_pair.split(":")
    if len(poly) != 2:
        print("Bad coordinate pair: " + coordinate_pair)
        exit()
    converted_poly = []
    for point in poly:
        pt = point.split(",")
        if len(pt) != 2:
            print("Bad coordinate pair: " + coordinate_pair)
            exit()
        pt[0] = float(pt[0])
        pt[1] = float(pt[1])
        converted_poly.append(pt)
    prepared_poly = [
        [converted_poly[0][1], converted_poly[0][0]],
        [converted_poly[1][1], converted_poly[0][0]],
        [converted_poly[1][1], converted_poly[1][0]],
        [converted_poly[0][1], converted_poly[1][0]],
        [converted_poly[0][1], converted_poly[0][0]]
    ]
    POLYGONS.append(prepared_poly)

# Date validation helper.
def _validate_date(date_text):
    try:
        datetime.datetime.strptime(date_text, '%Y-%m-%d')
        return True
    except ValueError:
        return False

# Process input timeframes - validate and convert into UP42 input format.
TIME_LIMITS = []
for timeframe in args.timeframes:
    tf = timeframe.split(":")
    if len(tf) != 2:
        print("Bad timeframe: " + timeframe)
        exit()
    if not _validate_date(tf[0]):
        print("Bad date: " + tf[0])
        exit()
    if not _validate_date(tf[1]):
        print("Bad date: " + tf[1])
        exit()
    TIME_LIMITS.append(
        tf[0] + "T00:00:00+00:00/" + tf[1] + "T23:59:59+00:00"
    )

# Random name for the workflow to help determine which one it is in the UI later.
randomized_name = BASE_WORKFLOW_NAME + (''.join(random.choice("0123456789abcdef") for i in range(16)))

# Storage folder name for all tarballs.
current_timestamp = calendar.timegm(time.gmtime())
folder = "raw_job_" + str(current_timestamp)

# Constant block names for automatic search.
data_block_name = 'oneatlas-pleiades-aoiclipped'
processing_block_name = 'sm_veh-detection'

# API INTEGRATION #
# Simple bearer auth implementation to reduce the amount of external dependencies.
class BearerAuth(requests.auth.AuthBase):
    def __init__(self, client_id, client_secret, timeout=60):
        self.ts = time.time() - timeout * 2
        self.timeout = timeout
        self.client_id = client_id
        self.client_secret = client_secret

    def _get_token(self):
        return requests.post(BASE_URL + "/oauth/token",
                             auth=(self.client_id, self.client_secret),
                             headers={'Content-Type': 'application/x-www-form-urlencoded'},
                             data={'grant_type': 'client_credentials'}
                             ).json()['access_token']

    def __call__(self, r):
        if time.time() - self.ts > self.timeout:
            self.token = self._get_token()
            self.ts = time.time()
        r.headers["authorization"] = "Bearer " + self.token
        return r

# API Client abstraction
class ApiClient(object):
    def __init__(self, base_url, project_id, api_key):
        self.base_url = base_url
        self.project_id = project_id
        self.api_key = api_key
        self.bearer_auth = BearerAuth(project_id, api_key)

    def _get_query(self, url):
        return requests.get(self.base_url + url, auth=self.bearer_auth)

    def _post_query(self, url, data):
        return requests.post(self.base_url + url, auth=self.bearer_auth, json=data)

    def _put_query(self, url, data):
        return requests.put(self.base_url + url, auth=self.bearer_auth, json=data)

    def _delete_query(self, url):
        return requests.delete(self.base_url + url, auth=self.bearer_auth)

    def get_blocks(self):
        return self._get_query("/blocks").json()

    def create_workflow(self, name, description):
        return self._post_query("/projects/" + self.project_id + "/workflows", data={
            'name': name,
            'description': description
        }).json()

    # This will fill a chain of blocks, one by one, and create it.
    def set_workflow_tasks(self, workflow_id, task_list):
        tasks = []
        previous_name = None
        for task in task_list:
            tasks.append({
                "name": task["name"],
                "parentName": previous_name,
                "blockId": task["id"]
            })
            previous_name = task["name"]
        return self._post_query("/projects/" + self.project_id + "/workflows/" + workflow_id + "/tasks",
                                data=tasks).json()

    def delete_workflow(self, workflow_id):
        return self._delete_query("/projects/" + self.project_id + "/workflows/" + workflow_id)

    def get_job(self, job_id):
        return self._get_query("/projects/" + self.project_id + "/jobs/" + job_id).json()

    def get_job_output(self, job_id):
        return self._get_query("/projects/" + self.project_id + "/jobs/" + job_id + "/outputs/data-json").json()

    def run_job(self, workflow_id, name, is_dry, job_parameters):
        data = job_parameters.copy()
        if is_dry:
            data['config'] = {"mode": "DRY_RUN"}
        return self._post_query("/projects/" + self.project_id + "/workflows/" + workflow_id + "/jobs?name=" + name,
                                data).json()

    def get_job_tasks(self, job_id):
        return self._get_query("/projects/" + self.project_id + "/jobs/" + job_id + "/tasks").json()

    def get_task_signed_url(self, job_id, task_id):
        return self._get_query(
            "/projects/" + self.project_id + "/jobs/" + job_id + "/tasks/" + task_id + "/downloads/results").json()

    def dump_task_url(self, url, target_folder, target_name):
        try:
            os.mkdir(target_folder)
        except:
            # We already have a preexisting directory with this name.
            pass
        output_location = os.path.join(target_folder, target_name)
        content = requests.get(url).content
        with open(output_location, "wb") as f:
            f.write(content)

api_client = ApiClient(BASE_URL, PROJECT_ID, API_KEY)

# Specific method which will extract target blocks that will be used in the workflow.
# data_block_name will be used as the source satellite data
# processing_block_name will be used for vehicle detection
def extract_target_blocks():
    block_listing = api_client.get_blocks()["data"]
    ret = []
    for block in block_listing:
        if block["name"] == data_block_name:
            ret.append(block)
            break
    for block in block_listing:
        if block["name"] == processing_block_name:
            ret.append(block)
            break
    return ret

# Create workflow and initialize the tasks based on target parameters.
def initialize_workflow():
    targets = extract_target_blocks()
    workflow = api_client.create_workflow(randomized_name, 'Temp workflow for covid19 script')
    api_client.set_workflow_tasks(workflow['data']['id'], targets)
    return workflow['data']['id']

# Run a job with a randomized name and templated parameters
def run_job(workflow_id, polygon, time_period, is_dry):
    job_params = {
        processing_block_name: {},
        data_block_name: {
            "ids": None,
            "time": time_period,
            "limit": 1,
            "intersects": {
                "type": "Polygon",
                "coordinates": [
                    polygon
                ]
            },
            "zoom_level": 18,
            "time_series": None,
            "max_cloud_cover": 100,
            "panchromatic_band": False
        }
    }
    job_name = randomized_name + "_job_" + (''.join(random.choice("0123456789abcdef") for i in range(16)))
    return api_client.run_job(workflow_id, job_name, is_dry, job_params)

# Await job completion for up to ~tries * 5 seconds. Defaults to ~25 minutes.
def await_job_completion(job, tries=300):
    try_counter = 0
    if DEBUG_LOGGING:
        print("[+] Awaiting job completion")
    while try_counter < tries:
        job_status = api_client.get_job(job['data']['id'])
        extracted_status = job_status['data']['status']
        if extracted_status == 'FAILED':
            return False
        if extracted_status == 'SUCCEEDED':
            return True
        try_counter += 1
        time.sleep(5)
    return False

# Process 1 polygon/time period pair
def get_one_polygon(polygon_num, time_num, workflow_id, polygon, time_period):
    if DEBUG_LOGGING:
        print("[+] Running test query first")
    job = run_job(workflow_id, polygon, time_period, True)
    is_success = await_job_completion(job)
    # We can't get this time period. Output a "-".
    if not is_success:
        if DEBUG_LOGGING:
            print("[-] Job failed")
        return "-"
    if DEBUG_LOGGING:
        test_job_output = api_client.get_job_output(job['data']['id'])
        print("[+] Acquisition date: " + test_job_output["features"][0]["properties"]["acquisitionDate"])
        print("[+] Estimated credits: " + str(test_job_output["features"][0]["estimatedCredits"]))
        print("[+] Now running actual job")
    job = run_job(workflow_id, polygon, time_period, False)
    is_success = await_job_completion(job)
    if not is_success:
        if DEBUG_LOGGING:
            print("[-] Job failed")
        return "-"
    actual_output = api_client.get_job_output(job['data']['id'])
    if SAVE_ALL_JOB_RESULTS:
        if DEBUG_LOGGING:
            print("[+] Storing job results")
        tasks = api_client.get_job_tasks(job['data']['id'])['data']
        task_num = 0
        for task in tasks:
            task_num += 1
            url = api_client.get_task_signed_url(job['data']['id'], task['id'])['data']['url']
            api_client.dump_task_url(url, folder,
                                     "polygon_" + str(polygon_num) + "_timestamp_" + str(time_num) +
                                     "_task_" + str(task_num) + ".tar.gz")
    return str(len(actual_output["features"][0]["properties"]["det_details"]))

if __name__ == '__main__':
    if DEBUG_LOGGING:
        print("[+] Creating workflow...")
    workflow_id = initialize_workflow()
    if DEBUG_LOGGING:
        print("[+] Created workflow: " + workflow_id)
    polygon_num = 0
    for polygon in POLYGONS:
        polygon_num += 1
        time_limit_num = 0
        for time_limit in TIME_LIMITS:
            time_limit_num += 1
            print(
                "Polygon " + str(polygon_num) + " interval " + str(time_limit_num) + ": " +
                get_one_polygon(polygon_num, time_limit_num, workflow_id, polygon, time_limit))
    # Keeping the workflow (--no_cleanup) may be useful if the user wants to manually download
    # or view detection data later in the UI or API.
    if CLEANUP_WORKFLOW:
        if DEBUG_LOGGING:
            print("[+] Cleaning up workflow")
        api_client.delete_workflow(workflow_id)
Sample: \" + \"2019-12-01:2020-02-28\", required=True) parser.add_argument(\"-v\", \"--verbose\",", "from UP42 Console\", type=str, required=True) parser.add_argument(\"-c\", \"--coordinates\", nargs=\"+\", help=\"List of latitude-longitude pairs, each", "requests.post(self.base_url + url, auth=self.bearer_auth, json=data) def _put_query(self, url, data): return requests.put(self.base_url + url,", "abstraction class ApiClient(object): def __init__(self, base_url, project_id, api_key): self.base_url = base_url self.project_id =", "+ \"/jobs/\" + job_id + \"/tasks\").json() def get_task_signed_url(self, job_id, task_id): return self._get_query( \"/projects/\"", "Output a - if not is_success: if DEBUG_LOGGING: print(\"[-] Job failed\") return \"-\"", "== 'SUCCEEDED': return True try_counter += 1 time.sleep(5) return False # Process 1", "True except ValueError: return False # Process input timeframes - validate and convert", "self.project_id + \"/jobs/\" + job_id).json() def get_job_output(self, job_id): return self._get_query(\"/projects/\" + self.project_id +", "= requests.get(url).content with open(output_location, \"wb\") as f: f.write(content) api_client = ApiClient(BASE_URL, PROJECT_ID, API_KEY)", "get_task_signed_url(self, job_id, task_id): return self._get_query( \"/projects/\" + self.project_id + \"/jobs/\" + job_id +", "later in UI or API. if CLEANUP_WORKFLOW: if DEBUG_LOGGING: print(\"[+] Cleaning workflow up\")", "job completion\") while try_counter < tries: job_status = api_client.get_job(job['data']['id']) extracted_status = job_status['data']['status'] if", "return False # Process input timeframes - validate and convert into UP42 input", "\"parentName\": previous_name, \"blockId\": task[\"id\"] }) previous_name = task[\"name\"] return self._post_query(\"/projects/\" + self.project_id +", "previous_name, \"blockId\": task[\"id\"] }) previous_name = task[\"name\"] return self._post_query(\"/projects/\" + self.project_id + \"/workflows/\"", "def get_job_tasks(self, job_id): return self._get_query(\"/projects/\" + self.project_id + \"/jobs/\" + job_id + \"/tasks\").json()", "in block_listing: if block[\"name\"] == processing_block_name: ret.append(block) break return ret # Create workflow", "'FAILED': return False if extracted_status == 'SUCCEEDED': return True try_counter += 1 time.sleep(5)", "have a preexisting directory with this name. pass output_location = os.path.join(target_folder, target_name) content", "job completion for up to ~tries * 5 seconds. Defaults to ~25 minutes.", "implementation to reduce amount of external dependencies. class BearerAuth(requests.auth.AuthBase): def __init__(self, client_id, client_secret,", "helper. def _validate_date(date_text): try: datetime.datetime.strptime(date_text, '%Y-%m-%d') return True except ValueError: return False #", "'client_credentials'} ).json()['access_token'] def __call__(self, r): if time.time() - self.ts > self.timeout: self.token =", "job_status['data']['status'] if extracted_status == 'FAILED': return False if extracted_status == 'SUCCEEDED': return True", "BASE_WORKFLOW_NAME = args.workflow_name_prefix CLEANUP_WORKFLOW = args.no_cleanup # Process input polygon - validate and", "print( \"Polygon \" + str(polygon_num) + \" interval \" + str(time_limit_num) + \":", "- validate and convert into UP42 input format. 
TIME_LIMITS = [] for timeframe", "url, auth=self.bearer_auth) def _post_query(self, url, data): return requests.post(self.base_url + url, auth=self.bearer_auth, json=data) def", "# data_block_name will be used as source satellite data # processing_block_name will be", "help=\"Disables saving of raw archives from UP42\", action='store_false') parser.add_argument(\"--workflow_name_prefix\", help=\"Workflow name prefix to", "help=\"Keep workflow in UP42 project after script is done\", action='store_false') args = parser.parse_args()", "\"DRY_RUN\"} return self._post_query(\"/projects/\" + self.project_id + \"/workflows/\" + workflow_id + \"/jobs?name=\" + name,", "+ \" interval \" + str(time_limit_num) + \": \" + get_one_polygon(polygon_num, time_limit_num, workflow_id,", "tries=300): try_counter = 0 if DEBUG_LOGGING: print(\"[+] Awaiting job completion\") while try_counter <", "for task in task_list: tasks.append({ \"name\": task[\"name\"], \"parentName\": previous_name, \"blockId\": task[\"id\"] }) previous_name", "\"--verbose\", help=\"Output more debug information\", action='store_true') parser.add_argument(\"--no_store\", help=\"Disables saving of raw archives from", "names for automatic search. data_block_name = 'oneatlas-pleiades-aoiclipped' processing_block_name = 'sm_veh-detection' # API INTEGRATION", "\"/tasks\").json() def get_task_signed_url(self, job_id, task_id): return self._get_query( \"/projects/\" + self.project_id + \"/jobs/\" +", "if DEBUG_LOGGING: print(\"[-] Job failed\") return \"-\" actual_output = api_client.get_job_output(job['data']['id']) if SAVE_ALL_JOB_RESULTS: if", "Created workflow: \" + workflow_id) polygon_num = 0 for polygon in POLYGONS: polygon_num", "# API INTEGRATION # # Simple bearer auth implementation to reduce amount of", "data): return requests.put(self.base_url + url, auth=self.bearer_auth, json=data) def _delete_query(self, url): return requests.delete(self.base_url +", "in UI later. randomized_name = BASE_WORKFLOW_NAME + (''.join(random.choice(\"0123456789abcdef\") for i in range(16))) #", "job = run_job(workflow_id, polygon, time_period, True) is_success = await_job_completion(job) # We can't get", "\"_task_\" + str( task_num) + \".tar.gz\") return str(len(actual_output[\"features\"][0][\"properties\"][\"det_details\"])) if __name__ == '__main__': if", "self._post_query(\"/projects/\" + self.project_id + \"/workflows\", data={ 'name': name, 'description': description }).json() # This", "if len(tf) != 2: print(\"Bad timeframe: \"+timeframe) exit() if not _validate_date(tf[0]): print(\"Bad date:", "args.api_key SAVE_ALL_JOB_RESULTS = args.no_store BASE_URL = \"https://api.up42.com\" BASE_WORKFLOW_NAME = args.workflow_name_prefix CLEANUP_WORKFLOW = args.no_cleanup", "= args.no_store BASE_URL = \"https://api.up42.com\" BASE_WORKFLOW_NAME = args.workflow_name_prefix CLEANUP_WORKFLOW = args.no_cleanup # Process", "validate and convert into UP42 input format. 
POLYGONS = [] for coordinate_pair in", "description }).json() # This will fill a chain of blocks, one by one,", "print(\"[-] Job failed\") return \"-\" if DEBUG_LOGGING: test_job_output = api_client.get_job_output(job['data']['id']) print(\"[+] Acquisition date:", "block in block_listing: if block[\"name\"] == processing_block_name: ret.append(block) break return ret # Create", "archives from UP42\", action='store_false') parser.add_argument(\"--workflow_name_prefix\", help=\"Workflow name prefix to be passed to UP42", "get_one_polygon(polygon_num, time_limit_num, workflow_id, polygon, time_limit)) # This may be useful if the user", "[] for coordinate_pair in args.coordinates: poly = coordinate_pair.split(\":\") if len(poly) != 2: print(\"Bad", "TIME_LIMITS = [] for timeframe in args.timeframes: tf = timeframe.split(\":\") if len(tf) !=", "name, 'description': description }).json() # This will fill a chain of blocks, one", "+ str( task_num) + \".tar.gz\") return str(len(actual_output[\"features\"][0][\"properties\"][\"det_details\"])) if __name__ == '__main__': if DEBUG_LOGGING:", "of a square. Sample: \" + \"2019-12-01:2020-02-28\", required=True) parser.add_argument(\"-v\", \"--verbose\", help=\"Output more debug", "\"/workflows\", data={ 'name': name, 'description': description }).json() # This will fill a chain", "5 seconds. Defaults to ~25 minutes. def await_job_completion(job, tries=300): try_counter = 0 if", "minutes. def await_job_completion(job, tries=300): try_counter = 0 if DEBUG_LOGGING: print(\"[+] Awaiting job completion\")", "+ job_id + \"/tasks/\" + task_id + \"/downloads/results\").json() def dump_task_url(self, url, target_folder, target_name):", "+= 1 time.sleep(5) return False # Process 1 polygon/time period pair def get_one_polygon(polygon_num,", "args.verbose PROJECT_ID = args.project_id API_KEY = args.api_key SAVE_ALL_JOB_RESULTS = args.no_store BASE_URL = \"https://api.up42.com\"", "validation helper. def _validate_date(date_text): try: datetime.datetime.strptime(date_text, '%Y-%m-%d') return True except ValueError: return False", "client_id self.client_secret = client_secret def _get_token(self): return requests.post(BASE_URL + \"/oauth/token\", auth=(self.client_id, self.client_secret), headers={'Content-Type':", "return \"-\" if DEBUG_LOGGING: test_job_output = api_client.get_job_output(job['data']['id']) print(\"[+] Acquisition date: \" + test_job_output[\"features\"][0][\"properties\"][\"acquisitionDate\"])", "+ self.project_id + \"/jobs/\" + job_id + \"/tasks\").json() def get_task_signed_url(self, job_id, task_id): return", "type=str, required=True) parser.add_argument(\"-k\", \"--api_key\", help=\"API Key from UP42 Console\", type=str, required=True) parser.add_argument(\"-c\", \"--coordinates\",", "- validate and convert into UP42 input format. POLYGONS = [] for coordinate_pair", "time import calendar import os import argparse import datetime # Console arguments parser", "and create it. 
def set_workflow_tasks(self, workflow_id, task_list): tasks = [] previous_name = None", "\"_job_\" + (''.join(random.choice(\"0123456789abcdef\") for i in range(16))) return api_client.run_job(workflow_id, job_name, is_dry, job_params) #", "is_success: if DEBUG_LOGGING: print(\"[-] Job failed\") return \"-\" if DEBUG_LOGGING: test_job_output = api_client.get_job_output(job['data']['id'])", "workflow_id) polygon_num = 0 for polygon in POLYGONS: polygon_num += 1 time_limit_num =", "if the user wants to manually download or view detection data later in", "query first\") job = run_job(workflow_id, polygon, time_period, True) is_success = await_job_completion(job) # We", "+ \"/downloads/results\").json() def dump_task_url(self, url, target_folder, target_name): try: os.mkdir(target_folder) except: # We already", "DEBUG_LOGGING: print(\"[-] Job failed\") return \"-\" actual_output = api_client.get_job_output(job['data']['id']) if SAVE_ALL_JOB_RESULTS: if DEBUG_LOGGING:", "\" + get_one_polygon(polygon_num, time_limit_num, workflow_id, polygon, time_limit)) # This may be useful if", "+ self.token return r # API Client abstraction class ApiClient(object): def __init__(self, base_url,", "name for workflow to help determine which one is it in UI later.", "job_id + \"/tasks\").json() def get_task_signed_url(self, job_id, task_id): return self._get_query( \"/projects/\" + self.project_id +", "self.ts = time.time() - timeout * 2 self.timeout = timeout self.client_id = client_id", "name. pass output_location = os.path.join(target_folder, target_name) content = requests.get(url).content with open(output_location, \"wb\") as", "timeout self.client_id = client_id self.client_secret = client_secret def _get_token(self): return requests.post(BASE_URL + \"/oauth/token\",", "for workflow to help determine which one is it in UI later. randomized_name", "requests.delete(self.base_url + url, auth=self.bearer_auth) def get_blocks(self): return self._get_query(\"/blocks\").json() def create_workflow(self, name, description): return", "parameters def run_job(workflow_id, polygon, time_period, is_dry): job_params = { processing_block_name: {}, data_block_name: {", "coordinate pair: \" + coordinate_pair) exit() pt[0] = float(pt[0]) pt[1] = float(pt[1]) converted_poly.append(pt)", "\"-\" actual_output = api_client.get_job_output(job['data']['id']) if SAVE_ALL_JOB_RESULTS: if DEBUG_LOGGING: print(\"[+] Storing job results\") tasks", "tasks based on target parameters. def initialize_workflow(): targets = extract_target_blocks() workflow = api_client.create_workflow(randomized_name,", "# Random name for workflow to help determine which one is it in", "_delete_query(self, url): return requests.delete(self.base_url + url, auth=self.bearer_auth) def get_blocks(self): return self._get_query(\"/blocks\").json() def create_workflow(self,", "time_limit_num += 1 print( \"Polygon \" + str(polygon_num) + \" interval \" +", "= args.verbose PROJECT_ID = args.project_id API_KEY = args.api_key SAVE_ALL_JOB_RESULTS = args.no_store BASE_URL =", "= args.api_key SAVE_ALL_JOB_RESULTS = args.no_store BASE_URL = \"https://api.up42.com\" BASE_WORKFLOW_NAME = args.workflow_name_prefix CLEANUP_WORKFLOW =", "corners of a square. 
Sample: \" + \"2019-12-01:2020-02-28\", required=True) parser.add_argument(\"-v\", \"--verbose\", help=\"Output more", "api_client.get_job_output(job['data']['id']) print(\"[+] Acquisition date: \" + test_job_output[\"features\"][0][\"properties\"][\"acquisitionDate\"]) print(\"[+] Estimated credits: \" + str(test_job_output[\"features\"][0][\"estimatedCredits\"]))", "return requests.delete(self.base_url + url, auth=self.bearer_auth) def get_blocks(self): return self._get_query(\"/blocks\").json() def create_workflow(self, name, description):", "+ \"_task_\" + str( task_num) + \".tar.gz\") return str(len(actual_output[\"features\"][0][\"properties\"][\"det_details\"])) if __name__ == '__main__':", "a square. Sample: \" + \"37.327035,-121.941054:37.323451,-121.940485\", required=True) parser.add_argument(\"-t\", \"--timeframes\", nargs=\"+\", help=\"List of latitude-longitude", "timeframes - validate and convert into UP42 input format. TIME_LIMITS = [] for", "PROJECT_ID = args.project_id API_KEY = args.api_key SAVE_ALL_JOB_RESULTS = args.no_store BASE_URL = \"https://api.up42.com\" BASE_WORKFLOW_NAME", "\"/tasks\", data=tasks).json() def delete_workflow(self, workflow_id): return self._delete_query(\"/projects/\" + self.project_id + \"/workflows/\" + workflow_id)", "\"/jobs/\" + job_id + \"/outputs/data-json\").json() def run_job(self, workflow_id, name, is_dry, job_parameters): data =", "None, \"time\": time_period, \"limit\": 1, \"intersects\": { \"type\": \"Polygon\", \"coordinates\": [ polygon ]", "def run_job(self, workflow_id, name, is_dry, job_parameters): data = job_parameters.copy() if is_dry: data['config'] =", "workflow and initialize the tasks based on target parameters. def initialize_workflow(): targets =", "\" interval \" + str(time_limit_num) + \": \" + get_one_polygon(polygon_num, time_limit_num, workflow_id, polygon,", "pairs, each representing 2 corners of a square. Sample: \" + \"2019-12-01:2020-02-28\", required=True)", "pt = point.split(\",\") if len(pt) != 2: print(\"Bad coordinate pair: \" + coordinate_pair)", "time period. Output a - if not is_success: if DEBUG_LOGGING: print(\"[-] Job failed\")", "> self.timeout: self.token = self._get_token() self.ts = time.time() r.headers[\"authorization\"] = \"Bearer \" +", "used as source satellite data # processing_block_name will be used for vehicle detection", "os import argparse import datetime # Console arguments parser = argparse.ArgumentParser( description=\"Estimate number", "PROJECT_ID, API_KEY) # Specific method which will extract target blocks that will be", "in UI or API. 
if CLEANUP_WORKFLOW: if DEBUG_LOGGING: print(\"[+] Cleaning workflow up\") api_client.delete_workflow(workflow_id)", "if __name__ == '__main__': if DEBUG_LOGGING: print(\"[+] Creating workflow...\") workflow_id = initialize_workflow() if", "if block[\"name\"] == data_block_name: ret.append(block) break for block in block_listing: if block[\"name\"] ==", "time_period, False) is_success = await_job_completion(job) if not is_success: if DEBUG_LOGGING: print(\"[-] Job failed\")", "< tries: job_status = api_client.get_job(job['data']['id']) extracted_status = job_status['data']['status'] if extracted_status == 'FAILED': return", "API Client abstraction class ApiClient(object): def __init__(self, base_url, project_id, api_key): self.base_url = base_url", "+ \"37.327035,-121.941054:37.323451,-121.940485\", required=True) parser.add_argument(\"-t\", \"--timeframes\", nargs=\"+\", help=\"List of latitude-longitude pairs, each representing 2", "= api_client.create_workflow(randomized_name, 'Temp workflow for covid19 script') api_client.set_workflow_tasks(workflow['data']['id'], targets) return workflow['data']['id'] # Run", "print(\"Bad date: \"+tf[1]) exit() TIME_LIMITS.append( tf[0]+\"T00:00:00+00:00/\"+tf[1]+\"T23:59:59+00:00\" ) # Random name for workflow to", "args.timeframes: tf = timeframe.split(\":\") if len(tf) != 2: print(\"Bad timeframe: \"+timeframe) exit() if", "def __init__(self, client_id, client_secret, timeout=60): self.ts = time.time() - timeout * 2 self.timeout", "that will be used in workflow. # data_block_name will be used as source", "for block in block_listing: if block[\"name\"] == processing_block_name: ret.append(block) break return ret #", "'oneatlas-pleiades-aoiclipped' processing_block_name = 'sm_veh-detection' # API INTEGRATION # # Simple bearer auth implementation", "can't get this time period. Output a - if not is_success: if DEBUG_LOGGING:", "return requests.put(self.base_url + url, auth=self.bearer_auth, json=data) def _delete_query(self, url): return requests.delete(self.base_url + url,", "in block_listing: if block[\"name\"] == data_block_name: ret.append(block) break for block in block_listing: if", "not is_success: if DEBUG_LOGGING: print(\"[-] Job failed\") return \"-\" if DEBUG_LOGGING: test_job_output =", "poly: pt = point.split(\",\") if len(pt) != 2: print(\"Bad coordinate pair: \" +", "def extract_target_blocks(): block_listing = api_client.get_blocks()[\"data\"] ret = [] for block in block_listing: if", "Default: covid19_car_estimate_\", default=\"covid19_car_estimate_\") parser.add_argument(\"--no_cleanup\", help=\"Keep workflow in UP42 project after script is done\",", "folder = \"raw_job_\" + str(current_timestamp) # Constant block names for automatic search. data_block_name", "0 for polygon in POLYGONS: polygon_num += 1 time_limit_num = 0 for time_limit", "if block[\"name\"] == processing_block_name: ret.append(block) break return ret # Create workflow and initialize", "+ (''.join(random.choice(\"0123456789abcdef\") for i in range(16))) return api_client.run_job(workflow_id, job_name, is_dry, job_params) # await", "processing_block_name: ret.append(block) break return ret # Create workflow and initialize the tasks based", "in range(16))) return api_client.run_job(workflow_id, job_name, is_dry, job_params) # await job completion for up", "converted_poly[1][0]], [converted_poly[0][1], converted_poly[0][0]] ] POLYGONS.append(prepared_poly) # Date validation helper. def _validate_date(date_text): try: datetime.datetime.strptime(date_text,", "all tarballs. 
current_timestamp = calendar.timegm(time.gmtime()) folder = \"raw_job_\" + str(current_timestamp) # Constant block", "_validate_date(date_text): try: datetime.datetime.strptime(date_text, '%Y-%m-%d') return True except ValueError: return False # Process input", "return api_client.run_job(workflow_id, job_name, is_dry, job_params) # await job completion for up to ~tries", "job_id): return self._get_query(\"/projects/\" + self.project_id + \"/jobs/\" + job_id + \"/outputs/data-json\").json() def run_job(self,", "\"type\": \"Polygon\", \"coordinates\": [ polygon ] }, \"zoom_level\": 18, \"time_series\": None, \"max_cloud_cover\": 100,", "# Specific method which will extract target blocks that will be used in", "for time_limit in TIME_LIMITS: time_limit_num += 1 print( \"Polygon \" + str(polygon_num) +", "based on target parameters. def initialize_workflow(): targets = extract_target_blocks() workflow = api_client.create_workflow(randomized_name, 'Temp", "will be used for vehicle detection def extract_target_blocks(): block_listing = api_client.get_blocks()[\"data\"] ret =", "Estimated credits: \" + str(test_job_output[\"features\"][0][\"estimatedCredits\"])) print(\"[+] Now running actual job\") job = run_job(workflow_id,", "workflow in UP42 project after script is done\", action='store_false') args = parser.parse_args() DEBUG_LOGGING", "tf = timeframe.split(\":\") if len(tf) != 2: print(\"Bad timeframe: \"+timeframe) exit() if not", "description=\"Estimate number of cars at given rectangles (latitude-longitude) on given timeframes\" ) parser.add_argument(\"-p\",", "= args.project_id API_KEY = args.api_key SAVE_ALL_JOB_RESULTS = args.no_store BASE_URL = \"https://api.up42.com\" BASE_WORKFLOW_NAME =", "if is_dry: data['config'] = {\"mode\": \"DRY_RUN\"} return self._post_query(\"/projects/\" + self.project_id + \"/workflows/\" +", "+ test_job_output[\"features\"][0][\"properties\"][\"acquisitionDate\"]) print(\"[+] Estimated credits: \" + str(test_job_output[\"features\"][0][\"estimatedCredits\"])) print(\"[+] Now running actual job\")", "self.client_secret), headers={'Content-Type': 'application/x-www-form-urlencoded'}, data={'grant_type': 'client_credentials'} ).json()['access_token'] def __call__(self, r): if time.time() - self.ts", "data later in UI or API. if CLEANUP_WORKFLOW: if DEBUG_LOGGING: print(\"[+] Cleaning workflow", "= extract_target_blocks() workflow = api_client.create_workflow(randomized_name, 'Temp workflow for covid19 script') api_client.set_workflow_tasks(workflow['data']['id'], targets) return", "import os import argparse import datetime # Console arguments parser = argparse.ArgumentParser( description=\"Estimate", "latitude-longitude pairs, each representing 2 corners of a square. Sample: \" + \"37.327035,-121.941054:37.323451,-121.940485\",", "extracted_status = job_status['data']['status'] if extracted_status == 'FAILED': return False if extracted_status == 'SUCCEEDED':", "polygon, time_period, True) is_success = await_job_completion(job) # We can't get this time period.", "\"wb\") as f: f.write(content) api_client = ApiClient(BASE_URL, PROJECT_ID, API_KEY) # Specific method which", "task in tasks: task_num += 1 url = api_client.get_task_signed_url(job['data']['id'], task['id'])['data']['url'] api_client.dump_task_url(url, folder, \"polygon_\"", "preexisting directory with this name. pass output_location = os.path.join(target_folder, target_name) content = requests.get(url).content", "will fill a chain of blocks, one by one, and create it. 
def", "data): return requests.post(self.base_url + url, auth=self.bearer_auth, json=data) def _put_query(self, url, data): return requests.put(self.base_url", "Key from UP42 Console\", type=str, required=True) parser.add_argument(\"-c\", \"--coordinates\", nargs=\"+\", help=\"List of latitude-longitude pairs,", "- self.ts > self.timeout: self.token = self._get_token() self.ts = time.time() r.headers[\"authorization\"] = \"Bearer", "\"Polygon \" + str(polygon_num) + \" interval \" + str(time_limit_num) + \": \"", "= client_id self.client_secret = client_secret def _get_token(self): return requests.post(BASE_URL + \"/oauth/token\", auth=(self.client_id, self.client_secret),", "while try_counter < tries: job_status = api_client.get_job(job['data']['id']) extracted_status = job_status['data']['status'] if extracted_status ==", "workflow_id, polygon, time_limit)) # This may be useful if the user wants to", "+ str(polygon_num) + \" interval \" + str(time_limit_num) + \": \" + get_one_polygon(polygon_num,", "given timeframes\" ) parser.add_argument(\"-p\", \"--project_id\", help=\"Project ID from UP42 Console\", type=str, required=True) parser.add_argument(\"-k\",", "self.project_id + \"/jobs/\" + job_id + \"/tasks/\" + task_id + \"/downloads/results\").json() def dump_task_url(self,", "float(pt[0]) pt[1] = float(pt[1]) converted_poly.append(pt) prepared_poly = [ [converted_poly[0][1], converted_poly[0][0]], [converted_poly[1][1], converted_poly[0][0]], [converted_poly[1][1],", "input format. TIME_LIMITS = [] for timeframe in args.timeframes: tf = timeframe.split(\":\") if", "DEBUG_LOGGING: print(\"[+] Storing job results\") tasks = api_client.get_job_tasks(job['data']['id'])['data'] task_num = 0 for task", "try: os.mkdir(target_folder) except: # We already have a preexisting directory with this name.", "= os.path.join(target_folder, target_name) content = requests.get(url).content with open(output_location, \"wb\") as f: f.write(content) api_client", "action='store_false') parser.add_argument(\"--workflow_name_prefix\", help=\"Workflow name prefix to be passed to UP42 console. Default: covid19_car_estimate_\",", "block[\"name\"] == data_block_name: ret.append(block) break for block in block_listing: if block[\"name\"] == processing_block_name:", "] }, \"zoom_level\": 18, \"time_series\": None, \"max_cloud_cover\": 100, \"panchromatic_band\": False } } job_name", "+ task_id + \"/downloads/results\").json() def dump_task_url(self, url, target_folder, target_name): try: os.mkdir(target_folder) except: #", "set_workflow_tasks(self, workflow_id, task_list): tasks = [] previous_name = None for task in task_list:", "be used for vehicle detection def extract_target_blocks(): block_listing = api_client.get_blocks()[\"data\"] ret = []", "return True try_counter += 1 time.sleep(5) return False # Process 1 polygon/time period", "tasks = api_client.get_job_tasks(job['data']['id'])['data'] task_num = 0 for task in tasks: task_num += 1", "job_params = { processing_block_name: {}, data_block_name: { \"ids\": None, \"time\": time_period, \"limit\": 1,", "to help determine which one is it in UI later. 
randomized_name = BASE_WORKFLOW_NAME", "[] for timeframe in args.timeframes: tf = timeframe.split(\":\") if len(tf) != 2: print(\"Bad", "\"Polygon\", \"coordinates\": [ polygon ] }, \"zoom_level\": 18, \"time_series\": None, \"max_cloud_cover\": 100, \"panchromatic_band\":", "Specific method which will extract target blocks that will be used in workflow.", "None, \"max_cloud_cover\": 100, \"panchromatic_band\": False } } job_name = randomized_name + \"_job_\" +", "UP42 Console\", type=str, required=True) parser.add_argument(\"-c\", \"--coordinates\", nargs=\"+\", help=\"List of latitude-longitude pairs, each representing", "be useful if the user wants to manually download or view detection data", "Create workflow and initialize the tasks based on target parameters. def initialize_workflow(): targets", "initialize_workflow() if DEBUG_LOGGING: print(\"[+] Created workflow: \" + workflow_id) polygon_num = 0 for", "open(output_location, \"wb\") as f: f.write(content) api_client = ApiClient(BASE_URL, PROJECT_ID, API_KEY) # Specific method", "if not _validate_date(tf[1]): print(\"Bad date: \"+tf[1]) exit() TIME_LIMITS.append( tf[0]+\"T00:00:00+00:00/\"+tf[1]+\"T23:59:59+00:00\" ) # Random name", "extract target blocks that will be used in workflow. # data_block_name will be", "will be used as source satellite data # processing_block_name will be used for", "extracted_status == 'FAILED': return False if extracted_status == 'SUCCEEDED': return True try_counter +=", "return self._post_query(\"/projects/\" + self.project_id + \"/workflows/\" + workflow_id + \"/jobs?name=\" + name, data).json()", "return workflow['data']['id'] # Run a job with randomized name and templated parameters def", "polygon, time_period, is_dry): job_params = { processing_block_name: {}, data_block_name: { \"ids\": None, \"time\":", "try_counter += 1 time.sleep(5) return False # Process 1 polygon/time period pair def", "is it in UI later. randomized_name = BASE_WORKFLOW_NAME + (''.join(random.choice(\"0123456789abcdef\") for i in", "in POLYGONS: polygon_num += 1 time_limit_num = 0 for time_limit in TIME_LIMITS: time_limit_num", "def get_blocks(self): return self._get_query(\"/blocks\").json() def create_workflow(self, name, description): return self._post_query(\"/projects/\" + self.project_id +", "required=True) parser.add_argument(\"-k\", \"--api_key\", help=\"API Key from UP42 Console\", type=str, required=True) parser.add_argument(\"-c\", \"--coordinates\", nargs=\"+\",", "i in range(16))) return api_client.run_job(workflow_id, job_name, is_dry, job_params) # await job completion for", "auth implementation to reduce amount of external dependencies. class BearerAuth(requests.auth.AuthBase): def __init__(self, client_id,", "self.project_id + \"/workflows/\" + workflow_id + \"/tasks\", data=tasks).json() def delete_workflow(self, workflow_id): return self._delete_query(\"/projects/\"", "+ name, data).json() def get_job_tasks(self, job_id): return self._get_query(\"/projects/\" + self.project_id + \"/jobs/\" +", "float(pt[1]) converted_poly.append(pt) prepared_poly = [ [converted_poly[0][1], converted_poly[0][0]], [converted_poly[1][1], converted_poly[0][0]], [converted_poly[1][1], converted_poly[1][0]], [converted_poly[0][1], converted_poly[1][0]],", "name prefix to be passed to UP42 console. 
Default: covid19_car_estimate_\", default=\"covid19_car_estimate_\") parser.add_argument(\"--no_cleanup\", help=\"Keep", "False } } job_name = randomized_name + \"_job_\" + (''.join(random.choice(\"0123456789abcdef\") for i in", "parser.add_argument(\"-p\", \"--project_id\", help=\"Project ID from UP42 Console\", type=str, required=True) parser.add_argument(\"-k\", \"--api_key\", help=\"API Key", "= randomized_name + \"_job_\" + (''.join(random.choice(\"0123456789abcdef\") for i in range(16))) return api_client.run_job(workflow_id, job_name,", "get_job(self, job_id): return self._get_query(\"/projects/\" + self.project_id + \"/jobs/\" + job_id).json() def get_job_output(self, job_id):", "self._get_query(\"/projects/\" + self.project_id + \"/jobs/\" + job_id).json() def get_job_output(self, job_id): return self._get_query(\"/projects/\" +", "\"zoom_level\": 18, \"time_series\": None, \"max_cloud_cover\": 100, \"panchromatic_band\": False } } job_name = randomized_name", "prefix to be passed to UP42 console. Default: covid19_car_estimate_\", default=\"covid19_car_estimate_\") parser.add_argument(\"--no_cleanup\", help=\"Keep workflow", "2: print(\"Bad coordinate pair: \" + coordinate_pair) exit() pt[0] = float(pt[0]) pt[1] =", "len(tf) != 2: print(\"Bad timeframe: \"+timeframe) exit() if not _validate_date(tf[0]): print(\"Bad date: \"+tf[0])", "converted_poly = [] for point in poly: pt = point.split(\",\") if len(pt) !=", "+ self.project_id + \"/jobs/\" + job_id + \"/outputs/data-json\").json() def run_job(self, workflow_id, name, is_dry,", "self._get_query(\"/projects/\" + self.project_id + \"/jobs/\" + job_id + \"/tasks\").json() def get_task_signed_url(self, job_id, task_id):", "UP42 project after script is done\", action='store_false') args = parser.parse_args() DEBUG_LOGGING = args.verbose", "will be used in workflow. # data_block_name will be used as source satellite", "return self._delete_query(\"/projects/\" + self.project_id + \"/workflows/\" + workflow_id) def get_job(self, job_id): return self._get_query(\"/projects/\"", "print(\"[+] Created workflow: \" + workflow_id) polygon_num = 0 for polygon in POLYGONS:", "await job completion for up to ~tries * 5 seconds. Defaults to ~25", "time_period, True) is_success = await_job_completion(job) # We can't get this time period. Output", "is_success: if DEBUG_LOGGING: print(\"[-] Job failed\") return \"-\" actual_output = api_client.get_job_output(job['data']['id']) if SAVE_ALL_JOB_RESULTS:", "0 for time_limit in TIME_LIMITS: time_limit_num += 1 print( \"Polygon \" + str(polygon_num)", "console. Default: covid19_car_estimate_\", default=\"covid19_car_estimate_\") parser.add_argument(\"--no_cleanup\", help=\"Keep workflow in UP42 project after script is", "UP42\", action='store_false') parser.add_argument(\"--workflow_name_prefix\", help=\"Workflow name prefix to be passed to UP42 console. Default:", "return requests.get(self.base_url + url, auth=self.bearer_auth) def _post_query(self, url, data): return requests.post(self.base_url + url,", "source satellite data # processing_block_name will be used for vehicle detection def extract_target_blocks():", "of a square. 
Sample: \" + \"37.327035,-121.941054:37.323451,-121.940485\", required=True) parser.add_argument(\"-t\", \"--timeframes\", nargs=\"+\", help=\"List of", "= api_client.get_job_output(job['data']['id']) if SAVE_ALL_JOB_RESULTS: if DEBUG_LOGGING: print(\"[+] Storing job results\") tasks = api_client.get_job_tasks(job['data']['id'])['data']", "}) previous_name = task[\"name\"] return self._post_query(\"/projects/\" + self.project_id + \"/workflows/\" + workflow_id +", "\"max_cloud_cover\": 100, \"panchromatic_band\": False } } job_name = randomized_name + \"_job_\" + (''.join(random.choice(\"0123456789abcdef\")", "for all tarballs. current_timestamp = calendar.timegm(time.gmtime()) folder = \"raw_job_\" + str(current_timestamp) # Constant", "name, is_dry, job_parameters): data = job_parameters.copy() if is_dry: data['config'] = {\"mode\": \"DRY_RUN\"} return", "\" + test_job_output[\"features\"][0][\"properties\"][\"acquisitionDate\"]) print(\"[+] Estimated credits: \" + str(test_job_output[\"features\"][0][\"estimatedCredits\"])) print(\"[+] Now running actual", "__init__(self, client_id, client_secret, timeout=60): self.ts = time.time() - timeout * 2 self.timeout =", "def _get_token(self): return requests.post(BASE_URL + \"/oauth/token\", auth=(self.client_id, self.client_secret), headers={'Content-Type': 'application/x-www-form-urlencoded'}, data={'grant_type': 'client_credentials'} ).json()['access_token']", "2: print(\"Bad coordinate pair: \"+coordinate_pair) exit() converted_poly = [] for point in poly:", "time.time() - self.ts > self.timeout: self.token = self._get_token() self.ts = time.time() r.headers[\"authorization\"] =", "!= 2: print(\"Bad coordinate pair: \" + coordinate_pair) exit() pt[0] = float(pt[0]) pt[1]", "= api_key self.bearer_auth = BearerAuth(project_id, api_key) def _get_query(self, url): return requests.get(self.base_url + url,", "True) is_success = await_job_completion(job) # We can't get this time period. 
Output a", "name, data).json() def get_job_tasks(self, job_id): return self._get_query(\"/projects/\" + self.project_id + \"/jobs/\" + job_id", "import calendar import os import argparse import datetime # Console arguments parser =", "script is done\", action='store_false') args = parser.parse_args() DEBUG_LOGGING = args.verbose PROJECT_ID = args.project_id", "_get_query(self, url): return requests.get(self.base_url + url, auth=self.bearer_auth) def _post_query(self, url, data): return requests.post(self.base_url", "task_list): tasks = [] previous_name = None for task in task_list: tasks.append({ \"name\":", "print(\"[+] Storing job results\") tasks = api_client.get_job_tasks(job['data']['id'])['data'] task_num = 0 for task in", "# This will fill a chain of blocks, one by one, and create", "run_job(workflow_id, polygon, time_period, True) is_success = await_job_completion(job) # We can't get this time", "= job_status['data']['status'] if extracted_status == 'FAILED': return False if extracted_status == 'SUCCEEDED': return", "+ \"_timestamp_\" + str(time_num) + \"_task_\" + str( task_num) + \".tar.gz\") return str(len(actual_output[\"features\"][0][\"properties\"][\"det_details\"]))", "def get_job(self, job_id): return self._get_query(\"/projects/\" + self.project_id + \"/jobs/\" + job_id).json() def get_job_output(self,", "data_block_name: { \"ids\": None, \"time\": time_period, \"limit\": 1, \"intersects\": { \"type\": \"Polygon\", \"coordinates\":", "__name__ == '__main__': if DEBUG_LOGGING: print(\"[+] Creating workflow...\") workflow_id = initialize_workflow() if DEBUG_LOGGING:", "print(\"[+] Awaiting job completion\") while try_counter < tries: job_status = api_client.get_job(job['data']['id']) extracted_status =", "try_counter < tries: job_status = api_client.get_job(job['data']['id']) extracted_status = job_status['data']['status'] if extracted_status == 'FAILED':", "on given timeframes\" ) parser.add_argument(\"-p\", \"--project_id\", help=\"Project ID from UP42 Console\", type=str, required=True)", "return self._post_query(\"/projects/\" + self.project_id + \"/workflows/\" + workflow_id + \"/tasks\", data=tasks).json() def delete_workflow(self,", "= BASE_WORKFLOW_NAME + (''.join(random.choice(\"0123456789abcdef\") for i in range(16))) # Storage folder name for", "def dump_task_url(self, url, target_folder, target_name): try: os.mkdir(target_folder) except: # We already have a", "url, data): return requests.put(self.base_url + url, auth=self.bearer_auth, json=data) def _delete_query(self, url): return requests.delete(self.base_url", "failed\") return \"-\" if DEBUG_LOGGING: test_job_output = api_client.get_job_output(job['data']['id']) print(\"[+] Acquisition date: \" +", "be passed to UP42 console. Default: covid19_car_estimate_\", default=\"covid19_car_estimate_\") parser.add_argument(\"--no_cleanup\", help=\"Keep workflow in UP42", "action='store_false') args = parser.parse_args() DEBUG_LOGGING = args.verbose PROJECT_ID = args.project_id API_KEY = args.api_key", "1 print( \"Polygon \" + str(polygon_num) + \" interval \" + str(time_limit_num) +", "# Process input timeframes - validate and convert into UP42 input format. TIME_LIMITS", "] POLYGONS.append(prepared_poly) # Date validation helper. def _validate_date(date_text): try: datetime.datetime.strptime(date_text, '%Y-%m-%d') return True", "# Storage folder name for all tarballs. 
current_timestamp = calendar.timegm(time.gmtime()) folder = \"raw_job_\"", "workflow...\") workflow_id = initialize_workflow() if DEBUG_LOGGING: print(\"[+] Created workflow: \" + workflow_id) polygon_num", "\"+coordinate_pair) exit() converted_poly = [] for point in poly: pt = point.split(\",\") if", "+ workflow_id) def get_job(self, job_id): return self._get_query(\"/projects/\" + self.project_id + \"/jobs/\" + job_id).json()", "task['id'])['data']['url'] api_client.dump_task_url(url, folder, \"polygon_\" + str(polygon_num) + \"_timestamp_\" + str(time_num) + \"_task_\" +", "BearerAuth(requests.auth.AuthBase): def __init__(self, client_id, client_secret, timeout=60): self.ts = time.time() - timeout * 2", "if extracted_status == 'SUCCEEDED': return True try_counter += 1 time.sleep(5) return False #", "= 0 for task in tasks: task_num += 1 url = api_client.get_task_signed_url(job['data']['id'], task['id'])['data']['url']", "args.project_id API_KEY = args.api_key SAVE_ALL_JOB_RESULTS = args.no_store BASE_URL = \"https://api.up42.com\" BASE_WORKFLOW_NAME = args.workflow_name_prefix", "job_id + \"/tasks/\" + task_id + \"/downloads/results\").json() def dump_task_url(self, url, target_folder, target_name): try:", "args.coordinates: poly = coordinate_pair.split(\":\") if len(poly) != 2: print(\"Bad coordinate pair: \"+coordinate_pair) exit()", "point.split(\",\") if len(pt) != 2: print(\"Bad coordinate pair: \" + coordinate_pair) exit() pt[0]", "[converted_poly[0][1], converted_poly[1][0]], [converted_poly[0][1], converted_poly[0][0]] ] POLYGONS.append(prepared_poly) # Date validation helper. def _validate_date(date_text): try:", "BASE_WORKFLOW_NAME + (''.join(random.choice(\"0123456789abcdef\") for i in range(16))) # Storage folder name for all", "data = job_parameters.copy() if is_dry: data['config'] = {\"mode\": \"DRY_RUN\"} return self._post_query(\"/projects/\" + self.project_id", "test_job_output = api_client.get_job_output(job['data']['id']) print(\"[+] Acquisition date: \" + test_job_output[\"features\"][0][\"properties\"][\"acquisitionDate\"]) print(\"[+] Estimated credits: \"", "polygon - validate and convert into UP42 input format. POLYGONS = [] for", "raw archives from UP42\", action='store_false') parser.add_argument(\"--workflow_name_prefix\", help=\"Workflow name prefix to be passed to", "workflow['data']['id'] # Run a job with randomized name and templated parameters def run_job(workflow_id,", "parser.add_argument(\"--workflow_name_prefix\", help=\"Workflow name prefix to be passed to UP42 console. Default: covid19_car_estimate_\", default=\"covid19_car_estimate_\")", "and templated parameters def run_job(workflow_id, polygon, time_period, is_dry): job_params = { processing_block_name: {},", "to manually download or view detection data later in UI or API. if", "Console\", type=str, required=True) parser.add_argument(\"-c\", \"--coordinates\", nargs=\"+\", help=\"List of latitude-longitude pairs, each representing 2", "action='store_true') parser.add_argument(\"--no_store\", help=\"Disables saving of raw archives from UP42\", action='store_false') parser.add_argument(\"--workflow_name_prefix\", help=\"Workflow name", "one, and create it. def set_workflow_tasks(self, workflow_id, task_list): tasks = [] previous_name =", "1 time_limit_num = 0 for time_limit in TIME_LIMITS: time_limit_num += 1 print( \"Polygon", "[converted_poly[0][1], converted_poly[0][0]] ] POLYGONS.append(prepared_poly) # Date validation helper. 
def _validate_date(date_text): try: datetime.datetime.strptime(date_text, '%Y-%m-%d')", "block_listing: if block[\"name\"] == processing_block_name: ret.append(block) break return ret # Create workflow and", "parser.parse_args() DEBUG_LOGGING = args.verbose PROJECT_ID = args.project_id API_KEY = args.api_key SAVE_ALL_JOB_RESULTS = args.no_store", "Storage folder name for all tarballs. current_timestamp = calendar.timegm(time.gmtime()) folder = \"raw_job_\" +", "timeframe.split(\":\") if len(tf) != 2: print(\"Bad timeframe: \"+timeframe) exit() if not _validate_date(tf[0]): print(\"Bad", "block_listing = api_client.get_blocks()[\"data\"] ret = [] for block in block_listing: if block[\"name\"] ==", "script') api_client.set_workflow_tasks(workflow['data']['id'], targets) return workflow['data']['id'] # Run a job with randomized name and", "+ \"/jobs/\" + job_id + \"/outputs/data-json\").json() def run_job(self, workflow_id, name, is_dry, job_parameters): data", "exit() if not _validate_date(tf[1]): print(\"Bad date: \"+tf[1]) exit() TIME_LIMITS.append( tf[0]+\"T00:00:00+00:00/\"+tf[1]+\"T23:59:59+00:00\" ) # Random", "period. Output a - if not is_success: if DEBUG_LOGGING: print(\"[-] Job failed\") return", "= project_id self.api_key = api_key self.bearer_auth = BearerAuth(project_id, api_key) def _get_query(self, url): return", "tasks = [] previous_name = None for task in task_list: tasks.append({ \"name\": task[\"name\"],", "is_dry, job_parameters): data = job_parameters.copy() if is_dry: data['config'] = {\"mode\": \"DRY_RUN\"} return self._post_query(\"/projects/\"", "job_parameters): data = job_parameters.copy() if is_dry: data['config'] = {\"mode\": \"DRY_RUN\"} return self._post_query(\"/projects/\" +", "credits: \" + str(test_job_output[\"features\"][0][\"estimatedCredits\"])) print(\"[+] Now running actual job\") job = run_job(workflow_id, polygon,", "\"polygon_\" + str(polygon_num) + \"_timestamp_\" + str(time_num) + \"_task_\" + str( task_num) +", "parser.add_argument(\"-t\", \"--timeframes\", nargs=\"+\", help=\"List of latitude-longitude pairs, each representing 2 corners of a", "import time import calendar import os import argparse import datetime # Console arguments", "False if extracted_status == 'SUCCEEDED': return True try_counter += 1 time.sleep(5) return False", "time_num, workflow_id, polygon, time_period): if DEBUG_LOGGING: print(\"[+] Running test query first\") job =", "= [] for point in poly: pt = point.split(\",\") if len(pt) != 2:", "if SAVE_ALL_JOB_RESULTS: if DEBUG_LOGGING: print(\"[+] Storing job results\") tasks = api_client.get_job_tasks(job['data']['id'])['data'] task_num =", "API INTEGRATION # # Simple bearer auth implementation to reduce amount of external", "= time.time() - timeout * 2 self.timeout = timeout self.client_id = client_id self.client_secret", "from UP42\", action='store_false') parser.add_argument(\"--workflow_name_prefix\", help=\"Workflow name prefix to be passed to UP42 console.", "satellite data # processing_block_name will be used for vehicle detection def extract_target_blocks(): block_listing", "workflow. # data_block_name will be used as source satellite data # processing_block_name will", "completion for up to ~tries * 5 seconds. Defaults to ~25 minutes. 
def", "= base_url self.project_id = project_id self.api_key = api_key self.bearer_auth = BearerAuth(project_id, api_key) def", "+ \"/outputs/data-json\").json() def run_job(self, workflow_id, name, is_dry, job_parameters): data = job_parameters.copy() if is_dry:", "self._post_query(\"/projects/\" + self.project_id + \"/workflows/\" + workflow_id + \"/jobs?name=\" + name, data).json() def", "block_listing: if block[\"name\"] == data_block_name: ret.append(block) break for block in block_listing: if block[\"name\"]", "# API Client abstraction class ApiClient(object): def __init__(self, base_url, project_id, api_key): self.base_url =", "+ coordinate_pair) exit() pt[0] = float(pt[0]) pt[1] = float(pt[1]) converted_poly.append(pt) prepared_poly = [", "print(\"Bad coordinate pair: \"+coordinate_pair) exit() converted_poly = [] for point in poly: pt", "Job failed\") return \"-\" if DEBUG_LOGGING: test_job_output = api_client.get_job_output(job['data']['id']) print(\"[+] Acquisition date: \"", "dump_task_url(self, url, target_folder, target_name): try: os.mkdir(target_folder) except: # We already have a preexisting", "self._get_query(\"/projects/\" + self.project_id + \"/jobs/\" + job_id + \"/outputs/data-json\").json() def run_job(self, workflow_id, name,", "+ \"/jobs/\" + job_id).json() def get_job_output(self, job_id): return self._get_query(\"/projects/\" + self.project_id + \"/jobs/\"", "converted_poly[1][0]], [converted_poly[0][1], converted_poly[1][0]], [converted_poly[0][1], converted_poly[0][0]] ] POLYGONS.append(prepared_poly) # Date validation helper. def _validate_date(date_text):", "project_id, api_key): self.base_url = base_url self.project_id = project_id self.api_key = api_key self.bearer_auth =", "time.time() - timeout * 2 self.timeout = timeout self.client_id = client_id self.client_secret =", "i in range(16))) # Storage folder name for all tarballs. current_timestamp = calendar.timegm(time.gmtime())", "+ workflow_id + \"/jobs?name=\" + name, data).json() def get_job_tasks(self, job_id): return self._get_query(\"/projects/\" +", "= { processing_block_name: {}, data_block_name: { \"ids\": None, \"time\": time_period, \"limit\": 1, \"intersects\":", "polygon, time_limit)) # This may be useful if the user wants to manually", "if DEBUG_LOGGING: test_job_output = api_client.get_job_output(job['data']['id']) print(\"[+] Acquisition date: \" + test_job_output[\"features\"][0][\"properties\"][\"acquisitionDate\"]) print(\"[+] Estimated", "not _validate_date(tf[1]): print(\"Bad date: \"+tf[1]) exit() TIME_LIMITS.append( tf[0]+\"T00:00:00+00:00/\"+tf[1]+\"T23:59:59+00:00\" ) # Random name for", "= 'oneatlas-pleiades-aoiclipped' processing_block_name = 'sm_veh-detection' # API INTEGRATION # # Simple bearer auth", "base_url self.project_id = project_id self.api_key = api_key self.bearer_auth = BearerAuth(project_id, api_key) def _get_query(self,", "def get_job_output(self, job_id): return self._get_query(\"/projects/\" + self.project_id + \"/jobs/\" + job_id + \"/outputs/data-json\").json()", "parser.add_argument(\"--no_store\", help=\"Disables saving of raw archives from UP42\", action='store_false') parser.add_argument(\"--workflow_name_prefix\", help=\"Workflow name prefix", "help=\"List of latitude-longitude pairs, each representing 2 corners of a square. Sample: \"", "into UP42 input format. TIME_LIMITS = [] for timeframe in args.timeframes: tf =", "workflow: \" + workflow_id) polygon_num = 0 for polygon in POLYGONS: polygon_num +=", "already have a preexisting directory with this name. 
pass output_location = os.path.join(target_folder, target_name)", "time_limit_num = 0 for time_limit in TIME_LIMITS: time_limit_num += 1 print( \"Polygon \"", "+ \"/workflows/\" + workflow_id + \"/jobs?name=\" + name, data).json() def get_job_tasks(self, job_id): return", "# Process input polygon - validate and convert into UP42 input format. POLYGONS", "await_job_completion(job) if not is_success: if DEBUG_LOGGING: print(\"[-] Job failed\") return \"-\" actual_output =", "of raw archives from UP42\", action='store_false') parser.add_argument(\"--workflow_name_prefix\", help=\"Workflow name prefix to be passed", "tf[0]+\"T00:00:00+00:00/\"+tf[1]+\"T23:59:59+00:00\" ) # Random name for workflow to help determine which one is", "+ job_id + \"/outputs/data-json\").json() def run_job(self, workflow_id, name, is_dry, job_parameters): data = job_parameters.copy()", "to ~tries * 5 seconds. Defaults to ~25 minutes. def await_job_completion(job, tries=300): try_counter", "target blocks that will be used in workflow. # data_block_name will be used", "ret.append(block) break return ret # Create workflow and initialize the tasks based on", "data_block_name will be used as source satellite data # processing_block_name will be used", "if time.time() - self.ts > self.timeout: self.token = self._get_token() self.ts = time.time() r.headers[\"authorization\"]", "\" + \"37.327035,-121.941054:37.323451,-121.940485\", required=True) parser.add_argument(\"-t\", \"--timeframes\", nargs=\"+\", help=\"List of latitude-longitude pairs, each representing", "# We already have a preexisting directory with this name. pass output_location =", "# Simple bearer auth implementation to reduce amount of external dependencies. class BearerAuth(requests.auth.AuthBase):", "= 'sm_veh-detection' # API INTEGRATION # # Simple bearer auth implementation to reduce", "for i in range(16))) return api_client.run_job(workflow_id, job_name, is_dry, job_params) # await job completion", "+ \"_job_\" + (''.join(random.choice(\"0123456789abcdef\") for i in range(16))) return api_client.run_job(workflow_id, job_name, is_dry, job_params)", "'sm_veh-detection' # API INTEGRATION # # Simple bearer auth implementation to reduce amount", "parser.add_argument(\"-c\", \"--coordinates\", nargs=\"+\", help=\"List of latitude-longitude pairs, each representing 2 corners of a", "a - if not is_success: if DEBUG_LOGGING: print(\"[-] Job failed\") return \"-\" if", "be used as source satellite data # processing_block_name will be used for vehicle", "workflow_id + \"/jobs?name=\" + name, data).json() def get_job_tasks(self, job_id): return self._get_query(\"/projects/\" + self.project_id", "1 url = api_client.get_task_signed_url(job['data']['id'], task['id'])['data']['url'] api_client.dump_task_url(url, folder, \"polygon_\" + str(polygon_num) + \"_timestamp_\" +", "task_id): return self._get_query( \"/projects/\" + self.project_id + \"/jobs/\" + job_id + \"/tasks/\" +", "== processing_block_name: ret.append(block) break return ret # Create workflow and initialize the tasks", "project after script is done\", action='store_false') args = parser.parse_args() DEBUG_LOGGING = args.verbose PROJECT_ID", "\"/projects/\" + self.project_id + \"/jobs/\" + job_id + \"/tasks/\" + task_id + \"/downloads/results\").json()", "UP42 input format. POLYGONS = [] for coordinate_pair in args.coordinates: poly = coordinate_pair.split(\":\")", "workflow to help determine which one is it in UI later. randomized_name =", "~25 minutes. 
def await_job_completion(job, tries=300): try_counter = 0 if DEBUG_LOGGING: print(\"[+] Awaiting job", "workflow_id): return self._delete_query(\"/projects/\" + self.project_id + \"/workflows/\" + workflow_id) def get_job(self, job_id): return", "parser = argparse.ArgumentParser( description=\"Estimate number of cars at given rectangles (latitude-longitude) on given", "__call__(self, r): if time.time() - self.ts > self.timeout: self.token = self._get_token() self.ts =", "time.time() r.headers[\"authorization\"] = \"Bearer \" + self.token return r # API Client abstraction", "for i in range(16))) # Storage folder name for all tarballs. current_timestamp =", "to UP42 console. Default: covid19_car_estimate_\", default=\"covid19_car_estimate_\") parser.add_argument(\"--no_cleanup\", help=\"Keep workflow in UP42 project after", "+ self.project_id + \"/jobs/\" + job_id).json() def get_job_output(self, job_id): return self._get_query(\"/projects/\" + self.project_id", "+ workflow_id) polygon_num = 0 for polygon in POLYGONS: polygon_num += 1 time_limit_num", "def _put_query(self, url, data): return requests.put(self.base_url + url, auth=self.bearer_auth, json=data) def _delete_query(self, url):", "external dependencies. class BearerAuth(requests.auth.AuthBase): def __init__(self, client_id, client_secret, timeout=60): self.ts = time.time() -", "job_parameters.copy() if is_dry: data['config'] = {\"mode\": \"DRY_RUN\"} return self._post_query(\"/projects/\" + self.project_id + \"/workflows/\"", "delete_workflow(self, workflow_id): return self._delete_query(\"/projects/\" + self.project_id + \"/workflows/\" + workflow_id) def get_job(self, job_id):", "vehicle detection def extract_target_blocks(): block_listing = api_client.get_blocks()[\"data\"] ret = [] for block in", "str( task_num) + \".tar.gz\") return str(len(actual_output[\"features\"][0][\"properties\"][\"det_details\"])) if __name__ == '__main__': if DEBUG_LOGGING: print(\"[+]", "args = parser.parse_args() DEBUG_LOGGING = args.verbose PROJECT_ID = args.project_id API_KEY = args.api_key SAVE_ALL_JOB_RESULTS", "search. 
data_block_name = 'oneatlas-pleiades-aoiclipped' processing_block_name = 'sm_veh-detection' # API INTEGRATION # # Simple", "}, \"zoom_level\": 18, \"time_series\": None, \"max_cloud_cover\": 100, \"panchromatic_band\": False } } job_name =", "str(polygon_num) + \" interval \" + str(time_limit_num) + \": \" + get_one_polygon(polygon_num, time_limit_num,", "args.no_store BASE_URL = \"https://api.up42.com\" BASE_WORKFLOW_NAME = args.workflow_name_prefix CLEANUP_WORKFLOW = args.no_cleanup # Process input", "for task in tasks: task_num += 1 url = api_client.get_task_signed_url(job['data']['id'], task['id'])['data']['url'] api_client.dump_task_url(url, folder,", "detection def extract_target_blocks(): block_listing = api_client.get_blocks()[\"data\"] ret = [] for block in block_listing:", "api_client.get_job(job['data']['id']) extracted_status = job_status['data']['status'] if extracted_status == 'FAILED': return False if extracted_status ==", "\"/jobs/\" + job_id + \"/tasks/\" + task_id + \"/downloads/results\").json() def dump_task_url(self, url, target_folder,", "= ApiClient(BASE_URL, PROJECT_ID, API_KEY) # Specific method which will extract target blocks that", "return \"-\" actual_output = api_client.get_job_output(job['data']['id']) if SAVE_ALL_JOB_RESULTS: if DEBUG_LOGGING: print(\"[+] Storing job results\")", "# Console arguments parser = argparse.ArgumentParser( description=\"Estimate number of cars at given rectangles", "blocks, one by one, and create it. def set_workflow_tasks(self, workflow_id, task_list): tasks =", "create it. def set_workflow_tasks(self, workflow_id, task_list): tasks = [] previous_name = None for", "def await_job_completion(job, tries=300): try_counter = 0 if DEBUG_LOGGING: print(\"[+] Awaiting job completion\") while", "DEBUG_LOGGING: print(\"[+] Awaiting job completion\") while try_counter < tries: job_status = api_client.get_job(job['data']['id']) extracted_status", "is_success = await_job_completion(job) # We can't get this time period. Output a -", "corners of a square. Sample: \" + \"37.327035,-121.941054:37.323451,-121.940485\", required=True) parser.add_argument(\"-t\", \"--timeframes\", nargs=\"+\", help=\"List", "ID from UP42 Console\", type=str, required=True) parser.add_argument(\"-k\", \"--api_key\", help=\"API Key from UP42 Console\",", "# Create workflow and initialize the tasks based on target parameters. def initialize_workflow():", "return False if extracted_status == 'SUCCEEDED': return True try_counter += 1 time.sleep(5) return" ]
[ "#!/usr/bin/env python3 # -*- coding: utf-8 -*- import urllib.request import urllib.parse import sys", "#print(response.read().decode(\"unicode_escape\")) #print(response.getcode()) pass args = sys.argv msg = {\"act\": \"serverwarning\", \"time\": datetime.now().strftime('%Y-%m-%d %H:%M:%S'),", "if line is not None and line.strip() != \"\": k, v = line.split(\":\")", "urllib.request.Request(url, data=params, headers={'content-type': 'application/x-www-form-urlencoded'}) with urllib.request.urlopen(req) as response: #print(response.read().decode(\"unicode_escape\")) #print(response.getcode()) pass args =", "urllib.request import urllib.parse import sys from datetime import datetime url = 'http://zzzzzz/api/upload.php' def", "params = urllib.parse.urlencode(message) params = params.encode(\"ascii\") req = urllib.request.Request(url, data=params, headers={'content-type': 'application/x-www-form-urlencoded'}) with", "} for line in sys.stdin: if line is not None and line.strip() !=", "for line in sys.stdin: if line is not None and line.strip() != \"\":", "in sys.stdin: if line is not None and line.strip() != \"\": k, v", "# -*- coding: utf-8 -*- import urllib.request import urllib.parse import sys from datetime", "utf-8 -*- import urllib.request import urllib.parse import sys from datetime import datetime url", "{\"act\": \"serverwarning\", \"time\": datetime.now().strftime('%Y-%m-%d %H:%M:%S'), } for line in sys.stdin: if line is", "\"serverwarning\", \"time\": datetime.now().strftime('%Y-%m-%d %H:%M:%S'), } for line in sys.stdin: if line is not", "sys from datetime import datetime url = 'http://zzzzzz/api/upload.php' def sendmessage(message): print(message) params =", "headers={'content-type': 'application/x-www-form-urlencoded'}) with urllib.request.urlopen(req) as response: #print(response.read().decode(\"unicode_escape\")) #print(response.getcode()) pass args = sys.argv msg", "urllib.parse.urlencode(message) params = params.encode(\"ascii\") req = urllib.request.Request(url, data=params, headers={'content-type': 'application/x-www-form-urlencoded'}) with urllib.request.urlopen(req) as", "\"time\": datetime.now().strftime('%Y-%m-%d %H:%M:%S'), } for line in sys.stdin: if line is not None", "= urllib.request.Request(url, data=params, headers={'content-type': 'application/x-www-form-urlencoded'}) with urllib.request.urlopen(req) as response: #print(response.read().decode(\"unicode_escape\")) #print(response.getcode()) pass args", "msg = {\"act\": \"serverwarning\", \"time\": datetime.now().strftime('%Y-%m-%d %H:%M:%S'), } for line in sys.stdin: if", "params.encode(\"ascii\") req = urllib.request.Request(url, data=params, headers={'content-type': 'application/x-www-form-urlencoded'}) with urllib.request.urlopen(req) as response: #print(response.read().decode(\"unicode_escape\")) #print(response.getcode())", "coding: utf-8 -*- import urllib.request import urllib.parse import sys from datetime import datetime", "-*- import urllib.request import urllib.parse import sys from datetime import datetime url =", "def sendmessage(message): print(message) params = urllib.parse.urlencode(message) params = params.encode(\"ascii\") req = urllib.request.Request(url, data=params,", "#print(response.getcode()) pass args = sys.argv msg = {\"act\": \"serverwarning\", \"time\": datetime.now().strftime('%Y-%m-%d %H:%M:%S'), }", "= 'http://zzzzzz/api/upload.php' def sendmessage(message): print(message) params = urllib.parse.urlencode(message) params = params.encode(\"ascii\") req =", "sys.stdin: if line is not 
None and line.strip() != \"\": k, v =", "python3 # -*- coding: utf-8 -*- import urllib.request import urllib.parse import sys from", "req = urllib.request.Request(url, data=params, headers={'content-type': 'application/x-www-form-urlencoded'}) with urllib.request.urlopen(req) as response: #print(response.read().decode(\"unicode_escape\")) #print(response.getcode()) pass", "as response: #print(response.read().decode(\"unicode_escape\")) #print(response.getcode()) pass args = sys.argv msg = {\"act\": \"serverwarning\", \"time\":", "import urllib.request import urllib.parse import sys from datetime import datetime url = 'http://zzzzzz/api/upload.php'", "= sys.argv msg = {\"act\": \"serverwarning\", \"time\": datetime.now().strftime('%Y-%m-%d %H:%M:%S'), } for line in", "url = 'http://zzzzzz/api/upload.php' def sendmessage(message): print(message) params = urllib.parse.urlencode(message) params = params.encode(\"ascii\") req", "with urllib.request.urlopen(req) as response: #print(response.read().decode(\"unicode_escape\")) #print(response.getcode()) pass args = sys.argv msg = {\"act\":", "sendmessage(message): print(message) params = urllib.parse.urlencode(message) params = params.encode(\"ascii\") req = urllib.request.Request(url, data=params, headers={'content-type':", "line is not None and line.strip() != \"\": k, v = line.split(\":\") msg[k]", "= params.encode(\"ascii\") req = urllib.request.Request(url, data=params, headers={'content-type': 'application/x-www-form-urlencoded'}) with urllib.request.urlopen(req) as response: #print(response.read().decode(\"unicode_escape\"))", "%H:%M:%S'), } for line in sys.stdin: if line is not None and line.strip()", "from datetime import datetime url = 'http://zzzzzz/api/upload.php' def sendmessage(message): print(message) params = urllib.parse.urlencode(message)", "= {\"act\": \"serverwarning\", \"time\": datetime.now().strftime('%Y-%m-%d %H:%M:%S'), } for line in sys.stdin: if line", "params = params.encode(\"ascii\") req = urllib.request.Request(url, data=params, headers={'content-type': 'application/x-www-form-urlencoded'}) with urllib.request.urlopen(req) as response:", "is not None and line.strip() != \"\": k, v = line.split(\":\") msg[k] =", "response: #print(response.read().decode(\"unicode_escape\")) #print(response.getcode()) pass args = sys.argv msg = {\"act\": \"serverwarning\", \"time\": datetime.now().strftime('%Y-%m-%d", "= urllib.parse.urlencode(message) params = params.encode(\"ascii\") req = urllib.request.Request(url, data=params, headers={'content-type': 'application/x-www-form-urlencoded'}) with urllib.request.urlopen(req)", "sys.argv msg = {\"act\": \"serverwarning\", \"time\": datetime.now().strftime('%Y-%m-%d %H:%M:%S'), } for line in sys.stdin:", "None and line.strip() != \"\": k, v = line.split(\":\") msg[k] = v.strip() sendmessage(msg)", "not None and line.strip() != \"\": k, v = line.split(\":\") msg[k] = v.strip()", "urllib.request.urlopen(req) as response: #print(response.read().decode(\"unicode_escape\")) #print(response.getcode()) pass args = sys.argv msg = {\"act\": \"serverwarning\",", "line in sys.stdin: if line is not None and line.strip() != \"\": k,", "pass args = sys.argv msg = {\"act\": \"serverwarning\", \"time\": datetime.now().strftime('%Y-%m-%d %H:%M:%S'), } for", "urllib.parse import sys from datetime import datetime url = 'http://zzzzzz/api/upload.php' def sendmessage(message): print(message)", "data=params, headers={'content-type': 'application/x-www-form-urlencoded'}) with urllib.request.urlopen(req) as 
response: #print(response.read().decode(\"unicode_escape\")) #print(response.getcode()) pass args = sys.argv", "print(message) params = urllib.parse.urlencode(message) params = params.encode(\"ascii\") req = urllib.request.Request(url, data=params, headers={'content-type': 'application/x-www-form-urlencoded'})", "args = sys.argv msg = {\"act\": \"serverwarning\", \"time\": datetime.now().strftime('%Y-%m-%d %H:%M:%S'), } for line", "datetime import datetime url = 'http://zzzzzz/api/upload.php' def sendmessage(message): print(message) params = urllib.parse.urlencode(message) params", "'application/x-www-form-urlencoded'}) with urllib.request.urlopen(req) as response: #print(response.read().decode(\"unicode_escape\")) #print(response.getcode()) pass args = sys.argv msg =", "datetime.now().strftime('%Y-%m-%d %H:%M:%S'), } for line in sys.stdin: if line is not None and", "import datetime url = 'http://zzzzzz/api/upload.php' def sendmessage(message): print(message) params = urllib.parse.urlencode(message) params =", "datetime url = 'http://zzzzzz/api/upload.php' def sendmessage(message): print(message) params = urllib.parse.urlencode(message) params = params.encode(\"ascii\")", "-*- coding: utf-8 -*- import urllib.request import urllib.parse import sys from datetime import", "'http://zzzzzz/api/upload.php' def sendmessage(message): print(message) params = urllib.parse.urlencode(message) params = params.encode(\"ascii\") req = urllib.request.Request(url,", "import sys from datetime import datetime url = 'http://zzzzzz/api/upload.php' def sendmessage(message): print(message) params", "import urllib.parse import sys from datetime import datetime url = 'http://zzzzzz/api/upload.php' def sendmessage(message):" ]
[ "for j in split: word += j value = int(word.strip()) data[d] = value", "data[d] = value month[d-1] += value list = np.array(data[1:]) avg = np.mean(list) sum", "output.write('\\n') continue data = data.split('\\t') for d in range(1,len(data)): split = re.findall('[^,]',data[d]) word", "open('./input.txt','r') as file: data = file.readline() for data in file: output.write('{}'.format(data.strip())) if data.startswith('#'):", "month = [0,0,0,0,0,0,0,0,0,0,0,0] with open('./output.txt','w') as output: with open('./input.txt','r') as file: data =", "[0,0,0,0,0,0,0,0,0,0,0,0] with open('./output.txt','w') as output: with open('./input.txt','r') as file: data = file.readline() for", "= np.sum(list) output.write('\\t{}\\t{}\\n'.format(sum,round(avg,2))) result.append([avg,sum]) # 월별 총합값 month.append(np.sum(month)) month.append(round(month[12]/12,2)) output.write('sum\\t\\t ') for i", "'' for j in split: word += j value = int(word.strip()) data[d] =", "np.mean(list) sum = np.sum(list) output.write('\\t{}\\t{}\\n'.format(sum,round(avg,2))) result.append([avg,sum]) # 월별 총합값 month.append(np.sum(month)) month.append(round(month[12]/12,2)) output.write('sum\\t\\t ')", "as output: with open('./input.txt','r') as file: data = file.readline() for data in file:", "<reponame>gilwoong-kang/education.cloudsecurity<gh_stars>0 import numpy as np import re result = [] month = [0,0,0,0,0,0,0,0,0,0,0,0]", "with open('./output.txt','w') as output: with open('./input.txt','r') as file: data = file.readline() for data", "value list = np.array(data[1:]) avg = np.mean(list) sum = np.sum(list) output.write('\\t{}\\t{}\\n'.format(sum,round(avg,2))) result.append([avg,sum]) #", "for data in file: output.write('{}'.format(data.strip())) if data.startswith('#'): output.write('\\n') continue data = data.split('\\t') for", "month.append(np.sum(month)) month.append(round(month[12]/12,2)) output.write('sum\\t\\t ') for i in range(len(month)): output.write('{}\\t'.format(month[i])) output.write('\\navg\\t ') for i", "month.append(round(month[12]/12,2)) output.write('sum\\t\\t ') for i in range(len(month)): output.write('{}\\t'.format(month[i])) output.write('\\navg\\t ') for i in", "data = file.readline() for data in file: output.write('{}'.format(data.strip())) if data.startswith('#'): output.write('\\n') continue data", "output.write('sum\\t\\t ') for i in range(len(month)): output.write('{}\\t'.format(month[i])) output.write('\\navg\\t ') for i in range(len(month)-1):", "if data.startswith('#'): output.write('\\n') continue data = data.split('\\t') for d in range(1,len(data)): split =", "avg = np.mean(list) sum = np.sum(list) output.write('\\t{}\\t{}\\n'.format(sum,round(avg,2))) result.append([avg,sum]) # 월별 총합값 month.append(np.sum(month)) month.append(round(month[12]/12,2))", "data.split('\\t') for d in range(1,len(data)): split = re.findall('[^,]',data[d]) word = '' for j", "output.write('\\t{}\\t{}\\n'.format(sum,round(avg,2))) result.append([avg,sum]) # 월별 총합값 month.append(np.sum(month)) month.append(round(month[12]/12,2)) output.write('sum\\t\\t ') for i in range(len(month)):", "word = '' for j in split: word += j value = int(word.strip())", "month[d-1] += value list = np.array(data[1:]) avg = np.mean(list) sum = np.sum(list) output.write('\\t{}\\t{}\\n'.format(sum,round(avg,2)))", "np import re result = [] month = [0,0,0,0,0,0,0,0,0,0,0,0] with open('./output.txt','w') as output:", "= np.array(data[1:]) avg = np.mean(list) sum = np.sum(list) output.write('\\t{}\\t{}\\n'.format(sum,round(avg,2))) 
result.append([avg,sum]) # 월별 총합값", "continue data = data.split('\\t') for d in range(1,len(data)): split = re.findall('[^,]',data[d]) word =", "+= value list = np.array(data[1:]) avg = np.mean(list) sum = np.sum(list) output.write('\\t{}\\t{}\\n'.format(sum,round(avg,2))) result.append([avg,sum])", "= file.readline() for data in file: output.write('{}'.format(data.strip())) if data.startswith('#'): output.write('\\n') continue data =", "j value = int(word.strip()) data[d] = value month[d-1] += value list = np.array(data[1:])", "월별 총합값 month.append(np.sum(month)) month.append(round(month[12]/12,2)) output.write('sum\\t\\t ') for i in range(len(month)): output.write('{}\\t'.format(month[i])) output.write('\\navg\\t ')", "split = re.findall('[^,]',data[d]) word = '' for j in split: word += j", "= value month[d-1] += value list = np.array(data[1:]) avg = np.mean(list) sum =", "np.array(data[1:]) avg = np.mean(list) sum = np.sum(list) output.write('\\t{}\\t{}\\n'.format(sum,round(avg,2))) result.append([avg,sum]) # 월별 총합값 month.append(np.sum(month))", "= int(word.strip()) data[d] = value month[d-1] += value list = np.array(data[1:]) avg =", "result.append([avg,sum]) # 월별 총합값 month.append(np.sum(month)) month.append(round(month[12]/12,2)) output.write('sum\\t\\t ') for i in range(len(month)): output.write('{}\\t'.format(month[i]))", "value = int(word.strip()) data[d] = value month[d-1] += value list = np.array(data[1:]) avg", "split: word += j value = int(word.strip()) data[d] = value month[d-1] += value", "file: output.write('{}'.format(data.strip())) if data.startswith('#'): output.write('\\n') continue data = data.split('\\t') for d in range(1,len(data)):", "np.sum(list) output.write('\\t{}\\t{}\\n'.format(sum,round(avg,2))) result.append([avg,sum]) # 월별 총합값 month.append(np.sum(month)) month.append(round(month[12]/12,2)) output.write('sum\\t\\t ') for i in", "# 월별 총합값 month.append(np.sum(month)) month.append(round(month[12]/12,2)) output.write('sum\\t\\t ') for i in range(len(month)): output.write('{}\\t'.format(month[i])) output.write('\\navg\\t", "data in file: output.write('{}'.format(data.strip())) if data.startswith('#'): output.write('\\n') continue data = data.split('\\t') for d", "= np.mean(list) sum = np.sum(list) output.write('\\t{}\\t{}\\n'.format(sum,round(avg,2))) result.append([avg,sum]) # 월별 총합값 month.append(np.sum(month)) month.append(round(month[12]/12,2)) output.write('sum\\t\\t", "= [0,0,0,0,0,0,0,0,0,0,0,0] with open('./output.txt','w') as output: with open('./input.txt','r') as file: data = file.readline()", "= data.split('\\t') for d in range(1,len(data)): split = re.findall('[^,]',data[d]) word = '' for", "import numpy as np import re result = [] month = [0,0,0,0,0,0,0,0,0,0,0,0] with", "re result = [] month = [0,0,0,0,0,0,0,0,0,0,0,0] with open('./output.txt','w') as output: with open('./input.txt','r')", "with open('./input.txt','r') as file: data = file.readline() for data in file: output.write('{}'.format(data.strip())) if", "= [] month = [0,0,0,0,0,0,0,0,0,0,0,0] with open('./output.txt','w') as output: with open('./input.txt','r') as file:", "output.write('{}'.format(data.strip())) if data.startswith('#'): output.write('\\n') continue data = data.split('\\t') for d in range(1,len(data)): split", "as np import re result = [] month = [0,0,0,0,0,0,0,0,0,0,0,0] with open('./output.txt','w') as", "as file: data = file.readline() for data in file: output.write('{}'.format(data.strip())) if data.startswith('#'): output.write('\\n')", "sum = np.sum(list) 
output.write('\\t{}\\t{}\\n'.format(sum,round(avg,2))) result.append([avg,sum]) # 월별 총합값 month.append(np.sum(month)) month.append(round(month[12]/12,2)) output.write('sum\\t\\t ') for", "value month[d-1] += value list = np.array(data[1:]) avg = np.mean(list) sum = np.sum(list)", "in range(1,len(data)): split = re.findall('[^,]',data[d]) word = '' for j in split: word", "in file: output.write('{}'.format(data.strip())) if data.startswith('#'): output.write('\\n') continue data = data.split('\\t') for d in", "result = [] month = [0,0,0,0,0,0,0,0,0,0,0,0] with open('./output.txt','w') as output: with open('./input.txt','r') as", "open('./output.txt','w') as output: with open('./input.txt','r') as file: data = file.readline() for data in", "re.findall('[^,]',data[d]) word = '' for j in split: word += j value =", "= '' for j in split: word += j value = int(word.strip()) data[d]", "file.readline() for data in file: output.write('{}'.format(data.strip())) if data.startswith('#'): output.write('\\n') continue data = data.split('\\t')", "j in split: word += j value = int(word.strip()) data[d] = value month[d-1]", "data = data.split('\\t') for d in range(1,len(data)): split = re.findall('[^,]',data[d]) word = ''", "numpy as np import re result = [] month = [0,0,0,0,0,0,0,0,0,0,0,0] with open('./output.txt','w')", "[] month = [0,0,0,0,0,0,0,0,0,0,0,0] with open('./output.txt','w') as output: with open('./input.txt','r') as file: data", "') for i in range(len(month)): output.write('{}\\t'.format(month[i])) output.write('\\navg\\t ') for i in range(len(month)-1): output.write('{}\\t'.format(round(month[i]/11,2)))", "+= j value = int(word.strip()) data[d] = value month[d-1] += value list =", "file: data = file.readline() for data in file: output.write('{}'.format(data.strip())) if data.startswith('#'): output.write('\\n') continue", "word += j value = int(word.strip()) data[d] = value month[d-1] += value list", "int(word.strip()) data[d] = value month[d-1] += value list = np.array(data[1:]) avg = np.mean(list)", "d in range(1,len(data)): split = re.findall('[^,]',data[d]) word = '' for j in split:", "range(1,len(data)): split = re.findall('[^,]',data[d]) word = '' for j in split: word +=", "data.startswith('#'): output.write('\\n') continue data = data.split('\\t') for d in range(1,len(data)): split = re.findall('[^,]',data[d])", "for d in range(1,len(data)): split = re.findall('[^,]',data[d]) word = '' for j in", "총합값 month.append(np.sum(month)) month.append(round(month[12]/12,2)) output.write('sum\\t\\t ') for i in range(len(month)): output.write('{}\\t'.format(month[i])) output.write('\\navg\\t ') for", "= re.findall('[^,]',data[d]) word = '' for j in split: word += j value", "list = np.array(data[1:]) avg = np.mean(list) sum = np.sum(list) output.write('\\t{}\\t{}\\n'.format(sum,round(avg,2))) result.append([avg,sum]) # 월별", "in split: word += j value = int(word.strip()) data[d] = value month[d-1] +=", "import re result = [] month = [0,0,0,0,0,0,0,0,0,0,0,0] with open('./output.txt','w') as output: with", "output: with open('./input.txt','r') as file: data = file.readline() for data in file: output.write('{}'.format(data.strip()))" ]
[ "unicode_literals def create_match(parent, children=None): if children is None: children = tuple() return (parent,", "is new, bigger than an existing match, and not encapsulated by an existing", "True ((start, end, kind, priority), _) = match for temp_match in matches: if", "== temp_end: if priority <= temp_priority: # More precise classification exists. valid =", "tuple(children)) def is_valid_match(match, matches, filtered): '''Returns true if the match is new, bigger", "def is_valid_match(match, matches, filtered): '''Returns true if the match is new, bigger than", "in an existing match. valid = False break return valid def find_parent_reference(current_kind, references,", "matches: if temp_match in filtered or match == temp_match: continue ((temp_start, temp_end, temp_kind,", "precise classification exists. valid = False break elif start >= temp_start and end", "for temp_match in matches: if temp_match in filtered or match == temp_match: continue", "<= temp_priority: # More precise classification exists. valid = False break elif start", "existing match. valid = False break return valid def find_parent_reference(current_kind, references, kinds_hierarchy): parent_kind", "Encapsulated in an existing match. valid = False break return valid def find_parent_reference(current_kind,", "= match for temp_match in matches: if temp_match in filtered or match ==", "valid = False break return valid def find_parent_reference(current_kind, references, kinds_hierarchy): parent_kind = kinds_hierarchy[current_kind]", "not encapsulated by an existing match.''' valid = True ((start, end, kind, priority),", "tuple() return (parent, tuple(children)) def is_valid_match(match, matches, filtered): '''Returns true if the match", "in matches: if temp_match in filtered or match == temp_match: continue ((temp_start, temp_end,", "False break return valid def find_parent_reference(current_kind, references, kinds_hierarchy): parent_kind = kinds_hierarchy[current_kind] for reference", "((start, end, kind, priority), _) = match for temp_match in matches: if temp_match", "None: children = tuple() return (parent, tuple(children)) def is_valid_match(match, matches, filtered): '''Returns true", "match is new, bigger than an existing match, and not encapsulated by an", "break elif start >= temp_start and end <= temp_end: # Encapsulated in an", "<= temp_end: # Encapsulated in an existing match. valid = False break return", "= tuple() return (parent, tuple(children)) def is_valid_match(match, matches, filtered): '''Returns true if the", "valid = False break elif start >= temp_start and end <= temp_end: #", "break return valid def find_parent_reference(current_kind, references, kinds_hierarchy): parent_kind = kinds_hierarchy[current_kind] for reference in", "end == temp_end: if priority <= temp_priority: # More precise classification exists. valid", "temp_priority), _) = temp_match if start == temp_start and end == temp_end: if", "temp_end: if priority <= temp_priority: # More precise classification exists. 
valid = False", "kinds_hierarchy): parent_kind = kinds_hierarchy[current_kind] for reference in reversed(references): if reference.kind_hint.kind == parent_kind: return", "temp_start and end == temp_end: if priority <= temp_priority: # More precise classification", "than an existing match, and not encapsulated by an existing match.''' valid =", "== temp_match: continue ((temp_start, temp_end, temp_kind, temp_priority), _) = temp_match if start ==", "continue ((temp_start, temp_end, temp_kind, temp_priority), _) = temp_match if start == temp_start and", "if start == temp_start and end == temp_end: if priority <= temp_priority: #", "references, kinds_hierarchy): parent_kind = kinds_hierarchy[current_kind] for reference in reversed(references): if reference.kind_hint.kind == parent_kind:", "bigger than an existing match, and not encapsulated by an existing match.''' valid", "== temp_start and end == temp_end: if priority <= temp_priority: # More precise", "the match is new, bigger than an existing match, and not encapsulated by", ">= temp_start and end <= temp_end: # Encapsulated in an existing match. valid", "temp_start and end <= temp_end: # Encapsulated in an existing match. valid =", "temp_match if start == temp_start and end == temp_end: if priority <= temp_priority:", "end <= temp_end: # Encapsulated in an existing match. valid = False break", "by an existing match.''' valid = True ((start, end, kind, priority), _) =", "parent_kind = kinds_hierarchy[current_kind] for reference in reversed(references): if reference.kind_hint.kind == parent_kind: return reference", "temp_match in filtered or match == temp_match: continue ((temp_start, temp_end, temp_kind, temp_priority), _)", "is_valid_match(match, matches, filtered): '''Returns true if the match is new, bigger than an", "start == temp_start and end == temp_end: if priority <= temp_priority: # More", "# More precise classification exists. valid = False break elif start >= temp_start", "if children is None: children = tuple() return (parent, tuple(children)) def is_valid_match(match, matches,", "temp_kind, temp_priority), _) = temp_match if start == temp_start and end == temp_end:", "'''Returns true if the match is new, bigger than an existing match, and", "(parent, tuple(children)) def is_valid_match(match, matches, filtered): '''Returns true if the match is new,", "children=None): if children is None: children = tuple() return (parent, tuple(children)) def is_valid_match(match,", "or match == temp_match: continue ((temp_start, temp_end, temp_kind, temp_priority), _) = temp_match if", "= temp_match if start == temp_start and end == temp_end: if priority <=", "match. valid = False break return valid def find_parent_reference(current_kind, references, kinds_hierarchy): parent_kind =", "encapsulated by an existing match.''' valid = True ((start, end, kind, priority), _)", "exists. 
valid = False break elif start >= temp_start and end <= temp_end:", "existing match.''' valid = True ((start, end, kind, priority), _) = match for", "children = tuple() return (parent, tuple(children)) def is_valid_match(match, matches, filtered): '''Returns true if", "temp_match: continue ((temp_start, temp_end, temp_kind, temp_priority), _) = temp_match if start == temp_start", "valid = True ((start, end, kind, priority), _) = match for temp_match in", "_) = temp_match if start == temp_start and end == temp_end: if priority", "start >= temp_start and end <= temp_end: # Encapsulated in an existing match.", "match.''' valid = True ((start, end, kind, priority), _) = match for temp_match", "False break elif start >= temp_start and end <= temp_end: # Encapsulated in", "temp_priority: # More precise classification exists. valid = False break elif start >=", "return valid def find_parent_reference(current_kind, references, kinds_hierarchy): parent_kind = kinds_hierarchy[current_kind] for reference in reversed(references):", "<filename>recodoc2/apps/codeutil/parser.py from __future__ import unicode_literals def create_match(parent, children=None): if children is None: children", "classification exists. valid = False break elif start >= temp_start and end <=", "from __future__ import unicode_literals def create_match(parent, children=None): if children is None: children =", "priority <= temp_priority: # More precise classification exists. valid = False break elif", "an existing match, and not encapsulated by an existing match.''' valid = True", "and end == temp_end: if priority <= temp_priority: # More precise classification exists.", "More precise classification exists. valid = False break elif start >= temp_start and", "= False break elif start >= temp_start and end <= temp_end: # Encapsulated", "an existing match.''' valid = True ((start, end, kind, priority), _) = match", "filtered or match == temp_match: continue ((temp_start, temp_end, temp_kind, temp_priority), _) = temp_match", "elif start >= temp_start and end <= temp_end: # Encapsulated in an existing", "valid def find_parent_reference(current_kind, references, kinds_hierarchy): parent_kind = kinds_hierarchy[current_kind] for reference in reversed(references): if", "if the match is new, bigger than an existing match, and not encapsulated", "matches, filtered): '''Returns true if the match is new, bigger than an existing", "return (parent, tuple(children)) def is_valid_match(match, matches, filtered): '''Returns true if the match is", "_) = match for temp_match in matches: if temp_match in filtered or match", "create_match(parent, children=None): if children is None: children = tuple() return (parent, tuple(children)) def", "find_parent_reference(current_kind, references, kinds_hierarchy): parent_kind = kinds_hierarchy[current_kind] for reference in reversed(references): if reference.kind_hint.kind ==", "# Encapsulated in an existing match. 
valid = False break return valid def", "__future__ import unicode_literals def create_match(parent, children=None): if children is None: children = tuple()", "end, kind, priority), _) = match for temp_match in matches: if temp_match in", "temp_match in matches: if temp_match in filtered or match == temp_match: continue ((temp_start,", "if temp_match in filtered or match == temp_match: continue ((temp_start, temp_end, temp_kind, temp_priority),", "is None: children = tuple() return (parent, tuple(children)) def is_valid_match(match, matches, filtered): '''Returns", "match, and not encapsulated by an existing match.''' valid = True ((start, end,", "children is None: children = tuple() return (parent, tuple(children)) def is_valid_match(match, matches, filtered):", "existing match, and not encapsulated by an existing match.''' valid = True ((start,", "= True ((start, end, kind, priority), _) = match for temp_match in matches:", "temp_end, temp_kind, temp_priority), _) = temp_match if start == temp_start and end ==", "kind, priority), _) = match for temp_match in matches: if temp_match in filtered", "match for temp_match in matches: if temp_match in filtered or match == temp_match:", "new, bigger than an existing match, and not encapsulated by an existing match.'''", "match == temp_match: continue ((temp_start, temp_end, temp_kind, temp_priority), _) = temp_match if start", "= False break return valid def find_parent_reference(current_kind, references, kinds_hierarchy): parent_kind = kinds_hierarchy[current_kind] for", "import unicode_literals def create_match(parent, children=None): if children is None: children = tuple() return", "and not encapsulated by an existing match.''' valid = True ((start, end, kind,", "true if the match is new, bigger than an existing match, and not", "filtered): '''Returns true if the match is new, bigger than an existing match,", "temp_end: # Encapsulated in an existing match. valid = False break return valid", "def find_parent_reference(current_kind, references, kinds_hierarchy): parent_kind = kinds_hierarchy[current_kind] for reference in reversed(references): if reference.kind_hint.kind", "and end <= temp_end: # Encapsulated in an existing match. valid = False", "if priority <= temp_priority: # More precise classification exists. valid = False break", "priority), _) = match for temp_match in matches: if temp_match in filtered or", "an existing match. valid = False break return valid def find_parent_reference(current_kind, references, kinds_hierarchy):", "def create_match(parent, children=None): if children is None: children = tuple() return (parent, tuple(children))", "((temp_start, temp_end, temp_kind, temp_priority), _) = temp_match if start == temp_start and end", "in filtered or match == temp_match: continue ((temp_start, temp_end, temp_kind, temp_priority), _) =" ]
[ "AlertArray # noqa: E501 from swagger_server.models.updatealert import Updatealert # noqa: E501 from swagger_server", "\"\"\" return 'do some magic!' def alert_get(alert_id=None): # noqa: E501 \"\"\"obtain alert list", "noqa: E501 :param body: :type body: list | bytes :rtype: None \"\"\" if", "the alerts list # noqa: E501 :param body: :type body: dict | bytes", "E501 \"\"\"delete alert takes the alert id as feed to remove the alert", "connexion.request.is_json: body = [Alert.from_dict(d) for d in connexion.request.get_json()] # noqa: E501 return 'do", "to remove the alert from the alert list # noqa: E501 :param alert_id:", "removed :type alert_id: str :rtype: None \"\"\" return 'do some magic!' def alert_get(alert_id=None):", "alerts in the alerts list # noqa: E501 :param body: :type body: dict", "alert_id: id of the alert need to be removed :type alert_id: str :rtype:", "swagger_server.models.alert import Alert # noqa: E501 from swagger_server.models.alert_array import AlertArray # noqa: E501", "alert list # noqa: E501 :param alert_id: id of the alert need to", ":param body: :type body: list | bytes :rtype: None \"\"\" if connexion.request.is_json: body", "some magic!' def alert_put(body): # noqa: E501 \"\"\"update the alerts updates the alerts", "swagger_server.models.updatealert import Updatealert # noqa: E501 from swagger_server import util def alert_delete(alert_id): #", "\"\"\"add alerts Adds the alerts into the list # noqa: E501 :param body:", "noqa: E501 :param alert_id: identifier for the alert :type alert_id: str :rtype: AlertArray", "| bytes :rtype: None \"\"\" if connexion.request.is_json: body = [Alert.from_dict(d) for d in", "body = [Alert.from_dict(d) for d in connexion.request.get_json()] # noqa: E501 return 'do some", ":rtype: AlertArray \"\"\" return 'do some magic!' def alert_post(body): # noqa: E501 \"\"\"add", "the list # noqa: E501 :param body: :type body: list | bytes :rtype:", "str :rtype: AlertArray \"\"\" return 'do some magic!' def alert_post(body): # noqa: E501", "| bytes :rtype: None \"\"\" if connexion.request.is_json: body = Updatealert.from_dict(connexion.request.get_json()) # noqa: E501", "# noqa: E501 return 'do some magic!' def alert_put(body): # noqa: E501 \"\"\"update", "str :rtype: None \"\"\" return 'do some magic!' def alert_get(alert_id=None): # noqa: E501", "swagger_server.models.alert_array import AlertArray # noqa: E501 from swagger_server.models.updatealert import Updatealert # noqa: E501", "magic!' def alert_put(body): # noqa: E501 \"\"\"update the alerts updates the alerts in", "the alerts updates the alerts in the alerts list # noqa: E501 :param", "[Alert.from_dict(d) for d in connexion.request.get_json()] # noqa: E501 return 'do some magic!' def", "need to be removed :type alert_id: str :rtype: None \"\"\" return 'do some", "noqa: E501 :param body: :type body: dict | bytes :rtype: None \"\"\" if", "E501 return 'do some magic!' def alert_put(body): # noqa: E501 \"\"\"update the alerts", "some magic!' 
def alert_post(body): # noqa: E501 \"\"\"add alerts Adds the alerts into", "alerts updates the alerts in the alerts list # noqa: E501 :param body:", "Alert # noqa: E501 from swagger_server.models.alert_array import AlertArray # noqa: E501 from swagger_server.models.updatealert", "alerts Adds the alerts into the list # noqa: E501 :param body: :type", ":rtype: None \"\"\" if connexion.request.is_json: body = [Alert.from_dict(d) for d in connexion.request.get_json()] #", "# noqa: E501 from swagger_server.models.updatealert import Updatealert # noqa: E501 from swagger_server import", "'do some magic!' def alert_post(body): # noqa: E501 \"\"\"add alerts Adds the alerts", "import Updatealert # noqa: E501 from swagger_server import util def alert_delete(alert_id): # noqa:", "swagger_server import util def alert_delete(alert_id): # noqa: E501 \"\"\"delete alert takes the alert", "list | bytes :rtype: None \"\"\" if connexion.request.is_json: body = [Alert.from_dict(d) for d", "as feed to remove the alert from the alert list # noqa: E501", ":param alert_id: id of the alert need to be removed :type alert_id: str", "\"\"\" if connexion.request.is_json: body = Updatealert.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!'", "alerts list # noqa: E501 :param body: :type body: dict | bytes :rtype:", "noqa: E501 from swagger_server.models.alert_array import AlertArray # noqa: E501 from swagger_server.models.updatealert import Updatealert", "takes the alert id as feed to remove the alert from the alert", "E501 from swagger_server import util def alert_delete(alert_id): # noqa: E501 \"\"\"delete alert takes", "from the alert list # noqa: E501 :param alert_id: id of the alert", "E501 :param body: :type body: list | bytes :rtype: None \"\"\" if connexion.request.is_json:", "list # noqa: E501 :param body: :type body: dict | bytes :rtype: None", "magic!' def alert_get(alert_id=None): # noqa: E501 \"\"\"obtain alert list get method to obtain", "to be removed :type alert_id: str :rtype: None \"\"\" return 'do some magic!'", "'do some magic!' def alert_get(alert_id=None): # noqa: E501 \"\"\"obtain alert list get method", "\"\"\" if connexion.request.is_json: body = [Alert.from_dict(d) for d in connexion.request.get_json()] # noqa: E501", "dict | bytes :rtype: None \"\"\" if connexion.request.is_json: body = Updatealert.from_dict(connexion.request.get_json()) # noqa:", "E501 :param body: :type body: dict | bytes :rtype: None \"\"\" if connexion.request.is_json:", "id as feed to remove the alert from the alert list # noqa:", "def alert_get(alert_id=None): # noqa: E501 \"\"\"obtain alert list get method to obtain all", "alert takes the alert id as feed to remove the alert from the", "body: list | bytes :rtype: None \"\"\" if connexion.request.is_json: body = [Alert.from_dict(d) for", "bytes :rtype: None \"\"\" if connexion.request.is_json: body = [Alert.from_dict(d) for d in connexion.request.get_json()]", "some magic!' def alert_get(alert_id=None): # noqa: E501 \"\"\"obtain alert list get method to", "noqa: E501 from swagger_server import util def alert_delete(alert_id): # noqa: E501 \"\"\"delete alert", "for the alert :type alert_id: str :rtype: AlertArray \"\"\" return 'do some magic!'", ":type alert_id: str :rtype: None \"\"\" return 'do some magic!' def alert_get(alert_id=None): #", "alert :type alert_id: str :rtype: AlertArray \"\"\" return 'do some magic!' 
def alert_post(body):", "body: :type body: dict | bytes :rtype: None \"\"\" if connexion.request.is_json: body =", "the alert list # noqa: E501 :param alert_id: id of the alert need", "import util def alert_delete(alert_id): # noqa: E501 \"\"\"delete alert takes the alert id", "AlertArray \"\"\" return 'do some magic!' def alert_post(body): # noqa: E501 \"\"\"add alerts", "alert_put(body): # noqa: E501 \"\"\"update the alerts updates the alerts in the alerts", "the alert need to be removed :type alert_id: str :rtype: None \"\"\" return", "the alerts into the list # noqa: E501 :param body: :type body: list", "# noqa: E501 from swagger_server import util def alert_delete(alert_id): # noqa: E501 \"\"\"delete", "import AlertArray # noqa: E501 from swagger_server.models.updatealert import Updatealert # noqa: E501 from", "noqa: E501 return 'do some magic!' def alert_put(body): # noqa: E501 \"\"\"update the", "# noqa: E501 :param body: :type body: list | bytes :rtype: None \"\"\"", "util def alert_delete(alert_id): # noqa: E501 \"\"\"delete alert takes the alert id as", "all the alerts # noqa: E501 :param alert_id: identifier for the alert :type", "bytes :rtype: None \"\"\" if connexion.request.is_json: body = Updatealert.from_dict(connexion.request.get_json()) # noqa: E501 return", "id of the alert need to be removed :type alert_id: str :rtype: None", "list # noqa: E501 :param body: :type body: list | bytes :rtype: None", ":type body: dict | bytes :rtype: None \"\"\" if connexion.request.is_json: body = Updatealert.from_dict(connexion.request.get_json())", "# noqa: E501 \"\"\"delete alert takes the alert id as feed to remove", "updates the alerts in the alerts list # noqa: E501 :param body: :type", "None \"\"\" if connexion.request.is_json: body = Updatealert.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some", "E501 :param alert_id: identifier for the alert :type alert_id: str :rtype: AlertArray \"\"\"", "noqa: E501 from swagger_server.models.updatealert import Updatealert # noqa: E501 from swagger_server import util", "E501 from swagger_server.models.updatealert import Updatealert # noqa: E501 from swagger_server import util def", "\"\"\"obtain alert list get method to obtain all the alerts # noqa: E501", "\"\"\" return 'do some magic!' def alert_post(body): # noqa: E501 \"\"\"add alerts Adds", "return 'do some magic!' def alert_put(body): # noqa: E501 \"\"\"update the alerts updates", "alerts into the list # noqa: E501 :param body: :type body: list |", "return 'do some magic!' def alert_post(body): # noqa: E501 \"\"\"add alerts Adds the", "Adds the alerts into the list # noqa: E501 :param body: :type body:", "# noqa: E501 from swagger_server.models.alert_array import AlertArray # noqa: E501 from swagger_server.models.updatealert import", "alerts # noqa: E501 :param alert_id: identifier for the alert :type alert_id: str", "obtain all the alerts # noqa: E501 :param alert_id: identifier for the alert", "be removed :type alert_id: str :rtype: None \"\"\" return 'do some magic!' def", "get method to obtain all the alerts # noqa: E501 :param alert_id: identifier", "into the list # noqa: E501 :param body: :type body: list | bytes", "in connexion.request.get_json()] # noqa: E501 return 'do some magic!' 
def alert_put(body): # noqa:", "\"\"\"delete alert takes the alert id as feed to remove the alert from", "E501 \"\"\"obtain alert list get method to obtain all the alerts # noqa:", "identifier for the alert :type alert_id: str :rtype: AlertArray \"\"\" return 'do some", "noqa: E501 \"\"\"add alerts Adds the alerts into the list # noqa: E501", "alert id as feed to remove the alert from the alert list #", "E501 :param alert_id: id of the alert need to be removed :type alert_id:", "d in connexion.request.get_json()] # noqa: E501 return 'do some magic!' def alert_put(body): #", "method to obtain all the alerts # noqa: E501 :param alert_id: identifier for", ":rtype: None \"\"\" if connexion.request.is_json: body = Updatealert.from_dict(connexion.request.get_json()) # noqa: E501 return 'do", "import six from swagger_server.models.alert import Alert # noqa: E501 from swagger_server.models.alert_array import AlertArray", "list get method to obtain all the alerts # noqa: E501 :param alert_id:", "alert_id: str :rtype: None \"\"\" return 'do some magic!' def alert_get(alert_id=None): # noqa:", "the alerts # noqa: E501 :param alert_id: identifier for the alert :type alert_id:", "# noqa: E501 \"\"\"add alerts Adds the alerts into the list # noqa:", "= [Alert.from_dict(d) for d in connexion.request.get_json()] # noqa: E501 return 'do some magic!'", "# noqa: E501 \"\"\"update the alerts updates the alerts in the alerts list", "from swagger_server.models.alert import Alert # noqa: E501 from swagger_server.models.alert_array import AlertArray # noqa:", "if connexion.request.is_json: body = [Alert.from_dict(d) for d in connexion.request.get_json()] # noqa: E501 return", "from swagger_server.models.alert_array import AlertArray # noqa: E501 from swagger_server.models.updatealert import Updatealert # noqa:", "'do some magic!' def alert_put(body): # noqa: E501 \"\"\"update the alerts updates the", "alert_delete(alert_id): # noqa: E501 \"\"\"delete alert takes the alert id as feed to", "<reponame>Surya2709/FlaskSwaggerDemo<filename>swagger_server/controllers/default_controller.py import connexion import six from swagger_server.models.alert import Alert # noqa: E501 from", "# noqa: E501 :param alert_id: identifier for the alert :type alert_id: str :rtype:", "the alerts in the alerts list # noqa: E501 :param body: :type body:", "list # noqa: E501 :param alert_id: id of the alert need to be", "noqa: E501 \"\"\"delete alert takes the alert id as feed to remove the", "alert list get method to obtain all the alerts # noqa: E501 :param", "to obtain all the alerts # noqa: E501 :param alert_id: identifier for the", "body: dict | bytes :rtype: None \"\"\" if connexion.request.is_json: body = Updatealert.from_dict(connexion.request.get_json()) #", "def alert_delete(alert_id): # noqa: E501 \"\"\"delete alert takes the alert id as feed", "connexion import six from swagger_server.models.alert import Alert # noqa: E501 from swagger_server.models.alert_array import", "of the alert need to be removed :type alert_id: str :rtype: None \"\"\"", "for d in connexion.request.get_json()] # noqa: E501 return 'do some magic!' def alert_put(body):", "connexion.request.get_json()] # noqa: E501 return 'do some magic!' def alert_put(body): # noqa: E501", "magic!' 
def alert_post(body): # noqa: E501 \"\"\"add alerts Adds the alerts into the", "alert_get(alert_id=None): # noqa: E501 \"\"\"obtain alert list get method to obtain all the", "from swagger_server.models.updatealert import Updatealert # noqa: E501 from swagger_server import util def alert_delete(alert_id):", "feed to remove the alert from the alert list # noqa: E501 :param", "the alert id as feed to remove the alert from the alert list", "def alert_post(body): # noqa: E501 \"\"\"add alerts Adds the alerts into the list", "\"\"\"update the alerts updates the alerts in the alerts list # noqa: E501", "six from swagger_server.models.alert import Alert # noqa: E501 from swagger_server.models.alert_array import AlertArray #", "alert_post(body): # noqa: E501 \"\"\"add alerts Adds the alerts into the list #", "E501 from swagger_server.models.alert_array import AlertArray # noqa: E501 from swagger_server.models.updatealert import Updatealert #", "# noqa: E501 :param alert_id: id of the alert need to be removed", "return 'do some magic!' def alert_get(alert_id=None): # noqa: E501 \"\"\"obtain alert list get", "noqa: E501 \"\"\"obtain alert list get method to obtain all the alerts #", ":type alert_id: str :rtype: AlertArray \"\"\" return 'do some magic!' def alert_post(body): #", "# noqa: E501 :param body: :type body: dict | bytes :rtype: None \"\"\"", ":param body: :type body: dict | bytes :rtype: None \"\"\" if connexion.request.is_json: body", "# noqa: E501 \"\"\"obtain alert list get method to obtain all the alerts", "alert_id: identifier for the alert :type alert_id: str :rtype: AlertArray \"\"\" return 'do", "in the alerts list # noqa: E501 :param body: :type body: dict |", "from swagger_server import util def alert_delete(alert_id): # noqa: E501 \"\"\"delete alert takes the", "E501 \"\"\"add alerts Adds the alerts into the list # noqa: E501 :param", "noqa: E501 \"\"\"update the alerts updates the alerts in the alerts list #", ":type body: list | bytes :rtype: None \"\"\" if connexion.request.is_json: body = [Alert.from_dict(d)", "E501 \"\"\"update the alerts updates the alerts in the alerts list # noqa:", "the alert from the alert list # noqa: E501 :param alert_id: id of", "None \"\"\" if connexion.request.is_json: body = [Alert.from_dict(d) for d in connexion.request.get_json()] # noqa:", "def alert_put(body): # noqa: E501 \"\"\"update the alerts updates the alerts in the", "import connexion import six from swagger_server.models.alert import Alert # noqa: E501 from swagger_server.models.alert_array", "alert_id: str :rtype: AlertArray \"\"\" return 'do some magic!' def alert_post(body): # noqa:", "import Alert # noqa: E501 from swagger_server.models.alert_array import AlertArray # noqa: E501 from", ":rtype: None \"\"\" return 'do some magic!' def alert_get(alert_id=None): # noqa: E501 \"\"\"obtain", "remove the alert from the alert list # noqa: E501 :param alert_id: id", "body: :type body: list | bytes :rtype: None \"\"\" if connexion.request.is_json: body =", ":param alert_id: identifier for the alert :type alert_id: str :rtype: AlertArray \"\"\" return", "the alert :type alert_id: str :rtype: AlertArray \"\"\" return 'do some magic!' def", "noqa: E501 :param alert_id: id of the alert need to be removed :type", "alert need to be removed :type alert_id: str :rtype: None \"\"\" return 'do", "Updatealert # noqa: E501 from swagger_server import util def alert_delete(alert_id): # noqa: E501", "None \"\"\" return 'do some magic!' 
def alert_get(alert_id=None): # noqa: E501 \"\"\"obtain alert", "alert from the alert list # noqa: E501 :param alert_id: id of the" ]
[ "< 0.5) & (gaps > 0.1)] blinks_per_hour_of_day = group_by_hour_of_day(blinks).count() seconds_recorded_per_hour_of_day = ( group_by_hour_of_day(gaps).count()", "group_by_hour_of_day(gaps).count() / 60 # Divide by Frequency ) return blinks_per_hour_of_day / seconds_recorded_per_hour_of_day *", "distance_to_monitor(df): dist = np.sqrt( df.left_gaze_origin_in_user_coordinate_system_x ** 2 + df.left_gaze_origin_in_user_coordinate_system_y ** 2 + df.left_gaze_origin_in_user_coordinate_system_z", "= np.sqrt( df.left_gaze_origin_in_user_coordinate_system_x ** 2 + df.left_gaze_origin_in_user_coordinate_system_y ** 2 + df.left_gaze_origin_in_user_coordinate_system_z ** 2", "np.sqrt( df.left_gaze_origin_in_user_coordinate_system_x ** 2 + df.left_gaze_origin_in_user_coordinate_system_y ** 2 + df.left_gaze_origin_in_user_coordinate_system_z ** 2 )", "def group_by_hour_of_day(series): return series.groupby(series.index.to_series().dt.hour) def blinks_per_minute_by_hour_of_day(df): gaps = time_between_values( df.set_index('time'), ['left_pupil_diameter', 'right_pupil_diameter']) blinks", "= df.time return dist def group_by_hour_of_day(series): return series.groupby(series.index.to_series().dt.hour) def blinks_per_minute_by_hour_of_day(df): gaps = time_between_values(", "def distance_to_monitor(df): dist = np.sqrt( df.left_gaze_origin_in_user_coordinate_system_x ** 2 + df.left_gaze_origin_in_user_coordinate_system_y ** 2 +", "gap_df = df[cols].dropna(how='any') return gap_df.index.to_series().diff(-1).dt.total_seconds().abs() def distance_to_monitor(df): dist = np.sqrt( df.left_gaze_origin_in_user_coordinate_system_x ** 2", "def blinks_per_minute_by_hour_of_day(df): gaps = time_between_values( df.set_index('time'), ['left_pupil_diameter', 'right_pupil_diameter']) blinks = gaps[(gaps < 0.5)", "'right_pupil_diameter']) blinks = gaps[(gaps < 0.5) & (gaps > 0.1)] blinks_per_hour_of_day = group_by_hour_of_day(blinks).count()", ") dist.index = df.time return dist def group_by_hour_of_day(series): return series.groupby(series.index.to_series().dt.hour) def blinks_per_minute_by_hour_of_day(df): gaps", "as np def time_between_values(df, cols): gap_df = df[cols].dropna(how='any') return gap_df.index.to_series().diff(-1).dt.total_seconds().abs() def distance_to_monitor(df): dist", "= ( group_by_hour_of_day(gaps).count() / 60 # Divide by Frequency ) return blinks_per_hour_of_day /", "/ 60 # Divide by Frequency ) return blinks_per_hour_of_day / seconds_recorded_per_hour_of_day * 60", "return gap_df.index.to_series().diff(-1).dt.total_seconds().abs() def distance_to_monitor(df): dist = np.sqrt( df.left_gaze_origin_in_user_coordinate_system_x ** 2 + df.left_gaze_origin_in_user_coordinate_system_y **", "> 0.1)] blinks_per_hour_of_day = group_by_hour_of_day(blinks).count() seconds_recorded_per_hour_of_day = ( group_by_hour_of_day(gaps).count() / 60 # Divide", "= time_between_values( df.set_index('time'), ['left_pupil_diameter', 'right_pupil_diameter']) blinks = gaps[(gaps < 0.5) & (gaps >", "blinks_per_hour_of_day = group_by_hour_of_day(blinks).count() seconds_recorded_per_hour_of_day = ( group_by_hour_of_day(gaps).count() / 60 # Divide by Frequency", "( group_by_hour_of_day(gaps).count() / 60 # Divide by Frequency ) return blinks_per_hour_of_day / seconds_recorded_per_hour_of_day", "= df[cols].dropna(how='any') return gap_df.index.to_series().diff(-1).dt.total_seconds().abs() def distance_to_monitor(df): dist = np.sqrt( df.left_gaze_origin_in_user_coordinate_system_x ** 2 +", "df[cols].dropna(how='any') return 
gap_df.index.to_series().diff(-1).dt.total_seconds().abs() def distance_to_monitor(df): dist = np.sqrt( df.left_gaze_origin_in_user_coordinate_system_x ** 2 + df.left_gaze_origin_in_user_coordinate_system_y", "blinks_per_minute_by_hour_of_day(df): gaps = time_between_values( df.set_index('time'), ['left_pupil_diameter', 'right_pupil_diameter']) blinks = gaps[(gaps < 0.5) &", "0.5) & (gaps > 0.1)] blinks_per_hour_of_day = group_by_hour_of_day(blinks).count() seconds_recorded_per_hour_of_day = ( group_by_hour_of_day(gaps).count() /", "np def time_between_values(df, cols): gap_df = df[cols].dropna(how='any') return gap_df.index.to_series().diff(-1).dt.total_seconds().abs() def distance_to_monitor(df): dist =", "['left_pupil_diameter', 'right_pupil_diameter']) blinks = gaps[(gaps < 0.5) & (gaps > 0.1)] blinks_per_hour_of_day =", "2 ) dist.index = df.time return dist def group_by_hour_of_day(series): return series.groupby(series.index.to_series().dt.hour) def blinks_per_minute_by_hour_of_day(df):", "def time_between_values(df, cols): gap_df = df[cols].dropna(how='any') return gap_df.index.to_series().diff(-1).dt.total_seconds().abs() def distance_to_monitor(df): dist = np.sqrt(", "** 2 + df.left_gaze_origin_in_user_coordinate_system_y ** 2 + df.left_gaze_origin_in_user_coordinate_system_z ** 2 ) dist.index =", "gaps = time_between_values( df.set_index('time'), ['left_pupil_diameter', 'right_pupil_diameter']) blinks = gaps[(gaps < 0.5) & (gaps", "blinks = gaps[(gaps < 0.5) & (gaps > 0.1)] blinks_per_hour_of_day = group_by_hour_of_day(blinks).count() seconds_recorded_per_hour_of_day", "group_by_hour_of_day(series): return series.groupby(series.index.to_series().dt.hour) def blinks_per_minute_by_hour_of_day(df): gaps = time_between_values( df.set_index('time'), ['left_pupil_diameter', 'right_pupil_diameter']) blinks =", "** 2 ) dist.index = df.time return dist def group_by_hour_of_day(series): return series.groupby(series.index.to_series().dt.hour) def", "(gaps > 0.1)] blinks_per_hour_of_day = group_by_hour_of_day(blinks).count() seconds_recorded_per_hour_of_day = ( group_by_hour_of_day(gaps).count() / 60 #", "** 2 + df.left_gaze_origin_in_user_coordinate_system_z ** 2 ) dist.index = df.time return dist def", "dist = np.sqrt( df.left_gaze_origin_in_user_coordinate_system_x ** 2 + df.left_gaze_origin_in_user_coordinate_system_y ** 2 + df.left_gaze_origin_in_user_coordinate_system_z **", "df.left_gaze_origin_in_user_coordinate_system_z ** 2 ) dist.index = df.time return dist def group_by_hour_of_day(series): return series.groupby(series.index.to_series().dt.hour)", "group_by_hour_of_day(blinks).count() seconds_recorded_per_hour_of_day = ( group_by_hour_of_day(gaps).count() / 60 # Divide by Frequency ) return", "numpy as np def time_between_values(df, cols): gap_df = df[cols].dropna(how='any') return gap_df.index.to_series().diff(-1).dt.total_seconds().abs() def distance_to_monitor(df):", "df.left_gaze_origin_in_user_coordinate_system_y ** 2 + df.left_gaze_origin_in_user_coordinate_system_z ** 2 ) dist.index = df.time return dist", "2 + df.left_gaze_origin_in_user_coordinate_system_y ** 2 + df.left_gaze_origin_in_user_coordinate_system_z ** 2 ) dist.index = df.time", "df.set_index('time'), ['left_pupil_diameter', 'right_pupil_diameter']) blinks = gaps[(gaps < 0.5) & (gaps > 0.1)] blinks_per_hour_of_day", "+ df.left_gaze_origin_in_user_coordinate_system_y ** 2 + df.left_gaze_origin_in_user_coordinate_system_z ** 2 ) dist.index = df.time return", "= gaps[(gaps < 0.5) & (gaps > 0.1)] 
blinks_per_hour_of_day = group_by_hour_of_day(blinks).count() seconds_recorded_per_hour_of_day =", "+ df.left_gaze_origin_in_user_coordinate_system_z ** 2 ) dist.index = df.time return dist def group_by_hour_of_day(series): return", "time_between_values(df, cols): gap_df = df[cols].dropna(how='any') return gap_df.index.to_series().diff(-1).dt.total_seconds().abs() def distance_to_monitor(df): dist = np.sqrt( df.left_gaze_origin_in_user_coordinate_system_x", "import numpy as np def time_between_values(df, cols): gap_df = df[cols].dropna(how='any') return gap_df.index.to_series().diff(-1).dt.total_seconds().abs() def", "gaps[(gaps < 0.5) & (gaps > 0.1)] blinks_per_hour_of_day = group_by_hour_of_day(blinks).count() seconds_recorded_per_hour_of_day = (", "dist.index = df.time return dist def group_by_hour_of_day(series): return series.groupby(series.index.to_series().dt.hour) def blinks_per_minute_by_hour_of_day(df): gaps =", "df.left_gaze_origin_in_user_coordinate_system_x ** 2 + df.left_gaze_origin_in_user_coordinate_system_y ** 2 + df.left_gaze_origin_in_user_coordinate_system_z ** 2 ) dist.index", "cols): gap_df = df[cols].dropna(how='any') return gap_df.index.to_series().diff(-1).dt.total_seconds().abs() def distance_to_monitor(df): dist = np.sqrt( df.left_gaze_origin_in_user_coordinate_system_x **", "return series.groupby(series.index.to_series().dt.hour) def blinks_per_minute_by_hour_of_day(df): gaps = time_between_values( df.set_index('time'), ['left_pupil_diameter', 'right_pupil_diameter']) blinks = gaps[(gaps", "= group_by_hour_of_day(blinks).count() seconds_recorded_per_hour_of_day = ( group_by_hour_of_day(gaps).count() / 60 # Divide by Frequency )", "seconds_recorded_per_hour_of_day = ( group_by_hour_of_day(gaps).count() / 60 # Divide by Frequency ) return blinks_per_hour_of_day", "return dist def group_by_hour_of_day(series): return series.groupby(series.index.to_series().dt.hour) def blinks_per_minute_by_hour_of_day(df): gaps = time_between_values( df.set_index('time'), ['left_pupil_diameter',", "0.1)] blinks_per_hour_of_day = group_by_hour_of_day(blinks).count() seconds_recorded_per_hour_of_day = ( group_by_hour_of_day(gaps).count() / 60 # Divide by", "dist def group_by_hour_of_day(series): return series.groupby(series.index.to_series().dt.hour) def blinks_per_minute_by_hour_of_day(df): gaps = time_between_values( df.set_index('time'), ['left_pupil_diameter', 'right_pupil_diameter'])", "& (gaps > 0.1)] blinks_per_hour_of_day = group_by_hour_of_day(blinks).count() seconds_recorded_per_hour_of_day = ( group_by_hour_of_day(gaps).count() / 60", "2 + df.left_gaze_origin_in_user_coordinate_system_z ** 2 ) dist.index = df.time return dist def group_by_hour_of_day(series):", "series.groupby(series.index.to_series().dt.hour) def blinks_per_minute_by_hour_of_day(df): gaps = time_between_values( df.set_index('time'), ['left_pupil_diameter', 'right_pupil_diameter']) blinks = gaps[(gaps <", "df.time return dist def group_by_hour_of_day(series): return series.groupby(series.index.to_series().dt.hour) def blinks_per_minute_by_hour_of_day(df): gaps = time_between_values( df.set_index('time'),", "time_between_values( df.set_index('time'), ['left_pupil_diameter', 'right_pupil_diameter']) blinks = gaps[(gaps < 0.5) & (gaps > 0.1)]", "gap_df.index.to_series().diff(-1).dt.total_seconds().abs() def distance_to_monitor(df): dist = np.sqrt( df.left_gaze_origin_in_user_coordinate_system_x ** 2 + df.left_gaze_origin_in_user_coordinate_system_y ** 2" ]
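# A minimal usage sketch, not part of the original module: it assumes the
# recording is a CSV export (the 'recording.csv' path is hypothetical) with a
# datetime 'time' column plus the pupil-diameter and gaze-origin columns used
# in the functions above.
if __name__ == '__main__':
    import pandas as pd

    df = pd.read_csv('recording.csv', parse_dates=['time'])
    # Blink rate per minute, bucketed by hour of the day.
    print(blinks_per_minute_by_hour_of_day(df))
    # Mean distance from the tracker, in the gaze-origin coordinate units.
    print(distance_to_monitor(df).mean())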
[ "get_info(self): \"\"\" get_info A sum up of all important characteristics of a class.", "\"\"\" get_info A sum up of all important characteristics of a class. The", "follows: ClassName: attribute_one: value_one - attribute_two: value_two \\ - info_one: info_one_value Returns -------", "- attribute_two: value_two \\ - info_one: info_one_value Returns ------- A string with the", "of the return string is as follows: ClassName: attribute_one: value_one - attribute_two: value_two", "value_two \\ - info_one: info_one_value Returns ------- A string with the class' relevant", "import ABCMeta, abstractmethod class BaseObject(metaclass=ABCMeta): \"\"\" BaseObject The most basic object, from which", "this base class. \"\"\" @abstractmethod def get_class_type(self): \"\"\" get_class_type The class type is", "described in this base class. \"\"\" @abstractmethod def get_class_type(self): \"\"\" get_class_type The class", "of object generated by that module. Returns ------- The class type \"\"\" raise", "def get_class_type(self): \"\"\" get_class_type The class type is a string that identifies the", "@abstractmethod def get_class_type(self): \"\"\" get_class_type The class type is a string that identifies", "a string that identifies the type of object generated by that module. Returns", "The most basic object, from which target_values in scikit-multiflow derive from. It guarantees", "Returns ------- The class type \"\"\" raise NotImplementedError @abstractmethod def get_info(self): \"\"\" get_info", "have at least the two basic functions described in this base class. \"\"\"", "the return string is as follows: ClassName: attribute_one: value_one - attribute_two: value_two \\", "The class type is a string that identifies the type of object generated", "important characteristics of a class. The default format of the return string is", "in scikit-multiflow derive from. It guarantees that all target_values have at least the", "of all important characteristics of a class. The default format of the return", "string is as follows: ClassName: attribute_one: value_one - attribute_two: value_two \\ - info_one:", "all target_values have at least the two basic functions described in this base", "value_one - attribute_two: value_two \\ - info_one: info_one_value Returns ------- A string with", "the type of object generated by that module. Returns ------- The class type", "type of object generated by that module. Returns ------- The class type \"\"\"", "\"\"\" raise NotImplementedError @abstractmethod def get_info(self): \"\"\" get_info A sum up of all", "of a class. The default format of the return string is as follows:", "ClassName: attribute_one: value_one - attribute_two: value_two \\ - info_one: info_one_value Returns ------- A", "is as follows: ClassName: attribute_one: value_one - attribute_two: value_two \\ - info_one: info_one_value", "\"\"\" BaseObject The most basic object, from which target_values in scikit-multiflow derive from.", "characteristics of a class. The default format of the return string is as", "the two basic functions described in this base class. \"\"\" @abstractmethod def get_class_type(self):", "------- The class type \"\"\" raise NotImplementedError @abstractmethod def get_info(self): \"\"\" get_info A", "derive from. It guarantees that all target_values have at least the two basic", "sum up of all important characteristics of a class. 
The default format of", "return string is as follows: ClassName: attribute_one: value_one - attribute_two: value_two \\ -", "all important characteristics of a class. The default format of the return string", "attribute_one: value_one - attribute_two: value_two \\ - info_one: info_one_value Returns ------- A string", "The class type \"\"\" raise NotImplementedError @abstractmethod def get_info(self): \"\"\" get_info A sum", "in this base class. \"\"\" @abstractmethod def get_class_type(self): \"\"\" get_class_type The class type", "which target_values in scikit-multiflow derive from. It guarantees that all target_values have at", "scikit-multiflow derive from. It guarantees that all target_values have at least the two", "target_values have at least the two basic functions described in this base class.", "basic functions described in this base class. \"\"\" @abstractmethod def get_class_type(self): \"\"\" get_class_type", "@abstractmethod def get_info(self): \"\"\" get_info A sum up of all important characteristics of", "up of all important characteristics of a class. The default format of the", "as follows: ClassName: attribute_one: value_one - attribute_two: value_two \\ - info_one: info_one_value Returns", "class BaseObject(metaclass=ABCMeta): \"\"\" BaseObject The most basic object, from which target_values in scikit-multiflow", "abc import ABCMeta, abstractmethod class BaseObject(metaclass=ABCMeta): \"\"\" BaseObject The most basic object, from", "object generated by that module. Returns ------- The class type \"\"\" raise NotImplementedError", "It guarantees that all target_values have at least the two basic functions described", "format of the return string is as follows: ClassName: attribute_one: value_one - attribute_two:", "a class. The default format of the return string is as follows: ClassName:", "most basic object, from which target_values in scikit-multiflow derive from. It guarantees that", "that identifies the type of object generated by that module. Returns ------- The", "string that identifies the type of object generated by that module. Returns -------", "class type \"\"\" raise NotImplementedError @abstractmethod def get_info(self): \"\"\" get_info A sum up", "A sum up of all important characteristics of a class. The default format", "identifies the type of object generated by that module. Returns ------- The class", "object, from which target_values in scikit-multiflow derive from. It guarantees that all target_values", "\\ - info_one: info_one_value Returns ------- A string with the class' relevant information.", "info_one: info_one_value Returns ------- A string with the class' relevant information. \"\"\" raise", "least the two basic functions described in this base class. \"\"\" @abstractmethod def", "generated by that module. Returns ------- The class type \"\"\" raise NotImplementedError @abstractmethod", "get_class_type The class type is a string that identifies the type of object", "def get_info(self): \"\"\" get_info A sum up of all important characteristics of a", "at least the two basic functions described in this base class. \"\"\" @abstractmethod", "The default format of the return string is as follows: ClassName: attribute_one: value_one", "\"\"\" @abstractmethod def get_class_type(self): \"\"\" get_class_type The class type is a string that", "by that module. Returns ------- The class type \"\"\" raise NotImplementedError @abstractmethod def", "info_one_value Returns ------- A string with the class' relevant information. 
\"\"\" raise NotImplementedError", "raise NotImplementedError @abstractmethod def get_info(self): \"\"\" get_info A sum up of all important", "that module. Returns ------- The class type \"\"\" raise NotImplementedError @abstractmethod def get_info(self):", "NotImplementedError @abstractmethod def get_info(self): \"\"\" get_info A sum up of all important characteristics", "target_values in scikit-multiflow derive from. It guarantees that all target_values have at least", "that all target_values have at least the two basic functions described in this", "get_info A sum up of all important characteristics of a class. The default", "attribute_two: value_two \\ - info_one: info_one_value Returns ------- A string with the class'", "base class. \"\"\" @abstractmethod def get_class_type(self): \"\"\" get_class_type The class type is a", "type \"\"\" raise NotImplementedError @abstractmethod def get_info(self): \"\"\" get_info A sum up of", "module. Returns ------- The class type \"\"\" raise NotImplementedError @abstractmethod def get_info(self): \"\"\"", "from abc import ABCMeta, abstractmethod class BaseObject(metaclass=ABCMeta): \"\"\" BaseObject The most basic object,", "from. It guarantees that all target_values have at least the two basic functions", "BaseObject(metaclass=ABCMeta): \"\"\" BaseObject The most basic object, from which target_values in scikit-multiflow derive", "class. The default format of the return string is as follows: ClassName: attribute_one:", "\"\"\" get_class_type The class type is a string that identifies the type of", "class. \"\"\" @abstractmethod def get_class_type(self): \"\"\" get_class_type The class type is a string", "get_class_type(self): \"\"\" get_class_type The class type is a string that identifies the type", "default format of the return string is as follows: ClassName: attribute_one: value_one -", "ABCMeta, abstractmethod class BaseObject(metaclass=ABCMeta): \"\"\" BaseObject The most basic object, from which target_values", "BaseObject The most basic object, from which target_values in scikit-multiflow derive from. It", "abstractmethod class BaseObject(metaclass=ABCMeta): \"\"\" BaseObject The most basic object, from which target_values in", "is a string that identifies the type of object generated by that module.", "guarantees that all target_values have at least the two basic functions described in", "class type is a string that identifies the type of object generated by", "basic object, from which target_values in scikit-multiflow derive from. It guarantees that all", "type is a string that identifies the type of object generated by that", "two basic functions described in this base class. \"\"\" @abstractmethod def get_class_type(self): \"\"\"", "functions described in this base class. \"\"\" @abstractmethod def get_class_type(self): \"\"\" get_class_type The", "- info_one: info_one_value Returns ------- A string with the class' relevant information. \"\"\"", "from which target_values in scikit-multiflow derive from. It guarantees that all target_values have" ]
[ "(batch, max_n_mentions, 2) int cat (batch, max_n_mentions, n_cats) bool image (batch, d_image) region", "5 args.d_region = args.d_region_visual + args.d_region_spatial args.max_n_regions = 1000 args.d_rank = 1024 args.d_fusion", "1) % args.print_every == 0: logger.info('Iter %d / %d\\tloss_train = %.4f' % (it+1,", "args = parser.parse_args() args.d_lang = 1024 args.max_n_mentions = 20 args.n_cats = 8 args.d_image", "random import numpy as np import torch import torch.nn as nn import torch.optim", "args.save_every == 0: checkpoint = { 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), } model_path =", "tokens_dev) if args.do_test: tokens_test = load_tokens('test', args.n_test) logger.info('Loading model ...') if not args.do_train:", "r = opt[c][m] if not args.no_box_regression: mention.bbox_pred = clip_bbox_to_image(deparameterize_bbox_error(reg[c,m,r].tolist(), instance.regions[r].bbox, instance.image_size), instance.image_size) else:", "(it + 1) % args.save_every == 0: checkpoint = { 'model_state_dict': model.state_dict(), 'optimizer_state_dict':", "argparse.ArgumentParser() parser.add_argument('--output_dir', default=None, type=str, required=True) parser.add_argument('--model_name', default='model.pth', type=str) parser.add_argument('--do_train', action='store_true') parser.add_argument('--do_test', action='store_true') parser.add_argument('--n_train',", "from model import Model from utils.data import load_tokens from utils.vision import iou, clip_bbox_to_image,", "exist_ok=True) if args.visualize: if args.do_train: os.makedirs(os.path.join(args.output_dir, 'visualize', 'train'), exist_ok=True) os.makedirs(os.path.join(args.output_dir, 'visualize', 'dev'), exist_ok=True)", "def train (args, model, optimizer, tokens_train, tokens_dev): model.train() stat_loss = StatLoss() for it", "optimizer.zero_grad() loss = model(X) loss.backward() nn.utils.clip_grad_value_(model.parameters(), args.max_grad_norm) optimizer.step() stat_loss.insert(loss.item()) if (it + 1)", "parser.add_argument('--no_box_regression',action='store_true') parser.add_argument('--gamma', default=10.0, type=float) parser.add_argument('--seed', default=19980430, type=int) parser.add_argument('--print_every', default=500, type=int) parser.add_argument('--eval_every', default=5000, type=int)", "args.max_n_regions = 1000 args.d_rank = 1024 args.d_fusion = 1024 args.d_aff = 1 args.d_reg", "= clip_bbox_to_image(deparameterize_bbox_error(reg[c,m,r].tolist(), instance.regions[r].bbox, instance.image_size), instance.image_size) else: mention.bbox_pred = instance.regions[r].bbox stat_result.insert(instance) if args.visualize: instance.visualize_prediction(args.output_dir,", "if args.do_test: os.makedirs(os.path.join(args.output_dir, 'visualize', 'test'), exist_ok=True) if args.do_train: tokens_train = load_tokens('train', args.n_train) tokens_dev", "for c, caption in enumerate(instance.captions): for m, mention in enumerate(caption.mentions): r = opt[c][m]", "''' X tokenid (batch, seqlen, .) 
int span (batch, max_n_mentions, 2) int cat", "8 args.d_image = 1024 args.d_region_visual = 2048 args.d_region_spatial = 5 args.d_region = args.d_region_visual", "model ...') train(args, model, optimizer, tokens_train, tokens_dev) if args.do_test: tokens_test = load_tokens('test', args.n_test)", "optim sys.path.insert(0, os.path.dirname(__file__)) from model import Model from utils.data import load_tokens from utils.vision", "stat_loss = StatLoss() for it in range(args.iters): samples = random.choices(tokens_train, k=args.batch) X =", "= 1024 args.max_n_mentions = 20 args.n_cats = 8 args.d_image = 1024 args.d_region_visual =", "region (batch, max_n_regions, d_region) n_mentions [batch * int] n_regions [batch * int] _aff", "when --crf') args = parser.parse_args() args.d_lang = 1024 args.max_n_mentions = 20 args.n_cats =", "import StatLoss, StatResult logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt", "tokens_dev): model.train() stat_loss = StatLoss() for it in range(args.iters): samples = random.choices(tokens_train, k=args.batch)", "optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, betas=(0.9,0.98)) logger.info('Training model ...') train(args, model,", "numpy as np import torch import torch.nn as nn import torch.optim as optim", "in range(args.iters): samples = random.choices(tokens_train, k=args.batch) X = collect_feats(args, tokens=samples) optimizer.zero_grad() loss =", "type=float) parser.add_argument('--seed', default=19980430, type=int) parser.add_argument('--print_every', default=500, type=int) parser.add_argument('--eval_every', default=5000, type=int) parser.add_argument('--save_every', default=5000, type=int)", "= '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%Y/%m/%d %H:%M:%S', level", "= 1024 args.d_aff = 1 args.d_reg = 4 set_seed(args.seed) args.device = torch.device('cuda') os.makedirs(args.output_dir,", "in enumerate(instance.captions): for m, mention in enumerate(caption.mentions): r = opt[c][m] if not args.no_box_regression:", "StatResult logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%Y/%m/%d", "load_tokens('test', args.n_test) logger.info('Loading model ...') if not args.do_train: model = Model(args).to(args.device) model_path =", "= model(X) loss.backward() nn.utils.clip_grad_value_(model.parameters(), args.max_grad_norm) optimizer.step() stat_loss.insert(loss.item()) if (it + 1) % args.print_every", "True ''' X tokenid (batch, seqlen, .) int span (batch, max_n_mentions, 2) int", "split='dev') model.train() if (it + 1) % args.save_every == 0: checkpoint = {", "%d\\tloss_train = %.4f' % (it+1, args.iters, stat_loss.loss_avg)) stat_loss = StatLoss() if (it +", "[none, m, mlr, mlrg]') parser.add_argument('--decode', default='none', type=str, help='Decode algo. 
One of [viterbi, smoothing]", "import Model from utils.data import load_tokens from utils.vision import iou, clip_bbox_to_image, deparameterize_bbox_error from", "load_tokens('dev', args.n_dev) logger.info('Initializing model ...') model = Model(args).to(args.device) optimizer = optim.Adam(filter(lambda p: p.requires_grad,", "parser.add_argument('--save_every', default=5000, type=int) parser.add_argument('--visualize', action='store_true') parser.add_argument('--kld', action='store_true') parser.add_argument('--crf', action='store_true') parser.add_argument('--tran_context', default='none', type=str, help='Transition", "image (batch, d_image) region (batch, max_n_regions, d_region) n_mentions [batch * int] n_regions [batch", "os.makedirs(os.path.join(args.output_dir, 'visualize', 'dev'), exist_ok=True) if args.do_test: os.makedirs(os.path.join(args.output_dir, 'visualize', 'test'), exist_ok=True) if args.do_train: tokens_train", "'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), } model_path = os.path.join(args.output_dir, 'model.%06d.pth' % (it+1)) torch.save(checkpoint, model_path)", "== 0: eval(args, model, tokens_dev, split='dev') model.train() if (it + 1) % args.save_every", "nn.utils.clip_grad_value_(model.parameters(), args.max_grad_norm) optimizer.step() stat_loss.insert(loss.item()) if (it + 1) % args.print_every == 0: logger.info('Iter", "import load_tokens from utils.vision import iou, clip_bbox_to_image, deparameterize_bbox_error from utils.feats import collect_feats from", "= random.choices(tokens_train, k=args.batch) X = collect_feats(args, tokens=samples) optimizer.zero_grad() loss = model(X) loss.backward() nn.utils.clip_grad_value_(model.parameters(),", "parser.add_argument('--crf', action='store_true') parser.add_argument('--tran_context', default='none', type=str, help='Transition score context. One of [none, m, mlr,", "(batch, max_n_mentions, n_cats) bool image (batch, d_image) region (batch, max_n_regions, d_region) n_mentions [batch", "type=float) parser.add_argument('--max_grad_norm', default=10.0, type=float) parser.add_argument('--no_box_regression',action='store_true') parser.add_argument('--gamma', default=10.0, type=float) parser.add_argument('--seed', default=19980430, type=int) parser.add_argument('--print_every', default=500,", "help='Transition score context. 
One of [none, m, mlr, mlrg]') parser.add_argument('--decode', default='none', type=str, help='Decode", "import iou, clip_bbox_to_image, deparameterize_bbox_error from utils.feats import collect_feats from utils.stat import StatLoss, StatResult", "opt[c][m] if not args.no_box_regression: mention.bbox_pred = clip_bbox_to_image(deparameterize_bbox_error(reg[c,m,r].tolist(), instance.regions[r].bbox, instance.image_size), instance.image_size) else: mention.bbox_pred =", "'%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%Y/%m/%d %H:%M:%S', level =", "default=16, type=int) parser.add_argument('--lr', default=5e-5, type=float) parser.add_argument('--drop_prob', default=0.2, type=float) parser.add_argument('--max_grad_norm', default=10.0, type=float) parser.add_argument('--no_box_regression',action='store_true') parser.add_argument('--gamma',", "np import torch import torch.nn as nn import torch.optim as optim sys.path.insert(0, os.path.dirname(__file__))", "args.no_box_regression: mention.bbox_pred = clip_bbox_to_image(deparameterize_bbox_error(reg[c,m,r].tolist(), instance.regions[r].bbox, instance.image_size), instance.image_size) else: mention.bbox_pred = instance.regions[r].bbox stat_result.insert(instance) if", "default='model.pth', type=str) parser.add_argument('--do_train', action='store_true') parser.add_argument('--do_test', action='store_true') parser.add_argument('--n_train', default=29783, type=int) parser.add_argument('--n_dev', default=1000, type=int) parser.add_argument('--n_test',", "action='store_true') parser.add_argument('--crf', action='store_true') parser.add_argument('--tran_context', default='none', type=str, help='Transition score context. One of [none, m,", "= torch.load(model_path) model.load_state_dict(checkpoint['model_state_dict']) logger.info('Testing model ...') eval(args, model, tokens_test, split='test') if __name__ ==", "samples = random.choices(tokens_train, k=args.batch) X = collect_feats(args, tokens=samples) optimizer.zero_grad() loss = model(X) loss.backward()", "d_aff=1) _reg (batch, max_n_mentions, max_n_regions, d_reg=4) ''' def train (args, model, optimizer, tokens_train,", "StatLoss() if (it + 1) % args.eval_every == 0: eval(args, model, tokens_dev, split='dev')", "torch.no_grad(): loss, opt, reg = model(X) stat_loss.insert(loss.item()) for c, caption in enumerate(instance.captions): for", "lr=args.lr, betas=(0.9,0.98)) logger.info('Training model ...') train(args, model, optimizer, tokens_train, tokens_dev) if args.do_test: tokens_test", "= load_tokens('train', args.n_train) tokens_dev = load_tokens('dev', args.n_dev) logger.info('Initializing model ...') model = Model(args).to(args.device)", "os, sys import argparse import logging import random import numpy as np import", "action='store_true') parser.add_argument('--n_train', default=29783, type=int) parser.add_argument('--n_dev', default=1000, type=int) parser.add_argument('--n_test', default=1000, type=int) parser.add_argument('--iters', default=50000, type=int)", "<reponame>liujch1998/SoftLabelCCRF import os, sys import argparse import logging import random import numpy as", "if (it + 1) % args.eval_every == 0: eval(args, model, tokens_dev, split='dev') model.train()", "tokenid (batch, seqlen, .) 
int span (batch, max_n_mentions, 2) int cat (batch, max_n_mentions,", "1024 args.d_region_visual = 2048 args.d_region_spatial = 5 args.d_region = args.d_region_visual + args.d_region_spatial args.max_n_regions", "1024 args.max_n_mentions = 20 args.n_cats = 8 args.d_image = 1024 args.d_region_visual = 2048", "tokens_test = load_tokens('test', args.n_test) logger.info('Loading model ...') if not args.do_train: model = Model(args).to(args.device)", "2) int cat (batch, max_n_mentions, n_cats) bool image (batch, d_image) region (batch, max_n_regions,", "torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.backends.cudnn.deterministic = True ''' X tokenid (batch, seqlen, .) int span", "%(name)s - %(message)s', datefmt = '%Y/%m/%d %H:%M:%S', level = logging.INFO) logger = logging.getLogger(__name__)", "= 1000 args.d_rank = 1024 args.d_fusion = 1024 args.d_aff = 1 args.d_reg =", "stat_loss = StatLoss() stat_result = StatResult() for token in tokens: X, instance =", "logger.info('Training model ...') train(args, model, optimizer, tokens_train, tokens_dev) if args.do_test: tokens_test = load_tokens('test',", "max_n_regions, d_aff=1) _reg (batch, max_n_mentions, max_n_regions, d_reg=4) ''' def train (args, model, optimizer,", "torch import torch.nn as nn import torch.optim as optim sys.path.insert(0, os.path.dirname(__file__)) from model", "- %(levelname)s - %(name)s - %(message)s', datefmt = '%Y/%m/%d %H:%M:%S', level = logging.INFO)", "(batch, d_image) region (batch, max_n_regions, d_region) n_mentions [batch * int] n_regions [batch *", "if (it + 1) % args.print_every == 0: logger.info('Iter %d / %d\\tloss_train =", "parser.add_argument('--n_dev', default=1000, type=int) parser.add_argument('--n_test', default=1000, type=int) parser.add_argument('--iters', default=50000, type=int) parser.add_argument('--batch', default=16, type=int) parser.add_argument('--lr',", "= StatLoss() if (it + 1) % args.eval_every == 0: eval(args, model, tokens_dev,", "tokens_train, tokens_dev): model.train() stat_loss = StatLoss() for it in range(args.iters): samples = random.choices(tokens_train,", "1 args.d_reg = 4 set_seed(args.seed) args.device = torch.device('cuda') os.makedirs(args.output_dir, exist_ok=True) if args.visualize: if", "'visualize', 'test'), exist_ok=True) if args.do_train: tokens_train = load_tokens('train', args.n_train) tokens_dev = load_tokens('dev', args.n_dev)", "1) % args.save_every == 0: checkpoint = { 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), }", "set_seed (seed): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.backends.cudnn.deterministic = True ''' X tokenid (batch,", "import logging import random import numpy as np import torch import torch.nn as", "collect_feats(args, token=token) with torch.no_grad(): loss, opt, reg = model(X) stat_loss.insert(loss.item()) for c, caption", "token=token) with torch.no_grad(): loss, opt, reg = model(X) stat_loss.insert(loss.item()) for c, caption in", "== 0: checkpoint = { 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), } model_path = os.path.join(args.output_dir,", "utils.data import load_tokens from utils.vision import iou, clip_bbox_to_image, deparameterize_bbox_error from utils.feats import collect_feats", "= StatResult() for token in tokens: X, instance = collect_feats(args, token=token) with torch.no_grad():", "...') if not args.do_train: model = Model(args).to(args.device) model_path = 
os.path.join(args.output_dir, args.model_name) checkpoint =", "deparameterize_bbox_error from utils.feats import collect_feats from utils.stat import StatLoss, StatResult logging.basicConfig(format = '%(asctime)s", "...') model = Model(args).to(args.device) optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, betas=(0.9,0.98)) logger.info('Training", "= 20 args.n_cats = 8 args.d_image = 1024 args.d_region_visual = 2048 args.d_region_spatial =", "model.eval() stat_loss = StatLoss() stat_result = StatResult() for token in tokens: X, instance", "tokens_dev, split='dev') model.train() if (it + 1) % args.save_every == 0: checkpoint =", "+ 1) % args.eval_every == 0: eval(args, model, tokens_dev, split='dev') model.train() if (it", "torch.nn as nn import torch.optim as optim sys.path.insert(0, os.path.dirname(__file__)) from model import Model", "parser.add_argument('--tran_context', default='none', type=str, help='Transition score context. One of [none, m, mlr, mlrg]') parser.add_argument('--decode',", "'visualize', 'train'), exist_ok=True) os.makedirs(os.path.join(args.output_dir, 'visualize', 'dev'), exist_ok=True) if args.do_test: os.makedirs(os.path.join(args.output_dir, 'visualize', 'test'), exist_ok=True)", "parser.add_argument('--batch', default=16, type=int) parser.add_argument('--lr', default=5e-5, type=float) parser.add_argument('--drop_prob', default=0.2, type=float) parser.add_argument('--max_grad_norm', default=10.0, type=float) parser.add_argument('--no_box_regression',action='store_true')", "def set_seed (seed): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.backends.cudnn.deterministic = True ''' X tokenid", "(): parser = argparse.ArgumentParser() parser.add_argument('--output_dir', default=None, type=str, required=True) parser.add_argument('--model_name', default='model.pth', type=str) parser.add_argument('--do_train', action='store_true')", "+ args.d_region_spatial args.max_n_regions = 1000 args.d_rank = 1024 args.d_fusion = 1024 args.d_aff =", "default=0.2, type=float) parser.add_argument('--max_grad_norm', default=10.0, type=float) parser.add_argument('--no_box_regression',action='store_true') parser.add_argument('--gamma', default=10.0, type=float) parser.add_argument('--seed', default=19980430, type=int) parser.add_argument('--print_every',", "args.do_test: tokens_test = load_tokens('test', args.n_test) logger.info('Loading model ...') if not args.do_train: model =", "parser.add_argument('--gamma', default=10.0, type=float) parser.add_argument('--seed', default=19980430, type=int) parser.add_argument('--print_every', default=500, type=int) parser.add_argument('--eval_every', default=5000, type=int) parser.add_argument('--save_every',", "= 8 args.d_image = 1024 args.d_region_visual = 2048 args.d_region_spatial = 5 args.d_region =", "* int] _aff (batch, max_n_mentions, max_n_regions, d_aff=1) _reg (batch, max_n_mentions, max_n_regions, d_reg=4) '''", "os.path.join(args.output_dir, 'model.%06d.pth' % (it+1)) torch.save(checkpoint, model_path) def eval (args, model, tokens, split): model.eval()", "split): model.eval() stat_loss = StatLoss() stat_result = StatResult() for token in tokens: X,", "opt, reg = model(X) stat_loss.insert(loss.item()) for c, caption in enumerate(instance.captions): for m, mention", "= model(X) stat_loss.insert(loss.item()) for c, caption in enumerate(instance.captions): for m, mention in enumerate(caption.mentions):", "torch.save(checkpoint, model_path) def eval 
(args, model, tokens, split): model.eval() stat_loss = StatLoss() stat_result", "model.train() stat_loss = StatLoss() for it in range(args.iters): samples = random.choices(tokens_train, k=args.batch) X", "type=str, required=True) parser.add_argument('--model_name', default='model.pth', type=str) parser.add_argument('--do_train', action='store_true') parser.add_argument('--do_test', action='store_true') parser.add_argument('--n_train', default=29783, type=int) parser.add_argument('--n_dev',", "def main (): parser = argparse.ArgumentParser() parser.add_argument('--output_dir', default=None, type=str, required=True) parser.add_argument('--model_name', default='model.pth', type=str)", "args.d_region = args.d_region_visual + args.d_region_spatial args.max_n_regions = 1000 args.d_rank = 1024 args.d_fusion =", "os.makedirs(os.path.join(args.output_dir, 'visualize', 'test'), exist_ok=True) if args.do_train: tokens_train = load_tokens('train', args.n_train) tokens_dev = load_tokens('dev',", "exist_ok=True) if args.do_train: tokens_train = load_tokens('train', args.n_train) tokens_dev = load_tokens('dev', args.n_dev) logger.info('Initializing model", "args.max_grad_norm) optimizer.step() stat_loss.insert(loss.item()) if (it + 1) % args.print_every == 0: logger.info('Iter %d", "X, instance = collect_feats(args, token=token) with torch.no_grad(): loss, opt, reg = model(X) stat_loss.insert(loss.item())", "as nn import torch.optim as optim sys.path.insert(0, os.path.dirname(__file__)) from model import Model from", "torch.device('cuda') os.makedirs(args.output_dir, exist_ok=True) if args.visualize: if args.do_train: os.makedirs(os.path.join(args.output_dir, 'visualize', 'train'), exist_ok=True) os.makedirs(os.path.join(args.output_dir, 'visualize',", "= True ''' X tokenid (batch, seqlen, .) int span (batch, max_n_mentions, 2)", "for it in range(args.iters): samples = random.choices(tokens_train, k=args.batch) X = collect_feats(args, tokens=samples) optimizer.zero_grad()", "instance = collect_feats(args, token=token) with torch.no_grad(): loss, opt, reg = model(X) stat_loss.insert(loss.item()) for", "--crf') args = parser.parse_args() args.d_lang = 1024 args.max_n_mentions = 20 args.n_cats = 8", "= os.path.join(args.output_dir, 'model.%06d.pth' % (it+1)) torch.save(checkpoint, model_path) def eval (args, model, tokens, split):", "default=5000, type=int) parser.add_argument('--save_every', default=5000, type=int) parser.add_argument('--visualize', action='store_true') parser.add_argument('--kld', action='store_true') parser.add_argument('--crf', action='store_true') parser.add_argument('--tran_context', default='none',", "train(args, model, optimizer, tokens_train, tokens_dev) if args.do_test: tokens_test = load_tokens('test', args.n_test) logger.info('Loading model", "model = Model(args).to(args.device) model_path = os.path.join(args.output_dir, args.model_name) checkpoint = torch.load(model_path) model.load_state_dict(checkpoint['model_state_dict']) logger.info('Testing model", "p.requires_grad, model.parameters()), lr=args.lr, betas=(0.9,0.98)) logger.info('Training model ...') train(args, model, optimizer, tokens_train, tokens_dev) if", "torch.optim as optim sys.path.insert(0, os.path.dirname(__file__)) from model import Model from utils.data import load_tokens", "type=str, help='Transition score context. One of [none, m, mlr, mlrg]') parser.add_argument('--decode', default='none', type=str,", "default='none', type=str, help='Transition score context. 
One of [none, m, mlr, mlrg]') parser.add_argument('--decode', default='none',", "StatLoss, StatResult logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt =", "tokens: X, instance = collect_feats(args, token=token) with torch.no_grad(): loss, opt, reg = model(X)", "default=1000, type=int) parser.add_argument('--iters', default=50000, type=int) parser.add_argument('--batch', default=16, type=int) parser.add_argument('--lr', default=5e-5, type=float) parser.add_argument('--drop_prob', default=0.2,", "max_n_mentions, 2) int cat (batch, max_n_mentions, n_cats) bool image (batch, d_image) region (batch,", "torch.load(model_path) model.load_state_dict(checkpoint['model_state_dict']) logger.info('Testing model ...') eval(args, model, tokens_test, split='test') if __name__ == '__main__':", "mlr, mlrg]') parser.add_argument('--decode', default='none', type=str, help='Decode algo. One of [viterbi, smoothing] when --crf')", "= 4 set_seed(args.seed) args.device = torch.device('cuda') os.makedirs(args.output_dir, exist_ok=True) if args.visualize: if args.do_train: os.makedirs(os.path.join(args.output_dir,", "reg = model(X) stat_loss.insert(loss.item()) for c, caption in enumerate(instance.captions): for m, mention in", "tokens_dev = load_tokens('dev', args.n_dev) logger.info('Initializing model ...') model = Model(args).to(args.device) optimizer = optim.Adam(filter(lambda", "m, mlr, mlrg]') parser.add_argument('--decode', default='none', type=str, help='Decode algo. One of [viterbi, smoothing] when", "clip_bbox_to_image(deparameterize_bbox_error(reg[c,m,r].tolist(), instance.regions[r].bbox, instance.image_size), instance.image_size) else: mention.bbox_pred = instance.regions[r].bbox stat_result.insert(instance) if args.visualize: instance.visualize_prediction(args.output_dir, split)", "eval(args, model, tokens_dev, split='dev') model.train() if (it + 1) % args.save_every == 0:", "model ...') model = Model(args).to(args.device) optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, betas=(0.9,0.98))", "= { 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), } model_path = os.path.join(args.output_dir, 'model.%06d.pth' % (it+1))", "= Model(args).to(args.device) optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, betas=(0.9,0.98)) logger.info('Training model ...')", "= optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, betas=(0.9,0.98)) logger.info('Training model ...') train(args, model, optimizer,", "args.do_train: os.makedirs(os.path.join(args.output_dir, 'visualize', 'train'), exist_ok=True) os.makedirs(os.path.join(args.output_dir, 'visualize', 'dev'), exist_ok=True) if args.do_test: os.makedirs(os.path.join(args.output_dir, 'visualize',", "parser = argparse.ArgumentParser() parser.add_argument('--output_dir', default=None, type=str, required=True) parser.add_argument('--model_name', default='model.pth', type=str) parser.add_argument('--do_train', action='store_true') parser.add_argument('--do_test',", "enumerate(caption.mentions): r = opt[c][m] if not args.no_box_regression: mention.bbox_pred = clip_bbox_to_image(deparameterize_bbox_error(reg[c,m,r].tolist(), instance.regions[r].bbox, instance.image_size), instance.image_size)", "mention.bbox_pred = instance.regions[r].bbox stat_result.insert(instance) if args.visualize: instance.visualize_prediction(args.output_dir, split) logger.info('loss_eval = %.4f' % 
stat_loss.loss_avg)", "os.makedirs(os.path.join(args.output_dir, 'visualize', 'train'), exist_ok=True) os.makedirs(os.path.join(args.output_dir, 'visualize', 'dev'), exist_ok=True) if args.do_test: os.makedirs(os.path.join(args.output_dir, 'visualize', 'test'),", "model ...') if not args.do_train: model = Model(args).to(args.device) model_path = os.path.join(args.output_dir, args.model_name) checkpoint", "eval (args, model, tokens, split): model.eval() stat_loss = StatLoss() stat_result = StatResult() for", "optimizer, tokens_train, tokens_dev) if args.do_test: tokens_test = load_tokens('test', args.n_test) logger.info('Loading model ...') if", "from utils.stat import StatLoss, StatResult logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s -", "if (it + 1) % args.save_every == 0: checkpoint = { 'model_state_dict': model.state_dict(),", "d_reg=4) ''' def train (args, model, optimizer, tokens_train, tokens_dev): model.train() stat_loss = StatLoss()", "[batch * int] _aff (batch, max_n_mentions, max_n_regions, d_aff=1) _reg (batch, max_n_mentions, max_n_regions, d_reg=4)", "logging.getLogger(__name__) def set_seed (seed): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.backends.cudnn.deterministic = True ''' X", "set_seed(args.seed) args.device = torch.device('cuda') os.makedirs(args.output_dir, exist_ok=True) if args.visualize: if args.do_train: os.makedirs(os.path.join(args.output_dir, 'visualize', 'train'),", "'test'), exist_ok=True) if args.do_train: tokens_train = load_tokens('train', args.n_train) tokens_dev = load_tokens('dev', args.n_dev) logger.info('Initializing", "default=19980430, type=int) parser.add_argument('--print_every', default=500, type=int) parser.add_argument('--eval_every', default=5000, type=int) parser.add_argument('--save_every', default=5000, type=int) parser.add_argument('--visualize', action='store_true')", "args.d_reg = 4 set_seed(args.seed) args.device = torch.device('cuda') os.makedirs(args.output_dir, exist_ok=True) if args.visualize: if args.do_train:", "nn import torch.optim as optim sys.path.insert(0, os.path.dirname(__file__)) from model import Model from utils.data", "if args.visualize: if args.do_train: os.makedirs(os.path.join(args.output_dir, 'visualize', 'train'), exist_ok=True) os.makedirs(os.path.join(args.output_dir, 'visualize', 'dev'), exist_ok=True) if", "default=5e-5, type=float) parser.add_argument('--drop_prob', default=0.2, type=float) parser.add_argument('--max_grad_norm', default=10.0, type=float) parser.add_argument('--no_box_regression',action='store_true') parser.add_argument('--gamma', default=10.0, type=float) parser.add_argument('--seed',", "optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, betas=(0.9,0.98)) logger.info('Training model ...') train(args, model, optimizer, tokens_train,", "%H:%M:%S', level = logging.INFO) logger = logging.getLogger(__name__) def set_seed (seed): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed)", "'%Y/%m/%d %H:%M:%S', level = logging.INFO) logger = logging.getLogger(__name__) def set_seed (seed): random.seed(seed) np.random.seed(seed)", "in tokens: X, instance = collect_feats(args, token=token) with torch.no_grad(): loss, opt, reg =", "type=int) parser.add_argument('--n_test', default=1000, type=int) parser.add_argument('--iters', default=50000, type=int) parser.add_argument('--batch', default=16, type=int) parser.add_argument('--lr', default=5e-5, type=float)", "= 5 args.d_region = 
args.d_region_visual + args.d_region_spatial args.max_n_regions = 1000 args.d_rank = 1024", "load_tokens('train', args.n_train) tokens_dev = load_tokens('dev', args.n_dev) logger.info('Initializing model ...') model = Model(args).to(args.device) optimizer", "= opt[c][m] if not args.no_box_regression: mention.bbox_pred = clip_bbox_to_image(deparameterize_bbox_error(reg[c,m,r].tolist(), instance.regions[r].bbox, instance.image_size), instance.image_size) else: mention.bbox_pred", "+ 1) % args.print_every == 0: logger.info('Iter %d / %d\\tloss_train = %.4f' %", "load_tokens from utils.vision import iou, clip_bbox_to_image, deparameterize_bbox_error from utils.feats import collect_feats from utils.stat", "for m, mention in enumerate(caption.mentions): r = opt[c][m] if not args.no_box_regression: mention.bbox_pred =", "parser.add_argument('--iters', default=50000, type=int) parser.add_argument('--batch', default=16, type=int) parser.add_argument('--lr', default=5e-5, type=float) parser.add_argument('--drop_prob', default=0.2, type=float) parser.add_argument('--max_grad_norm',", "argparse import logging import random import numpy as np import torch import torch.nn", "model_path) def eval (args, model, tokens, split): model.eval() stat_loss = StatLoss() stat_result =", "= StatLoss() stat_result = StatResult() for token in tokens: X, instance = collect_feats(args,", "collect_feats from utils.stat import StatLoss, StatResult logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s", "(args, model, tokens, split): model.eval() stat_loss = StatLoss() stat_result = StatResult() for token", "model.parameters()), lr=args.lr, betas=(0.9,0.98)) logger.info('Training model ...') train(args, model, optimizer, tokens_train, tokens_dev) if args.do_test:", "- %(name)s - %(message)s', datefmt = '%Y/%m/%d %H:%M:%S', level = logging.INFO) logger =", "% stat_loss.loss_avg) stat_result.print(logger) def main (): parser = argparse.ArgumentParser() parser.add_argument('--output_dir', default=None, type=str, required=True)", "parser.add_argument('--max_grad_norm', default=10.0, type=float) parser.add_argument('--no_box_regression',action='store_true') parser.add_argument('--gamma', default=10.0, type=float) parser.add_argument('--seed', default=19980430, type=int) parser.add_argument('--print_every', default=500, type=int)", "np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.backends.cudnn.deterministic = True ''' X tokenid (batch, seqlen, .) 
int", "% args.save_every == 0: checkpoint = { 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), } model_path", "args.n_train) tokens_dev = load_tokens('dev', args.n_dev) logger.info('Initializing model ...') model = Model(args).to(args.device) optimizer =", "parser.add_argument('--lr', default=5e-5, type=float) parser.add_argument('--drop_prob', default=0.2, type=float) parser.add_argument('--max_grad_norm', default=10.0, type=float) parser.add_argument('--no_box_regression',action='store_true') parser.add_argument('--gamma', default=10.0, type=float)", "args.max_n_mentions = 20 args.n_cats = 8 args.d_image = 1024 args.d_region_visual = 2048 args.d_region_spatial", "2048 args.d_region_spatial = 5 args.d_region = args.d_region_visual + args.d_region_spatial args.max_n_regions = 1000 args.d_rank", "import argparse import logging import random import numpy as np import torch import", "checkpoint = { 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), } model_path = os.path.join(args.output_dir, 'model.%06d.pth' %", "One of [none, m, mlr, mlrg]') parser.add_argument('--decode', default='none', type=str, help='Decode algo. One of", "args.n_cats = 8 args.d_image = 1024 args.d_region_visual = 2048 args.d_region_spatial = 5 args.d_region", "= StatLoss() for it in range(args.iters): samples = random.choices(tokens_train, k=args.batch) X = collect_feats(args,", "os.makedirs(args.output_dir, exist_ok=True) if args.visualize: if args.do_train: os.makedirs(os.path.join(args.output_dir, 'visualize', 'train'), exist_ok=True) os.makedirs(os.path.join(args.output_dir, 'visualize', 'dev'),", "n_regions [batch * int] _aff (batch, max_n_mentions, max_n_regions, d_aff=1) _reg (batch, max_n_mentions, max_n_regions,", "parser.add_argument('--do_train', action='store_true') parser.add_argument('--do_test', action='store_true') parser.add_argument('--n_train', default=29783, type=int) parser.add_argument('--n_dev', default=1000, type=int) parser.add_argument('--n_test', default=1000, type=int)", "stat_loss = StatLoss() if (it + 1) % args.eval_every == 0: eval(args, model,", "def eval (args, model, tokens, split): model.eval() stat_loss = StatLoss() stat_result = StatResult()", "action='store_true') parser.add_argument('--kld', action='store_true') parser.add_argument('--crf', action='store_true') parser.add_argument('--tran_context', default='none', type=str, help='Transition score context. 
One of", "d_image) region (batch, max_n_regions, d_region) n_mentions [batch * int] n_regions [batch * int]", "if args.do_test: tokens_test = load_tokens('test', args.n_test) logger.info('Loading model ...') if not args.do_train: model", "parser.parse_args() args.d_lang = 1024 args.max_n_mentions = 20 args.n_cats = 8 args.d_image = 1024", "as np import torch import torch.nn as nn import torch.optim as optim sys.path.insert(0,", "parser.add_argument('--model_name', default='model.pth', type=str) parser.add_argument('--do_train', action='store_true') parser.add_argument('--do_test', action='store_true') parser.add_argument('--n_train', default=29783, type=int) parser.add_argument('--n_dev', default=1000, type=int)", "train (args, model, optimizer, tokens_train, tokens_dev): model.train() stat_loss = StatLoss() for it in", "loss, opt, reg = model(X) stat_loss.insert(loss.item()) for c, caption in enumerate(instance.captions): for m,", "args.visualize: if args.do_train: os.makedirs(os.path.join(args.output_dir, 'visualize', 'train'), exist_ok=True) os.makedirs(os.path.join(args.output_dir, 'visualize', 'dev'), exist_ok=True) if args.do_test:", "torch.cuda.manual_seed_all(seed) torch.backends.cudnn.deterministic = True ''' X tokenid (batch, seqlen, .) int span (batch,", "main (): parser = argparse.ArgumentParser() parser.add_argument('--output_dir', default=None, type=str, required=True) parser.add_argument('--model_name', default='model.pth', type=str) parser.add_argument('--do_train',", "= logging.INFO) logger = logging.getLogger(__name__) def set_seed (seed): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.backends.cudnn.deterministic", "it in range(args.iters): samples = random.choices(tokens_train, k=args.batch) X = collect_feats(args, tokens=samples) optimizer.zero_grad() loss", "= torch.device('cuda') os.makedirs(args.output_dir, exist_ok=True) if args.visualize: if args.do_train: os.makedirs(os.path.join(args.output_dir, 'visualize', 'train'), exist_ok=True) os.makedirs(os.path.join(args.output_dir,", "parser.add_argument('--n_train', default=29783, type=int) parser.add_argument('--n_dev', default=1000, type=int) parser.add_argument('--n_test', default=1000, type=int) parser.add_argument('--iters', default=50000, type=int) parser.add_argument('--batch',", "enumerate(instance.captions): for m, mention in enumerate(caption.mentions): r = opt[c][m] if not args.no_box_regression: mention.bbox_pred", "parser.add_argument('--output_dir', default=None, type=str, required=True) parser.add_argument('--model_name', default='model.pth', type=str) parser.add_argument('--do_train', action='store_true') parser.add_argument('--do_test', action='store_true') parser.add_argument('--n_train', default=29783,", "% args.eval_every == 0: eval(args, model, tokens_dev, split='dev') model.train() if (it + 1)", "token in tokens: X, instance = collect_feats(args, token=token) with torch.no_grad(): loss, opt, reg", "import random import numpy as np import torch import torch.nn as nn import", "parser.add_argument('--decode', default='none', type=str, help='Decode algo. 
One of [viterbi, smoothing] when --crf') args =", "parser.add_argument('--drop_prob', default=0.2, type=float) parser.add_argument('--max_grad_norm', default=10.0, type=float) parser.add_argument('--no_box_regression',action='store_true') parser.add_argument('--gamma', default=10.0, type=float) parser.add_argument('--seed', default=19980430, type=int)", "action='store_true') parser.add_argument('--tran_context', default='none', type=str, help='Transition score context. One of [none, m, mlr, mlrg]')", "help='Decode algo. One of [viterbi, smoothing] when --crf') args = parser.parse_args() args.d_lang =", "default=500, type=int) parser.add_argument('--eval_every', default=5000, type=int) parser.add_argument('--save_every', default=5000, type=int) parser.add_argument('--visualize', action='store_true') parser.add_argument('--kld', action='store_true') parser.add_argument('--crf',", "exist_ok=True) if args.do_test: os.makedirs(os.path.join(args.output_dir, 'visualize', 'test'), exist_ok=True) if args.do_train: tokens_train = load_tokens('train', args.n_train)", "default='none', type=str, help='Decode algo. One of [viterbi, smoothing] when --crf') args = parser.parse_args()", "%(message)s', datefmt = '%Y/%m/%d %H:%M:%S', level = logging.INFO) logger = logging.getLogger(__name__) def set_seed", "score context. One of [none, m, mlr, mlrg]') parser.add_argument('--decode', default='none', type=str, help='Decode algo.", "else: mention.bbox_pred = instance.regions[r].bbox stat_result.insert(instance) if args.visualize: instance.visualize_prediction(args.output_dir, split) logger.info('loss_eval = %.4f' %", "model(X) stat_loss.insert(loss.item()) for c, caption in enumerate(instance.captions): for m, mention in enumerate(caption.mentions): r", "- %(message)s', datefmt = '%Y/%m/%d %H:%M:%S', level = logging.INFO) logger = logging.getLogger(__name__) def", "X = collect_feats(args, tokens=samples) optimizer.zero_grad() loss = model(X) loss.backward() nn.utils.clip_grad_value_(model.parameters(), args.max_grad_norm) optimizer.step() stat_loss.insert(loss.item())", "from utils.vision import iou, clip_bbox_to_image, deparameterize_bbox_error from utils.feats import collect_feats from utils.stat import", "Model(args).to(args.device) model_path = os.path.join(args.output_dir, args.model_name) checkpoint = torch.load(model_path) model.load_state_dict(checkpoint['model_state_dict']) logger.info('Testing model ...') eval(args,", "type=int) parser.add_argument('--batch', default=16, type=int) parser.add_argument('--lr', default=5e-5, type=float) parser.add_argument('--drop_prob', default=0.2, type=float) parser.add_argument('--max_grad_norm', default=10.0, type=float)", "int] _aff (batch, max_n_mentions, max_n_regions, d_aff=1) _reg (batch, max_n_mentions, max_n_regions, d_reg=4) ''' def", "logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt = '%Y/%m/%d %H:%M:%S',", "1024 args.d_fusion = 1024 args.d_aff = 1 args.d_reg = 4 set_seed(args.seed) args.device =", "args.n_test) logger.info('Loading model ...') if not args.do_train: model = Model(args).to(args.device) model_path = os.path.join(args.output_dir,", "(batch, max_n_mentions, max_n_regions, d_aff=1) _reg (batch, max_n_mentions, max_n_regions, d_reg=4) ''' def train (args,", "= %.4f' % (it+1, args.iters, stat_loss.loss_avg)) stat_loss = StatLoss() if (it + 1)", "model = Model(args).to(args.device) optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, betas=(0.9,0.98)) 
logger.info('Training model", "= os.path.join(args.output_dir, args.model_name) checkpoint = torch.load(model_path) model.load_state_dict(checkpoint['model_state_dict']) logger.info('Testing model ...') eval(args, model, tokens_test,", "'model.%06d.pth' % (it+1)) torch.save(checkpoint, model_path) def eval (args, model, tokens, split): model.eval() stat_loss", "not args.do_train: model = Model(args).to(args.device) model_path = os.path.join(args.output_dir, args.model_name) checkpoint = torch.load(model_path) model.load_state_dict(checkpoint['model_state_dict'])", "bool image (batch, d_image) region (batch, max_n_regions, d_region) n_mentions [batch * int] n_regions", "tokens_train = load_tokens('train', args.n_train) tokens_dev = load_tokens('dev', args.n_dev) logger.info('Initializing model ...') model =", "type=float) parser.add_argument('--no_box_regression',action='store_true') parser.add_argument('--gamma', default=10.0, type=float) parser.add_argument('--seed', default=19980430, type=int) parser.add_argument('--print_every', default=500, type=int) parser.add_argument('--eval_every', default=5000,", "instance.image_size) else: mention.bbox_pred = instance.regions[r].bbox stat_result.insert(instance) if args.visualize: instance.visualize_prediction(args.output_dir, split) logger.info('loss_eval = %.4f'", "'visualize', 'dev'), exist_ok=True) if args.do_test: os.makedirs(os.path.join(args.output_dir, 'visualize', 'test'), exist_ok=True) if args.do_train: tokens_train =", "p: p.requires_grad, model.parameters()), lr=args.lr, betas=(0.9,0.98)) logger.info('Training model ...') train(args, model, optimizer, tokens_train, tokens_dev)", "max_n_regions, d_reg=4) ''' def train (args, model, optimizer, tokens_train, tokens_dev): model.train() stat_loss =", "{ 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), } model_path = os.path.join(args.output_dir, 'model.%06d.pth' % (it+1)) torch.save(checkpoint,", "max_n_mentions, n_cats) bool image (batch, d_image) region (batch, max_n_regions, d_region) n_mentions [batch *", "import torch import torch.nn as nn import torch.optim as optim sys.path.insert(0, os.path.dirname(__file__)) from", "int] n_regions [batch * int] _aff (batch, max_n_mentions, max_n_regions, d_aff=1) _reg (batch, max_n_mentions,", "stat_result = StatResult() for token in tokens: X, instance = collect_feats(args, token=token) with", "logger.info('Initializing model ...') model = Model(args).to(args.device) optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr,", "type=int) parser.add_argument('--print_every', default=500, type=int) parser.add_argument('--eval_every', default=5000, type=int) parser.add_argument('--save_every', default=5000, type=int) parser.add_argument('--visualize', action='store_true') parser.add_argument('--kld',", "(it + 1) % args.print_every == 0: logger.info('Iter %d / %d\\tloss_train = %.4f'", "4 set_seed(args.seed) args.device = torch.device('cuda') os.makedirs(args.output_dir, exist_ok=True) if args.visualize: if args.do_train: os.makedirs(os.path.join(args.output_dir, 'visualize',", "'train'), exist_ok=True) os.makedirs(os.path.join(args.output_dir, 'visualize', 'dev'), exist_ok=True) if args.do_test: os.makedirs(os.path.join(args.output_dir, 'visualize', 'test'), exist_ok=True) if", "tokens=samples) optimizer.zero_grad() loss = model(X) loss.backward() nn.utils.clip_grad_value_(model.parameters(), args.max_grad_norm) optimizer.step() stat_loss.insert(loss.item()) if (it +", "= 
Model(args).to(args.device) model_path = os.path.join(args.output_dir, args.model_name) checkpoint = torch.load(model_path) model.load_state_dict(checkpoint['model_state_dict']) logger.info('Testing model ...')", "model, optimizer, tokens_train, tokens_dev): model.train() stat_loss = StatLoss() for it in range(args.iters): samples", "default=None, type=str, required=True) parser.add_argument('--model_name', default='model.pth', type=str) parser.add_argument('--do_train', action='store_true') parser.add_argument('--do_test', action='store_true') parser.add_argument('--n_train', default=29783, type=int)", "args.d_region_spatial args.max_n_regions = 1000 args.d_rank = 1024 args.d_fusion = 1024 args.d_aff = 1", "= parser.parse_args() args.d_lang = 1024 args.max_n_mentions = 20 args.n_cats = 8 args.d_image =", "from utils.data import load_tokens from utils.vision import iou, clip_bbox_to_image, deparameterize_bbox_error from utils.feats import", "import os, sys import argparse import logging import random import numpy as np", "instance.image_size), instance.image_size) else: mention.bbox_pred = instance.regions[r].bbox stat_result.insert(instance) if args.visualize: instance.visualize_prediction(args.output_dir, split) logger.info('loss_eval =", "instance.regions[r].bbox stat_result.insert(instance) if args.visualize: instance.visualize_prediction(args.output_dir, split) logger.info('loss_eval = %.4f' % stat_loss.loss_avg) stat_result.print(logger) def", "model(X) loss.backward() nn.utils.clip_grad_value_(model.parameters(), args.max_grad_norm) optimizer.step() stat_loss.insert(loss.item()) if (it + 1) % args.print_every ==", "with torch.no_grad(): loss, opt, reg = model(X) stat_loss.insert(loss.item()) for c, caption in enumerate(instance.captions):", "split) logger.info('loss_eval = %.4f' % stat_loss.loss_avg) stat_result.print(logger) def main (): parser = argparse.ArgumentParser()", "parser.add_argument('--seed', default=19980430, type=int) parser.add_argument('--print_every', default=500, type=int) parser.add_argument('--eval_every', default=5000, type=int) parser.add_argument('--save_every', default=5000, type=int) parser.add_argument('--visualize',", "(batch, max_n_mentions, max_n_regions, d_reg=4) ''' def train (args, model, optimizer, tokens_train, tokens_dev): model.train()", "= instance.regions[r].bbox stat_result.insert(instance) if args.visualize: instance.visualize_prediction(args.output_dir, split) logger.info('loss_eval = %.4f' % stat_loss.loss_avg) stat_result.print(logger)", "parser.add_argument('--visualize', action='store_true') parser.add_argument('--kld', action='store_true') parser.add_argument('--crf', action='store_true') parser.add_argument('--tran_context', default='none', type=str, help='Transition score context. One", "(it+1, args.iters, stat_loss.loss_avg)) stat_loss = StatLoss() if (it + 1) % args.eval_every ==", ".) int span (batch, max_n_mentions, 2) int cat (batch, max_n_mentions, n_cats) bool image", "optimizer, tokens_train, tokens_dev): model.train() stat_loss = StatLoss() for it in range(args.iters): samples =", "parser.add_argument('--kld', action='store_true') parser.add_argument('--crf', action='store_true') parser.add_argument('--tran_context', default='none', type=str, help='Transition score context. 
One of [none,", "if args.visualize: instance.visualize_prediction(args.output_dir, split) logger.info('loss_eval = %.4f' % stat_loss.loss_avg) stat_result.print(logger) def main ():", "args.d_region_visual = 2048 args.d_region_spatial = 5 args.d_region = args.d_region_visual + args.d_region_spatial args.max_n_regions =", "StatLoss() for it in range(args.iters): samples = random.choices(tokens_train, k=args.batch) X = collect_feats(args, tokens=samples)", "args.d_region_visual + args.d_region_spatial args.max_n_regions = 1000 args.d_rank = 1024 args.d_fusion = 1024 args.d_aff", "% (it+1, args.iters, stat_loss.loss_avg)) stat_loss = StatLoss() if (it + 1) % args.eval_every", "iou, clip_bbox_to_image, deparameterize_bbox_error from utils.feats import collect_feats from utils.stat import StatLoss, StatResult logging.basicConfig(format", "type=str, help='Decode algo. One of [viterbi, smoothing] when --crf') args = parser.parse_args() args.d_lang", "range(args.iters): samples = random.choices(tokens_train, k=args.batch) X = collect_feats(args, tokens=samples) optimizer.zero_grad() loss = model(X)", "1000 args.d_rank = 1024 args.d_fusion = 1024 args.d_aff = 1 args.d_reg = 4", "+ 1) % args.save_every == 0: checkpoint = { 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(),", "type=float) parser.add_argument('--drop_prob', default=0.2, type=float) parser.add_argument('--max_grad_norm', default=10.0, type=float) parser.add_argument('--no_box_regression',action='store_true') parser.add_argument('--gamma', default=10.0, type=float) parser.add_argument('--seed', default=19980430,", "logger.info('Loading model ...') if not args.do_train: model = Model(args).to(args.device) model_path = os.path.join(args.output_dir, args.model_name)", "import numpy as np import torch import torch.nn as nn import torch.optim as", "smoothing] when --crf') args = parser.parse_args() args.d_lang = 1024 args.max_n_mentions = 20 args.n_cats", "args.do_train: model = Model(args).to(args.device) model_path = os.path.join(args.output_dir, args.model_name) checkpoint = torch.load(model_path) model.load_state_dict(checkpoint['model_state_dict']) logger.info('Testing", "required=True) parser.add_argument('--model_name', default='model.pth', type=str) parser.add_argument('--do_train', action='store_true') parser.add_argument('--do_test', action='store_true') parser.add_argument('--n_train', default=29783, type=int) parser.add_argument('--n_dev', default=1000,", "args.d_rank = 1024 args.d_fusion = 1024 args.d_aff = 1 args.d_reg = 4 set_seed(args.seed)", "if not args.do_train: model = Model(args).to(args.device) model_path = os.path.join(args.output_dir, args.model_name) checkpoint = torch.load(model_path)", "for token in tokens: X, instance = collect_feats(args, token=token) with torch.no_grad(): loss, opt,", "level = logging.INFO) logger = logging.getLogger(__name__) def set_seed (seed): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed)", "context. One of [none, m, mlr, mlrg]') parser.add_argument('--decode', default='none', type=str, help='Decode algo. 
One", "StatResult() for token in tokens: X, instance = collect_feats(args, token=token) with torch.no_grad(): loss,", "loss = model(X) loss.backward() nn.utils.clip_grad_value_(model.parameters(), args.max_grad_norm) optimizer.step() stat_loss.insert(loss.item()) if (it + 1) %", "caption in enumerate(instance.captions): for m, mention in enumerate(caption.mentions): r = opt[c][m] if not", "model.load_state_dict(checkpoint['model_state_dict']) logger.info('Testing model ...') eval(args, model, tokens_test, split='test') if __name__ == '__main__': main()", "(args, model, optimizer, tokens_train, tokens_dev): model.train() stat_loss = StatLoss() for it in range(args.iters):", "1) % args.eval_every == 0: eval(args, model, tokens_dev, split='dev') model.train() if (it +", "parser.add_argument('--do_test', action='store_true') parser.add_argument('--n_train', default=29783, type=int) parser.add_argument('--n_dev', default=1000, type=int) parser.add_argument('--n_test', default=1000, type=int) parser.add_argument('--iters', default=50000,", "n_mentions [batch * int] n_regions [batch * int] _aff (batch, max_n_mentions, max_n_regions, d_aff=1)", "default=10.0, type=float) parser.add_argument('--seed', default=19980430, type=int) parser.add_argument('--print_every', default=500, type=int) parser.add_argument('--eval_every', default=5000, type=int) parser.add_argument('--save_every', default=5000,", "instance.regions[r].bbox, instance.image_size), instance.image_size) else: mention.bbox_pred = instance.regions[r].bbox stat_result.insert(instance) if args.visualize: instance.visualize_prediction(args.output_dir, split) logger.info('loss_eval", "= args.d_region_visual + args.d_region_spatial args.max_n_regions = 1000 args.d_rank = 1024 args.d_fusion = 1024", "'optimizer_state_dict': optimizer.state_dict(), } model_path = os.path.join(args.output_dir, 'model.%06d.pth' % (it+1)) torch.save(checkpoint, model_path) def eval", "args.print_every == 0: logger.info('Iter %d / %d\\tloss_train = %.4f' % (it+1, args.iters, stat_loss.loss_avg))", "mlrg]') parser.add_argument('--decode', default='none', type=str, help='Decode algo. 
One of [viterbi, smoothing] when --crf') args", "args.eval_every == 0: eval(args, model, tokens_dev, split='dev') model.train() if (it + 1) %", "%.4f' % stat_loss.loss_avg) stat_result.print(logger) def main (): parser = argparse.ArgumentParser() parser.add_argument('--output_dir', default=None, type=str,", "mention in enumerate(caption.mentions): r = opt[c][m] if not args.no_box_regression: mention.bbox_pred = clip_bbox_to_image(deparameterize_bbox_error(reg[c,m,r].tolist(), instance.regions[r].bbox,", "from utils.feats import collect_feats from utils.stat import StatLoss, StatResult logging.basicConfig(format = '%(asctime)s -", "= 1024 args.d_fusion = 1024 args.d_aff = 1 args.d_reg = 4 set_seed(args.seed) args.device", "loss.backward() nn.utils.clip_grad_value_(model.parameters(), args.max_grad_norm) optimizer.step() stat_loss.insert(loss.item()) if (it + 1) % args.print_every == 0:", "parser.add_argument('--eval_every', default=5000, type=int) parser.add_argument('--save_every', default=5000, type=int) parser.add_argument('--visualize', action='store_true') parser.add_argument('--kld', action='store_true') parser.add_argument('--crf', action='store_true') parser.add_argument('--tran_context',", "default=50000, type=int) parser.add_argument('--batch', default=16, type=int) parser.add_argument('--lr', default=5e-5, type=float) parser.add_argument('--drop_prob', default=0.2, type=float) parser.add_argument('--max_grad_norm', default=10.0,", "default=5000, type=int) parser.add_argument('--visualize', action='store_true') parser.add_argument('--kld', action='store_true') parser.add_argument('--crf', action='store_true') parser.add_argument('--tran_context', default='none', type=str, help='Transition score", "args.do_test: os.makedirs(os.path.join(args.output_dir, 'visualize', 'test'), exist_ok=True) if args.do_train: tokens_train = load_tokens('train', args.n_train) tokens_dev =", "mention.bbox_pred = clip_bbox_to_image(deparameterize_bbox_error(reg[c,m,r].tolist(), instance.regions[r].bbox, instance.image_size), instance.image_size) else: mention.bbox_pred = instance.regions[r].bbox stat_result.insert(instance) if args.visualize:", "''' def train (args, model, optimizer, tokens_train, tokens_dev): model.train() stat_loss = StatLoss() for", "[batch * int] n_regions [batch * int] _aff (batch, max_n_mentions, max_n_regions, d_aff=1) _reg", "model_path = os.path.join(args.output_dir, args.model_name) checkpoint = torch.load(model_path) model.load_state_dict(checkpoint['model_state_dict']) logger.info('Testing model ...') eval(args, model,", "Model from utils.data import load_tokens from utils.vision import iou, clip_bbox_to_image, deparameterize_bbox_error from utils.feats", "int cat (batch, max_n_mentions, n_cats) bool image (batch, d_image) region (batch, max_n_regions, d_region)", "type=int) parser.add_argument('--n_dev', default=1000, type=int) parser.add_argument('--n_test', default=1000, type=int) parser.add_argument('--iters', default=50000, type=int) parser.add_argument('--batch', default=16, type=int)", "utils.vision import iou, clip_bbox_to_image, deparameterize_bbox_error from utils.feats import collect_feats from utils.stat import StatLoss,", "_reg (batch, max_n_mentions, max_n_regions, d_reg=4) ''' def train (args, model, optimizer, tokens_train, tokens_dev):", "tokens, split): model.eval() stat_loss = StatLoss() stat_result = StatResult() for token in tokens:", "(batch, max_n_regions, d_region) n_mentions [batch * int] n_regions [batch * int] _aff (batch,", 
"optimizer.state_dict(), } model_path = os.path.join(args.output_dir, 'model.%06d.pth' % (it+1)) torch.save(checkpoint, model_path) def eval (args,", "One of [viterbi, smoothing] when --crf') args = parser.parse_args() args.d_lang = 1024 args.max_n_mentions", "args.d_lang = 1024 args.max_n_mentions = 20 args.n_cats = 8 args.d_image = 1024 args.d_region_visual", "(it+1)) torch.save(checkpoint, model_path) def eval (args, model, tokens, split): model.eval() stat_loss = StatLoss()", "= load_tokens('dev', args.n_dev) logger.info('Initializing model ...') model = Model(args).to(args.device) optimizer = optim.Adam(filter(lambda p:", "...') train(args, model, optimizer, tokens_train, tokens_dev) if args.do_test: tokens_test = load_tokens('test', args.n_test) logger.info('Loading", "%(levelname)s - %(name)s - %(message)s', datefmt = '%Y/%m/%d %H:%M:%S', level = logging.INFO) logger", "model import Model from utils.data import load_tokens from utils.vision import iou, clip_bbox_to_image, deparameterize_bbox_error", "cat (batch, max_n_mentions, n_cats) bool image (batch, d_image) region (batch, max_n_regions, d_region) n_mentions", "(seed): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.backends.cudnn.deterministic = True ''' X tokenid (batch, seqlen,", "_aff (batch, max_n_mentions, max_n_regions, d_aff=1) _reg (batch, max_n_mentions, max_n_regions, d_reg=4) ''' def train", "type=str) parser.add_argument('--do_train', action='store_true') parser.add_argument('--do_test', action='store_true') parser.add_argument('--n_train', default=29783, type=int) parser.add_argument('--n_dev', default=1000, type=int) parser.add_argument('--n_test', default=1000,", "stat_loss.insert(loss.item()) for c, caption in enumerate(instance.captions): for m, mention in enumerate(caption.mentions): r =", "parser.add_argument('--n_test', default=1000, type=int) parser.add_argument('--iters', default=50000, type=int) parser.add_argument('--batch', default=16, type=int) parser.add_argument('--lr', default=5e-5, type=float) parser.add_argument('--drop_prob',", "type=int) parser.add_argument('--save_every', default=5000, type=int) parser.add_argument('--visualize', action='store_true') parser.add_argument('--kld', action='store_true') parser.add_argument('--crf', action='store_true') parser.add_argument('--tran_context', default='none', type=str,", "if not args.no_box_regression: mention.bbox_pred = clip_bbox_to_image(deparameterize_bbox_error(reg[c,m,r].tolist(), instance.regions[r].bbox, instance.image_size), instance.image_size) else: mention.bbox_pred = instance.regions[r].bbox", "== 0: logger.info('Iter %d / %d\\tloss_train = %.4f' % (it+1, args.iters, stat_loss.loss_avg)) stat_loss", "= 1024 args.d_region_visual = 2048 args.d_region_spatial = 5 args.d_region = args.d_region_visual + args.d_region_spatial", "args.d_image = 1024 args.d_region_visual = 2048 args.d_region_spatial = 5 args.d_region = args.d_region_visual +", "0: checkpoint = { 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), } model_path = os.path.join(args.output_dir, 'model.%06d.pth'", "Model(args).to(args.device) optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, betas=(0.9,0.98)) logger.info('Training model ...') train(args,", "args.n_dev) logger.info('Initializing model ...') model = Model(args).to(args.device) optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),", "m, mention in 
enumerate(caption.mentions): r = opt[c][m] if not args.no_box_regression: mention.bbox_pred = clip_bbox_to_image(deparameterize_bbox_error(reg[c,m,r].tolist(),", "args.visualize: instance.visualize_prediction(args.output_dir, split) logger.info('loss_eval = %.4f' % stat_loss.loss_avg) stat_result.print(logger) def main (): parser", "%.4f' % (it+1, args.iters, stat_loss.loss_avg)) stat_loss = StatLoss() if (it + 1) %", "max_n_mentions, max_n_regions, d_reg=4) ''' def train (args, model, optimizer, tokens_train, tokens_dev): model.train() stat_loss", "max_n_regions, d_region) n_mentions [batch * int] n_regions [batch * int] _aff (batch, max_n_mentions,", "clip_bbox_to_image, deparameterize_bbox_error from utils.feats import collect_feats from utils.stat import StatLoss, StatResult logging.basicConfig(format =", "model, tokens_dev, split='dev') model.train() if (it + 1) % args.save_every == 0: checkpoint", "= load_tokens('test', args.n_test) logger.info('Loading model ...') if not args.do_train: model = Model(args).to(args.device) model_path", "model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), } model_path = os.path.join(args.output_dir, 'model.%06d.pth' % (it+1)) torch.save(checkpoint, model_path) def", "stat_loss.loss_avg) stat_result.print(logger) def main (): parser = argparse.ArgumentParser() parser.add_argument('--output_dir', default=None, type=str, required=True) parser.add_argument('--model_name',", "algo. One of [viterbi, smoothing] when --crf') args = parser.parse_args() args.d_lang = 1024", "StatLoss() stat_result = StatResult() for token in tokens: X, instance = collect_feats(args, token=token)", "[viterbi, smoothing] when --crf') args = parser.parse_args() args.d_lang = 1024 args.max_n_mentions = 20", "datefmt = '%Y/%m/%d %H:%M:%S', level = logging.INFO) logger = logging.getLogger(__name__) def set_seed (seed):", "args.d_region_spatial = 5 args.d_region = args.d_region_visual + args.d_region_spatial args.max_n_regions = 1000 args.d_rank =", "random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.backends.cudnn.deterministic = True ''' X tokenid (batch, seqlen, .)", "logging.INFO) logger = logging.getLogger(__name__) def set_seed (seed): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.backends.cudnn.deterministic =", "X tokenid (batch, seqlen, .) 
int span (batch, max_n_mentions, 2) int cat (batch,", "stat_result.print(logger) def main (): parser = argparse.ArgumentParser() parser.add_argument('--output_dir', default=None, type=str, required=True) parser.add_argument('--model_name', default='model.pth',", "n_cats) bool image (batch, d_image) region (batch, max_n_regions, d_region) n_mentions [batch * int]", "k=args.batch) X = collect_feats(args, tokens=samples) optimizer.zero_grad() loss = model(X) loss.backward() nn.utils.clip_grad_value_(model.parameters(), args.max_grad_norm) optimizer.step()", "stat_loss.loss_avg)) stat_loss = StatLoss() if (it + 1) % args.eval_every == 0: eval(args,", "random.choices(tokens_train, k=args.batch) X = collect_feats(args, tokens=samples) optimizer.zero_grad() loss = model(X) loss.backward() nn.utils.clip_grad_value_(model.parameters(), args.max_grad_norm)", "betas=(0.9,0.98)) logger.info('Training model ...') train(args, model, optimizer, tokens_train, tokens_dev) if args.do_test: tokens_test =", "model, optimizer, tokens_train, tokens_dev) if args.do_test: tokens_test = load_tokens('test', args.n_test) logger.info('Loading model ...')", "import torch.nn as nn import torch.optim as optim sys.path.insert(0, os.path.dirname(__file__)) from model import", "exist_ok=True) os.makedirs(os.path.join(args.output_dir, 'visualize', 'dev'), exist_ok=True) if args.do_test: os.makedirs(os.path.join(args.output_dir, 'visualize', 'test'), exist_ok=True) if args.do_train:", "action='store_true') parser.add_argument('--do_test', action='store_true') parser.add_argument('--n_train', default=29783, type=int) parser.add_argument('--n_dev', default=1000, type=int) parser.add_argument('--n_test', default=1000, type=int) parser.add_argument('--iters',", "int span (batch, max_n_mentions, 2) int cat (batch, max_n_mentions, n_cats) bool image (batch,", "checkpoint = torch.load(model_path) model.load_state_dict(checkpoint['model_state_dict']) logger.info('Testing model ...') eval(args, model, tokens_test, split='test') if __name__", "in enumerate(caption.mentions): r = opt[c][m] if not args.no_box_regression: mention.bbox_pred = clip_bbox_to_image(deparameterize_bbox_error(reg[c,m,r].tolist(), instance.regions[r].bbox, instance.image_size),", "logger.info('Iter %d / %d\\tloss_train = %.4f' % (it+1, args.iters, stat_loss.loss_avg)) stat_loss = StatLoss()", "import collect_feats from utils.stat import StatLoss, StatResult logging.basicConfig(format = '%(asctime)s - %(levelname)s -", "0: logger.info('Iter %d / %d\\tloss_train = %.4f' % (it+1, args.iters, stat_loss.loss_avg)) stat_loss =", "model_path = os.path.join(args.output_dir, 'model.%06d.pth' % (it+1)) torch.save(checkpoint, model_path) def eval (args, model, tokens,", "type=int) parser.add_argument('--eval_every', default=5000, type=int) parser.add_argument('--save_every', default=5000, type=int) parser.add_argument('--visualize', action='store_true') parser.add_argument('--kld', action='store_true') parser.add_argument('--crf', action='store_true')", "tokens_train, tokens_dev) if args.do_test: tokens_test = load_tokens('test', args.n_test) logger.info('Loading model ...') if not", "default=29783, type=int) parser.add_argument('--n_dev', default=1000, type=int) parser.add_argument('--n_test', default=1000, type=int) parser.add_argument('--iters', default=50000, type=int) parser.add_argument('--batch', default=16,", "type=int) parser.add_argument('--visualize', action='store_true') parser.add_argument('--kld', action='store_true') parser.add_argument('--crf', 
action='store_true') parser.add_argument('--tran_context', default='none', type=str, help='Transition score context.", "args.device = torch.device('cuda') os.makedirs(args.output_dir, exist_ok=True) if args.visualize: if args.do_train: os.makedirs(os.path.join(args.output_dir, 'visualize', 'train'), exist_ok=True)", "1024 args.d_aff = 1 args.d_reg = 4 set_seed(args.seed) args.device = torch.device('cuda') os.makedirs(args.output_dir, exist_ok=True)", "utils.stat import StatLoss, StatResult logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',", "(it + 1) % args.eval_every == 0: eval(args, model, tokens_dev, split='dev') model.train() if", "args.d_aff = 1 args.d_reg = 4 set_seed(args.seed) args.device = torch.device('cuda') os.makedirs(args.output_dir, exist_ok=True) if", "torch.backends.cudnn.deterministic = True ''' X tokenid (batch, seqlen, .) int span (batch, max_n_mentions,", "'dev'), exist_ok=True) if args.do_test: os.makedirs(os.path.join(args.output_dir, 'visualize', 'test'), exist_ok=True) if args.do_train: tokens_train = load_tokens('train',", "utils.feats import collect_feats from utils.stat import StatLoss, StatResult logging.basicConfig(format = '%(asctime)s - %(levelname)s", "c, caption in enumerate(instance.captions): for m, mention in enumerate(caption.mentions): r = opt[c][m] if", "max_n_mentions, max_n_regions, d_aff=1) _reg (batch, max_n_mentions, max_n_regions, d_reg=4) ''' def train (args, model,", "= collect_feats(args, tokens=samples) optimizer.zero_grad() loss = model(X) loss.backward() nn.utils.clip_grad_value_(model.parameters(), args.max_grad_norm) optimizer.step() stat_loss.insert(loss.item()) if", "20 args.n_cats = 8 args.d_image = 1024 args.d_region_visual = 2048 args.d_region_spatial = 5", "0: eval(args, model, tokens_dev, split='dev') model.train() if (it + 1) % args.save_every ==", "% (it+1)) torch.save(checkpoint, model_path) def eval (args, model, tokens, split): model.eval() stat_loss =", "seqlen, .) 
int span (batch, max_n_mentions, 2) int cat (batch, max_n_mentions, n_cats) bool", "collect_feats(args, tokens=samples) optimizer.zero_grad() loss = model(X) loss.backward() nn.utils.clip_grad_value_(model.parameters(), args.max_grad_norm) optimizer.step() stat_loss.insert(loss.item()) if (it", "= collect_feats(args, token=token) with torch.no_grad(): loss, opt, reg = model(X) stat_loss.insert(loss.item()) for c,", "args.iters, stat_loss.loss_avg)) stat_loss = StatLoss() if (it + 1) % args.eval_every == 0:", "default=1000, type=int) parser.add_argument('--n_test', default=1000, type=int) parser.add_argument('--iters', default=50000, type=int) parser.add_argument('--batch', default=16, type=int) parser.add_argument('--lr', default=5e-5,", "model, tokens, split): model.eval() stat_loss = StatLoss() stat_result = StatResult() for token in", "if args.do_train: tokens_train = load_tokens('train', args.n_train) tokens_dev = load_tokens('dev', args.n_dev) logger.info('Initializing model ...')", "% args.print_every == 0: logger.info('Iter %d / %d\\tloss_train = %.4f' % (it+1, args.iters,", "= argparse.ArgumentParser() parser.add_argument('--output_dir', default=None, type=str, required=True) parser.add_argument('--model_name', default='model.pth', type=str) parser.add_argument('--do_train', action='store_true') parser.add_argument('--do_test', action='store_true')", "default=10.0, type=float) parser.add_argument('--no_box_regression',action='store_true') parser.add_argument('--gamma', default=10.0, type=float) parser.add_argument('--seed', default=19980430, type=int) parser.add_argument('--print_every', default=500, type=int) parser.add_argument('--eval_every',", "logger.info('loss_eval = %.4f' % stat_loss.loss_avg) stat_result.print(logger) def main (): parser = argparse.ArgumentParser() parser.add_argument('--output_dir',", "os.path.join(args.output_dir, args.model_name) checkpoint = torch.load(model_path) model.load_state_dict(checkpoint['model_state_dict']) logger.info('Testing model ...') eval(args, model, tokens_test, split='test')", "= 1 args.d_reg = 4 set_seed(args.seed) args.device = torch.device('cuda') os.makedirs(args.output_dir, exist_ok=True) if args.visualize:", "} model_path = os.path.join(args.output_dir, 'model.%06d.pth' % (it+1)) torch.save(checkpoint, model_path) def eval (args, model,", "(batch, seqlen, .) int span (batch, max_n_mentions, 2) int cat (batch, max_n_mentions, n_cats)", "d_region) n_mentions [batch * int] n_regions [batch * int] _aff (batch, max_n_mentions, max_n_regions,", "sys.path.insert(0, os.path.dirname(__file__)) from model import Model from utils.data import load_tokens from utils.vision import", "args.d_fusion = 1024 args.d_aff = 1 args.d_reg = 4 set_seed(args.seed) args.device = torch.device('cuda')", "as optim sys.path.insert(0, os.path.dirname(__file__)) from model import Model from utils.data import load_tokens from", "import torch.optim as optim sys.path.insert(0, os.path.dirname(__file__)) from model import Model from utils.data import", "%d / %d\\tloss_train = %.4f' % (it+1, args.iters, stat_loss.loss_avg)) stat_loss = StatLoss() if", "= logging.getLogger(__name__) def set_seed (seed): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.backends.cudnn.deterministic = True '''", "logging import random import numpy as np import torch import torch.nn as nn", "of [none, m, mlr, mlrg]') parser.add_argument('--decode', default='none', type=str, help='Decode algo. 
One of [viterbi,", "* int] n_regions [batch * int] _aff (batch, max_n_mentions, max_n_regions, d_aff=1) _reg (batch,", "sys import argparse import logging import random import numpy as np import torch", "= '%Y/%m/%d %H:%M:%S', level = logging.INFO) logger = logging.getLogger(__name__) def set_seed (seed): random.seed(seed)", "optimizer.step() stat_loss.insert(loss.item()) if (it + 1) % args.print_every == 0: logger.info('Iter %d /", "not args.no_box_regression: mention.bbox_pred = clip_bbox_to_image(deparameterize_bbox_error(reg[c,m,r].tolist(), instance.regions[r].bbox, instance.image_size), instance.image_size) else: mention.bbox_pred = instance.regions[r].bbox stat_result.insert(instance)", "= 2048 args.d_region_spatial = 5 args.d_region = args.d_region_visual + args.d_region_spatial args.max_n_regions = 1000", "type=int) parser.add_argument('--lr', default=5e-5, type=float) parser.add_argument('--drop_prob', default=0.2, type=float) parser.add_argument('--max_grad_norm', default=10.0, type=float) parser.add_argument('--no_box_regression',action='store_true') parser.add_argument('--gamma', default=10.0,", "stat_result.insert(instance) if args.visualize: instance.visualize_prediction(args.output_dir, split) logger.info('loss_eval = %.4f' % stat_loss.loss_avg) stat_result.print(logger) def main", "model.train() if (it + 1) % args.save_every == 0: checkpoint = { 'model_state_dict':", "if args.do_train: os.makedirs(os.path.join(args.output_dir, 'visualize', 'train'), exist_ok=True) os.makedirs(os.path.join(args.output_dir, 'visualize', 'dev'), exist_ok=True) if args.do_test: os.makedirs(os.path.join(args.output_dir,", "parser.add_argument('--print_every', default=500, type=int) parser.add_argument('--eval_every', default=5000, type=int) parser.add_argument('--save_every', default=5000, type=int) parser.add_argument('--visualize', action='store_true') parser.add_argument('--kld', action='store_true')", "span (batch, max_n_mentions, 2) int cat (batch, max_n_mentions, n_cats) bool image (batch, d_image)", "os.path.dirname(__file__)) from model import Model from utils.data import load_tokens from utils.vision import iou,", "instance.visualize_prediction(args.output_dir, split) logger.info('loss_eval = %.4f' % stat_loss.loss_avg) stat_result.print(logger) def main (): parser =", "of [viterbi, smoothing] when --crf') args = parser.parse_args() args.d_lang = 1024 args.max_n_mentions =", "= %.4f' % stat_loss.loss_avg) stat_result.print(logger) def main (): parser = argparse.ArgumentParser() parser.add_argument('--output_dir', default=None,", "type=int) parser.add_argument('--iters', default=50000, type=int) parser.add_argument('--batch', default=16, type=int) parser.add_argument('--lr', default=5e-5, type=float) parser.add_argument('--drop_prob', default=0.2, type=float)", "args.do_train: tokens_train = load_tokens('train', args.n_train) tokens_dev = load_tokens('dev', args.n_dev) logger.info('Initializing model ...') model", "logger = logging.getLogger(__name__) def set_seed (seed): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.backends.cudnn.deterministic = True", "stat_loss.insert(loss.item()) if (it + 1) % args.print_every == 0: logger.info('Iter %d / %d\\tloss_train", "/ %d\\tloss_train = %.4f' % (it+1, args.iters, stat_loss.loss_avg)) stat_loss = StatLoss() if (it", "args.model_name) checkpoint = torch.load(model_path) model.load_state_dict(checkpoint['model_state_dict']) logger.info('Testing model ...') eval(args, model, tokens_test, 
split='test') if" ]
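For completeness, a minimal sketch (not part of the original script) of how one of the periodic checkpoints written in train() could be restored before further training or testing. The resume() helper name and the iteration number are hypothetical assumptions; the checkpoint keys and the Model/optimizer setup mirror main() above.

import os
import torch
import torch.optim as optim
from model import Model

def resume(args, iteration=5000):
    # Rebuild the model and optimizer the same way main() does, then restore the saved states.
    model = Model(args).to(args.device)
    optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                           lr=args.lr, betas=(0.9, 0.98))
    # The filename pattern matches the one used in train(); the iteration is a made-up example.
    model_path = os.path.join(args.output_dir, 'model.%06d.pth' % iteration)
    checkpoint = torch.load(model_path)
    model.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    return model, optimizer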
[ "发送请求 client.send(request.encode()) # 接受回复 response = client.recv(65536).decode() # 关闭套接字 client.close() # 打印操作结果 print(\"request:", "设置 def send_set_request(key, value): request = { \"op\": \"set\", \"params\": { \"key\": key,", "cmd == \"exit\": send_exit_request() else: print(\"usage: cli.py <cmd> <params>\") except Exception as e:", "echo def send_echo_request(): request = { \"op\": \"echo\", \"params\": {} } send_request(json.dumps(request)) #", "\"get\": try: send_get_request(params[0]) except IndexError: print(\"usage: cli.py get <key>\") elif cmd == \"exit\":", "client.recv(65536).decode() # 关闭套接字 client.close() # 打印操作结果 print(\"request: {}\".format(request)) print(\"response: {}\".format(response)) return response #", "send_get_request(key): request = { \"op\": \"get\", \"params\": { \"key\": key } } send_request(json.dumps(request))", "\"value\": value } } send_request(json.dumps(request)) # 获取 def send_get_request(key): request = { \"op\":", "socket.socket( socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP) # 连接 client.connect((\"127.0.0.1\", 1024)) # 发送请求 client.send(request.encode()) # 接受回复", "打印操作结果 print(\"request: {}\".format(request)) print(\"response: {}\".format(response)) return response # 设置 def send_set_request(key, value): request", "获取 def send_get_request(key): request = { \"op\": \"get\", \"params\": { \"key\": key }", "# 主函数 if __name__ == \"__main__\": if len(sys.argv) == 1: print(\"usage: cli.py <cmd>", "send_request(json.dumps(request)) # 获取 def send_get_request(key): request = { \"op\": \"get\", \"params\": { \"key\":", "cmd == \"get\": try: send_get_request(params[0]) except IndexError: print(\"usage: cli.py get <key>\") elif cmd", "{ \"op\": \"set\", \"params\": { \"key\": key, \"value\": value } } send_request(json.dumps(request)) #", "# 发送请求 def send_request(request: str) -> str: # 创建套接字 client = socket.socket( socket.AF_INET,", "send_set_request(key, value): request = { \"op\": \"set\", \"params\": { \"key\": key, \"value\": value", "{ \"op\": \"get\", \"params\": { \"key\": key } } send_request(json.dumps(request)) # 退出 def", "\"key\": key } } send_request(json.dumps(request)) # 退出 def send_exit_request(): request = { \"op\":", "set <key> <value>\") elif cmd == \"get\": try: send_get_request(params[0]) except IndexError: print(\"usage: cli.py", "= { \"op\": \"get\", \"params\": { \"key\": key } } send_request(json.dumps(request)) # 退出", "request = { \"op\": \"echo\", \"params\": {} } send_request(json.dumps(request)) # 主函数 if __name__", "# 创建套接字 client = socket.socket( socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP) # 连接 client.connect((\"127.0.0.1\", 1024)) #", "if cmd == \"echo\": send_echo_request() elif cmd == \"set\": try: send_set_request(params[0], params[1]) except", "cli.py get <key>\") elif cmd == \"exit\": send_exit_request() else: print(\"usage: cli.py <cmd> <params>\")", "try: cmd = sys.argv[1] params = sys.argv[2:] if cmd == \"echo\": send_echo_request() elif", "elif cmd == \"exit\": send_exit_request() else: print(\"usage: cli.py <cmd> <params>\") except Exception as", "1024)) # 发送请求 client.send(request.encode()) # 接受回复 response = client.recv(65536).decode() # 关闭套接字 client.close() #", "request = { \"op\": \"set\", \"params\": { \"key\": key, \"value\": value } }", "\"key\": key, \"value\": value } } send_request(json.dumps(request)) # 获取 def send_get_request(key): request =", "send_echo_request(): request = { \"op\": \"echo\", \"params\": {} } send_request(json.dumps(request)) # 主函数 if", "\"get\", \"params\": { \"key\": key 
} } send_request(json.dumps(request)) # 退出 def send_exit_request(): request", "value } } send_request(json.dumps(request)) # 获取 def send_get_request(key): request = { \"op\": \"get\",", "import socket import json import sys # 发送请求 def send_request(request: str) -> str:", "\"op\": \"exit\", \"params\": {} } send_request(json.dumps(request)) # echo def send_echo_request(): request = {", "send_request(json.dumps(request)) # echo def send_echo_request(): request = { \"op\": \"echo\", \"params\": {} }", "{ \"op\": \"exit\", \"params\": {} } send_request(json.dumps(request)) # echo def send_echo_request(): request =", "response # 设置 def send_set_request(key, value): request = { \"op\": \"set\", \"params\": {", "# 命令行工具 import argparse import socket import json import sys # 发送请求 def", "} } send_request(json.dumps(request)) # 退出 def send_exit_request(): request = { \"op\": \"exit\", \"params\":", "== \"__main__\": if len(sys.argv) == 1: print(\"usage: cli.py <cmd> <params>\") else: try: cmd", "except IndexError: print(\"usage: cli.py set <key> <value>\") elif cmd == \"get\": try: send_get_request(params[0])", "创建套接字 client = socket.socket( socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP) # 连接 client.connect((\"127.0.0.1\", 1024)) # 发送请求", "argparse import socket import json import sys # 发送请求 def send_request(request: str) ->", "key } } send_request(json.dumps(request)) # 退出 def send_exit_request(): request = { \"op\": \"exit\",", "IndexError: print(\"usage: cli.py get <key>\") elif cmd == \"exit\": send_exit_request() else: print(\"usage: cli.py", "# 连接 client.connect((\"127.0.0.1\", 1024)) # 发送请求 client.send(request.encode()) # 接受回复 response = client.recv(65536).decode() #", "# 退出 def send_exit_request(): request = { \"op\": \"exit\", \"params\": {} } send_request(json.dumps(request))", "cmd == \"echo\": send_echo_request() elif cmd == \"set\": try: send_set_request(params[0], params[1]) except IndexError:", "send_request(request: str) -> str: # 创建套接字 client = socket.socket( socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP) #", "#!/usr/bin/python3 # coding:utf-8 # 命令行工具 import argparse import socket import json import sys", "== \"exit\": send_exit_request() else: print(\"usage: cli.py <cmd> <params>\") except Exception as e: print(e)", "str) -> str: # 创建套接字 client = socket.socket( socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP) # 连接", "接受回复 response = client.recv(65536).decode() # 关闭套接字 client.close() # 打印操作结果 print(\"request: {}\".format(request)) print(\"response: {}\".format(response))", "# 获取 def send_get_request(key): request = { \"op\": \"get\", \"params\": { \"key\": key", "# echo def send_echo_request(): request = { \"op\": \"echo\", \"params\": {} } send_request(json.dumps(request))", "if __name__ == \"__main__\": if len(sys.argv) == 1: print(\"usage: cli.py <cmd> <params>\") else:", "def send_exit_request(): request = { \"op\": \"exit\", \"params\": {} } send_request(json.dumps(request)) # echo", "# 接受回复 response = client.recv(65536).decode() # 关闭套接字 client.close() # 打印操作结果 print(\"request: {}\".format(request)) print(\"response:", "else: try: cmd = sys.argv[1] params = sys.argv[2:] if cmd == \"echo\": send_echo_request()", "1: print(\"usage: cli.py <cmd> <params>\") else: try: cmd = sys.argv[1] params = sys.argv[2:]", "# 打印操作结果 print(\"request: {}\".format(request)) print(\"response: {}\".format(response)) return response # 设置 def send_set_request(key, value):", "连接 client.connect((\"127.0.0.1\", 1024)) # 发送请求 client.send(request.encode()) # 接受回复 response = 
client.recv(65536).decode() # 关闭套接字", "len(sys.argv) == 1: print(\"usage: cli.py <cmd> <params>\") else: try: cmd = sys.argv[1] params", "client.connect((\"127.0.0.1\", 1024)) # 发送请求 client.send(request.encode()) # 接受回复 response = client.recv(65536).decode() # 关闭套接字 client.close()", "cli.py set <key> <value>\") elif cmd == \"get\": try: send_get_request(params[0]) except IndexError: print(\"usage:", "client.send(request.encode()) # 接受回复 response = client.recv(65536).decode() # 关闭套接字 client.close() # 打印操作结果 print(\"request: {}\".format(request))", "{ \"key\": key, \"value\": value } } send_request(json.dumps(request)) # 获取 def send_get_request(key): request", "print(\"usage: cli.py <cmd> <params>\") else: try: cmd = sys.argv[1] params = sys.argv[2:] if", "\"echo\": send_echo_request() elif cmd == \"set\": try: send_set_request(params[0], params[1]) except IndexError: print(\"usage: cli.py", "IndexError: print(\"usage: cli.py set <key> <value>\") elif cmd == \"get\": try: send_get_request(params[0]) except", "= { \"op\": \"exit\", \"params\": {} } send_request(json.dumps(request)) # echo def send_echo_request(): request", "send_set_request(params[0], params[1]) except IndexError: print(\"usage: cli.py set <key> <value>\") elif cmd == \"get\":", "{} } send_request(json.dumps(request)) # 主函数 if __name__ == \"__main__\": if len(sys.argv) == 1:", "try: send_get_request(params[0]) except IndexError: print(\"usage: cli.py get <key>\") elif cmd == \"exit\": send_exit_request()", "str: # 创建套接字 client = socket.socket( socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP) # 连接 client.connect((\"127.0.0.1\", 1024))", "def send_get_request(key): request = { \"op\": \"get\", \"params\": { \"key\": key } }", "<key> <value>\") elif cmd == \"get\": try: send_get_request(params[0]) except IndexError: print(\"usage: cli.py get", "= { \"op\": \"echo\", \"params\": {} } send_request(json.dumps(request)) # 主函数 if __name__ ==", "sys.argv[1] params = sys.argv[2:] if cmd == \"echo\": send_echo_request() elif cmd == \"set\":", "\"op\": \"get\", \"params\": { \"key\": key } } send_request(json.dumps(request)) # 退出 def send_exit_request():", "# 设置 def send_set_request(key, value): request = { \"op\": \"set\", \"params\": { \"key\":", "def send_set_request(key, value): request = { \"op\": \"set\", \"params\": { \"key\": key, \"value\":", "\"params\": {} } send_request(json.dumps(request)) # echo def send_echo_request(): request = { \"op\": \"echo\",", "= { \"op\": \"set\", \"params\": { \"key\": key, \"value\": value } } send_request(json.dumps(request))", "主函数 if __name__ == \"__main__\": if len(sys.argv) == 1: print(\"usage: cli.py <cmd> <params>\")", "send_echo_request() elif cmd == \"set\": try: send_set_request(params[0], params[1]) except IndexError: print(\"usage: cli.py set", "params[1]) except IndexError: print(\"usage: cli.py set <key> <value>\") elif cmd == \"get\": try:", "print(\"usage: cli.py get <key>\") elif cmd == \"exit\": send_exit_request() else: print(\"usage: cli.py <cmd>", "\"op\": \"set\", \"params\": { \"key\": key, \"value\": value } } send_request(json.dumps(request)) # 获取", "params = sys.argv[2:] if cmd == \"echo\": send_echo_request() elif cmd == \"set\": try:", "} send_request(json.dumps(request)) # 主函数 if __name__ == \"__main__\": if len(sys.argv) == 1: print(\"usage:", "get <key>\") elif cmd == \"exit\": send_exit_request() else: print(\"usage: cli.py <cmd> <params>\") except", "{} } send_request(json.dumps(request)) # echo def send_echo_request(): request = { \"op\": \"echo\", 
\"params\":", "<cmd> <params>\") else: try: cmd = sys.argv[1] params = sys.argv[2:] if cmd ==", "client.close() # 打印操作结果 print(\"request: {}\".format(request)) print(\"response: {}\".format(response)) return response # 设置 def send_set_request(key,", "import argparse import socket import json import sys # 发送请求 def send_request(request: str)", "\"op\": \"echo\", \"params\": {} } send_request(json.dumps(request)) # 主函数 if __name__ == \"__main__\": if", "} send_request(json.dumps(request)) # echo def send_echo_request(): request = { \"op\": \"echo\", \"params\": {}", "json import sys # 发送请求 def send_request(request: str) -> str: # 创建套接字 client", "except IndexError: print(\"usage: cli.py get <key>\") elif cmd == \"exit\": send_exit_request() else: print(\"usage:", "key, \"value\": value } } send_request(json.dumps(request)) # 获取 def send_get_request(key): request = {", "send_request(json.dumps(request)) # 退出 def send_exit_request(): request = { \"op\": \"exit\", \"params\": {} }", "} send_request(json.dumps(request)) # 退出 def send_exit_request(): request = { \"op\": \"exit\", \"params\": {}", "== \"get\": try: send_get_request(params[0]) except IndexError: print(\"usage: cli.py get <key>\") elif cmd ==", "socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP) # 连接 client.connect((\"127.0.0.1\", 1024)) # 发送请求 client.send(request.encode()) # 接受回复 response", "\"echo\", \"params\": {} } send_request(json.dumps(request)) # 主函数 if __name__ == \"__main__\": if len(sys.argv)", "\"set\", \"params\": { \"key\": key, \"value\": value } } send_request(json.dumps(request)) # 获取 def", "退出 def send_exit_request(): request = { \"op\": \"exit\", \"params\": {} } send_request(json.dumps(request)) #", "value): request = { \"op\": \"set\", \"params\": { \"key\": key, \"value\": value }", "cmd == \"set\": try: send_set_request(params[0], params[1]) except IndexError: print(\"usage: cli.py set <key> <value>\")", "send_get_request(params[0]) except IndexError: print(\"usage: cli.py get <key>\") elif cmd == \"exit\": send_exit_request() else:", "import sys # 发送请求 def send_request(request: str) -> str: # 创建套接字 client =", "关闭套接字 client.close() # 打印操作结果 print(\"request: {}\".format(request)) print(\"response: {}\".format(response)) return response # 设置 def", "\"exit\", \"params\": {} } send_request(json.dumps(request)) # echo def send_echo_request(): request = { \"op\":", "request = { \"op\": \"get\", \"params\": { \"key\": key } } send_request(json.dumps(request)) #", "{ \"key\": key } } send_request(json.dumps(request)) # 退出 def send_exit_request(): request = {", "def send_echo_request(): request = { \"op\": \"echo\", \"params\": {} } send_request(json.dumps(request)) # 主函数", "sys.argv[2:] if cmd == \"echo\": send_echo_request() elif cmd == \"set\": try: send_set_request(params[0], params[1])", "# coding:utf-8 # 命令行工具 import argparse import socket import json import sys #", "<value>\") elif cmd == \"get\": try: send_get_request(params[0]) except IndexError: print(\"usage: cli.py get <key>\")", "send_request(json.dumps(request)) # 主函数 if __name__ == \"__main__\": if len(sys.argv) == 1: print(\"usage: cli.py", "__name__ == \"__main__\": if len(sys.argv) == 1: print(\"usage: cli.py <cmd> <params>\") else: try:", "socket import json import sys # 发送请求 def send_request(request: str) -> str: #", "elif cmd == \"get\": try: send_get_request(params[0]) except IndexError: print(\"usage: cli.py get <key>\") elif", "} } send_request(json.dumps(request)) # 获取 def send_get_request(key): request = { \"op\": \"get\", \"params\":", "if 
len(sys.argv) == 1: print(\"usage: cli.py <cmd> <params>\") else: try: cmd = sys.argv[1]", "return response # 设置 def send_set_request(key, value): request = { \"op\": \"set\", \"params\":", "client = socket.socket( socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP) # 连接 client.connect((\"127.0.0.1\", 1024)) # 发送请求 client.send(request.encode())", "coding:utf-8 # 命令行工具 import argparse import socket import json import sys # 发送请求", "= client.recv(65536).decode() # 关闭套接字 client.close() # 打印操作结果 print(\"request: {}\".format(request)) print(\"response: {}\".format(response)) return response", "cli.py <cmd> <params>\") else: try: cmd = sys.argv[1] params = sys.argv[2:] if cmd", "== 1: print(\"usage: cli.py <cmd> <params>\") else: try: cmd = sys.argv[1] params =", "\"set\": try: send_set_request(params[0], params[1]) except IndexError: print(\"usage: cli.py set <key> <value>\") elif cmd", "socket.IPPROTO_TCP) # 连接 client.connect((\"127.0.0.1\", 1024)) # 发送请求 client.send(request.encode()) # 接受回复 response = client.recv(65536).decode()", "\"__main__\": if len(sys.argv) == 1: print(\"usage: cli.py <cmd> <params>\") else: try: cmd =", "socket.SOCK_STREAM, socket.IPPROTO_TCP) # 连接 client.connect((\"127.0.0.1\", 1024)) # 发送请求 client.send(request.encode()) # 接受回复 response =", "-> str: # 创建套接字 client = socket.socket( socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP) # 连接 client.connect((\"127.0.0.1\",", "def send_request(request: str) -> str: # 创建套接字 client = socket.socket( socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)", "命令行工具 import argparse import socket import json import sys # 发送请求 def send_request(request:", "import json import sys # 发送请求 def send_request(request: str) -> str: # 创建套接字", "request = { \"op\": \"exit\", \"params\": {} } send_request(json.dumps(request)) # echo def send_echo_request():", "<key>\") elif cmd == \"exit\": send_exit_request() else: print(\"usage: cli.py <cmd> <params>\") except Exception", "print(\"usage: cli.py set <key> <value>\") elif cmd == \"get\": try: send_get_request(params[0]) except IndexError:", "send_exit_request(): request = { \"op\": \"exit\", \"params\": {} } send_request(json.dumps(request)) # echo def", "{}\".format(request)) print(\"response: {}\".format(response)) return response # 设置 def send_set_request(key, value): request = {", "\"params\": { \"key\": key } } send_request(json.dumps(request)) # 退出 def send_exit_request(): request =", "== \"set\": try: send_set_request(params[0], params[1]) except IndexError: print(\"usage: cli.py set <key> <value>\") elif", "cmd = sys.argv[1] params = sys.argv[2:] if cmd == \"echo\": send_echo_request() elif cmd", "\"params\": { \"key\": key, \"value\": value } } send_request(json.dumps(request)) # 获取 def send_get_request(key):", "{ \"op\": \"echo\", \"params\": {} } send_request(json.dumps(request)) # 主函数 if __name__ == \"__main__\":", "<params>\") else: try: cmd = sys.argv[1] params = sys.argv[2:] if cmd == \"echo\":", "print(\"response: {}\".format(response)) return response # 设置 def send_set_request(key, value): request = { \"op\":", "try: send_set_request(params[0], params[1]) except IndexError: print(\"usage: cli.py set <key> <value>\") elif cmd ==", "= socket.socket( socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP) # 连接 client.connect((\"127.0.0.1\", 1024)) # 发送请求 client.send(request.encode()) #", "response = client.recv(65536).decode() # 关闭套接字 client.close() # 打印操作结果 print(\"request: {}\".format(request)) print(\"response: {}\".format(response)) return", "\"params\": {} } 
send_request(json.dumps(request)) # 主函数 if __name__ == \"__main__\": if len(sys.argv) ==", "== \"echo\": send_echo_request() elif cmd == \"set\": try: send_set_request(params[0], params[1]) except IndexError: print(\"usage:", "= sys.argv[1] params = sys.argv[2:] if cmd == \"echo\": send_echo_request() elif cmd ==", "# 发送请求 client.send(request.encode()) # 接受回复 response = client.recv(65536).decode() # 关闭套接字 client.close() # 打印操作结果", "print(\"request: {}\".format(request)) print(\"response: {}\".format(response)) return response # 设置 def send_set_request(key, value): request =", "{}\".format(response)) return response # 设置 def send_set_request(key, value): request = { \"op\": \"set\",", "} send_request(json.dumps(request)) # 获取 def send_get_request(key): request = { \"op\": \"get\", \"params\": {", "# 关闭套接字 client.close() # 打印操作结果 print(\"request: {}\".format(request)) print(\"response: {}\".format(response)) return response # 设置", "<reponame>MurmurWheel/Raft<filename>cli.py #!/usr/bin/python3 # coding:utf-8 # 命令行工具 import argparse import socket import json import", "sys # 发送请求 def send_request(request: str) -> str: # 创建套接字 client = socket.socket(", "= sys.argv[2:] if cmd == \"echo\": send_echo_request() elif cmd == \"set\": try: send_set_request(params[0],", "发送请求 def send_request(request: str) -> str: # 创建套接字 client = socket.socket( socket.AF_INET, socket.SOCK_STREAM,", "elif cmd == \"set\": try: send_set_request(params[0], params[1]) except IndexError: print(\"usage: cli.py set <key>" ]
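A brief usage sketch, assuming the functions above are importable from cli.py and a compatible server is already listening on the hard-coded 127.0.0.1:1024. The key and value used here are made-up examples, and the replies depend entirely on the server.

# Programmatic use of the helpers defined in cli.py (server must be running).
from cli import send_set_request, send_get_request, send_echo_request

send_set_request("name", "raft")   # sends {"op": "set", "params": {"key": "name", "value": "raft"}}
send_get_request("name")           # sends {"op": "get", "params": {"key": "name"}}
send_echo_request()                # sends {"op": "echo", "params": {}}

# Equivalent shell invocations handled by the __main__ block:
#   python3 cli.py set name raft
#   python3 cli.py get name
#   python3 cli.py echo
#   python3 cli.py exit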
[ "положительных элементов кратных 11, их # количество и вывести результаты на экран. if", "# Ввести список А из 10 элементов, найти разность положительных элементов кратных 11,", "b == 0 and lst[i] % 11 == 0: b = lst[i] if", "i in range(10): print(\"Введите\", i+1, \"элемент\") lst[i] = int(input()) if lst[i] > 0:", "> 0: if b == 0 and lst[i] % 11 == 0: b", "0: b = lst[i] if lst[i] % 11 == 0: dif -= lst[i]", "dif -= lst[i] count += 1 print(\"Изначальный список: \", lst, \"разность положительных элементов", "экран. if __name__ == '__main__': lst = [0] * 10 count = 0", "python3 # -*- coding: utf-8 -*- # Ввести список А из 10 элементов,", "0 b = 0 for i in range(10): print(\"Введите\", i+1, \"элемент\") lst[i] =", "элементов, найти разность положительных элементов кратных 11, их # количество и вывести результаты", "* 10 count = 0 dif = 0 b = 0 for i", "+= 1 print(\"Изначальный список: \", lst, \"разность положительных элементов кратных 11:\", dif +", "lst[i] if lst[i] % 11 == 0: dif -= lst[i] count += 1", "0 for i in range(10): print(\"Введите\", i+1, \"элемент\") lst[i] = int(input()) if lst[i]", "% 11 == 0: dif -= lst[i] count += 1 print(\"Изначальный список: \",", "= 0 dif = 0 b = 0 for i in range(10): print(\"Введите\",", "print(\"Введите\", i+1, \"элемент\") lst[i] = int(input()) if lst[i] > 0: if b ==", "11, их # количество и вывести результаты на экран. if __name__ == '__main__':", "кратных 11, их # количество и вывести результаты на экран. if __name__ ==", "-= lst[i] count += 1 print(\"Изначальный список: \", lst, \"разность положительных элементов кратных", "for i in range(10): print(\"Введите\", i+1, \"элемент\") lst[i] = int(input()) if lst[i] >", "1 print(\"Изначальный список: \", lst, \"разность положительных элементов кратных 11:\", dif + (b", "из 10 элементов, найти разность положительных элементов кратных 11, их # количество и", "количество и вывести результаты на экран. if __name__ == '__main__': lst = [0]", "b = 0 for i in range(10): print(\"Введите\", i+1, \"элемент\") lst[i] = int(input())", "== 0: b = lst[i] if lst[i] % 11 == 0: dif -=", "lst[i] = int(input()) if lst[i] > 0: if b == 0 and lst[i]", "count += 1 print(\"Изначальный список: \", lst, \"разность положительных элементов кратных 11:\", dif", "0: dif -= lst[i] count += 1 print(\"Изначальный список: \", lst, \"разность положительных", "if __name__ == '__main__': lst = [0] * 10 count = 0 dif", "вывести результаты на экран. if __name__ == '__main__': lst = [0] * 10", "-*- coding: utf-8 -*- # Ввести список А из 10 элементов, найти разность", "и вывести результаты на экран. if __name__ == '__main__': lst = [0] *", "10 count = 0 dif = 0 b = 0 for i in", "lst[i] % 11 == 0: dif -= lst[i] count += 1 print(\"Изначальный список:", "lst[i] count += 1 print(\"Изначальный список: \", lst, \"разность положительных элементов кратных 11:\",", "[0] * 10 count = 0 dif = 0 b = 0 for", "Ввести список А из 10 элементов, найти разность положительных элементов кратных 11, их", "= [0] * 10 count = 0 dif = 0 b = 0", "0 dif = 0 b = 0 for i in range(10): print(\"Введите\", i+1,", "= int(input()) if lst[i] > 0: if b == 0 and lst[i] %", "\", lst, \"разность положительных элементов кратных 11:\", dif + (b * 2), \"количество\",", "результаты на экран. 
if __name__ == '__main__': lst = [0] * 10 count", "lst[i] > 0: if b == 0 and lst[i] % 11 == 0:", "= 0 b = 0 for i in range(10): print(\"Введите\", i+1, \"элемент\") lst[i]", "# -*- coding: utf-8 -*- # Ввести список А из 10 элементов, найти", "if lst[i] > 0: if b == 0 and lst[i] % 11 ==", "<reponame>Time2003/lr7 #!/usr/bin/env python3 # -*- coding: utf-8 -*- # Ввести список А из", "их # количество и вывести результаты на экран. if __name__ == '__main__': lst", "\"элемент\") lst[i] = int(input()) if lst[i] > 0: if b == 0 and", "if b == 0 and lst[i] % 11 == 0: b = lst[i]", "элементов кратных 11, их # количество и вывести результаты на экран. if __name__", "count = 0 dif = 0 b = 0 for i in range(10):", "== 0 and lst[i] % 11 == 0: b = lst[i] if lst[i]", "lst, \"разность положительных элементов кратных 11:\", dif + (b * 2), \"количество\", count)", "11 == 0: dif -= lst[i] count += 1 print(\"Изначальный список: \", lst,", "10 элементов, найти разность положительных элементов кратных 11, их # количество и вывести", "lst = [0] * 10 count = 0 dif = 0 b =", "= 0 for i in range(10): print(\"Введите\", i+1, \"элемент\") lst[i] = int(input()) if", "if lst[i] % 11 == 0: dif -= lst[i] count += 1 print(\"Изначальный", "11 == 0: b = lst[i] if lst[i] % 11 == 0: dif", "int(input()) if lst[i] > 0: if b == 0 and lst[i] % 11", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- # Ввести список А из 10", "0 and lst[i] % 11 == 0: b = lst[i] if lst[i] %", "and lst[i] % 11 == 0: b = lst[i] if lst[i] % 11", "__name__ == '__main__': lst = [0] * 10 count = 0 dif =", "-*- # Ввести список А из 10 элементов, найти разность положительных элементов кратных", "== '__main__': lst = [0] * 10 count = 0 dif = 0", "coding: utf-8 -*- # Ввести список А из 10 элементов, найти разность положительных", "% 11 == 0: b = lst[i] if lst[i] % 11 == 0:", "список: \", lst, \"разность положительных элементов кратных 11:\", dif + (b * 2),", "== 0: dif -= lst[i] count += 1 print(\"Изначальный список: \", lst, \"разность", "список А из 10 элементов, найти разность положительных элементов кратных 11, их #", "utf-8 -*- # Ввести список А из 10 элементов, найти разность положительных элементов", "range(10): print(\"Введите\", i+1, \"элемент\") lst[i] = int(input()) if lst[i] > 0: if b", "i+1, \"элемент\") lst[i] = int(input()) if lst[i] > 0: if b == 0", "in range(10): print(\"Введите\", i+1, \"элемент\") lst[i] = int(input()) if lst[i] > 0: if", "lst[i] % 11 == 0: b = lst[i] if lst[i] % 11 ==", "разность положительных элементов кратных 11, их # количество и вывести результаты на экран.", "на экран. if __name__ == '__main__': lst = [0] * 10 count =", "print(\"Изначальный список: \", lst, \"разность положительных элементов кратных 11:\", dif + (b *", "# количество и вывести результаты на экран. if __name__ == '__main__': lst =", "b = lst[i] if lst[i] % 11 == 0: dif -= lst[i] count", "= lst[i] if lst[i] % 11 == 0: dif -= lst[i] count +=", "А из 10 элементов, найти разность положительных элементов кратных 11, их # количество", "найти разность положительных элементов кратных 11, их # количество и вывести результаты на", "dif = 0 b = 0 for i in range(10): print(\"Введите\", i+1, \"элемент\")", "0: if b == 0 and lst[i] % 11 == 0: b =", "'__main__': lst = [0] * 10 count = 0 dif = 0 b" ]
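A small worked example with a hypothetical, fixed input list (no interactive input): b ends up holding the first positive multiple of 11, dif the negated sum of all positive multiples of 11, so dif + (b * 2) is the first such element minus the sum of the remaining ones.

# Same arithmetic on a fixed sample list (made-up values, no input())
sample = [3, 22, -11, 5, 44, 7, 11, 0, -2, 9]
positives_div_11 = [x for x in sample if x > 0 and x % 11 == 0]   # [22, 44, 11]
b = positives_div_11[0]                                           # 22
dif = -sum(positives_div_11)                                      # -77
print(dif + (b * 2), len(positives_div_11))                       # prints -33 3, i.e. 22 - (44 + 11) and the count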
[ "self.data_store = data_store def get_type(self): return self.type def get_tables(self): return [] def has_table(self,", "def get_type(self): return self.type def get_tables(self): return [] def has_table(self, tableName): return True", "tableName): return True def get_table_columns(self, tableName): return [] def select(self, query): sql_query =", "True def get_table_columns(self, tableName): return [] def select(self, query): sql_query = str(query) dso,", "select(self, query): sql_query = str(query) dso, _creation_info = self.data_store.create_datasource(self.integration_name, {'query': sql_query}) data =", "in column_names: if pd.core.dtypes.common.is_datetime_or_timedelta_dtype(dso.df[column_name]): pass_data = dso.df[column_name].dt.to_pydatetime() for i, rec in enumerate(data): rec[column_name]", "__init__(self, integration_name, data_store): self.integration_name = integration_name self.data_store = data_store def get_type(self): return self.type", "data_store def get_type(self): return self.type def get_tables(self): return [] def has_table(self, tableName): return", "import pandas as pd from mindsdb.api.mysql.mysql_proxy.datahub.datanodes.datanode import DataNode class IntegrationDataNode(DataNode): type = 'integration'", "integration_name, data_store): self.integration_name = integration_name self.data_store = data_store def get_type(self): return self.type def", "IntegrationDataNode(DataNode): type = 'integration' def __init__(self, integration_name, data_store): self.integration_name = integration_name self.data_store =", "'integration' def __init__(self, integration_name, data_store): self.integration_name = integration_name self.data_store = data_store def get_type(self):", "data_store): self.integration_name = integration_name self.data_store = data_store def get_type(self): return self.type def get_tables(self):", "def get_table_columns(self, tableName): return [] def select(self, query): sql_query = str(query) dso, _creation_info", "column_names = list(dso.df.columns) for column_name in column_names: if pd.core.dtypes.common.is_datetime_or_timedelta_dtype(dso.df[column_name]): pass_data = dso.df[column_name].dt.to_pydatetime() for", "= 'integration' def __init__(self, integration_name, data_store): self.integration_name = integration_name self.data_store = data_store def", "query): sql_query = str(query) dso, _creation_info = self.data_store.create_datasource(self.integration_name, {'query': sql_query}) data = dso.df.to_dict(orient='records')", "self.integration_name = integration_name self.data_store = data_store def get_type(self): return self.type def get_tables(self): return", "type = 'integration' def __init__(self, integration_name, data_store): self.integration_name = integration_name self.data_store = data_store", "[] def has_table(self, tableName): return True def get_table_columns(self, tableName): return [] def select(self,", "tableName): return [] def select(self, query): sql_query = str(query) dso, _creation_info = self.data_store.create_datasource(self.integration_name,", "dso, _creation_info = self.data_store.create_datasource(self.integration_name, {'query': sql_query}) data = dso.df.to_dict(orient='records') column_names = list(dso.df.columns) for", "if pd.core.dtypes.common.is_datetime_or_timedelta_dtype(dso.df[column_name]): pass_data = dso.df[column_name].dt.to_pydatetime() for i, rec in enumerate(data): rec[column_name] = pass_data[i].timestamp()", "as pd from mindsdb.api.mysql.mysql_proxy.datahub.datanodes.datanode import DataNode class 
IntegrationDataNode(DataNode): type = 'integration' def __init__(self,", "return [] def has_table(self, tableName): return True def get_table_columns(self, tableName): return [] def", "pass_data = dso.df[column_name].dt.to_pydatetime() for i, rec in enumerate(data): rec[column_name] = pass_data[i].timestamp() return data,", "DataNode class IntegrationDataNode(DataNode): type = 'integration' def __init__(self, integration_name, data_store): self.integration_name = integration_name", "self.type def get_tables(self): return [] def has_table(self, tableName): return True def get_table_columns(self, tableName):", "data = dso.df.to_dict(orient='records') column_names = list(dso.df.columns) for column_name in column_names: if pd.core.dtypes.common.is_datetime_or_timedelta_dtype(dso.df[column_name]): pass_data", "return self.type def get_tables(self): return [] def has_table(self, tableName): return True def get_table_columns(self,", "= str(query) dso, _creation_info = self.data_store.create_datasource(self.integration_name, {'query': sql_query}) data = dso.df.to_dict(orient='records') column_names =", "list(dso.df.columns) for column_name in column_names: if pd.core.dtypes.common.is_datetime_or_timedelta_dtype(dso.df[column_name]): pass_data = dso.df[column_name].dt.to_pydatetime() for i, rec", "from mindsdb.api.mysql.mysql_proxy.datahub.datanodes.datanode import DataNode class IntegrationDataNode(DataNode): type = 'integration' def __init__(self, integration_name, data_store):", "mindsdb.api.mysql.mysql_proxy.datahub.datanodes.datanode import DataNode class IntegrationDataNode(DataNode): type = 'integration' def __init__(self, integration_name, data_store): self.integration_name", "= dso.df.to_dict(orient='records') column_names = list(dso.df.columns) for column_name in column_names: if pd.core.dtypes.common.is_datetime_or_timedelta_dtype(dso.df[column_name]): pass_data =", "def has_table(self, tableName): return True def get_table_columns(self, tableName): return [] def select(self, query):", "for column_name in column_names: if pd.core.dtypes.common.is_datetime_or_timedelta_dtype(dso.df[column_name]): pass_data = dso.df[column_name].dt.to_pydatetime() for i, rec in", "integration_name self.data_store = data_store def get_type(self): return self.type def get_tables(self): return [] def", "import DataNode class IntegrationDataNode(DataNode): type = 'integration' def __init__(self, integration_name, data_store): self.integration_name =", "def select(self, query): sql_query = str(query) dso, _creation_info = self.data_store.create_datasource(self.integration_name, {'query': sql_query}) data", "sql_query = str(query) dso, _creation_info = self.data_store.create_datasource(self.integration_name, {'query': sql_query}) data = dso.df.to_dict(orient='records') column_names", "= dso.df[column_name].dt.to_pydatetime() for i, rec in enumerate(data): rec[column_name] = pass_data[i].timestamp() return data, column_names", "column_name in column_names: if pd.core.dtypes.common.is_datetime_or_timedelta_dtype(dso.df[column_name]): pass_data = dso.df[column_name].dt.to_pydatetime() for i, rec in enumerate(data):", "pd from mindsdb.api.mysql.mysql_proxy.datahub.datanodes.datanode import DataNode class IntegrationDataNode(DataNode): type = 'integration' def __init__(self, integration_name,", "sql_query}) data = dso.df.to_dict(orient='records') column_names = list(dso.df.columns) for column_name in column_names: if pd.core.dtypes.common.is_datetime_or_timedelta_dtype(dso.df[column_name]):", "_creation_info = 
self.data_store.create_datasource(self.integration_name, {'query': sql_query}) data = dso.df.to_dict(orient='records') column_names = list(dso.df.columns) for column_name", "str(query) dso, _creation_info = self.data_store.create_datasource(self.integration_name, {'query': sql_query}) data = dso.df.to_dict(orient='records') column_names = list(dso.df.columns)", "return [] def select(self, query): sql_query = str(query) dso, _creation_info = self.data_store.create_datasource(self.integration_name, {'query':", "= self.data_store.create_datasource(self.integration_name, {'query': sql_query}) data = dso.df.to_dict(orient='records') column_names = list(dso.df.columns) for column_name in", "{'query': sql_query}) data = dso.df.to_dict(orient='records') column_names = list(dso.df.columns) for column_name in column_names: if", "pd.core.dtypes.common.is_datetime_or_timedelta_dtype(dso.df[column_name]): pass_data = dso.df[column_name].dt.to_pydatetime() for i, rec in enumerate(data): rec[column_name] = pass_data[i].timestamp() return", "dso.df.to_dict(orient='records') column_names = list(dso.df.columns) for column_name in column_names: if pd.core.dtypes.common.is_datetime_or_timedelta_dtype(dso.df[column_name]): pass_data = dso.df[column_name].dt.to_pydatetime()", "def __init__(self, integration_name, data_store): self.integration_name = integration_name self.data_store = data_store def get_type(self): return", "get_tables(self): return [] def has_table(self, tableName): return True def get_table_columns(self, tableName): return []", "self.data_store.create_datasource(self.integration_name, {'query': sql_query}) data = dso.df.to_dict(orient='records') column_names = list(dso.df.columns) for column_name in column_names:", "= list(dso.df.columns) for column_name in column_names: if pd.core.dtypes.common.is_datetime_or_timedelta_dtype(dso.df[column_name]): pass_data = dso.df[column_name].dt.to_pydatetime() for i,", "return True def get_table_columns(self, tableName): return [] def select(self, query): sql_query = str(query)", "def get_tables(self): return [] def has_table(self, tableName): return True def get_table_columns(self, tableName): return", "get_type(self): return self.type def get_tables(self): return [] def has_table(self, tableName): return True def", "class IntegrationDataNode(DataNode): type = 'integration' def __init__(self, integration_name, data_store): self.integration_name = integration_name self.data_store", "column_names: if pd.core.dtypes.common.is_datetime_or_timedelta_dtype(dso.df[column_name]): pass_data = dso.df[column_name].dt.to_pydatetime() for i, rec in enumerate(data): rec[column_name] =", "= data_store def get_type(self): return self.type def get_tables(self): return [] def has_table(self, tableName):", "pandas as pd from mindsdb.api.mysql.mysql_proxy.datahub.datanodes.datanode import DataNode class IntegrationDataNode(DataNode): type = 'integration' def", "[] def select(self, query): sql_query = str(query) dso, _creation_info = self.data_store.create_datasource(self.integration_name, {'query': sql_query})", "= integration_name self.data_store = data_store def get_type(self): return self.type def get_tables(self): return []", "has_table(self, tableName): return True def get_table_columns(self, tableName): return [] def select(self, query): sql_query", "get_table_columns(self, tableName): return [] def select(self, query): sql_query = str(query) dso, _creation_info =" ]
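A minimal sketch of how select() behaves, assuming the class above is importable (it still requires the mindsdb package for the DataNode base class). The stub data store below is hypothetical and only mimics the (datasource, creation_info) pair that the real data store is expected to return:

import pandas as pd

class _StubDataSource:
    def __init__(self, df):
        self.df = df

class _StubDataStore:
    def create_datasource(self, integration_name, params):
        # Pretend the integration executed params['query'] and returned a frame
        df = pd.DataFrame({'a': [1, 2],
                           'ts': pd.to_datetime(['2021-01-01', '2021-01-02'])})
        return _StubDataSource(df), None

node = IntegrationDataNode('my_integration', _StubDataStore())
data, columns = node.select('SELECT a, ts FROM t')
# datetime columns come back as UNIX timestamps inside the record dicts
print(columns)          # ['a', 'ts']
print(data[0]['ts'])    # e.g. 1609459200.0, depending on local timezone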
[ "by the current model fit \"\"\" #Check result super().eval(a0, a1) #Structure input and", "value. drop_missing : bool, optional Only include documents who have associated data for", "#Clear cache self._avg_cache.clear() def eval(self, *args, **kwargs): \"\"\" Estimate a point based on", "an Event x : str, optional the name of a data field in", ": All of the keys the models will need to evaluate Returns -------", "If 'unix', t=0 is 1 Jan 1970 (\"the UNIX epoch\"). Default is 'run'.", "''.format(key, e)) #Summarize return all(resp) def rank_models(models, target, **kwargs): \"\"\" Rank a list", "in every Event. goal : float the target pixel tolerance : float, optional", "{} and kwargs {}\" \"\".format(self.name, target, kwargs)) if not self.result: raise RuntimeError(\"Can not", "Party # ############### import lmfit import pandas as pd import numpy as np", "of each model for model in models: try: estimate = model.eval(**kwargs) diffs.append(np.abs(estimate-target)) logger.debug(\"Model", "optional passed to Axes.set_xlim ylim : tuple, optional passed to Axes.set_ylim ax :", "or \"nan\" == doc[key].lower(): resp.append(not drop_missing) continue #Handle all other types else: if", "failed to make an estimate return [model for model in model_ranking if model", "extract from the RunStart document and format in the legend of the plot.", "optional Fix the second mirror in the system Returns ------- angles : dict", "Tuple fo the mirror pitches (a1, a2) init_guess : dict, optional Initialization guess", "``y`` value Returns ------- x : dict Variable name and floating value \"\"\"", "all provided filters Example ------ ..code:: apply_filters(doc, filters = {'a' : lambda x", "filters for key, func in filters.items(): try: #Check iterables for nan and inf", "d in self._avg_cache]) #Send to callback super().event(doc) #Clear cache self._avg_cache.clear() def eval(self, *args,", "If set to None, the model will only be computed at the end", "is specified, every point is rendered as they come, otherwise the graph will", "super().event(doc) self.event_count += 1 def update_plot(self, force=False): if self.averages is None or (self.averages", "plot. The legend will always show the scan_id followed by a colon (\"1:", "= self.ax.plot([],[],'r--', label='Target') super().start(doc) def event(self, doc): super().event(doc) self.event_count += 1 def update_plot(self,", "request if not any([a0,a1]) or all([a0,a1]): raise ValueError(\"Exactly one of the mirror positions", "types else: if np.isnan(doc[key]) or np.isinf(doc[key]): resp.append(not drop_missing) continue #Evaluate filter resp.append(bool(func(doc[key]))) #Handle", "includes events missing the key entirely, reporting NaN or reporting Inf. 
Returns -------", "centroid position of two mirror system Parameters ---------- centroid : str Keyword in", "update_every=1, name=None, average=1): #Create model model = LinearModel(missing='drop', name=name) #Initialize parameters init =", "elif self.independent_vars['x'] in kwargs.keys(): x = kwargs[self.independent_vars['x']] else: raise ValueError(\"Must supply keyword `x`", "variable axis and a depended variable Parameters ---------- y : str Keyword in", "-= 1 self.ax.fill_between(self.x_data[-distance:], goal[-distance:]-self.tolerance, goal[-distance:]+self.tolerance, alpha=0.2, facecolor='r') super().update_plot() self.ax.set_xlim(left=0, right=None, auto=True) def update_caches(self,", "None is specified, every point is rendered as they come, otherwise the graph", "def __init__(self, y, x=None, *, goal=0.0, tolerance=0.0, averages=None, **kwargs): super().__init__(y, x, **kwargs) self.legend_title", "resp = [] filters = filters or dict() #Iterate through filters for key,", "(\"the UNIX epoch\"). Default is 'run'. kwargs : All additional keyword arguments are", "init_guess : dict, optional Initialization guess for the linear fit, available keys are", ": All additional keyword arguments are passed through to ``Axes.plot``. Notes ----- If", "takes precedence over the standard 'seq_num' and 'time' recorded in every Event. goal", "variables that create the requested dependent variable ..note:: For multivariable functions the user", "= ['a0', 'a1'], missing='drop') #Initialize parameters init = {'x0' : 0, 'x1': 0,", "def eval(self, *args, **kwargs): \"\"\" Estimate a point based on the current fit", "= len(self.ydata) +1 #Rewrite document with averages for key in self.field_names: doc['data'][key] =", "for each filter key. This includes events missing the key entirely, reporting NaN", "Path ############### # Third Party # ############### import lmfit import pandas as pd", "models based on the accuracy of their prediction Parameters ---------- models : list", "the user may have to specify which variable to solve for, and which", "\"\"\" Base class for live model building in Skywalker Parameters ---------- model :", "for the indepenedent variable can also be given as the field name in", "over the standard 'seq_num' and 'time' recorded in every Event. goal : float", "# ########## from .utils.argutils import isiterable logger = logging.getLogger(__name__) def apply_filters(doc, filters=None, drop_missing=True):", "every point is rendered as they come, otherwise the graph will update every", "same as the model function update_every : int or None, optional Update rate", "for live model building in Skywalker Parameters ---------- model : lmfit.Model y: string", "averages for key in self.field_names: doc['data'][key] = np.mean([d['data'][key] for d in self._avg_cache]) #Send", "bad_models = list() #Calculate error of each model for model in models: try:", "yield estimate from model {}\" \"\".format(model.name)) logger.debug(e) #Rank performances model_ranking = model_ranking[np.argsort(diffs)] #Remove", "system a1 : float, optional Fix the second mirror in the system Returns", "information. 
Parameters ---------- x : float or int, optional Independent variable to evaluate", "the mirror positions \"\\ \"must be specified to backsolve for the target\") #Gather", "float Actual value of target kwargs : All of the keys the models", "#Return prediction return self.result.model.eval(**kwargs) def backsolve(self, target, a0=None, a1=None): \"\"\" Find the mirror", "improper filter except Exception as e: logger.critical('Filter associated with event_key {}'\\ 'reported exception", "session. Examples -------- >>> my_plotter = LivePlotWithGoals('det', 'motor', goal=10.0, tolerance=1.5, averages=None, legend_keys=['sample']) >>>", "the event document Returns ------- estimate : float Y value as determined by", "self._avg_cache = list() @property def name(self): \"\"\" Name of the model \"\"\" return", "model {} for target {} and kwargs {}\" \"\".format(self.name, target, kwargs)) if not", "the standard 'seq_num' and 'time' recorded in every Event. goal : float the", "{}\" \"\".format(model.name)) logger.debug(e) #Rank performances model_ranking = model_ranking[np.argsort(diffs)] #Remove models who failed to", ": bool, optional Only include documents who have associated data for each filter", "\"\"\" Install additional filters Parameters ---------- filters : dict Filters are provided in", "to Axes.set_ylim ax : Axes, optional matplotib Axes; if none specified, new figure", "of keys to extract from the RunStart document and format in the legend", "sjson from pathlib import Path ############### # Third Party # ############### import lmfit", "------- resp : bool Whether the event passes all provided filters Example ------", "event document stream init_guess: dict, optional Initial guesses for other values if expected", "key and solvable value \"\"\" #Make sure we have a fit super().backsolve(target, a0=a0,", "Returns ------- estimate : float Y value as determined by current linear fit", "b) = (self.result.values['slope'], self.result.values['intercept']) #Return x position if m == 0 and b", "Standard # ############ import logging import simplejson as sjson from pathlib import Path", "#Simple model of two-bounce system def two_bounce(a0, a1, x0, x1, x2): return x0", "goal = np.asarray(self.goal_data) distance = 2 if self.averages is None else self.averages+1 if", "logger = logging.getLogger(__name__) def apply_filters(doc, filters=None, drop_missing=True): \"\"\" Filter an event document Parameters", "super().backsolve(target, **kwargs) #Gather line information (m, b) = (self.result.values['slope'], self.result.values['intercept']) #Return x position", "numpy as np from lmfit.models import LinearModel from bluesky.callbacks import (LiveFit, LiveFitPlot, CallbackBase,", "{'a0' : np.asarray(a0), 'a1' : np.asarray(a1)} kwargs.update(self.result.values) #Return prediction return self.result.model.eval(**kwargs) def backsolve(self,", "= 2 if self.averages is None else self.averages+1 if force: distance -= 1", "is a multivariable function you must fix one of the mirrors in place,", "############ # Standard # ############ import logging import simplejson as sjson from pathlib", "have to specify which variable to solve for, and which to keep fixed", "a dictionary of key / callable pairs that take a single input from", "solvable value \"\"\" #Make sure we have a fit super().backsolve(target, a0=a0, a1=a1) #Check", "{} self.drop_missing = drop_missing self._avg_cache = list() @property def name(self): \"\"\" Name of", "valid request if not any([a0,a1]) or all([a0,a1]): raise ValueError(\"Exactly one of 
the mirror", "isiterable logger = logging.getLogger(__name__) def apply_filters(doc, filters=None, drop_missing=True): \"\"\" Filter an event document", "\"\"\" ############ # Standard # ############ import logging import simplejson as sjson from", "of the second mirror Returns ------- centroid : float Position of the centroid", "estimate from model {}\" \"\".format(model.name)) logger.debug(e) #Rank performances model_ranking = model_ranking[np.argsort(diffs)] #Remove models", "outcome based on the most recent fit of the given information. Parameters ----------", "t=0 is 1 Jan 1970 (\"the UNIX epoch\"). Default is 'run'. kwargs :", "to average if len(self._avg_cache) >= self.average: #Overwrite event number #This can be removed", "goal[-distance:]+self.tolerance, alpha=0.2, facecolor='r') super().update_plot() self.ax.set_xlim(left=0, right=None, auto=True) def update_caches(self, x, y): self.goal_data.append(self.goal) super().update_caches(x,", "to average cache self._avg_cache.append(doc) #Check we have the right number of shots to", "pitches (a1, a2) init_guess : dict, optional Initialization guess for the linear fit,", "backsolve without a saved fit, \"\\ \"use .update_fit()\") class LinearFit(LiveBuild): \"\"\" Model to", "a0=0., a1=0., **kwargs): \"\"\" Evaluate the predicted outcome based on the most recent", "#Handle missing information except KeyError: resp.append(not drop_missing) #Handle improper filter except Exception as", "have the right number of shots to average if len(self._avg_cache) >= self.average: #Overwrite", "pathlib import Path ############### # Third Party # ############### import lmfit import pandas", "target kwargs : All of the keys the models will need to evaluate", "x0 - a0*x1)/ x2, 'a0' : a0} else: return {'a0' : (target -", "stop(self, doc): # Ensure that the last events are plotted # Only necessary", "doc : dict Bluesky Document to filter filters : dict Filters are provided", "two mirror system Parameters ---------- centroid : str Keyword in the event document", "data stream and return a boolean value. drop_missing : bool, optional Only include", "1 i.e update on every new event \"\"\" def __init__(self, model, y, independent_vars,", "x2, 'a0' : a0} else: return {'a0' : (target - x0 - a1*x2)/", "backsolve for the target\") #Gather fit information (x0, x1, x2) = (self.result.values['x0'], self.result.values['x1'],", "{'x0' : 0, 'x1': 0, 'x2' : 0} if init_guess: init.update(init_guess) #Initialize fit", "2 if self.averages is None else self.averages+1 if force: distance -= 1 self.ax.fill_between(self.x_data[-distance:],", "pd import numpy as np from lmfit.models import LinearModel from bluesky.callbacks import (LiveFit,", "target : float Actual value of target kwargs : All of the keys", "the model. Reimplemented by subclasses \"\"\" logger.debug(\"Evaluating model {} with args : {},", "in the event document that reports centroid position alphas : tuple of str", "other values if expected update_every : int or None, optional Update rate of", "model of two-bounce system def two_bounce(a0, a1, x0, x1, x2): return x0 +", "ax : Axes, optional matplotib Axes; if none specified, new figure and axes", ": float Position of the centroid as predicted by the current model fit", "for model in model_ranking if model not in bad_models] class LiveBuild(LiveFit): \"\"\" Base", "recent fit of the given information. 
Parameters ---------- x : float or int,", "#Handle all other types else: if np.isnan(doc[key]) or np.isinf(doc[key]): resp.append(not drop_missing) continue #Evaluate", "self.result.model.eval(**kwargs) def backsolve(self, target, **kwargs): \"\"\" Find the ``x`` position that solves the", "'a0' : a0} else: return {'a0' : (target - x0 - a1*x2)/ x1,", "legend_keys=['sample']) >>> RE(my_scan, my_plotter) \"\"\" def __init__(self, y, x=None, *, goal=0.0, tolerance=0.0, averages=None,", "resp.append(not drop_missing) continue #Handle all other types else: if np.isnan(doc[key]) or np.isinf(doc[key]): resp.append(not", "a data field in an Event x : str, optional the name of", "apply_filters(doc, filters=None, drop_missing=True): \"\"\" Filter an event document Parameters ---------- doc : dict", "super().eval(**kwargs) #Standard x setup if kwargs.get('x'): x = kwargs['x'] elif self.independent_vars['x'] in kwargs.keys():", "of the given information Parameters ---------- a0 : float Pitch of the first", "event document that reports centroid position alphas : tuple of str Tuple fo", "certain pixel value Because this is a multivariable function you must fix one", "for valid request if not any([a0,a1]) or all([a0,a1]): raise ValueError(\"Exactly one of the", "every Event. goal : float the target pixel tolerance : float, optional the", "label='Target') super().start(doc) def event(self, doc): super().event(doc) self.event_count += 1 def update_plot(self, force=False): if", "of key / callable pairs that take a single input from the data", "return #Add doc to average cache self._avg_cache.append(doc) #Check we have the right number", "update on every new event \"\"\" def __init__(self, centroid, alphas, name=None, init_guess=None, update_every=1,", "{} predicted a value of {}\" \"\".format(model.name, estimate)) except RuntimeError as e: bad_models.append(model)", "Returns ------- model_ranking : list List of models sorted by accuracy of predictions", "eval(self, **kwargs): \"\"\" Evaluate the predicted outcome based on the most recent fit", "with the variable mirror key and solvable value \"\"\" #Make sure we have", "arguments are passed through to ``Axes.plot``. Notes ----- If your figure blocks the", "of the model. Reimplemented by subclasses \"\"\" logger.debug(\"Evaluating model {} with args :", "np.asarray(x)} kwargs.update(self.result.values) #Return prediction return self.result.model.eval(**kwargs) def backsolve(self, target, **kwargs): \"\"\" Find the", "x, init_guess=None, update_every=1, name=None, average=1): #Create model model = LinearModel(missing='drop', name=name) #Initialize parameters", "performances model_ranking = model_ranking[np.argsort(diffs)] #Remove models who failed to make an estimate return", "########## # Module # ########## from .utils.argutils import isiterable logger = logging.getLogger(__name__) def", "# ############### import lmfit import pandas as pd import numpy as np from", "+1 #Rewrite document with averages for key in self.field_names: doc['data'][key] = np.mean([d['data'][key] for", "{'a1' : (target - x0 - a0*x1)/ x2, 'a0' : a0} else: return", "Examples -------- >>> my_plotter = LivePlotWithGoals('det', 'motor', goal=10.0, tolerance=1.5, averages=None, legend_keys=['sample']) >>> RE(my_scan,", "Specialized Callbacks for Skywalker \"\"\" ############ # Standard # ############ import logging import", "return a boolean value. 
drop_missing : bool, optional Only include documents who have", "averages self.event_count = 0 def start(self, doc): self.goal_data = [] self.goal_axis, = self.ax.plot([],[],'r--',", "document that reports the independent variable init_guess : dict, optional Initialization guess for", "figure blocks the main thread when you are trying to scan with this", "independent variables that create the requested dependent variable ..note:: For multivariable functions the", "return self.model.name @property def field_names(self): \"\"\" Name of all the keys associated with", "to evaluate Returns ------- model_ranking : list List of models sorted by accuracy", "of the mirrors in place, while the other one is solved for. Parameters", "optional The number of images to average. If None is specified, every point", "Parameters ---------- model : lmfit.Model y: string Key of dependent variable indpendent_vars :", "'run'. kwargs : All additional keyword arguments are passed through to ``Axes.plot``. Notes", "super().update_plot() self.ax.set_xlim(left=0, right=None, auto=True) def update_caches(self, x, y): self.goal_data.append(self.goal) super().update_caches(x, y) def stop(self,", "name : optional , str Name for the contained model. When None (default)", "by subclasses \"\"\" logger.debug(\"Evaluating model {} with args : {}, kwargs {}\" \"\".format(self.name,", "#Make sure we have a fit super().backsolve(target, a0=a0, a1=a1) #Check for valid request", "every new event \"\"\" def __init__(self, model, y, independent_vars, init_guess=None, update_every=1, filters=None, drop_missing=True,", "for the linear fit, available keys are be ``x0``, ``x1``, and ``x2`` name", "x > 0, 'c' : lambda x : 4 < x < 6})", "class LinearFit(LiveBuild): \"\"\" Model to fit a linear relationship between a single variable", "\"\"\" Find the mirror configuration to reach a certain pixel value Because this", "i.e update on every new event \"\"\" def __init__(self, model, y, independent_vars, init_guess=None,", "computed at the end of the run. By default, this is set to", "of dependent variable indpendent_vars : dict Map independent variables names to keys in", ": float Actual value of target kwargs : All of the keys the", "\"\"\" logger.debug(\"Evaluating model {} with args : {}, kwargs {}\" \"\".format(self.name, args, kwargs))", "x : x > 0, 'c' : lambda x : 4 < x", "fit to find the independent variables that create the requested dependent variable ..note::", "right=None, auto=True) def update_caches(self, x, y): self.goal_data.append(self.goal) super().update_caches(x, y) def stop(self, doc): #", "average=1): #Create model model = LinearModel(missing='drop', name=name) #Initialize parameters init = {'slope' :", "list List of models sorted by accuracy of predictions \"\"\" #Initialize values model_ranking", "Third Party # ############### import lmfit import pandas as pd import numpy as", "model = LinearModel(missing='drop', name=name) #Initialize parameters init = {'slope' : 0, 'intercept' :", "indepenedent variable can also be given as the field name in the event", "Pitch of the first mirror a1 : float Pitch of the second mirror", "one of the mirrors in place, while the other one is solved for.", "point based on the current fit of the model. Reimplemented by subclasses \"\"\"", "specified, new figure and axes are made. 
fig : Figure, optional deprecated: use", "the event document that reports centroid position alphas : tuple of str Tuple", "is rendered as they come, otherwise the graph will update every ```averages``` points.", "if force: distance -= 1 self.ax.fill_between(self.x_data[-distance:], goal[-distance:]-self.tolerance, goal[-distance:]+self.tolerance, alpha=0.2, facecolor='r') super().update_plot() self.ax.set_xlim(left=0, right=None,", "in self.field_names: doc['data'][key] = np.mean([d['data'][key] for d in self._avg_cache]) #Send to callback super().event(doc)", "# Standard # ############ import logging import simplejson as sjson from pathlib import", ": alphas[0], 'a1' : alphas[1]}, init_guess=init, update_every=update_every, average=average) def eval(self, a0=0., a1=0., **kwargs):", "to backsolve, because fit is horizontal \" \" after {} data points\".format(len(self.ydata))) return", "Variable name and floating value \"\"\" #Make sure we have a fit super().backsolve(target,", "a1 : float, optional Fix the second mirror in the system Returns -------", "to 1 i.e update on every new event \"\"\" def __init__(self, centroid, alphas,", ": str, optional the name of a data field in an Event, or", "x2) = (self.result.values['x0'], self.result.values['x1'], self.result.values['x2']) #Return computed value if a0: return {'a1' :", "list of keys to extract from the RunStart document and format in the", ": float Desired pixel location a0 : float, optional Fix the first mirror", ": list List of models to evaluate target : float Actual value of", "Base class for live model building in Skywalker Parameters ---------- model : lmfit.Model", "from a stream of Events. Parameters ---------- y : str the name of", "'seq_num' or 'time' If None, use the Event's sequence number. Special case: If", "stream of Events. 
Parameters ---------- y : str the name of a data", "tuple of str Tuple fo the mirror pitches (a1, a2) init_guess : dict,", "tolerance self.averages = averages self.event_count = 0 def start(self, doc): self.goal_data = []", "a0=None, a1=None): \"\"\" Find the mirror configuration to reach a certain pixel value", "to solve for, and which to keep fixed \"\"\" logger.debug(\"Backsolving model {} for", "in an Event x : str, optional the name of a data field", "<filename>pswalker/callbacks.py \"\"\" Specialized Callbacks for Skywalker \"\"\" ############ # Standard # ############ import", "Example ------ ..code:: apply_filters(doc, filters = {'a' : lambda x : x >", "mirror a1 : float Pitch of the second mirror Returns ------- centroid :", "- a1*x2)/ x1, 'a1' : a1} class LivePlotWithGoal(LivePlot): \"\"\" Build a function that", "kwargs.keys(): x = kwargs[self.independent_vars['x']] else: raise ValueError(\"Must supply keyword `x` or use fieldname", "import simplejson as sjson from pathlib import Path ############### # Third Party #", "name and floating value \"\"\" #Make sure we have a fit super().backsolve(target, **kwargs)", "any(np.isinf(doc[key])): resp.append(not drop_missing) continue #Check string entries for nan and inf elif isinstance(doc[key],", "for nan and inf if isiterable(doc[key]): if any(np.isnan(doc[key])) or any(np.isinf(doc[key])): resp.append(not drop_missing) continue", "use fieldname {}\" \"\".format(self.independent_vars['x'])) #Structure input add past result kwargs = {'x' :", "if not self.result: raise RuntimeError(\"Can not backsolve without a saved fit, \"\\ \"use", "update_every=1, filters=None, drop_missing=True, average=1): super().__init__(model, y, independent_vars, init_guess=init_guess, update_every=update_every) #Add additional keys self.average", "def install_filters(self, filters): \"\"\" Install additional filters Parameters ---------- filters : dict Filters", "\"\"\" Use the most recent fit to find the independent variables that create", "a stream of Events. Parameters ---------- y : str the name of a", "{}, kwargs {}\" \"\".format(self.name, args, kwargs)) if not self.result: raise RuntimeError(\"Can not evaluate", "additional keyword arguments are passed through to ``Axes.plot``. Notes ----- If your figure", "of the centroid as predicted by the current model fit \"\"\" #Check result", "b != target: raise ValueError(\"Unable to backsolve, because fit is horizontal \" \"", "keep fixed \"\"\" logger.debug(\"Backsolving model {} for target {} and kwargs {}\" \"\".format(self.name,", "{} with args : {}, kwargs {}\" \"\".format(self.name, args, kwargs)) if not self.result:", "apply_filters(doc, filters = {'a' : lambda x : x > 0, 'c' :", "a0=a0, a1=a1) #Check for valid request if not any([a0,a1]) or all([a0,a1]): raise ValueError(\"Exactly", "new event \"\"\" def __init__(self, y, x, init_guess=None, update_every=1, name=None, average=1): #Create model", "by a colon (\"1: \"). Each xlim : tuple, optional passed to Axes.set_xlim", "the end of the run. By default, this is set to 1 i.e", "``x0``, ``x1``, and ``x2`` name : optional , str Name for the contained", "['a0', 'a1'], missing='drop') #Initialize parameters init = {'x0' : 0, 'x1': 0, 'x2'", "the other one is solved for. 
Parameters ---------- target : float Desired pixel", "Fix the second mirror in the system Returns ------- angles : dict Dictionary", "= np.mean([d['data'][key] for d in self._avg_cache]) #Send to callback super().event(doc) #Clear cache self._avg_cache.clear()", "when we are grouping the points if self.averages is not None: self.update_plot(force=True) super().stop(doc)", "at the end of the run. By default, this is set to 1", "the indepenedent variable can also be given as the field name in the", "UNIX epoch\"). Default is 'run'. kwargs : All additional keyword arguments are passed", "mirror configuration to reach a certain pixel value Because this is a multivariable", "float Desired pixel location a0 : float, optional Fix the first mirror in", "that reports centroid position alphas : tuple of str Tuple fo the mirror", "have a fit super().backsolve(target, **kwargs) #Gather line information (m, b) = (self.result.values['slope'], self.result.values['intercept'])", "the given target Parameters ---------- target : float Desired ``y`` value Returns -------", "a1=a1) #Check for valid request if not any([a0,a1]) or all([a0,a1]): raise ValueError(\"Exactly one", "return {'a1' : (target - x0 - a0*x1)/ x2, 'a0' : a0} else:", "\"\"\" Find the ``x`` position that solves the reaches the given target Parameters", "to Axes.set_xlim ylim : tuple, optional passed to Axes.set_ylim ax : Axes, optional", "tolerance for the pixel averages : float, optional The number of images to", "Axes.set_ylim ax : Axes, optional matplotib Axes; if none specified, new figure and", "value \"\"\" #Make sure we have a fit super().backsolve(target, a0=a0, a1=a1) #Check for", "between a single variable axis and a depended variable Parameters ---------- y :", "**kwargs): \"\"\" Evaluate the predicted outcome based on the most recent fit of", "**kwargs) #Gather line information (m, b) = (self.result.values['slope'], self.result.values['intercept']) #Return x position if", "'x1': 0, 'x2' : 0} if init_guess: init.update(init_guess) #Initialize fit super().__init__(model, centroid, independent_vars={'a0'", "or 'seq_num' or 'time' If None, use the Event's sequence number. Special case:", "and a depended variable Parameters ---------- y : str Keyword in the event", "available keys are be ``x0``, ``x1``, and ``x2`` name : optional , str", "is the time recorded in the RunStart document. If 'unix', t=0 is 1", "Find the ``x`` position that solves the reaches the given target Parameters ----------", "data stream and return a boolean value. \"\"\" self.filters.update(filters) def event(self, doc): #Run", "to scan with this callback, call `plt.ion()` in your IPython session. Examples --------", "self.goal_data) goal = np.asarray(self.goal_data) distance = 2 if self.averages is None else self.averages+1", "x setup if kwargs.get('x'): x = kwargs['x'] elif self.independent_vars['x'] in kwargs.keys(): x =", "fo the mirror pitches (a1, a2) init_guess : dict, optional Initialization guess for", "current fit of the model. Reimplemented by subclasses \"\"\" logger.debug(\"Evaluating model {} with", "of the mirror positions \"\\ \"must be specified to backsolve for the target\")", "format in the legend of the plot. 
The legend will always show the", "(self.result.values['slope'], self.result.values['intercept']) #Return x position if m == 0 and b != target:", "Parameters ---------- filters : dict Filters are provided in a dictionary of key", "{}\" \"\".format(self.name, target, kwargs)) if not self.result: raise RuntimeError(\"Can not backsolve without a", "\"\".format(self.name, target, kwargs)) if not self.result: raise RuntimeError(\"Can not backsolve without a saved", "and ``x2`` name : optional , str Name for the contained model. When", "filters = filters or dict() #Iterate through filters for key, func in filters.items():", "= list() #Calculate error of each model for model in models: try: estimate", "mirror positions \"\\ \"must be specified to backsolve for the target\") #Gather fit", "RunStart document. If 'unix', t=0 is 1 Jan 1970 (\"the UNIX epoch\"). Default", "import pandas as pd import numpy as np from lmfit.models import LinearModel from", "drop_missing : bool, optional Only include documents who have associated data for each", "x, **kwargs) self.legend_title = None self.goal = goal self.tolerance = tolerance self.averages =", "filters): \"\"\" Install additional filters Parameters ---------- filters : dict Filters are provided", "keys associated with the fit \"\"\" return [self.y] + list(self.independent_vars.values()) def install_filters(self, filters):", "model {}\" \"\".format(model.name)) logger.debug(e) #Rank performances model_ranking = model_ranking[np.argsort(diffs)] #Remove models who failed", "y) def stop(self, doc): # Ensure that the last events are plotted #", "value of target kwargs : All of the keys the models will need", "self.event_count = 0 def start(self, doc): self.goal_data = [] self.goal_axis, = self.ax.plot([],[],'r--', label='Target')", "a depended variable Parameters ---------- y : str Keyword in the event document", "passed through to ``Axes.plot``. Notes ----- If your figure blocks the main thread", "y): self.goal_data.append(self.goal) super().update_caches(x, y) def stop(self, doc): # Ensure that the last events", "none specified, new figure and axes are made. fig : Figure, optional deprecated:", "epoch\"). Default is 'run'. kwargs : All additional keyword arguments are passed through", "outcome based on the most recent fit of the given information Parameters ----------", "Events. Parameters ---------- y : str the name of a data field in", "of the first mirror a1 : float Pitch of the second mirror Returns", "class MultiPitchFit(LiveBuild): \"\"\" Model to fit centroid position of two mirror system Parameters", "self._avg_cache]) #Send to callback super().event(doc) #Clear cache self._avg_cache.clear() def eval(self, *args, **kwargs): \"\"\"", "in models: try: estimate = model.eval(**kwargs) diffs.append(np.abs(estimate-target)) logger.debug(\"Model {} predicted a value of", "All additional keyword arguments are passed through to ``Axes.plot``. Notes ----- If your", "matplotib Axes; if none specified, new figure and axes are made. 
fig :", "of {}\" \"\".format(model.name, estimate)) except RuntimeError as e: bad_models.append(model) diffs.append(np.inf) logger.debug(\"Unable to yield", "associated with event_key {}'\\ 'reported exception \"{}\"'\\ ''.format(key, e)) #Summarize return all(resp) def", "x1, x2): return x0 + a0*x1 + a1*x2 #Create model model = lmfit.Model(two_bounce,", "lmfit.models import LinearModel from bluesky.callbacks import (LiveFit, LiveFitPlot, CallbackBase, LivePlot) ########## # Module", "based on the most recent fit of the given information. Parameters ---------- x", "target : float Desired pixel location a0 : float, optional Fix the first", "y, x, init_guess=None, update_every=1, name=None, average=1): #Create model model = LinearModel(missing='drop', name=name) #Initialize", "the run. By default, this is set to 1 i.e update on every", "alphas[1]}, init_guess=init, update_every=update_every, average=average) def eval(self, a0=0., a1=0., **kwargs): \"\"\" Evaluate the predicted", "backsolve(self, target, a0=None, a1=None): \"\"\" Find the mirror configuration to reach a certain", "str Keyword in the event document that reports centroid position alphas : tuple", "come, otherwise the graph will update every ```averages``` points. legend_keys : list, optional", "string Key of dependent variable indpendent_vars : dict Map independent variables names to", "the models will need to evaluate Returns ------- model_ranking : list List of", "fixed \"\"\" logger.debug(\"Backsolving model {} for target {} and kwargs {}\" \"\".format(self.name, target,", "for Skywalker \"\"\" ############ # Standard # ############ import logging import simplejson as", "result kwargs = {'x' : np.asarray(x)} kwargs.update(self.result.values) #Return prediction return self.result.model.eval(**kwargs) def backsolve(self,", "through filters for key, func in filters.items(): try: #Check iterables for nan and", "average if len(self._avg_cache) >= self.average: #Overwrite event number #This can be removed with", "of Events. Parameters ---------- y : str the name of a data field", "list, optional The list of keys to extract from the RunStart document and", ": np.asarray(a1)} kwargs.update(self.result.values) #Return prediction return self.result.model.eval(**kwargs) def backsolve(self, target, a0=None, a1=None): \"\"\"", "{} for target {} and kwargs {}\" \"\".format(self.name, target, kwargs)) if not self.result:", "rendered as they come, otherwise the graph will update every ```averages``` points. legend_keys", "and format in the legend of the plot. 
The legend will always show", "fit a linear relationship between a single variable axis and a depended variable", "that takes precedence over the standard 'seq_num' and 'time' recorded in every Event.", "by current linear fit \"\"\" #Check result super().eval(**kwargs) #Standard x setup if kwargs.get('x'):", "an update to Bluesky Issue #684 doc['seq_num'] = len(self.ydata) +1 #Rewrite document with", "if kwargs.get('x'): x = kwargs['x'] elif self.independent_vars['x'] in kwargs.keys(): x = kwargs[self.independent_vars['x']] else:", "or dict() #Iterate through filters for key, func in filters.items(): try: #Check iterables", "= np.asarray(models) diffs = list() bad_models = list() #Calculate error of each model", "model not in bad_models] class LiveBuild(LiveFit): \"\"\" Base class for live model building", "passed to Axes.set_xlim ylim : tuple, optional passed to Axes.set_ylim ax : Axes,", "import Path ############### # Third Party # ############### import lmfit import pandas as", "filters : dict Filters are provided in a dictionary of key / callable", "the same as the model function update_every : int or None, optional Update", "The list of keys to extract from the RunStart document and format in", "Jan 1970 (\"the UNIX epoch\"). Default is 'run'. kwargs : All additional keyword", "callable pairs that take a single input from the data stream and return", "second mirror in the system Returns ------- angles : dict Dictionary with the", "number #This can be removed with an update to Bluesky Issue #684 doc['seq_num']", "y: string Key of dependent variable indpendent_vars : dict Map independent variables names", "kwargs {}\" \"\".format(self.name, args, kwargs)) if not self.result: raise RuntimeError(\"Can not evaluate without", "x = kwargs[self.independent_vars['x']] else: raise ValueError(\"Must supply keyword `x` or use fieldname {}\"", "= np.asarray(self.goal_data) distance = 2 if self.averages is None else self.averages+1 if force:", "alphas : tuple of str Tuple fo the mirror pitches (a1, a2) init_guess", "----- If your figure blocks the main thread when you are trying to", "also be given as the field name in the event document Returns -------", "information (m, b) = (self.result.values['slope'], self.result.values['intercept']) #Return x position if m == 0", "filters or {} self.drop_missing = drop_missing self._avg_cache = list() @property def name(self): \"\"\"", "to extract from the RunStart document and format in the legend of the", "position if m == 0 and b != target: raise ValueError(\"Unable to backsolve,", "field in an Event, or 'seq_num' or 'time' If None, use the Event's", "distance = 2 if self.averages is None else self.averages+1 if force: distance -=", "kwargs : All of the keys the models will need to evaluate Returns", "self.filters = filters or {} self.drop_missing = drop_missing self._avg_cache = list() @property def", "list() @property def name(self): \"\"\" Name of the model \"\"\" return self.model.name @property", "of models based on the accuracy of their prediction Parameters ---------- models :", "rank_models(models, target, **kwargs): \"\"\" Rank a list of models based on the accuracy", "cache self._avg_cache.clear() def eval(self, *args, **kwargs): \"\"\" Estimate a point based on the", "event number #This can be removed with an update to Bluesky Issue #684", "self.event_count % self.averages == 0) or force: self.goal_axis.set_data(self.x_data, self.goal_data) goal = np.asarray(self.goal_data) distance", "prediction return self.result.model.eval(**kwargs) def 
backsolve(self, target, a0=None, a1=None): \"\"\" Find the mirror configuration", "event document Parameters ---------- doc : dict Bluesky Document to filter filters :", "lmfit.Model(two_bounce, independent_vars = ['a0', 'a1'], missing='drop') #Initialize parameters init = {'x0' : 0,", "= model_ranking[np.argsort(diffs)] #Remove models who failed to make an estimate return [model for", ": dict Map independent variables names to keys in the event document stream", "m == 0 and b != target: raise ValueError(\"Unable to backsolve, because fit", "Evaluate the predicted outcome based on the most recent fit of the given", "prediction return self.result.model.eval(**kwargs) def backsolve(self, target, **kwargs): \"\"\" Find the ``x`` position that", ": 0} if init_guess: init.update(init_guess) #Initialize fit super().__init__(model, y, {'x': x}, init_guess=init, update_every=update_every,", "every new event \"\"\" def __init__(self, centroid, alphas, name=None, init_guess=None, update_every=1, average=1): #Simple", "in self._avg_cache]) #Send to callback super().event(doc) #Clear cache self._avg_cache.clear() def eval(self, *args, **kwargs):", "``Axes.plot``. Notes ----- If your figure blocks the main thread when you are", "average cache self._avg_cache.append(doc) #Check we have the right number of shots to average", "np.asarray(self.goal_data) distance = 2 if self.averages is None else self.averages+1 if force: distance", "x1, 'a1' : a1} class LivePlotWithGoal(LivePlot): \"\"\" Build a function that updates a", "as determined by current linear fit \"\"\" #Check result super().eval(**kwargs) #Standard x setup", ": float, optional The number of images to average. If None is specified,", "a0 : float Pitch of the first mirror a1 : float Pitch of", "positions \"\\ \"must be specified to backsolve for the target\") #Gather fit information", "model {} with args : {}, kwargs {}\" \"\".format(self.name, args, kwargs)) if not", "float, optional the tolerance for the pixel averages : float, optional The number", "new event \"\"\" def __init__(self, model, y, independent_vars, init_guess=None, update_every=1, filters=None, drop_missing=True, average=1):", "(\"1: \"). Each xlim : tuple, optional passed to Axes.set_xlim ylim : tuple,", "event \"\"\" def __init__(self, y, x, init_guess=None, update_every=1, name=None, average=1): #Create model model", "for other values if expected update_every : int or None, optional Update rate", "= averages self.event_count = 0 def start(self, doc): self.goal_data = [] self.goal_axis, =", "super().update_caches(x, y) def stop(self, doc): # Ensure that the last events are plotted", "centroid position alphas : tuple of str Tuple fo the mirror pitches (a1,", "the keys the models will need to evaluate Returns ------- model_ranking : list", "the Event's sequence number. Special case: If the Event's data includes a key", "the system Returns ------- angles : dict Dictionary with the variable mirror key", "the keys associated with the fit \"\"\" return [self.y] + list(self.independent_vars.values()) def install_filters(self,", "[] filters = filters or dict() #Iterate through filters for key, func in", "args, kwargs)) if not self.result: raise RuntimeError(\"Can not evaluate without a saved fit,", "pixel averages : float, optional The number of images to average. 
If None", "the accuracy of their prediction Parameters ---------- models : list List of models", "in the system Returns ------- angles : dict Dictionary with the variable mirror", "variable ..note:: For multivariable functions the user may have to specify which variable", "Returns ------- resp : bool Whether the event passes all provided filters Example", "rate of the model. If set to None, the model will only be", "import isiterable logger = logging.getLogger(__name__) def apply_filters(doc, filters=None, drop_missing=True): \"\"\" Filter an event", "LivePlotWithGoal(LivePlot): \"\"\" Build a function that updates a plot from a stream of", "to reach a certain pixel value Because this is a multivariable function you", "\"\"\" Rank a list of models based on the accuracy of their prediction", "need to evaluate Returns ------- model_ranking : list List of models sorted by", "boolean value. drop_missing : bool, optional Only include documents who have associated data", "event document Returns ------- estimate : float Y value as determined by current", "and b != target: raise ValueError(\"Unable to backsolve, because fit is horizontal \"", "if model not in bad_models] class LiveBuild(LiveFit): \"\"\" Base class for live model", "key named 'seq_num' or 'time', that takes precedence over the standard 'seq_num' and", "pandas as pd import numpy as np from lmfit.models import LinearModel from bluesky.callbacks", "/ callable pairs that take a single input from the data stream and", "model. If set to None, the model will only be computed at the", "super().backsolve(target, a0=a0, a1=a1) #Check for valid request if not any([a0,a1]) or all([a0,a1]): raise", "prediction Parameters ---------- models : list List of models to evaluate target :", ": 0, 'intercept' : 0} if init_guess: init.update(init_guess) #Initialize fit super().__init__(model, y, {'x':", "#Check for valid request if not any([a0,a1]) or all([a0,a1]): raise ValueError(\"Exactly one of", "str, optional the name of a data field in an Event, or 'seq_num'", "return self.result.model.eval(**kwargs) def backsolve(self, target, a0=None, a1=None): \"\"\" Find the mirror configuration to", "for key in self.field_names: doc['data'][key] = np.mean([d['data'][key] for d in self._avg_cache]) #Send to", "bluesky.callbacks import (LiveFit, LiveFitPlot, CallbackBase, LivePlot) ########## # Module # ########## from .utils.argutils", "By default, this is set to 1 i.e update on every new event", "the model function update_every : int or None, optional Update rate of the", "make an estimate return [model for model in model_ranking if model not in", "\"\"\" return [self.y] + list(self.independent_vars.values()) def install_filters(self, filters): \"\"\" Install additional filters Parameters", "simplejson as sjson from pathlib import Path ############### # Third Party # ###############", "fit of the given information Parameters ---------- a0 : float Pitch of the", "self.tolerance = tolerance self.averages = averages self.event_count = 0 def start(self, doc): self.goal_data", "estimate = model.eval(**kwargs) diffs.append(np.abs(estimate-target)) logger.debug(\"Model {} predicted a value of {}\" \"\".format(model.name, estimate))", "raise ValueError(\"Must supply keyword `x` or use fieldname {}\" \"\".format(self.independent_vars['x'])) #Structure input add", "nan and inf elif isinstance(doc[key], str): if \"inf\" == doc[key].lower() or \"nan\" ==", "dict, optional Initialization guess for the linear fit, available keys are ``slope`` and", "number of images to 
average. If None is specified, every point is rendered", "as e: bad_models.append(model) diffs.append(np.inf) logger.debug(\"Unable to yield estimate from model {}\" \"\".format(model.name)) logger.debug(e)", "Desired ``y`` value Returns ------- x : dict Variable name and floating value", "most recent fit of the given information. Parameters ---------- x : float or", "filters Example ------ ..code:: apply_filters(doc, filters = {'a' : lambda x : x", "if not self.result: raise RuntimeError(\"Can not evaluate without a saved fit, \"\\ \"use", "function you must fix one of the mirrors in place, while the other", "otherwise the graph will update every ```averages``` points. legend_keys : list, optional The", "drop_missing) continue #Evaluate filter resp.append(bool(func(doc[key]))) #Handle missing information except KeyError: resp.append(not drop_missing) #Handle", "in Skywalker Parameters ---------- model : lmfit.Model y: string Key of dependent variable", "Event. goal : float the target pixel tolerance : float, optional the tolerance", "def start(self, doc): self.goal_data = [] self.goal_axis, = self.ax.plot([],[],'r--', label='Target') super().start(doc) def event(self,", "= {'a0' : np.asarray(a0), 'a1' : np.asarray(a1)} kwargs.update(self.result.values) #Return prediction return self.result.model.eval(**kwargs) def", "use ax instead epoch : {'run', 'unix'}, optional If 'run' t=0 is the", "#Initialize values model_ranking = np.asarray(models) diffs = list() bad_models = list() #Calculate error", "the event document that reports the independent variable init_guess : dict, optional Initialization", "[self.y] + list(self.independent_vars.values()) def install_filters(self, filters): \"\"\" Install additional filters Parameters ---------- filters", "optional Only include documents who have associated data for each filter key. This", "not backsolve without a saved fit, \"\\ \"use .update_fit()\") class LinearFit(LiveBuild): \"\"\" Model", "model model = LinearModel(missing='drop', name=name) #Initialize parameters init = {'slope' : 0, 'intercept'", "= {'x0' : 0, 'x1': 0, 'x2' : 0} if init_guess: init.update(init_guess) #Initialize", "right number of shots to average if len(self._avg_cache) >= self.average: #Overwrite event number", "parameters init = {'slope' : 0, 'intercept' : 0} if init_guess: init.update(init_guess) #Initialize", "two-bounce system def two_bounce(a0, a1, x0, x1, x2): return x0 + a0*x1 +", "will always show the scan_id followed by a colon (\"1: \"). Each xlim", "two_bounce(a0, a1, x0, x1, x2): return x0 + a0*x1 + a1*x2 #Create model", "self.field_names: doc['data'][key] = np.mean([d['data'][key] for d in self._avg_cache]) #Send to callback super().event(doc) #Clear", "to callback super().event(doc) #Clear cache self._avg_cache.clear() def eval(self, *args, **kwargs): \"\"\" Estimate a", "iterables for nan and inf if isiterable(doc[key]): if any(np.isnan(doc[key])) or any(np.isinf(doc[key])): resp.append(not drop_missing)", "result super().eval(a0, a1) #Structure input and add past result kwargs = {'a0' :", "predicted outcome based on the most recent fit of the given information. Parameters", "field name in the event document Returns ------- estimate : float Y value", "fit of the model. 
"""
Skywalker
"""
############
# Standard #
############
import logging
import simplejson as sjson
from pathlib import Path

###############
# Third Party #
###############
import lmfit
import pandas as pd
import numpy as np
from lmfit.models import LinearModel
from bluesky.callbacks import (LiveFit, LiveFitPlot, CallbackBase, LivePlot)

##########
# Module #
##########
from .utils.argutils import isiterable

logger = logging.getLogger(__name__)


def apply_filters(doc, filters=None, drop_missing=True):
    """
    Filter an event document

    Parameters
    ----------
    doc : dict
        Bluesky Document to filter

    filters : dict
        Filters are provided in a dictionary of key / callable pairs that
        take a single input from the data stream and return a boolean value.

    drop_missing : bool, optional
        Only include documents that have associated data for each filter key.
        This includes events missing the key entirely, reporting NaN or
        reporting Inf.

    Returns
    -------
    resp : bool
        Whether the event passes all provided filters

    Example
    -------
    ..code::

        apply_filters(doc, filters={'a': lambda x: x > 0,
                                    'c': lambda x: 4 < x < 6})
    """
    resp = []
    filters = filters or dict()
    #Iterate through filters
    for key, func in filters.items():
        try:
            #Check iterables for nan and inf
            if isiterable(doc[key]):
                if any(np.isnan(doc[key])) or any(np.isinf(doc[key])):
                    resp.append(not drop_missing)
                    continue
            #Check string entries for nan and inf
            elif isinstance(doc[key], str):
                if "inf" == doc[key].lower() or "nan" == doc[key].lower():
                    resp.append(not drop_missing)
                    continue
            #Handle all other types
            else:
                if np.isnan(doc[key]) or np.isinf(doc[key]):
                    resp.append(not drop_missing)
                    continue
            #Evaluate filter
            resp.append(bool(func(doc[key])))
        #Handle missing information
        except KeyError:
            resp.append(not drop_missing)
        #Handle improper filter
        except Exception as e:
            logger.critical('Filter associated with event_key {} '
                            'reported exception "{}"'
                            ''.format(key, e))
    #Summarize
    return all(resp)
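# Illustrative sketch (not part of the original module): how apply_filters
# judges a single event's data dictionary. The field names and filter
# callables below are hypothetical and exist only for demonstration.
def _example_apply_filters():
    data = {'centroid_x': 5.2, 'intensity': float('nan')}
    #Passes: centroid_x exists and lies inside the requested window
    in_window = apply_filters(data, filters={'centroid_x': lambda x: 4 < x < 6})
    #Rejected: intensity reports NaN, so with drop_missing=True the event fails
    good_shot = apply_filters(data, filters={'intensity': lambda x: x > 0})
    return in_window, good_shot  # (True, False)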
def rank_models(models, target, **kwargs):
    """
    Rank a list of models based on the accuracy of their prediction

    Parameters
    ----------
    models : list
        List of models to evaluate

    target : float
        Actual value of target

    kwargs :
        All of the keys the models will need to evaluate

    Returns
    -------
    model_ranking : list
        List of models sorted by accuracy of predictions
    """
    #Initialize values
    model_ranking = np.asarray(models)
    diffs = list()
    bad_models = list()
    #Calculate error of each model
    for model in models:
        try:
            estimate = model.eval(**kwargs)
            diffs.append(np.abs(estimate - target))
            logger.debug("Model {} predicted a value of {}"
                         "".format(model.name, estimate))
        except RuntimeError as e:
            bad_models.append(model)
            diffs.append(np.inf)
            logger.debug("Unable to yield estimate from model {}"
                         "".format(model.name))
            logger.debug(e)
    #Rank performances
    model_ranking = model_ranking[np.argsort(diffs)]
    #Remove models who failed to make an estimate
    return [model for model in model_ranking if model not in bad_models]
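# Hedged sketch (not in the original module): rank_models only requires
# objects that expose ``.name`` and ``.eval(**kwargs)``, so tiny stand-in
# models are enough to show the ranking behavior. All names are hypothetical.
def _example_rank_models():
    class _StubModel:
        def __init__(self, name, offset):
            self.name = name
            self._offset = offset

        def eval(self, **kwargs):
            return kwargs['x'] + self._offset

    close = _StubModel('close', 0.1)
    far = _StubModel('far', 5.0)
    #The model whose prediction lands nearest the measured target comes first
    return rank_models([far, close], target=10.0, x=10.0)  # [close, far]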
\"\"\" self.filters.update(filters) def event(self,", "fieldname {}\" \"\".format(self.independent_vars['x'])) #Structure input add past result kwargs = {'x' : np.asarray(x)}", "#Gather fit information (x0, x1, x2) = (self.result.values['x0'], self.result.values['x1'], self.result.values['x2']) #Return computed value", "entries for nan and inf elif isinstance(doc[key], str): if \"inf\" == doc[key].lower() or", "inf elif isinstance(doc[key], str): if \"inf\" == doc[key].lower() or \"nan\" == doc[key].lower(): resp.append(not", "single variable axis and a depended variable Parameters ---------- y : str Keyword", "legend_keys : list, optional The list of keys to extract from the RunStart", "a plot from a stream of Events. Parameters ---------- y : str the", "target, **kwargs): \"\"\" Rank a list of models based on the accuracy of", "number. Special case: If the Event's data includes a key named 'seq_num' or", "and inf elif isinstance(doc[key], str): if \"inf\" == doc[key].lower() or \"nan\" == doc[key].lower():", "{}\" \"\".format(self.independent_vars['x'])) #Structure input add past result kwargs = {'x' : np.asarray(x)} kwargs.update(self.result.values)", "Install additional filters Parameters ---------- filters : dict Filters are provided in a", "an estimate return [model for model in model_ranking if model not in bad_models]", "entirely, reporting NaN or reporting Inf. Returns ------- resp : bool Whether the", "continue #Evaluate filter resp.append(bool(func(doc[key]))) #Handle missing information except KeyError: resp.append(not drop_missing) #Handle improper", "dependent variable x: str Keyword in the event document that reports the independent", ": float, optional Fix the first mirror in the system a1 : float,", "y, x=None, *, goal=0.0, tolerance=0.0, averages=None, **kwargs): super().__init__(y, x, **kwargs) self.legend_title = None", "keyword arguments are passed through to ``Axes.plot``. Notes ----- If your figure blocks", "kwargs[self.independent_vars['x']] else: raise ValueError(\"Must supply keyword `x` or use fieldname {}\" \"\".format(self.independent_vars['x'])) #Structure", "int or None, optional Update rate of the model. If set to None,", "set to None, the model will only be computed at the end of", "#Run event through filters if not apply_filters(doc['data']): return #Add doc to average cache", "#Return x position if m == 0 and b != target: raise ValueError(\"Unable", "variable mirror key and solvable value \"\"\" #Make sure we have a fit", "for the contained model. When None (default) the name is the same as", "name(self): \"\"\" Name of the model \"\"\" return self.model.name @property def field_names(self): \"\"\"", "dict Map independent variables names to keys in the event document stream init_guess:", "the requested dependent variable ..note:: For multivariable functions the user may have to", "a saved fit, \"\\ \"use .update_fit()\") class LinearFit(LiveBuild): \"\"\" Model to fit a", "#Check string entries for nan and inf elif isinstance(doc[key], str): if \"inf\" ==", "raise ValueError(\"Exactly one of the mirror positions \"\\ \"must be specified to backsolve", "data for each filter key. This includes events missing the key entirely, reporting", "model function update_every : int or None, optional Update rate of the model.", "None, use the Event's sequence number. Special case: If the Event's data includes", "they come, otherwise the graph will update every ```averages``` points. 
legend_keys : list,", "kwargs)) if not self.result: raise RuntimeError(\"Can not evaluate without a saved fit, \"\\", "a0*x1)/ x2, 'a0' : a0} else: return {'a0' : (target - x0 -", "(LiveFit, LiveFitPlot, CallbackBase, LivePlot) ########## # Module # ########## from .utils.argutils import isiterable", "```averages``` points. legend_keys : list, optional The list of keys to extract from", "your IPython session. Examples -------- >>> my_plotter = LivePlotWithGoals('det', 'motor', goal=10.0, tolerance=1.5, averages=None,", "document and format in the legend of the plot. The legend will always", "**kwargs): \"\"\" Use the most recent fit to find the independent variables that", "import numpy as np from lmfit.models import LinearModel from bluesky.callbacks import (LiveFit, LiveFitPlot,", "float, optional Fix the second mirror in the system Returns ------- angles :", "independent_vars = ['a0', 'a1'], missing='drop') #Initialize parameters init = {'x0' : 0, 'x1':", "information Parameters ---------- a0 : float Pitch of the first mirror a1 :", "[model for model in model_ranking if model not in bad_models] class LiveBuild(LiveFit): \"\"\"", "passed to Axes.set_ylim ax : Axes, optional matplotib Axes; if none specified, new", "class LiveBuild(LiveFit): \"\"\" Base class for live model building in Skywalker Parameters ----------", "0, 'c' : lambda x : 4 < x < 6}) \"\"\" resp", "necessary when we are grouping the points if self.averages is not None: self.update_plot(force=True)", "a multivariable function you must fix one of the mirrors in place, while", "1 i.e update on every new event \"\"\" def __init__(self, y, x, init_guess=None,", "self.goal_axis.set_data(self.x_data, self.goal_data) goal = np.asarray(self.goal_data) distance = 2 if self.averages is None else", "filters.items(): try: #Check iterables for nan and inf if isiterable(doc[key]): if any(np.isnan(doc[key])) or", "functions the user may have to specify which variable to solve for, and", "the first mirror in the system a1 : float, optional Fix the second", "- x0 - a1*x2)/ x1, 'a1' : a1} class LivePlotWithGoal(LivePlot): \"\"\" Build a", "Build a function that updates a plot from a stream of Events. 
Parameters", "= lmfit.Model(two_bounce, independent_vars = ['a0', 'a1'], missing='drop') #Initialize parameters init = {'x0' :", "eval(self, *args, **kwargs): \"\"\" Estimate a point based on the current fit of", "model kwargs : The value for the indepenedent variable can also be given", "to None, the model will only be computed at the end of the", "of str Tuple fo the mirror pitches (a1, a2) init_guess : dict, optional", "line information (m, b) = (self.result.values['slope'], self.result.values['intercept']) #Return x position if m ==", "``x`` position that solves the reaches the given target Parameters ---------- target :", "\"\"\" #Make sure we have a fit super().backsolve(target, a0=a0, a1=a1) #Check for valid", "fit, \"\\ \"use .update_fit()\") class LinearFit(LiveBuild): \"\"\" Model to fit a linear relationship", "__init__(self, y, x=None, *, goal=0.0, tolerance=0.0, averages=None, **kwargs): super().__init__(y, x, **kwargs) self.legend_title =", "model_ranking[np.argsort(diffs)] #Remove models who failed to make an estimate return [model for model", "of their prediction Parameters ---------- models : list List of models to evaluate", "doc to average cache self._avg_cache.append(doc) #Check we have the right number of shots", "logging import simplejson as sjson from pathlib import Path ############### # Third Party", "self.independent_vars['x'] in kwargs.keys(): x = kwargs[self.independent_vars['x']] else: raise ValueError(\"Must supply keyword `x` or", "independent_vars={'a0' : alphas[0], 'a1' : alphas[1]}, init_guess=init, update_every=update_every, average=average) def eval(self, a0=0., a1=0.,", "linear fit \"\"\" #Check result super().eval(**kwargs) #Standard x setup if kwargs.get('x'): x =", "0) or force: self.goal_axis.set_data(self.x_data, self.goal_data) goal = np.asarray(self.goal_data) distance = 2 if self.averages", "Skywalker Parameters ---------- model : lmfit.Model y: string Key of dependent variable indpendent_vars", "Event's sequence number. Special case: If the Event's data includes a key named", "scan_id followed by a colon (\"1: \"). Each xlim : tuple, optional passed", "# Only necessary when we are grouping the points if self.averages is not", "def eval(self, a0=0., a1=0., **kwargs): \"\"\" Evaluate the predicted outcome based on the", "logger.debug(e) #Rank performances model_ranking = model_ranking[np.argsort(diffs)] #Remove models who failed to make an", "are be ``x0``, ``x1``, and ``x2`` name : optional , str Name for", "or reporting Inf. 
Returns ------- resp : bool Whether the event passes all", "keyword `x` or use fieldname {}\" \"\".format(self.independent_vars['x'])) #Structure input add past result kwargs", "event \"\"\" def __init__(self, centroid, alphas, name=None, init_guess=None, update_every=1, average=1): #Simple model of", "will need to evaluate Returns ------- model_ranking : list List of models sorted", ": float Pitch of the second mirror Returns ------- centroid : float Position", "optional Initial guesses for other values if expected update_every : int or None,", "centroid, independent_vars={'a0' : alphas[0], 'a1' : alphas[1]}, init_guess=init, update_every=update_every, average=average) def eval(self, a0=0.,", "setup if kwargs.get('x'): x = kwargs['x'] elif self.independent_vars['x'] in kwargs.keys(): x = kwargs[self.independent_vars['x']]", "RE(my_scan, my_plotter) \"\"\" def __init__(self, y, x=None, *, goal=0.0, tolerance=0.0, averages=None, **kwargs): super().__init__(y,", "subclasses \"\"\" logger.debug(\"Evaluating model {} with args : {}, kwargs {}\" \"\".format(self.name, args,", "resp.append(bool(func(doc[key]))) #Handle missing information except KeyError: resp.append(not drop_missing) #Handle improper filter except Exception", "LivePlot) ########## # Module # ########## from .utils.argutils import isiterable logger = logging.getLogger(__name__)", "kwargs.update(self.result.values) #Return prediction return self.result.model.eval(**kwargs) def backsolve(self, target, **kwargs): \"\"\" Find the ``x``", "add past result kwargs = {'a0' : np.asarray(a0), 'a1' : np.asarray(a1)} kwargs.update(self.result.values) #Return", "self.goal_axis, = self.ax.plot([],[],'r--', label='Target') super().start(doc) def event(self, doc): super().event(doc) self.event_count += 1 def", "init_guess: dict, optional Initial guesses for other values if expected update_every : int", "update_caches(self, x, y): self.goal_data.append(self.goal) super().update_caches(x, y) def stop(self, doc): # Ensure that the", "Filters are provided in a dictionary of key / callable pairs that take", "= (self.result.values['slope'], self.result.values['intercept']) #Return x position if m == 0 and b !=", "goal self.tolerance = tolerance self.averages = averages self.event_count = 0 def start(self, doc):", "the tolerance for the pixel averages : float, optional The number of images", "------ ..code:: apply_filters(doc, filters = {'a' : lambda x : x > 0,", "be removed with an update to Bluesky Issue #684 doc['seq_num'] = len(self.ydata) +1", "averages=None, **kwargs): super().__init__(y, x, **kwargs) self.legend_title = None self.goal = goal self.tolerance =", "Initialization guess for the linear fit, available keys are ``slope`` and ``intercept`` name", "model building in Skywalker Parameters ---------- model : lmfit.Model y: string Key of", "estimate : float Y value as determined by current linear fit \"\"\" #Check", "place, while the other one is solved for. 
Parameters ---------- target : float", "= filters or dict() #Iterate through filters for key, func in filters.items(): try:", "= (self.result.values['x0'], self.result.values['x1'], self.result.values['x2']) #Return computed value if a0: return {'a1' : (target", "#Send to callback super().event(doc) #Clear cache self._avg_cache.clear() def eval(self, *args, **kwargs): \"\"\" Estimate", "pairs that take a single input from the data stream and return a", "\"\".format(self.name, args, kwargs)) if not self.result: raise RuntimeError(\"Can not evaluate without a saved", "plotted # Only necessary when we are grouping the points if self.averages is", "in the system a1 : float, optional Fix the second mirror in the", "i.e update on every new event \"\"\" def __init__(self, y, x, init_guess=None, update_every=1,", "None self.goal = goal self.tolerance = tolerance self.averages = averages self.event_count = 0", "update_plot(self, force=False): if self.averages is None or (self.averages is not None and self.event_count", "name of a data field in an Event, or 'seq_num' or 'time' If", "will only be computed at the end of the run. By default, this", "return all(resp) def rank_models(models, target, **kwargs): \"\"\" Rank a list of models based", "self._avg_cache.clear() def eval(self, *args, **kwargs): \"\"\" Estimate a point based on the current", "def eval(self, **kwargs): \"\"\" Evaluate the predicted outcome based on the most recent", "all([a0,a1]): raise ValueError(\"Exactly one of the mirror positions \"\\ \"must be specified to", "x position if m == 0 and b != target: raise ValueError(\"Unable to", "stream init_guess: dict, optional Initial guesses for other values if expected update_every :", "logger.debug(\"Model {} predicted a value of {}\" \"\".format(model.name, estimate)) except RuntimeError as e:", "\"\\ \"use .update_fit()\") class LinearFit(LiveBuild): \"\"\" Model to fit a linear relationship between", "fig : Figure, optional deprecated: use ax instead epoch : {'run', 'unix'}, optional", "from pathlib import Path ############### # Third Party # ############### import lmfit import", "target: raise ValueError(\"Unable to backsolve, because fit is horizontal \" \" after {}", "figure and axes are made. 
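# Hedged illustration (not part of the original module) of the averaging step
# LiveBuild.event performs before forwarding a document: the cached documents
# are collapsed into one whose data fields hold the mean of the cached values.
# The 'centroid_x' field below is a hypothetical example key.
def _example_average_cache():
    cache = [{'data': {'centroid_x': 1.0}}, {'data': {'centroid_x': 3.0}}]
    doc = {'data': {}, 'seq_num': 1}
    for key in ('centroid_x',):
        doc['data'][key] = np.mean([d['data'][key] for d in cache])
    return doc  # {'data': {'centroid_x': 2.0}, 'seq_num': 1}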
class LinearFit(LiveBuild):
    """
    Model to fit a linear relationship between a single variable axis and a
    dependent variable

    Parameters
    ----------
    y : str
        Keyword in the event document that reports the dependent variable

    x : str
        Keyword in the event document that reports the independent variable

    init_guess : dict, optional
        Initialization guess for the linear fit, available keys are ``slope``
        and ``intercept``

    name : optional, str
        Name for the contained model. When None (default) the name is the
        same as the model function

    update_every : int or None, optional
        Update rate of the model. If set to None, the model will only be
        computed at the end of the run. By default, this is set to 1 i.e
        update on every new event
    """
    def __init__(self, y, x, init_guess=None,
                 update_every=1, name=None, average=1):
        #Create model
        model = LinearModel(missing='drop', name=name)
        #Initialize parameters
        init = {'slope': 0, 'intercept': 0}
        if init_guess:
            init.update(init_guess)
        #Initialize fit
        super().__init__(model, y, {'x': x}, init_guess=init,
                         update_every=update_every, average=average)

    def eval(self, **kwargs):
        """
        Evaluate the predicted outcome based on the most recent fit of the
        given information.

        Parameters
        ----------
        x : float or int, optional
            Independent variable to evaluate linear model

        kwargs :
            The value for the independent variable can also be given as the
            field name in the event document

        Returns
        -------
        estimate : float
            Y value as determined by current linear fit
        """
        #Check result
        super().eval(**kwargs)
        #Standard x setup
        if kwargs.get('x'):
            x = kwargs['x']
        elif self.independent_vars['x'] in kwargs.keys():
            x = kwargs[self.independent_vars['x']]
        else:
            raise ValueError("Must supply keyword `x` or use fieldname {}"
                             "".format(self.independent_vars['x']))
        #Structure input and add past result
        kwargs = {'x': np.asarray(x)}
        kwargs.update(self.result.values)
        #Return prediction
        return self.result.model.eval(**kwargs)

    def backsolve(self, target, **kwargs):
        """
        Find the ``x`` position that reaches the given target

        Parameters
        ----------
        target : float
            Desired ``y`` value

        Returns
        -------
        x : dict
            Variable name and floating value
        """
        #Make sure we have a fit
        super().backsolve(target, **kwargs)
        #Gather line information
        (m, b) = (self.result.values['slope'],
                  self.result.values['intercept'])
        #Return x position
        if m == 0 and b != target:
            raise ValueError("Unable to backsolve, because fit is horizontal "
                             "after {} data points".format(len(self.ydata)))
        return {'x': (target - b) / m}
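# Minimal sketch (not from the original module) of the inversion used by
# LinearFit.backsolve: once a slope and intercept are stored from the fit,
# the x that produces a desired y is simply (target - intercept) / slope.
# The numbers below are arbitrary placeholders.
def _example_linear_backsolve(target=2.0, slope=0.5, intercept=1.0):
    return {'x': (target - intercept) / slope}  # -> {'x': 2.0}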
class MultiPitchFit(LiveBuild):
    """
    Model to fit centroid position of two mirror system

    Parameters
    ----------
    centroid : str
        Keyword in the event document that reports centroid position

    alphas : tuple of str
        Tuple of the mirror pitches (a1, a2)

    init_guess : dict, optional
        Initialization guess for the linear fit, available keys are ``x0``,
        ``x1``, and ``x2``

    name : optional, str
        Name for the contained model. When None (default) the name is the
        same as the model function

    update_every : int or None, optional
        Update rate of the model. If set to None, the model will only be
        computed at the end of the run. By default, this is set to 1 i.e
        update on every new event
    """
    def __init__(self, centroid, alphas, name=None,
                 init_guess=None, update_every=1, average=1):
        #Simple model of two-bounce system
        def two_bounce(a0, a1, x0, x1, x2):
            return x0 + a0*x1 + a1*x2

        #Create model
        model = lmfit.Model(two_bounce,
                            independent_vars=['a0', 'a1'],
                            missing='drop')
        #Initialize parameters
        init = {'x0': 0, 'x1': 0, 'x2': 0}
        if init_guess:
            init.update(init_guess)
        #Initialize fit
        super().__init__(model, centroid,
                         independent_vars={'a0': alphas[0],
                                           'a1': alphas[1]},
                         init_guess=init, update_every=update_every,
                         average=average)

    def eval(self, a0=0., a1=0., **kwargs):
        """
        Evaluate the predicted outcome based on the most recent fit of the
        given information

        Parameters
        ----------
        a0 : float
            Pitch of the first mirror

        a1 : float
            Pitch of the second mirror

        Returns
        -------
        centroid : float
            Position of the centroid as predicted by the current model fit
        """
        #Check result
        super().eval(a0, a1)
        #Structure input and add past result
        kwargs = {'a0': np.asarray(a0), 'a1': np.asarray(a1)}
        kwargs.update(self.result.values)
        #Return prediction
        return self.result.model.eval(**kwargs)

    def backsolve(self, target, a0=None, a1=None):
        """
        Find the mirror configuration to reach a certain pixel value

        Because this is a multivariable function you must fix one of the
        mirrors in place, while the other one is solved for.

        Parameters
        ----------
        target : float
            Desired pixel location

        a0 : float, optional
            Fix the first mirror in the system

        a1 : float, optional
            Fix the second mirror in the system

        Returns
        -------
        angles : dict
            Dictionary with the variable mirror key and solvable value
        """
        #Make sure we have a fit
        super().backsolve(target, a0=a0, a1=a1)
        #Check for valid request
        if not any([a0, a1]) or all([a0, a1]):
            raise ValueError("Exactly one of the mirror positions "
                             "must be specified to backsolve for the target")
        #Gather fit information
        (x0, x1, x2) = (self.result.values['x0'],
                        self.result.values['x1'],
                        self.result.values['x2'])
        #Return computed value
        if a0:
            return {'a1': (target - x0 - a0*x1) / x2,
                    'a0': a0}
        else:
            return {'a0': (target - x0 - a1*x2) / x1,
                    'a1': a1}
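# Hedged sketch (not from the original module) of the algebra behind
# MultiPitchFit.backsolve: with the fitted parameters (x0, x1, x2) known and
# the first mirror pitch a0 held fixed, the second pitch follows from
# target = x0 + a0*x1 + a1*x2. The numbers are arbitrary placeholders.
def _example_two_bounce_backsolve(target=100.0, x0=5.0, x1=2.0, x2=4.0, a0=10.0):
    a1 = (target - x0 - a0 * x1) / x2
    assert np.isclose(x0 + a0 * x1 + a1 * x2, target)
    return {'a0': a0, 'a1': a1}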
\"\"\" self.filters.update(filters) def event(self, doc): #Run event through filters if not", "The value for the indepenedent variable can also be given as the field", "== doc[key].lower(): resp.append(not drop_missing) continue #Handle all other types else: if np.isnan(doc[key]) or", ", str Name for the contained model. When None (default) the name is", "averages : float, optional The number of images to average. If None is", "with this callback, call `plt.ion()` in your IPython session. Examples -------- >>> my_plotter", ": str Keyword in the event document that reports centroid position alphas :", "diffs = list() bad_models = list() #Calculate error of each model for model", "goal=10.0, tolerance=1.5, averages=None, legend_keys=['sample']) >>> RE(my_scan, my_plotter) \"\"\" def __init__(self, y, x=None, *,", "else self.averages+1 if force: distance -= 1 self.ax.fill_between(self.x_data[-distance:], goal[-distance:]-self.tolerance, goal[-distance:]+self.tolerance, alpha=0.2, facecolor='r') super().update_plot()", "1 def update_plot(self, force=False): if self.averages is None or (self.averages is not None", "optional matplotib Axes; if none specified, new figure and axes are made. fig", "sure we have a fit super().backsolve(target, **kwargs) #Gather line information (m, b) =", "name=name) #Initialize parameters init = {'slope' : 0, 'intercept' : 0} if init_guess:", "in an Event, or 'seq_num' or 'time' If None, use the Event's sequence", "inf if isiterable(doc[key]): if any(np.isnan(doc[key])) or any(np.isinf(doc[key])): resp.append(not drop_missing) continue #Check string entries", "of the run. By default, this is set to 1 i.e update on", "in the event document that reports the independent variable init_guess : dict, optional", "float Position of the centroid as predicted by the current model fit \"\"\"", "mirror Returns ------- centroid : float Position of the centroid as predicted by", "(m, b) = (self.result.values['slope'], self.result.values['intercept']) #Return x position if m == 0 and", "Special case: If the Event's data includes a key named 'seq_num' or 'time',", "predictions \"\"\" #Initialize values model_ranking = np.asarray(models) diffs = list() bad_models = list()", "tolerance : float, optional the tolerance for the pixel averages : float, optional", "Parameters ---------- a0 : float Pitch of the first mirror a1 : float", "alphas[0], 'a1' : alphas[1]}, init_guess=init, update_every=update_every, average=average) def eval(self, a0=0., a1=0., **kwargs): \"\"\"", "float, optional Fix the first mirror in the system a1 : float, optional", "on the current fit of the model. Reimplemented by subclasses \"\"\" logger.debug(\"Evaluating model", "The legend will always show the scan_id followed by a colon (\"1: \").", "stream and return a boolean value. \"\"\" self.filters.update(filters) def event(self, doc): #Run event", "tuple, optional passed to Axes.set_xlim ylim : tuple, optional passed to Axes.set_ylim ax", "mirror in the system a1 : float, optional Fix the second mirror in", "#Check result super().eval(a0, a1) #Structure input and add past result kwargs = {'a0'", "is 1 Jan 1970 (\"the UNIX epoch\"). Default is 'run'. kwargs : All", "'x2' : 0} if init_guess: init.update(init_guess) #Initialize fit super().__init__(model, centroid, independent_vars={'a0' : alphas[0],", "stream and return a boolean value. 
drop_missing : bool, optional Only include documents", "the Event's data includes a key named 'seq_num' or 'time', that takes precedence", "time recorded in the RunStart document. If 'unix', t=0 is 1 Jan 1970", "{}\" \"\".format(self.name, args, kwargs)) if not self.result: raise RuntimeError(\"Can not evaluate without a", "on the most recent fit of the given information. Parameters ---------- x :", "else: raise ValueError(\"Must supply keyword `x` or use fieldname {}\" \"\".format(self.independent_vars['x'])) #Structure input", "reports centroid position alphas : tuple of str Tuple fo the mirror pitches", "system Returns ------- angles : dict Dictionary with the variable mirror key and", "the second mirror in the system Returns ------- angles : dict Dictionary with", "super().event(doc) #Clear cache self._avg_cache.clear() def eval(self, *args, **kwargs): \"\"\" Estimate a point based", "the current model fit \"\"\" #Check result super().eval(a0, a1) #Structure input and add", "linear model kwargs : The value for the indepenedent variable can also be", "if none specified, new figure and axes are made. fig : Figure, optional", "of models sorted by accuracy of predictions \"\"\" #Initialize values model_ranking = np.asarray(models)", ": float, optional Fix the second mirror in the system Returns ------- angles", "your figure blocks the main thread when you are trying to scan with", "updates a plot from a stream of Events. Parameters ---------- y : str", "\"\"\" Name of the model \"\"\" return self.model.name @property def field_names(self): \"\"\" Name", "optional If 'run' t=0 is the time recorded in the RunStart document. If", "the most recent fit of the given information. Parameters ---------- x : float", "'seq_num' and 'time' recorded in every Event. goal : float the target pixel", "the system a1 : float, optional Fix the second mirror in the system", "init.update(init_guess) #Initialize fit super().__init__(model, y, {'x': x}, init_guess=init, update_every=update_every, average=average) def eval(self, **kwargs):", "position of two mirror system Parameters ---------- centroid : str Keyword in the", "drop_missing) #Handle improper filter except Exception as e: logger.critical('Filter associated with event_key {}'\\", "not in bad_models] class LiveBuild(LiveFit): \"\"\" Base class for live model building in", "'c' : lambda x : 4 < x < 6}) \"\"\" resp =", "most recent fit to find the independent variables that create the requested dependent", "RuntimeError(\"Can not backsolve without a saved fit, \"\\ \"use .update_fit()\") class LinearFit(LiveBuild): \"\"\"", "for. 
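

# A minimal usage sketch for ``apply_filters``. The event keys ('centroid_x',
# 'intensity') and the thresholds are hypothetical, chosen only to show how a
# NaN reading is handled when ``drop_missing`` is True.
def _apply_filters_sketch():
    data = {'centroid_x': 5.2, 'intensity': float('nan')}
    # Passes: 5.2 satisfies the range filter and the key is present
    in_range = apply_filters(data, filters={'centroid_x': lambda x: 4 < x < 6})
    # Rejected: 'intensity' reports NaN, so the event is dropped
    has_beam = apply_filters(data, filters={'intensity': lambda i: i > 0.5})
    return in_range, has_beam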


def rank_models(models, target, **kwargs):
    """
    Rank a list of models based on the accuracy of their prediction

    Parameters
    ----------
    models : list
        List of models to evaluate

    target : float
        Actual value of target

    kwargs :
        All of the keys the models will need to evaluate

    Returns
    -------
    model_ranking : list
        List of models sorted by accuracy of predictions
    """
    #Initialize values
    model_ranking = np.asarray(models)
    diffs = list()
    bad_models = list()
    #Calculate error of each model
    for model in models:
        try:
            estimate = model.eval(**kwargs)
            diffs.append(np.abs(estimate - target))
            logger.debug("Model {} predicted a value of {}"
                         "".format(model.name, estimate))
        except RuntimeError as e:
            bad_models.append(model)
            diffs.append(np.inf)
            logger.debug("Unable to yield estimate from model {}"
                         "".format(model.name))
            logger.debug(e)
    #Rank performances
    model_ranking = model_ranking[np.argsort(diffs)]
    #Remove models that failed to make an estimate
    return [model for model in model_ranking if model not in bad_models]
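

# A sketch of the ranking contract: ``rank_models`` only needs objects that
# expose ``.name`` and ``.eval(**kwargs)``, so two toy stand-ins are used here
# instead of real fits. The names, offsets and target are arbitrary.
def _rank_models_sketch():
    class _Toy:
        def __init__(self, name, offset):
            self.name = name
            self.offset = offset

        def eval(self, **kwargs):
            return kwargs['x'] + self.offset

    # 'near' misses the target by 0.1, 'far' by 5.0, so 'near' is ranked first
    return rank_models([_Toy('near', 0.1), _Toy('far', 5.0)],
                       target=10.0, x=10.0)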


class LiveBuild(LiveFit):
    """
    Base class for live model building in Skywalker

    Parameters
    ----------
    model : lmfit.Model

    y : string
        Key of dependent variable

    independent_vars : dict
        Map independent variable names to keys in the event document stream

    init_guess : dict, optional
        Initial guesses for other values if expected

    update_every : int or None, optional
        Update rate of the model. If set to None, the model will only be
        computed at the end of the run. By default, this is set to 1, i.e.
        update on every new event
    """
    def __init__(self, model, y, independent_vars, init_guess=None,
                 update_every=1, filters=None, drop_missing=True, average=1):
        super().__init__(model, y, independent_vars,
                         init_guess=init_guess, update_every=update_every)
        #Add additional keys
        self.average = average
        self.filters = filters or {}
        self.drop_missing = drop_missing
        self._avg_cache = list()

    @property
    def name(self):
        """
        Name of the model
        """
        return self.model.name

    @property
    def field_names(self):
        """
        Name of all the keys associated with the fit
        """
        return [self.y] + list(self.independent_vars.values())

    def install_filters(self, filters):
        """
        Install additional filters

        Parameters
        ----------
        filters : dict
            Filters are provided in a dictionary of key / callable pairs that
            take a single input from the data stream and return a boolean
            value
        """
        self.filters.update(filters)

    def event(self, doc):
        #Run event through filters
        if not apply_filters(doc['data'], filters=self.filters,
                             drop_missing=self.drop_missing):
            return
        #Add doc to average cache
        self._avg_cache.append(doc)
        #Check we have the right number of shots to average
        if len(self._avg_cache) >= self.average:
            #Overwrite event number
            #This can be removed with an update to Bluesky Issue #684
            doc['seq_num'] = len(self.ydata) + 1
            #Rewrite document with averages
            for key in self.field_names:
                doc['data'][key] = np.mean([d['data'][key]
                                            for d in self._avg_cache])
            #Send to callback
            super().event(doc)
            #Clear cache
            self._avg_cache.clear()

    def eval(self, *args, **kwargs):
        """
        Estimate a point based on the current fit of the model.

        Reimplemented by subclasses
        """
        logger.debug("Evaluating model {} with args : {}, kwargs {}"
                     "".format(self.name, args, kwargs))
        if not self.result:
            raise RuntimeError("Can not evaluate without a saved fit, "
                               "use .update_fit()")

    def backsolve(self, target, **kwargs):
        """
        Use the most recent fit to find the independent variables that create
        the requested dependent variable

        .. note::

            For multivariable functions the user may have to specify which
            variable to solve for, and which to keep fixed
        """
        logger.debug("Backsolving model {} for target {} and kwargs {}"
                     "".format(self.name, target, kwargs))
        if not self.result:
            raise RuntimeError("Can not backsolve without a saved fit, "
                               "use .update_fit()")
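

# A sketch of wiring filters and shot averaging into a LiveBuild fit. It uses
# lmfit's LinearModel directly; the event keys 'centroid_x', 'mirror_pitch'
# and 'intensity' are hypothetical placeholders for real detector fields.
def _live_build_sketch():
    fit = LiveBuild(LinearModel(missing='drop'), 'centroid_x',
                    {'x': 'mirror_pitch'},
                    init_guess={'slope': 0, 'intercept': 0},
                    average=10)
    # Ignore shots where the beam is too weak to trust the centroid reading
    fit.install_filters({'intensity': lambda i: i > 0.5})
    return fit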


class LinearFit(LiveBuild):
    """
    Model to fit a linear relationship between a single variable axis and a
    dependent variable

    Parameters
    ----------
    y : str
        Keyword in the event document that reports the dependent variable

    x : str
        Keyword in the event document that reports the independent variable

    init_guess : dict, optional
        Initialization guess for the linear fit, available keys are
        ``slope`` and ``intercept``

    name : str, optional
        Name for the contained model. When None (default) the name is the
        same as the model function

    update_every : int or None, optional
        Update rate of the model. If set to None, the model will only be
        computed at the end of the run. By default, this is set to 1, i.e.
        update on every new event
    """
    def __init__(self, y, x, init_guess=None, update_every=1,
                 name=None, average=1):
        #Create model
        model = LinearModel(missing='drop', name=name)
        #Initialize parameters
        init = {'slope': 0, 'intercept': 0}
        if init_guess:
            init.update(init_guess)
        #Initialize fit
        super().__init__(model, y, {'x': x}, init_guess=init,
                         update_every=update_every, average=average)

    def eval(self, **kwargs):
        """
        Evaluate the predicted outcome based on the most recent fit of the
        given information.

        Parameters
        ----------
        x : float or int, optional
            Independent variable to evaluate linear model

        kwargs :
            The value for the independent variable can also be given as the
            field name in the event document

        Returns
        -------
        estimate : float
            Y value as determined by current linear fit
        """
        #Check result
        super().eval(**kwargs)
        #Standard x setup
        if kwargs.get('x'):
            x = kwargs['x']
        elif self.independent_vars['x'] in kwargs.keys():
            x = kwargs[self.independent_vars['x']]
        else:
            raise ValueError("Must supply keyword `x` or use fieldname {}"
                             "".format(self.independent_vars['x']))
        #Structure input and add past result
        kwargs = {'x': np.asarray(x)}
        kwargs.update(self.result.values)
        #Return prediction
        return self.result.model.eval(**kwargs)

    def backsolve(self, target, **kwargs):
        """
        Find the ``x`` position that reaches the given target

        Parameters
        ----------
        target : float
            Desired ``y`` value

        Returns
        -------
        x : dict
            Variable name and floating value
        """
        #Make sure we have a fit
        super().backsolve(target, **kwargs)
        #Gather line information
        (m, b) = (self.result.values['slope'],
                  self.result.values['intercept'])
        #Return x position
        if m == 0 and b != target:
            raise ValueError("Unable to backsolve, because fit is horizontal "
                             "after {} data points".format(len(self.ydata)))
        return {'x': (target - b) / m}
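

# A sketch of the intended LinearFit call pattern. The field names are
# hypothetical, and the eval/backsolve calls assume the fit has already been
# updated by subscribing it to a run (e.g. ``RE(plan, fit)``); calling them
# before any fit exists raises RuntimeError by design.
def _linear_fit_sketch():
    fit = LinearFit('centroid_x', 'mirror_pitch', average=5, name='line')
    estimate = fit.eval(x=0.25)            # predicted centroid at pitch 0.25
    solution = fit.backsolve(target=10.0)  # pitch expected to give centroid 10
    return estimate, solution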


class MultiPitchFit(LiveBuild):
    """
    Model to fit centroid position of a two mirror system

    Parameters
    ----------
    centroid : str
        Keyword in the event document that reports centroid position

    alphas : tuple of str
        Tuple of the mirror pitches (a1, a2)

    init_guess : dict, optional
        Initialization guess for the linear fit, available keys are
        ``x0``, ``x1``, and ``x2``

    name : str, optional
        Name for the contained model. When None (default) the name is the
        same as the model function

    update_every : int or None, optional
        Update rate of the model. If set to None, the model will only be
        computed at the end of the run. By default, this is set to 1, i.e.
        update on every new event
    """
    def __init__(self, centroid, alphas, name=None,
                 init_guess=None, update_every=1, average=1):
        #Simple model of two-bounce system
        def two_bounce(a0, a1, x0, x1, x2):
            return x0 + a0*x1 + a1*x2

        #Create model
        model = lmfit.Model(two_bounce,
                            independent_vars=['a0', 'a1'],
                            missing='drop')
        #Initialize parameters
        init = {'x0': 0, 'x1': 0, 'x2': 0}
        if init_guess:
            init.update(init_guess)
        #Initialize fit
        super().__init__(model, centroid,
                         independent_vars={'a0': alphas[0],
                                           'a1': alphas[1]},
                         init_guess=init, update_every=update_every,
                         average=average)

    def eval(self, a0=0., a1=0., **kwargs):
        """
        Evaluate the predicted outcome based on the most recent fit of the
        given information

        Parameters
        ----------
        a0 : float
            Pitch of the first mirror

        a1 : float
            Pitch of the second mirror

        Returns
        -------
        centroid : float
            Position of the centroid as predicted by the current model fit
        """
        #Check result
        super().eval(a0, a1)
        #Structure input and add past result
        kwargs = {'a0': np.asarray(a0),
                  'a1': np.asarray(a1)}
        kwargs.update(self.result.values)
        #Return prediction
        return self.result.model.eval(**kwargs)

    def backsolve(self, target, a0=None, a1=None):
        """
        Find the mirror configuration to reach a certain pixel value

        Because this is a multivariable function you must fix one of the
        mirrors in place, while the other one is solved for.

        Parameters
        ----------
        target : float
            Desired pixel location

        a0 : float, optional
            Fix the first mirror in the system

        a1 : float, optional
            Fix the second mirror in the system

        Returns
        -------
        angles : dict
            Dictionary with the variable mirror key and solvable value
        """
        #Make sure we have a fit
        super().backsolve(target, a0=a0, a1=a1)
        #Check for valid request
        if not any([a0, a1]) or all([a0, a1]):
            raise ValueError("Exactly one of the mirror positions "
                             "must be specified to backsolve for the target")
        #Gather fit information
        (x0, x1, x2) = (self.result.values['x0'],
                        self.result.values['x1'],
                        self.result.values['x2'])
        #Return computed value
        if a0:
            return {'a1': (target - x0 - a0*x1) / x2,
                    'a0': a0}
        else:
            return {'a0': (target - x0 - a1*x2) / x1,
                    'a1': a1}
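

# A sketch of the two-mirror workflow: predict a centroid from a pair of
# pitches, then hold one mirror fixed and solve for the other. The event keys
# 'centroid_x', 'm1_pitch' and 'm2_pitch' are hypothetical, and both calls
# assume the fit has already been updated during a run.
def _multi_pitch_sketch():
    fit = MultiPitchFit('centroid_x', ('m1_pitch', 'm2_pitch'))
    predicted = fit.eval(a0=0.1, a1=0.2)            # centroid for these pitches
    solution = fit.backsolve(target=250.0, a0=0.1)  # solve a1 with a0 fixed
    return predicted, solution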


class LivePlotWithGoal(LivePlot):
    """
    Build a function that updates a plot from a stream of Events.

    Parameters
    ----------
    y : str
        the name of a data field in an Event

    x : str, optional
        the name of a data field in an Event, or 'seq_num' or 'time'.
        If None, use the Event's sequence number. Special case: If the
        Event's data includes a key named 'seq_num' or 'time', that takes
        precedence over the standard 'seq_num' and 'time' recorded in every
        Event.

    goal : float
        the target pixel

    tolerance : float, optional
        the tolerance around the target pixel

    averages : int, optional
        The number of images to average. If None is specified, every point
        is rendered as it arrives, otherwise the graph will update every
        ``averages`` points.

    legend_keys : list, optional
        The list of keys to extract from the RunStart document and format
        in the legend of the plot. The legend will always show the scan_id
        followed by a colon ("1: ").

    xlim : tuple, optional
        passed to Axes.set_xlim

    ylim : tuple, optional
        passed to Axes.set_ylim

    ax : Axes, optional
        matplotlib Axes; if none specified, new figure and axes are made.

    fig : Figure, optional
        deprecated: use ax instead

    epoch : {'run', 'unix'}, optional
        If 'run' t=0 is the time recorded in the RunStart document. If 'unix',
        t=0 is 1 Jan 1970 ("the UNIX epoch"). Default is 'run'.

    kwargs :
        All additional keyword arguments are passed through to ``Axes.plot``.

    Notes
    -----
    If your figure blocks the main thread when you are trying to scan with
    this callback, call `plt.ion()` in your IPython session.

    Examples
    --------
    >>> my_plotter = LivePlotWithGoal('det', 'motor', goal=10.0,
    ...                               tolerance=1.5, averages=None,
    ...                               legend_keys=['sample'])
    >>> RE(my_scan, my_plotter)
    """
    def __init__(self, y, x=None, *, goal=0.0, tolerance=0.0, averages=None,
                 **kwargs):
        super().__init__(y, x, **kwargs)
        self.legend_title = None
        self.goal = goal
        self.tolerance = tolerance
        self.averages = averages
        self.event_count = 0

    def start(self, doc):
        self.goal_data = []
        self.goal_axis, = self.ax.plot([], [], 'r--', label='Target')
        super().start(doc)

    def event(self, doc):
        super().event(doc)
        self.event_count += 1

    def update_plot(self, force=False):
        if (self.averages is None
                or (self.averages is not None
                    and self.event_count % self.averages == 0)
                or force):
            self.goal_axis.set_data(self.x_data, self.goal_data)
            goal = np.asarray(self.goal_data)
            distance = 2 if self.averages is None else self.averages + 1
            if force:
                distance -= 1
            self.ax.fill_between(self.x_data[-distance:],
                                 goal[-distance:] - self.tolerance,
                                 goal[-distance:] + self.tolerance,
                                 alpha=0.2, facecolor='r')
            super().update_plot()
            self.ax.set_xlim(left=0, right=None, auto=True)

    def update_caches(self, x, y):
        self.goal_data.append(self.goal)
        super().update_caches(x, y)

    def stop(self, doc):
        # Ensure that the last events are plotted
        # Only necessary when we are grouping the points
        if self.averages:
            self.update_plot(force=True)
        super().stop(doc)
Examples -------- >>> my_plotter = LivePlotWithGoals('det', 'motor', goal=10.0, tolerance=1.5,", "target\") #Gather fit information (x0, x1, x2) = (self.result.values['x0'], self.result.values['x1'], self.result.values['x2']) #Return computed", "document that reports the dependent variable x: str Keyword in the event document", "mirror in the system Returns ------- angles : dict Dictionary with the variable", "Name of the model \"\"\" return self.model.name @property def field_names(self): \"\"\" Name of", "as predicted by the current model fit \"\"\" #Check result super().eval(a0, a1) #Structure", "Name for the contained model. When None (default) the name is the same", "the target pixel tolerance : float, optional the tolerance for the pixel averages", "or 'time', that takes precedence over the standard 'seq_num' and 'time' recorded in", "-------- >>> my_plotter = LivePlotWithGoals('det', 'motor', goal=10.0, tolerance=1.5, averages=None, legend_keys=['sample']) >>> RE(my_scan, my_plotter)", "raise ValueError(\"Unable to backsolve, because fit is horizontal \" \" after {} data", "``x1``, and ``x2`` name : optional , str Name for the contained model.", "value for the indepenedent variable can also be given as the field name", "target {} and kwargs {}\" \"\".format(self.name, target, kwargs)) if not self.result: raise RuntimeError(\"Can", "fit super().backsolve(target, **kwargs) #Gather line information (m, b) = (self.result.values['slope'], self.result.values['intercept']) #Return x", ".update_fit()\") def backsolve(self, target, **kwargs): \"\"\" Use the most recent fit to find", "``intercept`` name : optional , str Name for the contained model. When None", "func in filters.items(): try: #Check iterables for nan and inf if isiterable(doc[key]): if", "past result kwargs = {'a0' : np.asarray(a0), 'a1' : np.asarray(a1)} kwargs.update(self.result.values) #Return prediction", "'reported exception \"{}\"'\\ ''.format(key, e)) #Summarize return all(resp) def rank_models(models, target, **kwargs): \"\"\"", "This includes events missing the key entirely, reporting NaN or reporting Inf. Returns", "evaluate without a saved fit, \"\\ \"use .update_fit()\") def backsolve(self, target, **kwargs): \"\"\"", "Map independent variables names to keys in the event document stream init_guess: dict,", "0} if init_guess: init.update(init_guess) #Initialize fit super().__init__(model, centroid, independent_vars={'a0' : alphas[0], 'a1' :", "name of a data field in an Event x : str, optional the", "------- model_ranking : list List of models sorted by accuracy of predictions \"\"\"", "this is a multivariable function you must fix one of the mirrors in", "Estimate a point based on the current fit of the model. 
Reimplemented by", "as np from lmfit.models import LinearModel from bluesky.callbacks import (LiveFit, LiveFitPlot, CallbackBase, LivePlot)", "class for live model building in Skywalker Parameters ---------- model : lmfit.Model y:", "---------- x : float or int, optional Independent variable to evaluate linear model", "{'x': x}, init_guess=init, update_every=update_every, average=average) def eval(self, **kwargs): \"\"\" Evaluate the predicted outcome", "find the independent variables that create the requested dependent variable ..note:: For multivariable", "Because this is a multivariable function you must fix one of the mirrors", "self.average = average self.filters = filters or {} self.drop_missing = drop_missing self._avg_cache =", "mirror system Parameters ---------- centroid : str Keyword in the event document that", "saved fit, \"\\ \"use .update_fit()\") def backsolve(self, target, **kwargs): \"\"\" Use the most", "model : lmfit.Model y: string Key of dependent variable indpendent_vars : dict Map", "#Check we have the right number of shots to average if len(self._avg_cache) >=", "a list of models based on the accuracy of their prediction Parameters ----------", "reports the independent variable init_guess : dict, optional Initialization guess for the linear", "**kwargs): \"\"\" Find the ``x`` position that solves the reaches the given target", "as the field name in the event document Returns ------- estimate : float", "in kwargs.keys(): x = kwargs[self.independent_vars['x']] else: raise ValueError(\"Must supply keyword `x` or use", "to yield estimate from model {}\" \"\".format(model.name)) logger.debug(e) #Rank performances model_ranking = model_ranking[np.argsort(diffs)]", "information (x0, x1, x2) = (self.result.values['x0'], self.result.values['x1'], self.result.values['x2']) #Return computed value if a0:", "\"use .update_fit()\") def backsolve(self, target, **kwargs): \"\"\" Use the most recent fit to", "variable Parameters ---------- y : str Keyword in the event document that reports", "through to ``Axes.plot``. Notes ----- If your figure blocks the main thread when", "first mirror in the system a1 : float, optional Fix the second mirror", "must fix one of the mirrors in place, while the other one is", "points\".format(len(self.ydata))) return {'x' : (target-b)/m} class MultiPitchFit(LiveBuild): \"\"\" Model to fit centroid position", "the name is the same as the model function update_every : int or", "one is solved for. Parameters ---------- target : float Desired pixel location a0" ]
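A minimal, self-contained sketch of the two helper functions as they can be read off the fragments follows; isiterable here is a simplified stand-in for the project's .utils.argutils helper, the drop_missing default of True is an assumption, and models are assumed to expose .name and .eval(**kwargs).

# Sketch of apply_filters and rank_models reconstructed from the fragments.
import logging
import numpy as np

logger = logging.getLogger(__name__)


def isiterable(obj):
    # Simplified stand-in: strings count as scalars, everything else is duck-typed.
    if isinstance(obj, str):
        return False
    try:
        iter(obj)
        return True
    except TypeError:
        return False


def apply_filters(doc, filters=None, drop_missing=True):
    """Return True if the event data passes every key/callable filter."""
    resp = []
    filters = filters or dict()
    for key, func in filters.items():
        try:
            # Reject NaN/Inf hidden inside iterable values
            if isiterable(doc[key]):
                if any(np.isnan(doc[key])) or any(np.isinf(doc[key])):
                    resp.append(not drop_missing)
                    continue
            # Reject "nan"/"inf" string entries
            elif isinstance(doc[key], str):
                if doc[key].lower() in ("inf", "nan"):
                    resp.append(not drop_missing)
                    continue
            # Reject NaN/Inf scalars
            else:
                if np.isnan(doc[key]) or np.isinf(doc[key]):
                    resp.append(not drop_missing)
                    continue
            # Evaluate the filter itself
            resp.append(bool(func(doc[key])))
        except KeyError:
            # Key missing from the event entirely
            resp.append(not drop_missing)
        except Exception as e:
            logger.critical('Filter associated with event_key %s reported exception "%s"', key, e)
    return all(resp)


def rank_models(models, target, **kwargs):
    """Rank models by how closely model.eval(**kwargs) predicts target."""
    model_ranking = np.asarray(models)
    diffs = list()
    bad_models = list()
    for model in models:
        try:
            estimate = model.eval(**kwargs)
            diffs.append(np.abs(estimate - target))
        except RuntimeError as e:
            bad_models.append(model)
            diffs.append(np.inf)
            logger.debug("Unable to yield estimate from model %s: %s", model.name, e)
    # Sort by prediction error, then drop the models that failed outright
    model_ranking = model_ranking[np.argsort(diffs)]
    return [model for model in model_ranking if model not in bad_models]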
[ "len(sys.argv) == 1: print 'Usage:' print sys.argv[0], 'input.bed', 'output.bed', 'logdir', 'filter1', 'filter2', 'filter3'", "'output.bed', 'logdir', 'filter1', 'filter2', 'filter3' N = 4 inf, outf, outdir = sys.argv[1:N]", "if len(sys.argv) == 1: print 'Usage:' print sys.argv[0], 'input.bed', 'output.bed', 'logdir', 'filter1', 'filter2',", "'w') as fout: with open(inf, 'r') as fin: for l in fin: ls", "'filter1', 'filter2', 'filter3' N = 4 inf, outf, outdir = sys.argv[1:N] fltrs =", "cmd = ' ' logProc.logProc(outf, outdir, cmd, 'started') with open(outf, 'w') as fout:", "logProc.logProc(outf, outdir, cmd, 'started') with open(outf, 'w') as fout: with open(inf, 'r') as", "fin: for l in fin: ls = l.split() if ls[3] in fltrs: fout.write('\\t'.join(ls)+'\\n')", "in fin: ls = l.split() if ls[3] in fltrs: fout.write('\\t'.join(ls)+'\\n') logProc.logProc(outf, outdir, cmd,", "'logdir', 'filter1', 'filter2', 'filter3' N = 4 inf, outf, outdir = sys.argv[1:N] fltrs", "N = 4 inf, outf, outdir = sys.argv[1:N] fltrs = sys.argv[N:] print fltrs", "sys from sets import Set sys.path.insert(0, '/nethome/asalomatov/projects/ppln') import logProc if len(sys.argv) == 1:", "fin: ls = l.split() if ls[3] in fltrs: fout.write('\\t'.join(ls)+'\\n') logProc.logProc(outf, outdir, cmd, 'finished')", "as fin: for l in fin: ls = l.split() if ls[3] in fltrs:", "outf, outdir = sys.argv[1:N] fltrs = sys.argv[N:] print fltrs cmd = ' '", "with open(inf, 'r') as fin: for l in fin: ls = l.split() if", "'Usage:' print sys.argv[0], 'input.bed', 'output.bed', 'logdir', 'filter1', 'filter2', 'filter3' N = 4 inf,", "== 1: print 'Usage:' print sys.argv[0], 'input.bed', 'output.bed', 'logdir', 'filter1', 'filter2', 'filter3' N", "'input.bed', 'output.bed', 'logdir', 'filter1', 'filter2', 'filter3' N = 4 inf, outf, outdir =", "= 4 inf, outf, outdir = sys.argv[1:N] fltrs = sys.argv[N:] print fltrs cmd", "import Set sys.path.insert(0, '/nethome/asalomatov/projects/ppln') import logProc if len(sys.argv) == 1: print 'Usage:' print", "= sys.argv[1:N] fltrs = sys.argv[N:] print fltrs cmd = ' ' logProc.logProc(outf, outdir,", "' logProc.logProc(outf, outdir, cmd, 'started') with open(outf, 'w') as fout: with open(inf, 'r')", "import sys from sets import Set sys.path.insert(0, '/nethome/asalomatov/projects/ppln') import logProc if len(sys.argv) ==", "''' ''' import sys from sets import Set sys.path.insert(0, '/nethome/asalomatov/projects/ppln') import logProc if", "import logProc if len(sys.argv) == 1: print 'Usage:' print sys.argv[0], 'input.bed', 'output.bed', 'logdir',", "1: print 'Usage:' print sys.argv[0], 'input.bed', 'output.bed', 'logdir', 'filter1', 'filter2', 'filter3' N =", "4 inf, outf, outdir = sys.argv[1:N] fltrs = sys.argv[N:] print fltrs cmd =", "cmd, 'started') with open(outf, 'w') as fout: with open(inf, 'r') as fin: for", "'r') as fin: for l in fin: ls = l.split() if ls[3] in", "sys.path.insert(0, '/nethome/asalomatov/projects/ppln') import logProc if len(sys.argv) == 1: print 'Usage:' print sys.argv[0], 'input.bed',", "outdir, cmd, 'started') with open(outf, 'w') as fout: with open(inf, 'r') as fin:", "= ' ' logProc.logProc(outf, outdir, cmd, 'started') with open(outf, 'w') as fout: with", "sys.argv[0], 'input.bed', 'output.bed', 'logdir', 'filter1', 'filter2', 'filter3' N = 4 inf, outf, outdir", "'/nethome/asalomatov/projects/ppln') import logProc if len(sys.argv) == 1: print 'Usage:' print sys.argv[0], 'input.bed', 'output.bed',", "outdir = sys.argv[1:N] fltrs = sys.argv[N:] print fltrs cmd = ' ' 
logProc.logProc(outf,", "print 'Usage:' print sys.argv[0], 'input.bed', 'output.bed', 'logdir', 'filter1', 'filter2', 'filter3' N = 4", "fout: with open(inf, 'r') as fin: for l in fin: ls = l.split()", "sys.argv[1:N] fltrs = sys.argv[N:] print fltrs cmd = ' ' logProc.logProc(outf, outdir, cmd,", "with open(outf, 'w') as fout: with open(inf, 'r') as fin: for l in", "sys.argv[N:] print fltrs cmd = ' ' logProc.logProc(outf, outdir, cmd, 'started') with open(outf,", "fltrs cmd = ' ' logProc.logProc(outf, outdir, cmd, 'started') with open(outf, 'w') as", "''' import sys from sets import Set sys.path.insert(0, '/nethome/asalomatov/projects/ppln') import logProc if len(sys.argv)", "open(inf, 'r') as fin: for l in fin: ls = l.split() if ls[3]", "= sys.argv[N:] print fltrs cmd = ' ' logProc.logProc(outf, outdir, cmd, 'started') with", "'started') with open(outf, 'w') as fout: with open(inf, 'r') as fin: for l", "l in fin: ls = l.split() if ls[3] in fltrs: fout.write('\\t'.join(ls)+'\\n') logProc.logProc(outf, outdir,", "open(outf, 'w') as fout: with open(inf, 'r') as fin: for l in fin:", "inf, outf, outdir = sys.argv[1:N] fltrs = sys.argv[N:] print fltrs cmd = '", "as fout: with open(inf, 'r') as fin: for l in fin: ls =", "for l in fin: ls = l.split() if ls[3] in fltrs: fout.write('\\t'.join(ls)+'\\n') logProc.logProc(outf,", "'filter2', 'filter3' N = 4 inf, outf, outdir = sys.argv[1:N] fltrs = sys.argv[N:]", "logProc if len(sys.argv) == 1: print 'Usage:' print sys.argv[0], 'input.bed', 'output.bed', 'logdir', 'filter1',", "from sets import Set sys.path.insert(0, '/nethome/asalomatov/projects/ppln') import logProc if len(sys.argv) == 1: print", "' ' logProc.logProc(outf, outdir, cmd, 'started') with open(outf, 'w') as fout: with open(inf,", "sets import Set sys.path.insert(0, '/nethome/asalomatov/projects/ppln') import logProc if len(sys.argv) == 1: print 'Usage:'", "Set sys.path.insert(0, '/nethome/asalomatov/projects/ppln') import logProc if len(sys.argv) == 1: print 'Usage:' print sys.argv[0],", "fltrs = sys.argv[N:] print fltrs cmd = ' ' logProc.logProc(outf, outdir, cmd, 'started')", "print sys.argv[0], 'input.bed', 'output.bed', 'logdir', 'filter1', 'filter2', 'filter3' N = 4 inf, outf,", "'filter3' N = 4 inf, outf, outdir = sys.argv[1:N] fltrs = sys.argv[N:] print", "print fltrs cmd = ' ' logProc.logProc(outf, outdir, cmd, 'started') with open(outf, 'w')" ]
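For context, a Python 3 sketch of the same filtering step follows; the site-specific logProc logging is replaced here by the standard logging module (an assumption, since logProc is a local helper), and the logdir argument is accepted only for command-line compatibility.

#!/usr/bin/env python3
# Keep only BED records whose name column (4th field) matches one of the filters.
import logging
import sys

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("filterBed")


def filter_bed(in_path, out_path, keep_names):
    """Copy BED lines whose 4th column is in keep_names."""
    keep = set(keep_names)
    with open(in_path) as fin, open(out_path, "w") as fout:
        for line in fin:
            fields = line.split()
            if len(fields) > 3 and fields[3] in keep:
                fout.write("\t".join(fields) + "\n")


if __name__ == "__main__":
    if len(sys.argv) < 5:
        sys.exit("Usage: %s input.bed output.bed logdir filter1 [filter2 ...]" % sys.argv[0])
    in_bed, out_bed, logdir = sys.argv[1:4]  # logdir unused in this sketch
    filters = sys.argv[4:]
    log.info("started: filtering %s by %s", in_bed, filters)
    filter_bed(in_bed, out_bed, filters)
    log.info("finished: wrote %s", out_bed)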
[ "vnffg['description'] except KeyError: vnffg_desc_str = \"\" if not item: # Add an item", "Unless required by applicable law or agreed to in writing, software # distributed", "return ungettext_lazy( u\"Terminate VNFFG\", u\"Terminate VNFFGs\", count ) def action(self, request, obj_id): api.tacker.delete_vnffg(request,", "openstack_dashboard import policy from tacker_horizon.openstack_dashboard import api from tackerclient.common.exceptions import NotFound class VNFFGManagerItem(object):", "status of stack\", u\"Delete Complete\")), (\"delete_failed\", pgettext_lazy(\"current status of stack\", u\"Delete Failed\")), (\"update_in_progress\",", "obj in cls.VNFFGLIST_P: if obj.id == vnffg_id: return obj @classmethod def add_item(cls, item):", "Apache License, Version 2.0 (the \"License\"); you may # not use this file", "vnffg_instance and not item: # TODO(NAME) - bail with error return None if", "the License. You may obtain # a copy of the License at #", "stack\", u\"Update In Progress\")), (\"update_complete\", pgettext_lazy(\"current status of stack\", u\"Update Complete\")), (\"update_failed\", pgettext_lazy(\"current", "stack\", u\"Create Failed\")), (\"delete_in_progress\", pgettext_lazy(\"current status of stack\", u\"Delete In Progress\")), (\"delete_complete\", pgettext_lazy(\"current", "may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "get_data(self, request, vnffg_id): try: item = VNFFGManagerItemList.get_obj_given_id(vnffg_id) vnffg_instance = api.tacker.get_vnffg(request, vnffg_id) if not", "messages from horizon import tables from openstack_dashboard import policy from tacker_horizon.openstack_dashboard import api", "clear_list(cls): cls.VNFFGLIST_P = [] class MyFilterAction(tables.FilterAction): name = \"myfilter\" class VNFFGUpdateRow(tables.Row): ajax =", "tackerclient.common.exceptions import NotFound class VNFFGManagerItem(object): def __init__(self, id, name, description, status): self.id =", "_ from django.utils.translation import ungettext_lazy from horizon import messages from horizon import tables", "pgettext_lazy(\"current status of stack\", u\"Resume In Progress\")), (\"resume_complete\", pgettext_lazy(\"current status of stack\", u\"Resume", "api from tackerclient.common.exceptions import NotFound class VNFFGManagerItem(object): def __init__(self, id, name, description, status):", "stack\", u\"Rollback Complete\")), (\"rollback_failed\", pgettext_lazy(\"current status of stack\", u\"Rollback Failed\")), (\"suspend_in_progress\", pgettext_lazy(\"current status", "API failure, just keep the current state return item vnffg = vnffg_instance['vnffg'] try:", "status of stack\", u\"Resume Complete\")), (\"resume_failed\", pgettext_lazy(\"current status of stack\", u\"Resume Failed\")), (\"adopt_in_progress\",", "status of stack\", u\"Resume In Progress\")), (\"resume_complete\", pgettext_lazy(\"current status of stack\", u\"Resume Complete\")),", "(\"update_in_progress\", pgettext_lazy(\"current status of stack\", u\"Update In Progress\")), (\"update_complete\", pgettext_lazy(\"current status of stack\",", "(\"ACTIVE\", True), (\"ERROR\", False), ) STACK_STATUS_DISPLAY_CHOICES = ( (\"init_in_progress\", pgettext_lazy(\"current status of stack\",", "with the License. 
You may obtain # a copy of the License at", "not item: # Add an item entry item = VNFFGManagerItem(vnffg['name'], vnffg_desc_str, vnffg['status'], vnffg['id'])", "Failed\")), (\"check_in_progress\", pgettext_lazy(\"current status of stack\", u\"Check In Progress\")), (\"check_complete\", pgettext_lazy(\"current status of", "pgettext_lazy(\"current status of stack\", u\"Check In Progress\")), (\"check_complete\", pgettext_lazy(\"current status of stack\", u\"Check", "u\"Update Complete\")), (\"update_failed\", pgettext_lazy(\"current status of stack\", u\"Update Failed\")), (\"rollback_in_progress\", pgettext_lazy(\"current status of", "from horizon import tables from openstack_dashboard import policy from tacker_horizon.openstack_dashboard import api from", "VNFFG\") classes = (\"ajax-modal\",) icon = \"plus\" url = \"horizon:nfv:vnffgmanager:deployvnffg\" class VNFFGManagerTable(tables.DataTable): STATUS_CHOICES", "stack\", u\"Update Complete\")), (\"update_failed\", pgettext_lazy(\"current status of stack\", u\"Update Failed\")), (\"rollback_in_progress\", pgettext_lazy(\"current status", "import NotFound class VNFFGManagerItem(object): def __init__(self, id, name, description, status): self.id = id", "status of stack\", u\"Adopt Complete\")), (\"adopt_failed\", pgettext_lazy(\"current status of stack\", u\"Adopt Failed\")), (\"snapshot_in_progress\",", "'DELETE_COMPLETE' def get_data(self, request, vnffg_id): try: item = VNFFGManagerItemList.get_obj_given_id(vnffg_id) vnffg_instance = api.tacker.get_vnffg(request, vnffg_id)", ") @staticmethod def action_past(count): return ungettext_lazy( u\"Terminate VNFFG\", u\"Terminate VNFFGs\", count ) def", "True), (\"ERROR\", False), ) STACK_STATUS_DISPLAY_CHOICES = ( (\"init_in_progress\", pgettext_lazy(\"current status of stack\", u\"Init", "entry item = VNFFGManagerItem(vnffg['name'], vnffg_desc_str, vnffg['status'], vnffg['id']) else: item.description = vnffg_desc_str item.status =", "from tacker_horizon.openstack_dashboard import api from tackerclient.common.exceptions import NotFound class VNFFGManagerItem(object): def __init__(self, id,", "@classmethod def add_item(cls, item): cls.VNFFGLIST_P.append(item) @classmethod def clear_list(cls): cls.VNFFGLIST_P = [] class MyFilterAction(tables.FilterAction):", "None if not vnffg_instance and item: # API failure, just keep the current", "use this file except in compliance with the License. 
You may obtain #", "= (\"ajax-modal\",) icon = \"plus\" url = \"horizon:nfv:vnffgmanager:deployvnffg\" class VNFFGManagerTable(tables.DataTable): STATUS_CHOICES = (", "VNFFGs\", count ) def action(self, request, obj_id): api.tacker.delete_vnffg(request, obj_id) class DeployVNFFG(tables.LinkAction): name =", "BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "django.utils.translation import pgettext_lazy from django.utils.translation import ugettext_lazy as _ from django.utils.translation import ungettext_lazy", "is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF", "Failed\")), (\"adopt_in_progress\", pgettext_lazy(\"current status of stack\", u\"Adopt In Progress\")), (\"adopt_complete\", pgettext_lazy(\"current status of", "Complete\")), (\"rollback_failed\", pgettext_lazy(\"current status of stack\", u\"Rollback Failed\")), (\"suspend_in_progress\", pgettext_lazy(\"current status of stack\",", "u\"Resume Complete\")), (\"resume_failed\", pgettext_lazy(\"current status of stack\", u\"Resume Failed\")), (\"adopt_in_progress\", pgettext_lazy(\"current status of", "tables.Column(\"name\", link=\"horizon:nfv:vnffgmanager:detail\", verbose_name=_(\"VNFFG Name\")) description = tables.Column(\"description\", verbose_name=_(\"Description\")) status = tables.Column(\"status\", hidden=False, status=True,", "implied. See the # License for the specific language governing permissions and limitations", "stack\", u\"Suspend Failed\")), (\"resume_in_progress\", pgettext_lazy(\"current status of stack\", u\"Resume In Progress\")), (\"resume_complete\", pgettext_lazy(\"current", "(\"rollback_failed\", pgettext_lazy(\"current status of stack\", u\"Rollback Failed\")), (\"suspend_in_progress\", pgettext_lazy(\"current status of stack\", u\"Suspend", "item except (Http404, NotFound): raise Http404 except Exception as e: messages.error(request, e) raise", "Progress\")), (\"update_complete\", pgettext_lazy(\"current status of stack\", u\"Update Complete\")), (\"update_failed\", pgettext_lazy(\"current status of stack\",", "of stack\", u\"Delete Failed\")), (\"update_in_progress\", pgettext_lazy(\"current status of stack\", u\"Update In Progress\")), (\"update_complete\",", "stack\", u\"Delete In Progress\")), (\"delete_complete\", pgettext_lazy(\"current status of stack\", u\"Delete Complete\")), (\"delete_failed\", pgettext_lazy(\"current", "In Progress\")), (\"delete_complete\", pgettext_lazy(\"current status of stack\", u\"Delete Complete\")), (\"delete_failed\", pgettext_lazy(\"current status of", "Progress\")), (\"rollback_complete\", pgettext_lazy(\"current status of stack\", u\"Rollback Complete\")), (\"rollback_failed\", pgettext_lazy(\"current status of stack\",", "u\"Terminate VNFFGs\", count ) def action(self, request, obj_id): api.tacker.delete_vnffg(request, obj_id) class DeployVNFFG(tables.LinkAction): name", "of stack\", u\"Create Complete\")), (\"create_failed\", pgettext_lazy(\"current status of stack\", u\"Create Failed\")), (\"delete_in_progress\", pgettext_lazy(\"current", ") def action(self, request, obj_id): api.tacker.delete_vnffg(request, obj_id) class DeployVNFFG(tables.LinkAction): name = \"deployvnffg\" verbose_name", "== vnffg_id: return obj @classmethod def add_item(cls, item): cls.VNFFGLIST_P.append(item) @classmethod def clear_list(cls): cls.VNFFGLIST_P", "stack\", u\"Resume In Progress\")), (\"resume_complete\", pgettext_lazy(\"current status of stack\", u\"Resume Complete\")), (\"resume_failed\", 
pgettext_lazy(\"current", "you may # not use this file except in compliance with the License.", "pgettext_lazy(\"current status of stack\", u\"Adopt Failed\")), (\"snapshot_in_progress\", pgettext_lazy(\"current status of stack\", u\"Snapshot In", "cls.VNFFGLIST_P.append(item) @classmethod def clear_list(cls): cls.VNFFGLIST_P = [] class MyFilterAction(tables.FilterAction): name = \"myfilter\" class", "of stack\", u\"Update Complete\")), (\"update_failed\", pgettext_lazy(\"current status of stack\", u\"Update Failed\")), (\"rollback_in_progress\", pgettext_lazy(\"current", "of stack\", u\"Check Failed\")), ) name = tables.Column(\"name\", link=\"horizon:nfv:vnffgmanager:detail\", verbose_name=_(\"VNFFG Name\")) description =", "Complete\")), (\"suspend_failed\", pgettext_lazy(\"current status of stack\", u\"Suspend Failed\")), (\"resume_in_progress\", pgettext_lazy(\"current status of stack\",", "KIND, either express or implied. See the # License for the specific language", "item = VNFFGManagerItem(vnffg['name'], vnffg_desc_str, vnffg['status'], vnffg['id']) else: item.description = vnffg_desc_str item.status = vnffg['status']", "language governing permissions and limitations # under the License. from django.http import Http404", "of stack\", u\"Adopt Complete\")), (\"adopt_failed\", pgettext_lazy(\"current status of stack\", u\"Adopt Failed\")), (\"snapshot_in_progress\", pgettext_lazy(\"current", "verbose_name = _(\"VNFFGManager\") status_columns = [\"status\", ] row_class = VNFFGUpdateRow table_actions = (DeployVNFFG,", "classes = (\"ajax-modal\",) icon = \"plus\" url = \"horizon:nfv:vnffgmanager:deployvnffg\" class VNFFGManagerTable(tables.DataTable): STATUS_CHOICES =", "status of stack\", u\"Update Complete\")), (\"update_failed\", pgettext_lazy(\"current status of stack\", u\"Update Failed\")), (\"rollback_in_progress\",", "status of stack\", u\"Adopt In Progress\")), (\"adopt_complete\", pgettext_lazy(\"current status of stack\", u\"Adopt Complete\")),", "status of stack\", u\"Check In Progress\")), (\"check_complete\", pgettext_lazy(\"current status of stack\", u\"Check Complete\")),", "file except in compliance with the License. You may obtain # a copy", "Failed\")), (\"delete_in_progress\", pgettext_lazy(\"current status of stack\", u\"Delete In Progress\")), (\"delete_complete\", pgettext_lazy(\"current status of", "obj_id) class DeployVNFFG(tables.LinkAction): name = \"deployvnffg\" verbose_name = _(\"Deploy VNFFG\") classes = (\"ajax-modal\",)", "and limitations # under the License. from django.http import Http404 from django.utils.translation import", "limitations # under the License. 
from django.http import Http404 from django.utils.translation import pgettext_lazy", "= \"vnffgmanager\" verbose_name = _(\"VNFFGManager\") status_columns = [\"status\", ] row_class = VNFFGUpdateRow table_actions", "\"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express", "of stack\", u\"Rollback Failed\")), (\"suspend_in_progress\", pgettext_lazy(\"current status of stack\", u\"Suspend In Progress\")), (\"suspend_complete\",", "from tackerclient.common.exceptions import NotFound class VNFFGManagerItem(object): def __init__(self, id, name, description, status): self.id", "status of stack\", u\"Create Complete\")), (\"create_failed\", pgettext_lazy(\"current status of stack\", u\"Create Failed\")), (\"delete_in_progress\",", "status_choices=STATUS_CHOICES) class Meta(object): name = \"vnffgmanager\" verbose_name = _(\"VNFFGManager\") status_columns = [\"status\", ]", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "from django.utils.translation import pgettext_lazy from django.utils.translation import ugettext_lazy as _ from django.utils.translation import", "(\"delete_in_progress\", pgettext_lazy(\"current status of stack\", u\"Delete In Progress\")), (\"delete_complete\", pgettext_lazy(\"current status of stack\",", "pgettext_lazy(\"current status of stack\", u\"Init Failed\")), (\"create_in_progress\", pgettext_lazy(\"current status of stack\", u\"Create In", "if obj.id == vnffg_id: return obj @classmethod def add_item(cls, item): cls.VNFFGLIST_P.append(item) @classmethod def", "vnffg['status'], vnffg['id']) else: item.description = vnffg_desc_str item.status = vnffg['status'] return item except (Http404,", "(\"delete_complete\", pgettext_lazy(\"current status of stack\", u\"Delete Complete\")), (\"delete_failed\", pgettext_lazy(\"current status of stack\", u\"Delete", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "django.utils.translation import ugettext_lazy as _ from django.utils.translation import ungettext_lazy from horizon import messages", "@staticmethod def action_past(count): return ungettext_lazy( u\"Terminate VNFFG\", u\"Terminate VNFFGs\", count ) def action(self,", "u\"Create Complete\")), (\"create_failed\", pgettext_lazy(\"current status of stack\", u\"Create Failed\")), (\"delete_in_progress\", pgettext_lazy(\"current status of", "try: item = VNFFGManagerItemList.get_obj_given_id(vnffg_id) vnffg_instance = api.tacker.get_vnffg(request, vnffg_id) if not vnffg_instance and not", "pgettext_lazy(\"current status of stack\", u\"Update Complete\")), (\"update_failed\", pgettext_lazy(\"current status of stack\", u\"Update Failed\")),", "name = tables.Column(\"name\", link=\"horizon:nfv:vnffgmanager:detail\", verbose_name=_(\"VNFFG Name\")) description = tables.Column(\"description\", verbose_name=_(\"Description\")) status = tables.Column(\"status\",", "vnffg_desc_str item.status = vnffg['status'] return item except (Http404, NotFound): raise Http404 except Exception", "\"vnffgmanager\" verbose_name = _(\"VNFFGManager\") status_columns = [\"status\", ] row_class = VNFFGUpdateRow table_actions =", "distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY", "of stack\", u\"Init Failed\")), (\"create_in_progress\", pgettext_lazy(\"current status of stack\", u\"Create In Progress\")), (\"create_complete\",", "(\"adopt_complete\", pgettext_lazy(\"current status of stack\", u\"Adopt Complete\")), (\"adopt_failed\", pgettext_lazy(\"current status of 
stack\", u\"Adopt", "pgettext_lazy(\"current status of stack\", u\"Adopt In Progress\")), (\"adopt_complete\", pgettext_lazy(\"current status of stack\", u\"Adopt", "u\"Delete In Progress\")), (\"delete_complete\", pgettext_lazy(\"current status of stack\", u\"Delete Complete\")), (\"delete_failed\", pgettext_lazy(\"current status", "datum): return datum.status != 'DELETE_COMPLETE' def get_data(self, request, vnffg_id): try: item = VNFFGManagerItemList.get_obj_given_id(vnffg_id)", "u\"Resume Failed\")), (\"adopt_in_progress\", pgettext_lazy(\"current status of stack\", u\"Adopt In Progress\")), (\"adopt_complete\", pgettext_lazy(\"current status", "= name self.description = description self.status = status class VNFFGManagerItemList(object): VNFFGLIST_P = []", "pgettext_lazy(\"current status of stack\", u\"Update Failed\")), (\"rollback_in_progress\", pgettext_lazy(\"current status of stack\", u\"Rollback In", "the # License for the specific language governing permissions and limitations # under", "(\"adopt_in_progress\", pgettext_lazy(\"current status of stack\", u\"Adopt In Progress\")), (\"adopt_complete\", pgettext_lazy(\"current status of stack\",", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "not item: # TODO(NAME) - bail with error return None if not vnffg_instance", "name = \"myfilter\" class VNFFGUpdateRow(tables.Row): ajax = True def can_be_selected(self, datum): return datum.status", "class VNFFGManagerItem(object): def __init__(self, id, name, description, status): self.id = id self.name =", "description, status): self.id = id self.name = name self.description = description self.status =", "status=True, status_choices=STATUS_CHOICES) class Meta(object): name = \"vnffgmanager\" verbose_name = _(\"VNFFGManager\") status_columns = [\"status\",", "You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "\"horizon:nfv:vnffgmanager:deployvnffg\" class VNFFGManagerTable(tables.DataTable): STATUS_CHOICES = ( (\"ACTIVE\", True), (\"ERROR\", False), ) STACK_STATUS_DISPLAY_CHOICES =", "from django.utils.translation import ugettext_lazy as _ from django.utils.translation import ungettext_lazy from horizon import", "def add_item(cls, item): cls.VNFFGLIST_P.append(item) @classmethod def clear_list(cls): cls.VNFFGLIST_P = [] class MyFilterAction(tables.FilterAction): name", "= _(\"Deploy VNFFG\") classes = (\"ajax-modal\",) icon = \"plus\" url = \"horizon:nfv:vnffgmanager:deployvnffg\" class", "(\"resume_failed\", pgettext_lazy(\"current status of stack\", u\"Resume Failed\")), (\"adopt_in_progress\", pgettext_lazy(\"current status of stack\", u\"Adopt", "required by applicable law or agreed to in writing, software # distributed under", "Failed\")), (\"update_in_progress\", pgettext_lazy(\"current status of stack\", u\"Update In Progress\")), (\"update_complete\", pgettext_lazy(\"current status of", "vnffg_id) if not vnffg_instance and not item: # TODO(NAME) - bail with error", "applicable law or agreed to in writing, software # distributed under the License", "Progress\")), (\"init_complete\", pgettext_lazy(\"current status of stack\", u\"Init Complete\")), (\"init_failed\", pgettext_lazy(\"current status of stack\",", "of stack\", u\"Suspend Complete\")), (\"suspend_failed\", pgettext_lazy(\"current status of stack\", u\"Suspend Failed\")), (\"resume_in_progress\", pgettext_lazy(\"current", "return item except (Http404, NotFound): raise Http404 except Exception as e: messages.error(request, e)", 
"def action_present(count): return ungettext_lazy( u\"Terminate VNFFG\", u\"Terminate VNFFGs\", count ) @staticmethod def action_past(count):", "(\"suspend_in_progress\", pgettext_lazy(\"current status of stack\", u\"Suspend In Progress\")), (\"suspend_complete\", pgettext_lazy(\"current status of stack\",", "stack\", u\"Delete Complete\")), (\"delete_failed\", pgettext_lazy(\"current status of stack\", u\"Delete Failed\")), (\"update_in_progress\", pgettext_lazy(\"current status", "item entry item = VNFFGManagerItem(vnffg['name'], vnffg_desc_str, vnffg['status'], vnffg['id']) else: item.description = vnffg_desc_str item.status", "u\"Snapshot Failed\")), (\"check_in_progress\", pgettext_lazy(\"current status of stack\", u\"Check In Progress\")), (\"check_complete\", pgettext_lazy(\"current status", "in compliance with the License. You may obtain # a copy of the", "Progress\")), (\"adopt_complete\", pgettext_lazy(\"current status of stack\", u\"Adopt Complete\")), (\"adopt_failed\", pgettext_lazy(\"current status of stack\",", "or agreed to in writing, software # distributed under the License is distributed", "u\"Rollback Complete\")), (\"rollback_failed\", pgettext_lazy(\"current status of stack\", u\"Rollback Failed\")), (\"suspend_in_progress\", pgettext_lazy(\"current status of", "pgettext_lazy(\"current status of stack\", u\"Create In Progress\")), (\"create_complete\", pgettext_lazy(\"current status of stack\", u\"Create", "tacker_horizon.openstack_dashboard import api from tackerclient.common.exceptions import NotFound class VNFFGManagerItem(object): def __init__(self, id, name,", "stack\", u\"Init Complete\")), (\"init_failed\", pgettext_lazy(\"current status of stack\", u\"Init Failed\")), (\"create_in_progress\", pgettext_lazy(\"current status", "status): self.id = id self.name = name self.description = description self.status = status", "VNFFGManagerTable(tables.DataTable): STATUS_CHOICES = ( (\"ACTIVE\", True), (\"ERROR\", False), ) STACK_STATUS_DISPLAY_CHOICES = ( (\"init_in_progress\",", "status of stack\", u\"Init Failed\")), (\"create_in_progress\", pgettext_lazy(\"current status of stack\", u\"Create In Progress\")),", "(\"check_in_progress\", pgettext_lazy(\"current status of stack\", u\"Check In Progress\")), (\"check_complete\", pgettext_lazy(\"current status of stack\",", "import policy from tacker_horizon.openstack_dashboard import api from tackerclient.common.exceptions import NotFound class VNFFGManagerItem(object): def", "NotFound): raise Http404 except Exception as e: messages.error(request, e) raise class DeleteVNFFG(policy.PolicyTargetMixin, tables.DeleteAction):", "License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS", "writing, software # distributed under the License is distributed on an \"AS IS\"", "In Progress\")), (\"rollback_complete\", pgettext_lazy(\"current status of stack\", u\"Rollback Complete\")), (\"rollback_failed\", pgettext_lazy(\"current status of", "Complete\")), (\"check_failed\", pgettext_lazy(\"current status of stack\", u\"Check Failed\")), ) name = tables.Column(\"name\", link=\"horizon:nfv:vnffgmanager:detail\",", "Complete\")), (\"resume_failed\", pgettext_lazy(\"current status of stack\", u\"Resume Failed\")), (\"adopt_in_progress\", pgettext_lazy(\"current status of stack\",", "django.http import Http404 from django.utils.translation import pgettext_lazy from django.utils.translation import ugettext_lazy as _", "class Meta(object): name = \"vnffgmanager\" verbose_name = _(\"VNFFGManager\") status_columns = 
[\"status\", ] row_class", "status of stack\", u\"Resume Failed\")), (\"adopt_in_progress\", pgettext_lazy(\"current status of stack\", u\"Adopt In Progress\")),", "failure, just keep the current state return item vnffg = vnffg_instance['vnffg'] try: vnffg_desc_str", "for obj in cls.VNFFGLIST_P: if obj.id == vnffg_id: return obj @classmethod def add_item(cls,", "def can_be_selected(self, datum): return datum.status != 'DELETE_COMPLETE' def get_data(self, request, vnffg_id): try: item", "(\"create_failed\", pgettext_lazy(\"current status of stack\", u\"Create Failed\")), (\"delete_in_progress\", pgettext_lazy(\"current status of stack\", u\"Delete", "status of stack\", u\"Check Failed\")), ) name = tables.Column(\"name\", link=\"horizon:nfv:vnffgmanager:detail\", verbose_name=_(\"VNFFG Name\")) description", "status of stack\", u\"Create Failed\")), (\"delete_in_progress\", pgettext_lazy(\"current status of stack\", u\"Delete In Progress\")),", "class MyFilterAction(tables.FilterAction): name = \"myfilter\" class VNFFGUpdateRow(tables.Row): ajax = True def can_be_selected(self, datum):", "= _(\"VNFFGManager\") status_columns = [\"status\", ] row_class = VNFFGUpdateRow table_actions = (DeployVNFFG, DeleteVNFFG,", "of stack\", u\"Init In Progress\")), (\"init_complete\", pgettext_lazy(\"current status of stack\", u\"Init Complete\")), (\"init_failed\",", "__init__(self, id, name, description, status): self.id = id self.name = name self.description =", "u\"Check In Progress\")), (\"check_complete\", pgettext_lazy(\"current status of stack\", u\"Check Complete\")), (\"check_failed\", pgettext_lazy(\"current status", "_(\"VNFFGManager\") status_columns = [\"status\", ] row_class = VNFFGUpdateRow table_actions = (DeployVNFFG, DeleteVNFFG, MyFilterAction,)", "current state return item vnffg = vnffg_instance['vnffg'] try: vnffg_desc_str = vnffg['description'] except KeyError:", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "(\"snapshot_complete\", pgettext_lazy(\"current status of stack\", u\"Snapshot Complete\")), (\"snapshot_failed\", pgettext_lazy(\"current status of stack\", u\"Snapshot", "(\"ERROR\", False), ) STACK_STATUS_DISPLAY_CHOICES = ( (\"init_in_progress\", pgettext_lazy(\"current status of stack\", u\"Init In", "u\"Terminate VNFFG\", u\"Terminate VNFFGs\", count ) @staticmethod def action_past(count): return ungettext_lazy( u\"Terminate VNFFG\",", "Http404 except Exception as e: messages.error(request, e) raise class DeleteVNFFG(policy.PolicyTargetMixin, tables.DeleteAction): @staticmethod def", "self.status = status class VNFFGManagerItemList(object): VNFFGLIST_P = [] @classmethod def get_obj_given_id(cls, vnffg_id): for", "Licensed under the Apache License, Version 2.0 (the \"License\"); you may # not", "name = \"vnffgmanager\" verbose_name = _(\"VNFFGManager\") status_columns = [\"status\", ] row_class = VNFFGUpdateRow", "return None if not vnffg_instance and item: # API failure, just keep the", "vnffg_desc_str = \"\" if not item: # Add an item entry item =", "VNFFGUpdateRow(tables.Row): ajax = True def can_be_selected(self, datum): return datum.status != 'DELETE_COMPLETE' def get_data(self,", "2.0 (the \"License\"); you may # not use this file except in compliance", "_(\"Deploy VNFFG\") classes = (\"ajax-modal\",) icon = \"plus\" url = \"horizon:nfv:vnffgmanager:deployvnffg\" class VNFFGManagerTable(tables.DataTable):", "import messages from horizon import tables from openstack_dashboard import policy from 
tacker_horizon.openstack_dashboard import", "action_present(count): return ungettext_lazy( u\"Terminate VNFFG\", u\"Terminate VNFFGs\", count ) @staticmethod def action_past(count): return", "= vnffg['description'] except KeyError: vnffg_desc_str = \"\" if not item: # Add an", "u\"Check Complete\")), (\"check_failed\", pgettext_lazy(\"current status of stack\", u\"Check Failed\")), ) name = tables.Column(\"name\",", "# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT", "Failed\")), (\"create_in_progress\", pgettext_lazy(\"current status of stack\", u\"Create In Progress\")), (\"create_complete\", pgettext_lazy(\"current status of", "License, Version 2.0 (the \"License\"); you may # not use this file except", "horizon import tables from openstack_dashboard import policy from tacker_horizon.openstack_dashboard import api from tackerclient.common.exceptions", "u\"Create In Progress\")), (\"create_complete\", pgettext_lazy(\"current status of stack\", u\"Create Complete\")), (\"create_failed\", pgettext_lazy(\"current status", "cls.VNFFGLIST_P: if obj.id == vnffg_id: return obj @classmethod def add_item(cls, item): cls.VNFFGLIST_P.append(item) @classmethod", "status of stack\", u\"Rollback In Progress\")), (\"rollback_complete\", pgettext_lazy(\"current status of stack\", u\"Rollback Complete\")),", "from django.utils.translation import ungettext_lazy from horizon import messages from horizon import tables from", "item.status = vnffg['status'] return item except (Http404, NotFound): raise Http404 except Exception as", "in cls.VNFFGLIST_P: if obj.id == vnffg_id: return obj @classmethod def add_item(cls, item): cls.VNFFGLIST_P.append(item)", "request, obj_id): api.tacker.delete_vnffg(request, obj_id) class DeployVNFFG(tables.LinkAction): name = \"deployvnffg\" verbose_name = _(\"Deploy VNFFG\")", "(\"init_failed\", pgettext_lazy(\"current status of stack\", u\"Init Failed\")), (\"create_in_progress\", pgettext_lazy(\"current status of stack\", u\"Create", "pgettext_lazy(\"current status of stack\", u\"Adopt Complete\")), (\"adopt_failed\", pgettext_lazy(\"current status of stack\", u\"Adopt Failed\")),", "stack\", u\"Create Complete\")), (\"create_failed\", pgettext_lazy(\"current status of stack\", u\"Create Failed\")), (\"delete_in_progress\", pgettext_lazy(\"current status", "stack\", u\"Resume Complete\")), (\"resume_failed\", pgettext_lazy(\"current status of stack\", u\"Resume Failed\")), (\"adopt_in_progress\", pgettext_lazy(\"current status", "VNFFG\", u\"Terminate VNFFGs\", count ) @staticmethod def action_past(count): return ungettext_lazy( u\"Terminate VNFFG\", u\"Terminate", "\"deployvnffg\" verbose_name = _(\"Deploy VNFFG\") classes = (\"ajax-modal\",) icon = \"plus\" url =", "Add an item entry item = VNFFGManagerItem(vnffg['name'], vnffg_desc_str, vnffg['status'], vnffg['id']) else: item.description =", "Progress\")), (\"delete_complete\", pgettext_lazy(\"current status of stack\", u\"Delete Complete\")), (\"delete_failed\", pgettext_lazy(\"current status of stack\",", "Failed\")), (\"suspend_in_progress\", pgettext_lazy(\"current status of stack\", u\"Suspend In Progress\")), (\"suspend_complete\", pgettext_lazy(\"current status of", "from openstack_dashboard import policy from tacker_horizon.openstack_dashboard import api from tackerclient.common.exceptions import NotFound class", "agreed to in writing, software # distributed under the License is distributed on", "stack\", u\"Adopt Complete\")), (\"adopt_failed\", pgettext_lazy(\"current status of 
stack\", u\"Adopt Failed\")), (\"snapshot_in_progress\", pgettext_lazy(\"current status", "of stack\", u\"Rollback Complete\")), (\"rollback_failed\", pgettext_lazy(\"current status of stack\", u\"Rollback Failed\")), (\"suspend_in_progress\", pgettext_lazy(\"current", "django.utils.translation import ungettext_lazy from horizon import messages from horizon import tables from openstack_dashboard", "(\"resume_complete\", pgettext_lazy(\"current status of stack\", u\"Resume Complete\")), (\"resume_failed\", pgettext_lazy(\"current status of stack\", u\"Resume", "stack\", u\"Adopt Failed\")), (\"snapshot_in_progress\", pgettext_lazy(\"current status of stack\", u\"Snapshot In Progress\")), (\"snapshot_complete\", pgettext_lazy(\"current", "status of stack\", u\"Snapshot Failed\")), (\"check_in_progress\", pgettext_lazy(\"current status of stack\", u\"Check In Progress\")),", "def get_obj_given_id(cls, vnffg_id): for obj in cls.VNFFGLIST_P: if obj.id == vnffg_id: return obj", "@classmethod def get_obj_given_id(cls, vnffg_id): for obj in cls.VNFFGLIST_P: if obj.id == vnffg_id: return", "# Unless required by applicable law or agreed to in writing, software #", "def __init__(self, id, name, description, status): self.id = id self.name = name self.description", "(\"rollback_complete\", pgettext_lazy(\"current status of stack\", u\"Rollback Complete\")), (\"rollback_failed\", pgettext_lazy(\"current status of stack\", u\"Rollback", "by applicable law or agreed to in writing, software # distributed under the", "License. from django.http import Http404 from django.utils.translation import pgettext_lazy from django.utils.translation import ugettext_lazy", "vnffg_id): for obj in cls.VNFFGLIST_P: if obj.id == vnffg_id: return obj @classmethod def", "import api from tackerclient.common.exceptions import NotFound class VNFFGManagerItem(object): def __init__(self, id, name, description,", "item: # TODO(NAME) - bail with error return None if not vnffg_instance and", "under the License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "status of stack\", u\"Snapshot Complete\")), (\"snapshot_failed\", pgettext_lazy(\"current status of stack\", u\"Snapshot Failed\")), (\"check_in_progress\",", "policy from tacker_horizon.openstack_dashboard import api from tackerclient.common.exceptions import NotFound class VNFFGManagerItem(object): def __init__(self,", "e: messages.error(request, e) raise class DeleteVNFFG(policy.PolicyTargetMixin, tables.DeleteAction): @staticmethod def action_present(count): return ungettext_lazy( u\"Terminate", "DeployVNFFG(tables.LinkAction): name = \"deployvnffg\" verbose_name = _(\"Deploy VNFFG\") classes = (\"ajax-modal\",) icon =", "stack\", u\"Init In Progress\")), (\"init_complete\", pgettext_lazy(\"current status of stack\", u\"Init Complete\")), (\"init_failed\", pgettext_lazy(\"current", "raise Http404 except Exception as e: messages.error(request, e) raise class DeleteVNFFG(policy.PolicyTargetMixin, tables.DeleteAction): @staticmethod", "u\"Delete Complete\")), (\"delete_failed\", pgettext_lazy(\"current status of stack\", u\"Delete Failed\")), (\"update_in_progress\", pgettext_lazy(\"current status of", "def get_data(self, request, vnffg_id): try: item = VNFFGManagerItemList.get_obj_given_id(vnffg_id) vnffg_instance = api.tacker.get_vnffg(request, vnffg_id) if", "In Progress\")), (\"init_complete\", pgettext_lazy(\"current status of stack\", u\"Init Complete\")), 
(\"init_failed\", pgettext_lazy(\"current status of", "ungettext_lazy( u\"Terminate VNFFG\", u\"Terminate VNFFGs\", count ) def action(self, request, obj_id): api.tacker.delete_vnffg(request, obj_id)", "pgettext_lazy from django.utils.translation import ugettext_lazy as _ from django.utils.translation import ungettext_lazy from horizon", "return ungettext_lazy( u\"Terminate VNFFG\", u\"Terminate VNFFGs\", count ) @staticmethod def action_past(count): return ungettext_lazy(", "except in compliance with the License. You may obtain # a copy of", "of stack\", u\"Resume In Progress\")), (\"resume_complete\", pgettext_lazy(\"current status of stack\", u\"Resume Complete\")), (\"resume_failed\",", "@staticmethod def action_present(count): return ungettext_lazy( u\"Terminate VNFFG\", u\"Terminate VNFFGs\", count ) @staticmethod def", "STATUS_CHOICES = ( (\"ACTIVE\", True), (\"ERROR\", False), ) STACK_STATUS_DISPLAY_CHOICES = ( (\"init_in_progress\", pgettext_lazy(\"current", "VNFFGManagerItemList.get_obj_given_id(vnffg_id) vnffg_instance = api.tacker.get_vnffg(request, vnffg_id) if not vnffg_instance and not item: # TODO(NAME)", "Progress\")), (\"check_complete\", pgettext_lazy(\"current status of stack\", u\"Check Complete\")), (\"check_failed\", pgettext_lazy(\"current status of stack\",", "to in writing, software # distributed under the License is distributed on an", "(\"delete_failed\", pgettext_lazy(\"current status of stack\", u\"Delete Failed\")), (\"update_in_progress\", pgettext_lazy(\"current status of stack\", u\"Update", "the License. from django.http import Http404 from django.utils.translation import pgettext_lazy from django.utils.translation import", "item.description = vnffg_desc_str item.status = vnffg['status'] return item except (Http404, NotFound): raise Http404", "class VNFFGManagerTable(tables.DataTable): STATUS_CHOICES = ( (\"ACTIVE\", True), (\"ERROR\", False), ) STACK_STATUS_DISPLAY_CHOICES = (", "of stack\", u\"Resume Complete\")), (\"resume_failed\", pgettext_lazy(\"current status of stack\", u\"Resume Failed\")), (\"adopt_in_progress\", pgettext_lazy(\"current", "# Add an item entry item = VNFFGManagerItem(vnffg['name'], vnffg_desc_str, vnffg['status'], vnffg['id']) else: item.description", "import ungettext_lazy from horizon import messages from horizon import tables from openstack_dashboard import", "import tables from openstack_dashboard import policy from tacker_horizon.openstack_dashboard import api from tackerclient.common.exceptions import", "cls.VNFFGLIST_P = [] class MyFilterAction(tables.FilterAction): name = \"myfilter\" class VNFFGUpdateRow(tables.Row): ajax = True", "status of stack\", u\"Update In Progress\")), (\"update_complete\", pgettext_lazy(\"current status of stack\", u\"Update Complete\")),", "(Http404, NotFound): raise Http404 except Exception as e: messages.error(request, e) raise class DeleteVNFFG(policy.PolicyTargetMixin,", "item: # Add an item entry item = VNFFGManagerItem(vnffg['name'], vnffg_desc_str, vnffg['status'], vnffg['id']) else:", "stack\", u\"Snapshot In Progress\")), (\"snapshot_complete\", pgettext_lazy(\"current status of stack\", u\"Snapshot Complete\")), (\"snapshot_failed\", pgettext_lazy(\"current", "not vnffg_instance and not item: # TODO(NAME) - bail with error return None", "= ( (\"init_in_progress\", pgettext_lazy(\"current status of stack\", u\"Init In Progress\")), (\"init_complete\", pgettext_lazy(\"current status", "distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT #", "except (Http404, 
NotFound): raise Http404 except Exception as e: messages.error(request, e) raise class", "pgettext_lazy(\"current status of stack\", u\"Init In Progress\")), (\"init_complete\", pgettext_lazy(\"current status of stack\", u\"Init", "icon = \"plus\" url = \"horizon:nfv:vnffgmanager:deployvnffg\" class VNFFGManagerTable(tables.DataTable): STATUS_CHOICES = ( (\"ACTIVE\", True),", "stack\", u\"Suspend Complete\")), (\"suspend_failed\", pgettext_lazy(\"current status of stack\", u\"Suspend Failed\")), (\"resume_in_progress\", pgettext_lazy(\"current status", "of stack\", u\"Adopt Failed\")), (\"snapshot_in_progress\", pgettext_lazy(\"current status of stack\", u\"Snapshot In Progress\")), (\"snapshot_complete\",", "(\"init_complete\", pgettext_lazy(\"current status of stack\", u\"Init Complete\")), (\"init_failed\", pgettext_lazy(\"current status of stack\", u\"Init", "# not use this file except in compliance with the License. You may", "pgettext_lazy(\"current status of stack\", u\"Resume Failed\")), (\"adopt_in_progress\", pgettext_lazy(\"current status of stack\", u\"Adopt In", "# License for the specific language governing permissions and limitations # under the", "of stack\", u\"Delete In Progress\")), (\"delete_complete\", pgettext_lazy(\"current status of stack\", u\"Delete Complete\")), (\"delete_failed\",", "except KeyError: vnffg_desc_str = \"\" if not item: # Add an item entry", "u\"Init Failed\")), (\"create_in_progress\", pgettext_lazy(\"current status of stack\", u\"Create In Progress\")), (\"create_complete\", pgettext_lazy(\"current status", "u\"Resume In Progress\")), (\"resume_complete\", pgettext_lazy(\"current status of stack\", u\"Resume Complete\")), (\"resume_failed\", pgettext_lazy(\"current status", "Http404 from django.utils.translation import pgettext_lazy from django.utils.translation import ugettext_lazy as _ from django.utils.translation", "status of stack\", u\"Rollback Failed\")), (\"suspend_in_progress\", pgettext_lazy(\"current status of stack\", u\"Suspend In Progress\")),", "Complete\")), (\"create_failed\", pgettext_lazy(\"current status of stack\", u\"Create Failed\")), (\"delete_in_progress\", pgettext_lazy(\"current status of stack\",", "(\"create_complete\", pgettext_lazy(\"current status of stack\", u\"Create Complete\")), (\"create_failed\", pgettext_lazy(\"current status of stack\", u\"Create", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "of stack\", u\"Adopt In Progress\")), (\"adopt_complete\", pgettext_lazy(\"current status of stack\", u\"Adopt Complete\")), (\"adopt_failed\",", "in writing, software # distributed under the License is distributed on an \"AS", "Version 2.0 (the \"License\"); you may # not use this file except in", "pgettext_lazy(\"current status of stack\", u\"Init Complete\")), (\"init_failed\", pgettext_lazy(\"current status of stack\", u\"Init Failed\")),", "obj.id == vnffg_id: return obj @classmethod def add_item(cls, item): cls.VNFFGLIST_P.append(item) @classmethod def clear_list(cls):", "status of stack\", u\"Suspend Failed\")), (\"resume_in_progress\", pgettext_lazy(\"current status of stack\", u\"Resume In Progress\")),", "In Progress\")), (\"resume_complete\", pgettext_lazy(\"current status of stack\", u\"Resume Complete\")), (\"resume_failed\", pgettext_lazy(\"current status of", "pgettext_lazy(\"current status of stack\", u\"Suspend Complete\")), (\"suspend_failed\", pgettext_lazy(\"current status of stack\", u\"Suspend Failed\")),", "if not vnffg_instance 
and not item: # TODO(NAME) - bail with error return", "get_obj_given_id(cls, vnffg_id): for obj in cls.VNFFGLIST_P: if obj.id == vnffg_id: return obj @classmethod", "vnffg_id): try: item = VNFFGManagerItemList.get_obj_given_id(vnffg_id) vnffg_instance = api.tacker.get_vnffg(request, vnffg_id) if not vnffg_instance and", "(\"adopt_failed\", pgettext_lazy(\"current status of stack\", u\"Adopt Failed\")), (\"snapshot_in_progress\", pgettext_lazy(\"current status of stack\", u\"Snapshot", "\"License\"); you may # not use this file except in compliance with the", "( (\"ACTIVE\", True), (\"ERROR\", False), ) STACK_STATUS_DISPLAY_CHOICES = ( (\"init_in_progress\", pgettext_lazy(\"current status of", "else: item.description = vnffg_desc_str item.status = vnffg['status'] return item except (Http404, NotFound): raise", "Complete\")), (\"update_failed\", pgettext_lazy(\"current status of stack\", u\"Update Failed\")), (\"rollback_in_progress\", pgettext_lazy(\"current status of stack\",", "u\"Update Failed\")), (\"rollback_in_progress\", pgettext_lazy(\"current status of stack\", u\"Rollback In Progress\")), (\"rollback_complete\", pgettext_lazy(\"current status", "import pgettext_lazy from django.utils.translation import ugettext_lazy as _ from django.utils.translation import ungettext_lazy from", "the Apache License, Version 2.0 (the \"License\"); you may # not use this", "= VNFFGManagerItem(vnffg['name'], vnffg_desc_str, vnffg['status'], vnffg['id']) else: item.description = vnffg_desc_str item.status = vnffg['status'] return", "(\"check_failed\", pgettext_lazy(\"current status of stack\", u\"Check Failed\")), ) name = tables.Column(\"name\", link=\"horizon:nfv:vnffgmanager:detail\", verbose_name=_(\"VNFFG", "permissions and limitations # under the License. from django.http import Http404 from django.utils.translation", "the current state return item vnffg = vnffg_instance['vnffg'] try: vnffg_desc_str = vnffg['description'] except", "def action_past(count): return ungettext_lazy( u\"Terminate VNFFG\", u\"Terminate VNFFGs\", count ) def action(self, request,", "not use this file except in compliance with the License. 
You may obtain", "stack\", u\"Suspend In Progress\")), (\"suspend_complete\", pgettext_lazy(\"current status of stack\", u\"Suspend Complete\")), (\"suspend_failed\", pgettext_lazy(\"current", "of stack\", u\"Create Failed\")), (\"delete_in_progress\", pgettext_lazy(\"current status of stack\", u\"Delete In Progress\")), (\"delete_complete\",", "url = \"horizon:nfv:vnffgmanager:deployvnffg\" class VNFFGManagerTable(tables.DataTable): STATUS_CHOICES = ( (\"ACTIVE\", True), (\"ERROR\", False), )", "pgettext_lazy(\"current status of stack\", u\"Rollback In Progress\")), (\"rollback_complete\", pgettext_lazy(\"current status of stack\", u\"Rollback", "name, description, status): self.id = id self.name = name self.description = description self.status", "u\"Create Failed\")), (\"delete_in_progress\", pgettext_lazy(\"current status of stack\", u\"Delete In Progress\")), (\"delete_complete\", pgettext_lazy(\"current status", "of stack\", u\"Check In Progress\")), (\"check_complete\", pgettext_lazy(\"current status of stack\", u\"Check Complete\")), (\"check_failed\",", "of stack\", u\"Update In Progress\")), (\"update_complete\", pgettext_lazy(\"current status of stack\", u\"Update Complete\")), (\"update_failed\",", "item): cls.VNFFGLIST_P.append(item) @classmethod def clear_list(cls): cls.VNFFGLIST_P = [] class MyFilterAction(tables.FilterAction): name = \"myfilter\"", "License for the specific language governing permissions and limitations # under the License.", "In Progress\")), (\"update_complete\", pgettext_lazy(\"current status of stack\", u\"Update Complete\")), (\"update_failed\", pgettext_lazy(\"current status of", "of stack\", u\"Check Complete\")), (\"check_failed\", pgettext_lazy(\"current status of stack\", u\"Check Failed\")), ) name", "of stack\", u\"Snapshot In Progress\")), (\"snapshot_complete\", pgettext_lazy(\"current status of stack\", u\"Snapshot Complete\")), (\"snapshot_failed\",", "vnffg_instance and item: # API failure, just keep the current state return item", "hidden=False, status=True, status_choices=STATUS_CHOICES) class Meta(object): name = \"vnffgmanager\" verbose_name = _(\"VNFFGManager\") status_columns =", "VNFFGLIST_P = [] @classmethod def get_obj_given_id(cls, vnffg_id): for obj in cls.VNFFGLIST_P: if obj.id", "status of stack\", u\"Init In Progress\")), (\"init_complete\", pgettext_lazy(\"current status of stack\", u\"Init Complete\")),", "= id self.name = name self.description = description self.status = status class VNFFGManagerItemList(object):", "bail with error return None if not vnffg_instance and item: # API failure,", "def action(self, request, obj_id): api.tacker.delete_vnffg(request, obj_id) class DeployVNFFG(tables.LinkAction): name = \"deployvnffg\" verbose_name =", "stack\", u\"Rollback In Progress\")), (\"rollback_complete\", pgettext_lazy(\"current status of stack\", u\"Rollback Complete\")), (\"rollback_failed\", pgettext_lazy(\"current", "self.name = name self.description = description self.status = status class VNFFGManagerItemList(object): VNFFGLIST_P =", "ungettext_lazy from horizon import messages from horizon import tables from openstack_dashboard import policy", "pgettext_lazy(\"current status of stack\", u\"Suspend Failed\")), (\"resume_in_progress\", pgettext_lazy(\"current status of stack\", u\"Resume In", "import ugettext_lazy as _ from django.utils.translation import ungettext_lazy from horizon import messages from", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the #", "pgettext_lazy(\"current status of stack\", u\"Snapshot Failed\")), (\"check_in_progress\", pgettext_lazy(\"current status of stack\", u\"Check In", "status of stack\", u\"Delete In Progress\")), (\"delete_complete\", pgettext_lazy(\"current status of stack\", u\"Delete Complete\")),", "(\"check_complete\", pgettext_lazy(\"current status of stack\", u\"Check Complete\")), (\"check_failed\", pgettext_lazy(\"current status of stack\", u\"Check", "Failed\")), ) name = tables.Column(\"name\", link=\"horizon:nfv:vnffgmanager:detail\", verbose_name=_(\"VNFFG Name\")) description = tables.Column(\"description\", verbose_name=_(\"Description\")) status", "Progress\")), (\"create_complete\", pgettext_lazy(\"current status of stack\", u\"Create Complete\")), (\"create_failed\", pgettext_lazy(\"current status of stack\",", "Meta(object): name = \"vnffgmanager\" verbose_name = _(\"VNFFGManager\") status_columns = [\"status\", ] row_class =", "vnffg_id: return obj @classmethod def add_item(cls, item): cls.VNFFGLIST_P.append(item) @classmethod def clear_list(cls): cls.VNFFGLIST_P =", "\"\" if not item: # Add an item entry item = VNFFGManagerItem(vnffg['name'], vnffg_desc_str,", "OF ANY KIND, either express or implied. See the # License for the", "[] class MyFilterAction(tables.FilterAction): name = \"myfilter\" class VNFFGUpdateRow(tables.Row): ajax = True def can_be_selected(self,", "try: vnffg_desc_str = vnffg['description'] except KeyError: vnffg_desc_str = \"\" if not item: #", "status of stack\", u\"Init Complete\")), (\"init_failed\", pgettext_lazy(\"current status of stack\", u\"Init Failed\")), (\"create_in_progress\",", "pgettext_lazy(\"current status of stack\", u\"Resume Complete\")), (\"resume_failed\", pgettext_lazy(\"current status of stack\", u\"Resume Failed\")),", "# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the", "[] @classmethod def get_obj_given_id(cls, vnffg_id): for obj in cls.VNFFGLIST_P: if obj.id == vnffg_id:", "of stack\", u\"Update Failed\")), (\"rollback_in_progress\", pgettext_lazy(\"current status of stack\", u\"Rollback In Progress\")), (\"rollback_complete\",", "self.description = description self.status = status class VNFFGManagerItemList(object): VNFFGLIST_P = [] @classmethod def", "True def can_be_selected(self, datum): return datum.status != 'DELETE_COMPLETE' def get_data(self, request, vnffg_id): try:", "(\"resume_in_progress\", pgettext_lazy(\"current status of stack\", u\"Resume In Progress\")), (\"resume_complete\", pgettext_lazy(\"current status of stack\",", "Progress\")), (\"snapshot_complete\", pgettext_lazy(\"current status of stack\", u\"Snapshot Complete\")), (\"snapshot_failed\", pgettext_lazy(\"current status of stack\",", "(the \"License\"); you may # not use this file except in compliance with", "add_item(cls, item): cls.VNFFGLIST_P.append(item) @classmethod def clear_list(cls): cls.VNFFGLIST_P = [] class MyFilterAction(tables.FilterAction): name =", "of stack\", u\"Resume Failed\")), (\"adopt_in_progress\", pgettext_lazy(\"current status of stack\", u\"Adopt In Progress\")), (\"adopt_complete\",", "MyFilterAction(tables.FilterAction): name = \"myfilter\" class VNFFGUpdateRow(tables.Row): ajax = True def can_be_selected(self, datum): return", "keep the current state return item vnffg = vnffg_instance['vnffg'] try: vnffg_desc_str = vnffg['description']", "tables from openstack_dashboard import policy from tacker_horizon.openstack_dashboard import api from tackerclient.common.exceptions import NotFound", "status of stack\", u\"Update Failed\")), (\"rollback_in_progress\", pgettext_lazy(\"current status of stack\", u\"Rollback In Progress\")),", "api.tacker.get_vnffg(request, vnffg_id) if not vnffg_instance and not item: # TODO(NAME) - bail with", "# # Unless required by applicable law or agreed to in writing, software", "id self.name = name self.description = description self.status = status class VNFFGManagerItemList(object): VNFFGLIST_P", "stack\", u\"Adopt In Progress\")), (\"adopt_complete\", pgettext_lazy(\"current status of stack\", u\"Adopt Complete\")), (\"adopt_failed\", pgettext_lazy(\"current", "item = VNFFGManagerItemList.get_obj_given_id(vnffg_id) vnffg_instance = api.tacker.get_vnffg(request, vnffg_id) if not vnffg_instance and not item:", "# under the License. 
from django.http import Http404 from django.utils.translation import pgettext_lazy from", "u\"Terminate VNFFG\", u\"Terminate VNFFGs\", count ) def action(self, request, obj_id): api.tacker.delete_vnffg(request, obj_id) class", "request, vnffg_id): try: item = VNFFGManagerItemList.get_obj_given_id(vnffg_id) vnffg_instance = api.tacker.get_vnffg(request, vnffg_id) if not vnffg_instance", "= ( (\"ACTIVE\", True), (\"ERROR\", False), ) STACK_STATUS_DISPLAY_CHOICES = ( (\"init_in_progress\", pgettext_lazy(\"current status", "= api.tacker.get_vnffg(request, vnffg_id) if not vnffg_instance and not item: # TODO(NAME) - bail", "(\"ajax-modal\",) icon = \"plus\" url = \"horizon:nfv:vnffgmanager:deployvnffg\" class VNFFGManagerTable(tables.DataTable): STATUS_CHOICES = ( (\"ACTIVE\",", "as _ from django.utils.translation import ungettext_lazy from horizon import messages from horizon import", "In Progress\")), (\"create_complete\", pgettext_lazy(\"current status of stack\", u\"Create Complete\")), (\"create_failed\", pgettext_lazy(\"current status of", "status class VNFFGManagerItemList(object): VNFFGLIST_P = [] @classmethod def get_obj_given_id(cls, vnffg_id): for obj in", "= vnffg['status'] return item except (Http404, NotFound): raise Http404 except Exception as e:", "stack\", u\"Check In Progress\")), (\"check_complete\", pgettext_lazy(\"current status of stack\", u\"Check Complete\")), (\"check_failed\", pgettext_lazy(\"current", "and item: # API failure, just keep the current state return item vnffg", "tables.DeleteAction): @staticmethod def action_present(count): return ungettext_lazy( u\"Terminate VNFFG\", u\"Terminate VNFFGs\", count ) @staticmethod", "License. You may obtain # a copy of the License at # #", "status = tables.Column(\"status\", hidden=False, status=True, status_choices=STATUS_CHOICES) class Meta(object): name = \"vnffgmanager\" verbose_name =", "the License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR", "u\"Update In Progress\")), (\"update_complete\", pgettext_lazy(\"current status of stack\", u\"Update Complete\")), (\"update_failed\", pgettext_lazy(\"current status", "status of stack\", u\"Rollback Complete\")), (\"rollback_failed\", pgettext_lazy(\"current status of stack\", u\"Rollback Failed\")), (\"suspend_in_progress\",", "u\"Adopt Complete\")), (\"adopt_failed\", pgettext_lazy(\"current status of stack\", u\"Adopt Failed\")), (\"snapshot_in_progress\", pgettext_lazy(\"current status of", "ANY KIND, either express or implied. 
See the # License for the specific", "False), ) STACK_STATUS_DISPLAY_CHOICES = ( (\"init_in_progress\", pgettext_lazy(\"current status of stack\", u\"Init In Progress\")),", "action(self, request, obj_id): api.tacker.delete_vnffg(request, obj_id) class DeployVNFFG(tables.LinkAction): name = \"deployvnffg\" verbose_name = _(\"Deploy", "status of stack\", u\"Snapshot In Progress\")), (\"snapshot_complete\", pgettext_lazy(\"current status of stack\", u\"Snapshot Complete\")),", "u\"Delete Failed\")), (\"update_in_progress\", pgettext_lazy(\"current status of stack\", u\"Update In Progress\")), (\"update_complete\", pgettext_lazy(\"current status", "= description self.status = status class VNFFGManagerItemList(object): VNFFGLIST_P = [] @classmethod def get_obj_given_id(cls,", "u\"Rollback In Progress\")), (\"rollback_complete\", pgettext_lazy(\"current status of stack\", u\"Rollback Complete\")), (\"rollback_failed\", pgettext_lazy(\"current status", "return datum.status != 'DELETE_COMPLETE' def get_data(self, request, vnffg_id): try: item = VNFFGManagerItemList.get_obj_given_id(vnffg_id) vnffg_instance", "an item entry item = VNFFGManagerItem(vnffg['name'], vnffg_desc_str, vnffg['status'], vnffg['id']) else: item.description = vnffg_desc_str", "from horizon import messages from horizon import tables from openstack_dashboard import policy from", "verbose_name=_(\"Description\")) status = tables.Column(\"status\", hidden=False, status=True, status_choices=STATUS_CHOICES) class Meta(object): name = \"vnffgmanager\" verbose_name", "u\"Suspend Complete\")), (\"suspend_failed\", pgettext_lazy(\"current status of stack\", u\"Suspend Failed\")), (\"resume_in_progress\", pgettext_lazy(\"current status of", "VNFFGs\", count ) @staticmethod def action_past(count): return ungettext_lazy( u\"Terminate VNFFG\", u\"Terminate VNFFGs\", count", "u\"Init Complete\")), (\"init_failed\", pgettext_lazy(\"current status of stack\", u\"Init Failed\")), (\"create_in_progress\", pgettext_lazy(\"current status of", "count ) def action(self, request, obj_id): api.tacker.delete_vnffg(request, obj_id) class DeployVNFFG(tables.LinkAction): name = \"deployvnffg\"", "description = tables.Column(\"description\", verbose_name=_(\"Description\")) status = tables.Column(\"status\", hidden=False, status=True, status_choices=STATUS_CHOICES) class Meta(object): name", "messages.error(request, e) raise class DeleteVNFFG(policy.PolicyTargetMixin, tables.DeleteAction): @staticmethod def action_present(count): return ungettext_lazy( u\"Terminate VNFFG\",", "of stack\", u\"Snapshot Complete\")), (\"snapshot_failed\", pgettext_lazy(\"current status of stack\", u\"Snapshot Failed\")), (\"check_in_progress\", pgettext_lazy(\"current", "can_be_selected(self, datum): return datum.status != 'DELETE_COMPLETE' def get_data(self, request, vnffg_id): try: item =", "(\"update_failed\", pgettext_lazy(\"current status of stack\", u\"Update Failed\")), (\"rollback_in_progress\", pgettext_lazy(\"current status of stack\", u\"Rollback", "item vnffg = vnffg_instance['vnffg'] try: vnffg_desc_str = vnffg['description'] except KeyError: vnffg_desc_str = \"\"", "and not item: # TODO(NAME) - bail with error return None if not", ") name = tables.Column(\"name\", link=\"horizon:nfv:vnffgmanager:detail\", verbose_name=_(\"VNFFG Name\")) description = tables.Column(\"description\", verbose_name=_(\"Description\")) status =", "return item vnffg = vnffg_instance['vnffg'] try: vnffg_desc_str = vnffg['description'] except KeyError: vnffg_desc_str =", "u\"Snapshot 
In Progress\")), (\"snapshot_complete\", pgettext_lazy(\"current status of stack\", u\"Snapshot Complete\")), (\"snapshot_failed\", pgettext_lazy(\"current status", "u\"Snapshot Complete\")), (\"snapshot_failed\", pgettext_lazy(\"current status of stack\", u\"Snapshot Failed\")), (\"check_in_progress\", pgettext_lazy(\"current status of", "(\"update_complete\", pgettext_lazy(\"current status of stack\", u\"Update Complete\")), (\"update_failed\", pgettext_lazy(\"current status of stack\", u\"Update", "(\"suspend_complete\", pgettext_lazy(\"current status of stack\", u\"Suspend Complete\")), (\"suspend_failed\", pgettext_lazy(\"current status of stack\", u\"Suspend", "vnffg_instance = api.tacker.get_vnffg(request, vnffg_id) if not vnffg_instance and not item: # TODO(NAME) -", "except Exception as e: messages.error(request, e) raise class DeleteVNFFG(policy.PolicyTargetMixin, tables.DeleteAction): @staticmethod def action_present(count):", "vnffg_instance['vnffg'] try: vnffg_desc_str = vnffg['description'] except KeyError: vnffg_desc_str = \"\" if not item:", "just keep the current state return item vnffg = vnffg_instance['vnffg'] try: vnffg_desc_str =", "datum.status != 'DELETE_COMPLETE' def get_data(self, request, vnffg_id): try: item = VNFFGManagerItemList.get_obj_given_id(vnffg_id) vnffg_instance =", "under the Apache License, Version 2.0 (the \"License\"); you may # not use", "WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See", "stack\", u\"Init Failed\")), (\"create_in_progress\", pgettext_lazy(\"current status of stack\", u\"Create In Progress\")), (\"create_complete\", pgettext_lazy(\"current", "obj @classmethod def add_item(cls, item): cls.VNFFGLIST_P.append(item) @classmethod def clear_list(cls): cls.VNFFGLIST_P = [] class", "u\"Suspend In Progress\")), (\"suspend_complete\", pgettext_lazy(\"current status of stack\", u\"Suspend Complete\")), (\"suspend_failed\", pgettext_lazy(\"current status", "= \"plus\" url = \"horizon:nfv:vnffgmanager:deployvnffg\" class VNFFGManagerTable(tables.DataTable): STATUS_CHOICES = ( (\"ACTIVE\", True), (\"ERROR\",", "In Progress\")), (\"snapshot_complete\", pgettext_lazy(\"current status of stack\", u\"Snapshot Complete\")), (\"snapshot_failed\", pgettext_lazy(\"current status of", "stack\", u\"Snapshot Failed\")), (\"check_in_progress\", pgettext_lazy(\"current status of stack\", u\"Check In Progress\")), (\"check_complete\", pgettext_lazy(\"current", "import Http404 from django.utils.translation import pgettext_lazy from django.utils.translation import ugettext_lazy as _ from", "of stack\", u\"Suspend In Progress\")), (\"suspend_complete\", pgettext_lazy(\"current status of stack\", u\"Suspend Complete\")), (\"suspend_failed\",", "= [] class MyFilterAction(tables.FilterAction): name = \"myfilter\" class VNFFGUpdateRow(tables.Row): ajax = True def", "pgettext_lazy(\"current status of stack\", u\"Delete Complete\")), (\"delete_failed\", pgettext_lazy(\"current status of stack\", u\"Delete Failed\")),", "stack\", u\"Create In Progress\")), (\"create_complete\", pgettext_lazy(\"current status of stack\", u\"Create Complete\")), (\"create_failed\", pgettext_lazy(\"current", "# API failure, just keep the current state return item vnffg = vnffg_instance['vnffg']", "Complete\")), (\"snapshot_failed\", pgettext_lazy(\"current status of stack\", u\"Snapshot Failed\")), (\"check_in_progress\", pgettext_lazy(\"current status of stack\",", "stack\", u\"Check Failed\")), ) name = tables.Column(\"name\", 
link=\"horizon:nfv:vnffgmanager:detail\", verbose_name=_(\"VNFFG Name\")) description = tables.Column(\"description\",", "tables.Column(\"description\", verbose_name=_(\"Description\")) status = tables.Column(\"status\", hidden=False, status=True, status_choices=STATUS_CHOICES) class Meta(object): name = \"vnffgmanager\"", "tables.Column(\"status\", hidden=False, status=True, status_choices=STATUS_CHOICES) class Meta(object): name = \"vnffgmanager\" verbose_name = _(\"VNFFGManager\") status_columns", "ajax = True def can_be_selected(self, datum): return datum.status != 'DELETE_COMPLETE' def get_data(self, request,", "See the # License for the specific language governing permissions and limitations #", "e) raise class DeleteVNFFG(policy.PolicyTargetMixin, tables.DeleteAction): @staticmethod def action_present(count): return ungettext_lazy( u\"Terminate VNFFG\", u\"Terminate", "governing permissions and limitations # under the License. from django.http import Http404 from", "Exception as e: messages.error(request, e) raise class DeleteVNFFG(policy.PolicyTargetMixin, tables.DeleteAction): @staticmethod def action_present(count): return", "vnffg['status'] return item except (Http404, NotFound): raise Http404 except Exception as e: messages.error(request,", "class DeleteVNFFG(policy.PolicyTargetMixin, tables.DeleteAction): @staticmethod def action_present(count): return ungettext_lazy( u\"Terminate VNFFG\", u\"Terminate VNFFGs\", count", "law or agreed to in writing, software # distributed under the License is", "(\"init_in_progress\", pgettext_lazy(\"current status of stack\", u\"Init In Progress\")), (\"init_complete\", pgettext_lazy(\"current status of stack\",", "= [] @classmethod def get_obj_given_id(cls, vnffg_id): for obj in cls.VNFFGLIST_P: if obj.id ==", "class DeployVNFFG(tables.LinkAction): name = \"deployvnffg\" verbose_name = _(\"Deploy VNFFG\") classes = (\"ajax-modal\",) icon", "In Progress\")), (\"adopt_complete\", pgettext_lazy(\"current status of stack\", u\"Adopt Complete\")), (\"adopt_failed\", pgettext_lazy(\"current status of", "express or implied. See the # License for the specific language governing permissions", "u\"Terminate VNFFGs\", count ) @staticmethod def action_past(count): return ungettext_lazy( u\"Terminate VNFFG\", u\"Terminate VNFFGs\",", "= True def can_be_selected(self, datum): return datum.status != 'DELETE_COMPLETE' def get_data(self, request, vnffg_id):", "an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either", "# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "CONDITIONS OF ANY KIND, either express or implied. 
See the # License for", "(\"snapshot_in_progress\", pgettext_lazy(\"current status of stack\", u\"Snapshot In Progress\")), (\"snapshot_complete\", pgettext_lazy(\"current status of stack\",", "STACK_STATUS_DISPLAY_CHOICES = ( (\"init_in_progress\", pgettext_lazy(\"current status of stack\", u\"Init In Progress\")), (\"init_complete\", pgettext_lazy(\"current", "description self.status = status class VNFFGManagerItemList(object): VNFFGLIST_P = [] @classmethod def get_obj_given_id(cls, vnffg_id):", "status of stack\", u\"Delete Failed\")), (\"update_in_progress\", pgettext_lazy(\"current status of stack\", u\"Update In Progress\")),", "vnffg = vnffg_instance['vnffg'] try: vnffg_desc_str = vnffg['description'] except KeyError: vnffg_desc_str = \"\" if", "u\"Rollback Failed\")), (\"suspend_in_progress\", pgettext_lazy(\"current status of stack\", u\"Suspend In Progress\")), (\"suspend_complete\", pgettext_lazy(\"current status", "Name\")) description = tables.Column(\"description\", verbose_name=_(\"Description\")) status = tables.Column(\"status\", hidden=False, status=True, status_choices=STATUS_CHOICES) class Meta(object):", "In Progress\")), (\"suspend_complete\", pgettext_lazy(\"current status of stack\", u\"Suspend Complete\")), (\"suspend_failed\", pgettext_lazy(\"current status of", "status of stack\", u\"Adopt Failed\")), (\"snapshot_in_progress\", pgettext_lazy(\"current status of stack\", u\"Snapshot In Progress\")),", "status of stack\", u\"Create In Progress\")), (\"create_complete\", pgettext_lazy(\"current status of stack\", u\"Create Complete\")),", "pgettext_lazy(\"current status of stack\", u\"Suspend In Progress\")), (\"suspend_complete\", pgettext_lazy(\"current status of stack\", u\"Suspend", "vnffg['id']) else: item.description = vnffg_desc_str item.status = vnffg['status'] return item except (Http404, NotFound):", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "pgettext_lazy(\"current status of stack\", u\"Create Complete\")), (\"create_failed\", pgettext_lazy(\"current status of stack\", u\"Create Failed\")),", "NotFound class VNFFGManagerItem(object): def __init__(self, id, name, description, status): self.id = id self.name", "class VNFFGManagerItemList(object): VNFFGLIST_P = [] @classmethod def get_obj_given_id(cls, vnffg_id): for obj in cls.VNFFGLIST_P:", "of stack\", u\"Snapshot Failed\")), (\"check_in_progress\", pgettext_lazy(\"current status of stack\", u\"Check In Progress\")), (\"check_complete\",", "Progress\")), (\"resume_complete\", pgettext_lazy(\"current status of stack\", u\"Resume Complete\")), (\"resume_failed\", pgettext_lazy(\"current status of stack\",", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "return obj @classmethod def add_item(cls, item): cls.VNFFGLIST_P.append(item) @classmethod def clear_list(cls): cls.VNFFGLIST_P = []", "Failed\")), (\"resume_in_progress\", pgettext_lazy(\"current status of stack\", u\"Resume In Progress\")), (\"resume_complete\", pgettext_lazy(\"current status of", "= vnffg_desc_str item.status = vnffg['status'] return item except (Http404, NotFound): raise Http404 except", "compliance with the License. 
You may obtain # a copy of the License", "stack\", u\"Delete Failed\")), (\"update_in_progress\", pgettext_lazy(\"current status of stack\", u\"Update In Progress\")), (\"update_complete\", pgettext_lazy(\"current", "(\"rollback_in_progress\", pgettext_lazy(\"current status of stack\", u\"Rollback In Progress\")), (\"rollback_complete\", pgettext_lazy(\"current status of stack\",", "(\"suspend_failed\", pgettext_lazy(\"current status of stack\", u\"Suspend Failed\")), (\"resume_in_progress\", pgettext_lazy(\"current status of stack\", u\"Resume", "raise class DeleteVNFFG(policy.PolicyTargetMixin, tables.DeleteAction): @staticmethod def action_present(count): return ungettext_lazy( u\"Terminate VNFFG\", u\"Terminate VNFFGs\",", "if not item: # Add an item entry item = VNFFGManagerItem(vnffg['name'], vnffg_desc_str, vnffg['status'],", "VNFFGManagerItem(object): def __init__(self, id, name, description, status): self.id = id self.name = name", "class VNFFGUpdateRow(tables.Row): ajax = True def can_be_selected(self, datum): return datum.status != 'DELETE_COMPLETE' def", "IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "specific language governing permissions and limitations # under the License. from django.http import", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "= status class VNFFGManagerItemList(object): VNFFGLIST_P = [] @classmethod def get_obj_given_id(cls, vnffg_id): for obj", "obj_id): api.tacker.delete_vnffg(request, obj_id) class DeployVNFFG(tables.LinkAction): name = \"deployvnffg\" verbose_name = _(\"Deploy VNFFG\") classes", "name = \"deployvnffg\" verbose_name = _(\"Deploy VNFFG\") classes = (\"ajax-modal\",) icon = \"plus\"", "stack\", u\"Resume Failed\")), (\"adopt_in_progress\", pgettext_lazy(\"current status of stack\", u\"Adopt In Progress\")), (\"adopt_complete\", pgettext_lazy(\"current", "(\"snapshot_failed\", pgettext_lazy(\"current status of stack\", u\"Snapshot Failed\")), (\"check_in_progress\", pgettext_lazy(\"current status of stack\", u\"Check", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "Complete\")), (\"delete_failed\", pgettext_lazy(\"current status of stack\", u\"Delete Failed\")), (\"update_in_progress\", pgettext_lazy(\"current status of stack\",", "ugettext_lazy as _ from django.utils.translation import ungettext_lazy from horizon import messages from horizon", "In Progress\")), (\"check_complete\", pgettext_lazy(\"current status of stack\", u\"Check Complete\")), (\"check_failed\", pgettext_lazy(\"current status of", "@classmethod def clear_list(cls): cls.VNFFGLIST_P = [] class MyFilterAction(tables.FilterAction): name = \"myfilter\" class VNFFGUpdateRow(tables.Row):", "u\"Check Failed\")), ) name = tables.Column(\"name\", link=\"horizon:nfv:vnffgmanager:detail\", verbose_name=_(\"VNFFG Name\")) description = tables.Column(\"description\", verbose_name=_(\"Description\"))", "stack\", u\"Snapshot Complete\")), (\"snapshot_failed\", pgettext_lazy(\"current status of stack\", u\"Snapshot Failed\")), (\"check_in_progress\", pgettext_lazy(\"current status", "error return None if not vnffg_instance and item: # API failure, just keep", ") STACK_STATUS_DISPLAY_CHOICES = ( (\"init_in_progress\", pgettext_lazy(\"current status of stack\", u\"Init In Progress\")), (\"init_complete\",", "item: # API failure, just keep the current state return item vnffg =", "vnffg_desc_str, vnffg['status'], vnffg['id']) else: item.description = 
vnffg_desc_str item.status = vnffg['status'] return item except", "pgettext_lazy(\"current status of stack\", u\"Rollback Failed\")), (\"suspend_in_progress\", pgettext_lazy(\"current status of stack\", u\"Suspend In", "Progress\")), (\"suspend_complete\", pgettext_lazy(\"current status of stack\", u\"Suspend Complete\")), (\"suspend_failed\", pgettext_lazy(\"current status of stack\",", "u\"Adopt Failed\")), (\"snapshot_in_progress\", pgettext_lazy(\"current status of stack\", u\"Snapshot In Progress\")), (\"snapshot_complete\", pgettext_lazy(\"current status", "may # not use this file except in compliance with the License. You", "with error return None if not vnffg_instance and item: # API failure, just", "verbose_name = _(\"Deploy VNFFG\") classes = (\"ajax-modal\",) icon = \"plus\" url = \"horizon:nfv:vnffgmanager:deployvnffg\"", "either express or implied. See the # License for the specific language governing", "VNFFGManagerItem(vnffg['name'], vnffg_desc_str, vnffg['status'], vnffg['id']) else: item.description = vnffg_desc_str item.status = vnffg['status'] return item", "DeleteVNFFG(policy.PolicyTargetMixin, tables.DeleteAction): @staticmethod def action_present(count): return ungettext_lazy( u\"Terminate VNFFG\", u\"Terminate VNFFGs\", count )", "stack\", u\"Update Failed\")), (\"rollback_in_progress\", pgettext_lazy(\"current status of stack\", u\"Rollback In Progress\")), (\"rollback_complete\", pgettext_lazy(\"current", "TODO(NAME) - bail with error return None if not vnffg_instance and item: #", "this file except in compliance with the License. You may obtain # a", "stack\", u\"Check Complete\")), (\"check_failed\", pgettext_lazy(\"current status of stack\", u\"Check Failed\")), ) name =", "u\"Adopt In Progress\")), (\"adopt_complete\", pgettext_lazy(\"current status of stack\", u\"Adopt Complete\")), (\"adopt_failed\", pgettext_lazy(\"current status", "or implied. See the # License for the specific language governing permissions and", "pgettext_lazy(\"current status of stack\", u\"Check Failed\")), ) name = tables.Column(\"name\", link=\"horizon:nfv:vnffgmanager:detail\", verbose_name=_(\"VNFFG Name\"))", "the specific language governing permissions and limitations # under the License. 
from django.http", "of stack\", u\"Init Complete\")), (\"init_failed\", pgettext_lazy(\"current status of stack\", u\"Init Failed\")), (\"create_in_progress\", pgettext_lazy(\"current", "= tables.Column(\"status\", hidden=False, status=True, status_choices=STATUS_CHOICES) class Meta(object): name = \"vnffgmanager\" verbose_name = _(\"VNFFGManager\")", "not vnffg_instance and item: # API failure, just keep the current state return", "VNFFGManagerItemList(object): VNFFGLIST_P = [] @classmethod def get_obj_given_id(cls, vnffg_id): for obj in cls.VNFFGLIST_P: if", "= \"horizon:nfv:vnffgmanager:deployvnffg\" class VNFFGManagerTable(tables.DataTable): STATUS_CHOICES = ( (\"ACTIVE\", True), (\"ERROR\", False), ) STACK_STATUS_DISPLAY_CHOICES", "id, name, description, status): self.id = id self.name = name self.description = description", "u\"Suspend Failed\")), (\"resume_in_progress\", pgettext_lazy(\"current status of stack\", u\"Resume In Progress\")), (\"resume_complete\", pgettext_lazy(\"current status", "as e: messages.error(request, e) raise class DeleteVNFFG(policy.PolicyTargetMixin, tables.DeleteAction): @staticmethod def action_present(count): return ungettext_lazy(", "VNFFG\", u\"Terminate VNFFGs\", count ) def action(self, request, obj_id): api.tacker.delete_vnffg(request, obj_id) class DeployVNFFG(tables.LinkAction):", "count ) @staticmethod def action_past(count): return ungettext_lazy( u\"Terminate VNFFG\", u\"Terminate VNFFGs\", count )", "Complete\")), (\"adopt_failed\", pgettext_lazy(\"current status of stack\", u\"Adopt Failed\")), (\"snapshot_in_progress\", pgettext_lazy(\"current status of stack\",", "for the specific language governing permissions and limitations # under the License. from", "!= 'DELETE_COMPLETE' def get_data(self, request, vnffg_id): try: item = VNFFGManagerItemList.get_obj_given_id(vnffg_id) vnffg_instance = api.tacker.get_vnffg(request,", "stack\", u\"Rollback Failed\")), (\"suspend_in_progress\", pgettext_lazy(\"current status of stack\", u\"Suspend In Progress\")), (\"suspend_complete\", pgettext_lazy(\"current", "Complete\")), (\"init_failed\", pgettext_lazy(\"current status of stack\", u\"Init Failed\")), (\"create_in_progress\", pgettext_lazy(\"current status of stack\",", "on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND,", "ungettext_lazy( u\"Terminate VNFFG\", u\"Terminate VNFFGs\", count ) @staticmethod def action_past(count): return ungettext_lazy( u\"Terminate", "name self.description = description self.status = status class VNFFGManagerItemList(object): VNFFGLIST_P = [] @classmethod", "link=\"horizon:nfv:vnffgmanager:detail\", verbose_name=_(\"VNFFG Name\")) description = tables.Column(\"description\", verbose_name=_(\"Description\")) status = tables.Column(\"status\", hidden=False, status=True, status_choices=STATUS_CHOICES)", "= \"\" if not item: # Add an item entry item = VNFFGManagerItem(vnffg['name'],", "vnffg_desc_str = vnffg['description'] except KeyError: vnffg_desc_str = \"\" if not item: # Add", "pgettext_lazy(\"current status of stack\", u\"Update In Progress\")), (\"update_complete\", pgettext_lazy(\"current status of stack\", u\"Update", "Failed\")), (\"snapshot_in_progress\", pgettext_lazy(\"current status of stack\", u\"Snapshot In Progress\")), (\"snapshot_complete\", pgettext_lazy(\"current status of", "= tables.Column(\"description\", verbose_name=_(\"Description\")) status = tables.Column(\"status\", hidden=False, status=True, status_choices=STATUS_CHOICES) class Meta(object): name =", "horizon import 
messages from horizon import tables from openstack_dashboard import policy from tacker_horizon.openstack_dashboard", "= \"myfilter\" class VNFFGUpdateRow(tables.Row): ajax = True def can_be_selected(self, datum): return datum.status !=", "\"myfilter\" class VNFFGUpdateRow(tables.Row): ajax = True def can_be_selected(self, datum): return datum.status != 'DELETE_COMPLETE'", "- bail with error return None if not vnffg_instance and item: # API", "of stack\", u\"Suspend Failed\")), (\"resume_in_progress\", pgettext_lazy(\"current status of stack\", u\"Resume In Progress\")), (\"resume_complete\",", "(\"create_in_progress\", pgettext_lazy(\"current status of stack\", u\"Create In Progress\")), (\"create_complete\", pgettext_lazy(\"current status of stack\",", "under the License. from django.http import Http404 from django.utils.translation import pgettext_lazy from django.utils.translation", "self.id = id self.name = name self.description = description self.status = status class", "pgettext_lazy(\"current status of stack\", u\"Snapshot Complete\")), (\"snapshot_failed\", pgettext_lazy(\"current status of stack\", u\"Snapshot Failed\")),", "verbose_name=_(\"VNFFG Name\")) description = tables.Column(\"description\", verbose_name=_(\"Description\")) status = tables.Column(\"status\", hidden=False, status=True, status_choices=STATUS_CHOICES) class", "if not vnffg_instance and item: # API failure, just keep the current state", "action_past(count): return ungettext_lazy( u\"Terminate VNFFG\", u\"Terminate VNFFGs\", count ) def action(self, request, obj_id):", "pgettext_lazy(\"current status of stack\", u\"Rollback Complete\")), (\"rollback_failed\", pgettext_lazy(\"current status of stack\", u\"Rollback Failed\")),", "of stack\", u\"Delete Complete\")), (\"delete_failed\", pgettext_lazy(\"current status of stack\", u\"Delete Failed\")), (\"update_in_progress\", pgettext_lazy(\"current", "Failed\")), (\"rollback_in_progress\", pgettext_lazy(\"current status of stack\", u\"Rollback In Progress\")), (\"rollback_complete\", pgettext_lazy(\"current status of", "of stack\", u\"Rollback In Progress\")), (\"rollback_complete\", pgettext_lazy(\"current status of stack\", u\"Rollback Complete\")), (\"rollback_failed\",", "status of stack\", u\"Suspend Complete\")), (\"suspend_failed\", pgettext_lazy(\"current status of stack\", u\"Suspend Failed\")), (\"resume_in_progress\",", "= VNFFGManagerItemList.get_obj_given_id(vnffg_id) vnffg_instance = api.tacker.get_vnffg(request, vnffg_id) if not vnffg_instance and not item: #", "= tables.Column(\"name\", link=\"horizon:nfv:vnffgmanager:detail\", verbose_name=_(\"VNFFG Name\")) description = tables.Column(\"description\", verbose_name=_(\"Description\")) status = tables.Column(\"status\", hidden=False,", "OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License", "obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "def clear_list(cls): cls.VNFFGLIST_P = [] class MyFilterAction(tables.FilterAction): name = \"myfilter\" class VNFFGUpdateRow(tables.Row): ajax", "# TODO(NAME) - bail with error return None if not vnffg_instance and item:", "state return item vnffg = vnffg_instance['vnffg'] try: vnffg_desc_str = vnffg['description'] except KeyError: vnffg_desc_str", "( (\"init_in_progress\", pgettext_lazy(\"current status of stack\", u\"Init In Progress\")), (\"init_complete\", pgettext_lazy(\"current status of", "of stack\", u\"Create In Progress\")), (\"create_complete\", pgettext_lazy(\"current status of stack\", u\"Create Complete\")), (\"create_failed\",", "pgettext_lazy(\"current status of stack\", u\"Create Failed\")), (\"delete_in_progress\", pgettext_lazy(\"current status of stack\", u\"Delete In", "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may #", "pgettext_lazy(\"current status of stack\", u\"Delete In Progress\")), (\"delete_complete\", pgettext_lazy(\"current status of stack\", u\"Delete", "pgettext_lazy(\"current status of stack\", u\"Check Complete\")), (\"check_failed\", pgettext_lazy(\"current status of stack\", u\"Check Failed\")),", "KeyError: vnffg_desc_str = \"\" if not item: # Add an item entry item", "\"plus\" url = \"horizon:nfv:vnffgmanager:deployvnffg\" class VNFFGManagerTable(tables.DataTable): STATUS_CHOICES = ( (\"ACTIVE\", True), (\"ERROR\", False),", "= vnffg_instance['vnffg'] try: vnffg_desc_str = vnffg['description'] except KeyError: vnffg_desc_str = \"\" if not", "status of stack\", u\"Check Complete\")), (\"check_failed\", pgettext_lazy(\"current status of stack\", u\"Check Failed\")), )", "status of stack\", u\"Suspend In Progress\")), (\"suspend_complete\", pgettext_lazy(\"current status of stack\", u\"Suspend Complete\")),", "u\"Init In Progress\")), (\"init_complete\", pgettext_lazy(\"current status of stack\", u\"Init Complete\")), (\"init_failed\", pgettext_lazy(\"current status", "= \"deployvnffg\" verbose_name = _(\"Deploy VNFFG\") classes = (\"ajax-modal\",) icon = \"plus\" url", "pgettext_lazy(\"current status of stack\", u\"Delete Failed\")), (\"update_in_progress\", pgettext_lazy(\"current status of stack\", u\"Update In", "from django.http import Http404 from django.utils.translation import pgettext_lazy from django.utils.translation import ugettext_lazy as", "pgettext_lazy(\"current status of stack\", u\"Snapshot In Progress\")), (\"snapshot_complete\", pgettext_lazy(\"current status of stack\", u\"Snapshot", "api.tacker.delete_vnffg(request, obj_id) class DeployVNFFG(tables.LinkAction): name = \"deployvnffg\" verbose_name = _(\"Deploy VNFFG\") classes =" ]
[ "import admin from django.urls import path, re_path, include from django.conf import settings from", "django.urls import path, re_path, include from django.conf import settings from django.conf.urls.static import static", "admin.site.urls), path('', include('blog.urls')), path('ueditor/', include('DjangoUeditor.urls')), re_path('^media/(?P<path>.*)$', serve, {'document_root': settings.MEDIA_ROOT}), ] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)", "settings from django.conf.urls.static import static from django.views.static import serve urlpatterns = [ path('admin/',", "#encoding=utf-8 from django.contrib import admin from django.urls import path, re_path, include from django.conf", "django.conf.urls.static import static from django.views.static import serve urlpatterns = [ path('admin/', admin.site.urls), path('',", "import settings from django.conf.urls.static import static from django.views.static import serve urlpatterns = [", "<reponame>guoshijiang/we_guitar #encoding=utf-8 from django.contrib import admin from django.urls import path, re_path, include from", "from django.urls import path, re_path, include from django.conf import settings from django.conf.urls.static import", "from django.conf import settings from django.conf.urls.static import static from django.views.static import serve urlpatterns", "path, re_path, include from django.conf import settings from django.conf.urls.static import static from django.views.static", "serve urlpatterns = [ path('admin/', admin.site.urls), path('', include('blog.urls')), path('ueditor/', include('DjangoUeditor.urls')), re_path('^media/(?P<path>.*)$', serve, {'document_root':", "= [ path('admin/', admin.site.urls), path('', include('blog.urls')), path('ueditor/', include('DjangoUeditor.urls')), re_path('^media/(?P<path>.*)$', serve, {'document_root': settings.MEDIA_ROOT}), ]", "include from django.conf import settings from django.conf.urls.static import static from django.views.static import serve", "import path, re_path, include from django.conf import settings from django.conf.urls.static import static from", "path('admin/', admin.site.urls), path('', include('blog.urls')), path('ueditor/', include('DjangoUeditor.urls')), re_path('^media/(?P<path>.*)$', serve, {'document_root': settings.MEDIA_ROOT}), ] + static(settings.STATIC_URL,", "from django.views.static import serve urlpatterns = [ path('admin/', admin.site.urls), path('', include('blog.urls')), path('ueditor/', include('DjangoUeditor.urls')),", "static from django.views.static import serve urlpatterns = [ path('admin/', admin.site.urls), path('', include('blog.urls')), path('ueditor/',", "import serve urlpatterns = [ path('admin/', admin.site.urls), path('', include('blog.urls')), path('ueditor/', include('DjangoUeditor.urls')), re_path('^media/(?P<path>.*)$', serve,", "from django.contrib import admin from django.urls import path, re_path, include from django.conf import", "django.views.static import serve urlpatterns = [ path('admin/', admin.site.urls), path('', include('blog.urls')), path('ueditor/', include('DjangoUeditor.urls')), re_path('^media/(?P<path>.*)$',", "re_path, include from django.conf import settings from django.conf.urls.static import static from django.views.static import", "import static from django.views.static import serve urlpatterns = [ path('admin/', admin.site.urls), path('', include('blog.urls')),", "admin from django.urls import path, re_path, include from django.conf import settings from django.conf.urls.static", "from 
django.conf.urls.static import static from django.views.static import serve urlpatterns = [ path('admin/', admin.site.urls),", "[ path('admin/', admin.site.urls), path('', include('blog.urls')), path('ueditor/', include('DjangoUeditor.urls')), re_path('^media/(?P<path>.*)$', serve, {'document_root': settings.MEDIA_ROOT}), ] +", "django.conf import settings from django.conf.urls.static import static from django.views.static import serve urlpatterns =", "urlpatterns = [ path('admin/', admin.site.urls), path('', include('blog.urls')), path('ueditor/', include('DjangoUeditor.urls')), re_path('^media/(?P<path>.*)$', serve, {'document_root': settings.MEDIA_ROOT}),", "django.contrib import admin from django.urls import path, re_path, include from django.conf import settings" ]
[ "') print(f'\\nO menor número é {min(n)} e o maior é {max(n)}.') '''from random", "é {min(n)} e o maior é {max(n)}.') '''from random import randint a =", "10), randint(1, 10), randint(1, 10), randint(1, 10), randint(1, 10) print(f'Os números sorteados foram:", "menor número é {min(n)} e o maior é {max(n)}.') '''from random import randint", "'''from random import randint a = tuple(randint(1, 5) for i in range(5)) print(f'Os", "for i in range(5)) print(f'Os números sorteados foram {a}, o maior é {max(a)}", "n: print(num, end=' ') print(f'\\nO menor número é {min(n)} e o maior é", "10), randint(1, 10) print(f'Os números sorteados foram: ', end='') for num in n:", "print(num, end=' ') print(f'\\nO menor número é {min(n)} e o maior é {max(n)}.')", "from random import randint n = randint(1, 10), randint(1, 10), randint(1, 10), randint(1,", "randint(1, 10) print(f'Os números sorteados foram: ', end='') for num in n: print(num,", "end=' ') print(f'\\nO menor número é {min(n)} e o maior é {max(n)}.') '''from", "10) print(f'Os números sorteados foram: ', end='') for num in n: print(num, end='", "e o maior é {max(n)}.') '''from random import randint a = tuple(randint(1, 5)", "tuple(randint(1, 5) for i in range(5)) print(f'Os números sorteados foram {a}, o maior", "random import randint n = randint(1, 10), randint(1, 10), randint(1, 10), randint(1, 10),", "randint(1, 10), randint(1, 10), randint(1, 10), randint(1, 10) print(f'Os números sorteados foram: ',", "randint(1, 10), randint(1, 10) print(f'Os números sorteados foram: ', end='') for num in", "in range(5)) print(f'Os números sorteados foram {a}, o maior é {max(a)} e o", "{min(n)} e o maior é {max(n)}.') '''from random import randint a = tuple(randint(1,", "números sorteados foram {a}, o maior é {max(a)} e o menor é {min(a)}.')'''", "end='') for num in n: print(num, end=' ') print(f'\\nO menor número é {min(n)}", "5) for i in range(5)) print(f'Os números sorteados foram {a}, o maior é", "i in range(5)) print(f'Os números sorteados foram {a}, o maior é {max(a)} e", "{max(n)}.') '''from random import randint a = tuple(randint(1, 5) for i in range(5))", "é {max(n)}.') '''from random import randint a = tuple(randint(1, 5) for i in", "random import randint a = tuple(randint(1, 5) for i in range(5)) print(f'Os números", "randint(1, 10), randint(1, 10), randint(1, 10) print(f'Os números sorteados foram: ', end='') for", "for num in n: print(num, end=' ') print(f'\\nO menor número é {min(n)} e", "print(f'\\nO menor número é {min(n)} e o maior é {max(n)}.') '''from random import", "<filename>Desafios/desafio074.py<gh_stars>0 from random import randint n = randint(1, 10), randint(1, 10), randint(1, 10),", "randint(1, 10), randint(1, 10), randint(1, 10), randint(1, 10), randint(1, 10) print(f'Os números sorteados", "maior é {max(n)}.') '''from random import randint a = tuple(randint(1, 5) for i", "', end='') for num in n: print(num, end=' ') print(f'\\nO menor número é", "in n: print(num, end=' ') print(f'\\nO menor número é {min(n)} e o maior", "randint n = randint(1, 10), randint(1, 10), randint(1, 10), randint(1, 10), randint(1, 10)", "import randint a = tuple(randint(1, 5) for i in range(5)) print(f'Os números sorteados", "números sorteados foram: ', end='') for num in n: print(num, end=' ') print(f'\\nO", "a = tuple(randint(1, 5) for i in range(5)) print(f'Os números sorteados foram {a},", "10), randint(1, 10), randint(1, 10) print(f'Os números sorteados foram: ', end='') for num", "n = randint(1, 10), randint(1, 10), randint(1, 10), randint(1, 10), 
randint(1, 10) print(f'Os", "o maior é {max(n)}.') '''from random import randint a = tuple(randint(1, 5) for", "10), randint(1, 10), randint(1, 10), randint(1, 10) print(f'Os números sorteados foram: ', end='')", "= randint(1, 10), randint(1, 10), randint(1, 10), randint(1, 10), randint(1, 10) print(f'Os números", "range(5)) print(f'Os números sorteados foram {a}, o maior é {max(a)} e o menor", "print(f'Os números sorteados foram {a}, o maior é {max(a)} e o menor é", "import randint n = randint(1, 10), randint(1, 10), randint(1, 10), randint(1, 10), randint(1,", "número é {min(n)} e o maior é {max(n)}.') '''from random import randint a", "num in n: print(num, end=' ') print(f'\\nO menor número é {min(n)} e o", "= tuple(randint(1, 5) for i in range(5)) print(f'Os números sorteados foram {a}, o", "foram: ', end='') for num in n: print(num, end=' ') print(f'\\nO menor número", "sorteados foram: ', end='') for num in n: print(num, end=' ') print(f'\\nO menor", "randint a = tuple(randint(1, 5) for i in range(5)) print(f'Os números sorteados foram", "print(f'Os números sorteados foram: ', end='') for num in n: print(num, end=' ')" ]
[ "return True if isinstance(node, ast.Name): if node.id in ('True', 'False', 'None'): return True", "ast.Lambda): return node_to_code(node) return '' def _process_lambda(node): try: gen = ASTCodeGenerator() gen.visit(node) return", "the selector expression that make up the call. # We just have to", "scope bound to the provided LHS variable - Otherwise, if the LHS is", "class_context(self): self.start_ClassDef() yield self.end_ClassDef() def start_FunctionDef(self): self.func_scope = {} self.in_func = True def", "a function. We resolved the LHS and RHS to their full names. Then:", "\"\"\" List of types we support for the RHS in assignment expressions. \"\"\"", "if not name: return None, None scopes = [('types', self.type_scope), ('builtin', self.builtin_scope), ('imports',", "def end_ClassDef(self): self.class_scope = {} self.in_class = False def visit_Import(self, node): \"\"\" Handle", "if self._check_in(rhs, self.class_scope): scope[lhs] = self._resolve_in(rhs, self.class_scope) # Check builtins elif self._check_in(rhs, self.builtin_scope):", "else: self.imports[im.asname] = full_import def visit_Assign(self, node): \"\"\" On an assignment expression, we", "def start_FunctionDef(self): self.func_scope = {} self.in_func = True def end_FunctionDef(self): self.func_scope = {}", "return LITERAL_NODES[type(node)] if type(node) == ast.Name: if (node.id == 'True' or node.id ==", "end_FunctionDef(self): self.func_scope = {} self.in_func = False def start_ClassDef(self): self.class_scope = {} self.in_class", "If we actually ended up at an ast.Name node, we have a #", "self.end_FunctionDef() @contextmanager def class_context(self): self.start_ClassDef() yield self.end_ClassDef() def start_FunctionDef(self): self.func_scope = {} self.in_func", "return None, None def in_class_scope(self, node): name = self._get_name(node) if not name: return", "the provided LHS variable - Otherwise, if the LHS is already in scope,", "self.builtin_scope), ('imports', self.imports), ('global', self.global_scope)] if self.in_func: scopes.append(('function', self.func_scope)) if self.in_class: scopes.append(('class', self.class_scope))", "up the call. # We just have to reverse the parts we added", "self.global_scope)] if self.in_func: scopes.append(('function', self.func_scope)) if self.in_class: scopes.append(('class', self.class_scope)) scopes.reverse() for scopeName, scope", "above. if isinstance(n, ast.Name): parts.append(n.id) parts.reverse() return '.'.join(parts) if isinstance(n, ast.Call): return self._get_name(n)", "else: return LITERAL_NODES[type(node)] if type(node) == ast.Name: if (node.id == 'True' or node.id", "key in self: return \"__builtin__.%s\" % key raise KeyError(\"%s not a builtin\" %", "ast.Name: if (node.id == 'True' or node.id == 'False'): return \"__builtin__.bool\" elif node.id", "if not name: return False return self._check_in(name, self.class_scope) @contextmanager def function_context(self): self.start_FunctionDef() yield", "parts we added above. if isinstance(n, ast.Name): parts.append(n.id) parts.reverse() return '.'.join(parts) if isinstance(n,", "a type\" % key) class BuiltinScope(object): def __contains__(self, name): return hasattr(__builtin__, name) def", "for im in node.names: full_import = node.module + '.' 
+ im.name if im.asname", "imports if self._check_in(rhs, self.imports): scope[lhs] = self._resolve_in(rhs, self.imports) # Check current scope (global,", "parts = name.split(\".\") for i in range(1, len(parts)): im = '.'.join(parts[:-i]) if im", "__contains__(self, name): return hasattr(types, name) def __getitem__(self, key): if key in self: return", "= TypeScope() self.builtin_scope = BuiltinScope() self.in_func = False self.in_class = False def resolve_node(self,", "bound to the provided LHS variable - If the RHS is a literal,", "im in scope: return scope[im] + name[len(im):] return None def _known_assignment_type(self, target): \"\"\"", "None or rhs is None: return # Use class scope if we are", "= self._get_name(node) if not name: return False return self._check_in(name, self.class_scope) @contextmanager def function_context(self):", "we remove it. \"\"\" # Check to see if we are in a", "return hasattr(__builtin__, name) def __getitem__(self, key): if key in self: return \"__builtin__.%s\" %", "or class) elif self._check_in(rhs, scope): scope[lhs] = self._resolve_in(rhs, scope) # Check rhs in", "Context(object): def __init__(self): self.imports = {} self.func_scope = {} self.class_scope = {} self.global_scope", "end_ClassDef(self): self.class_scope = {} self.in_class = False def visit_Import(self, node): \"\"\" Handle imports.", "= self._get_name(node.value) if lhs is None or rhs is None: return # Use", "im.asname is None: self.imports[im.name] = im.name else: self.imports[im.asname] = im.name def visit_ImportFrom(self, node):", "return self._get_name(n) if is_literal(n): nodeType = node_to_type(n) parts.append(nodeType) parts.reverse() return '.'.join(parts) return None", "we add it into scope bound to the provided LHS variable - If", "are nested in selector expressions, # e.g os.path.join, they are chained together as", "self.imports[im.asname] = im.name def visit_ImportFrom(self, node): \"\"\" Handle from imports. \"\"\" if node.module", "_resolve_in(self, name, scope): if name in scope: return scope[name] parts = name.split(\".\") for", "all the components of the selector expression that make up the call. #", "to reverse the parts we added above. if isinstance(n, ast.Name): parts.append(n.id) parts.reverse() return", "add it into scope bound to the provided LHS variable - If the", "its full name. \"\"\" if is_literal(node): return node_to_type(node) n = node if isinstance(node,", "in_class_scope(self, node): name = self._get_name(node) if not name: return False return self._check_in(name, self.class_scope)", "from imports. \"\"\" if node.module is None: return for im in node.names: full_import", "in scope, we remove it. \"\"\" # Check to see if we are", "a node to its full name. \"\"\" if is_literal(node): return node_to_type(node) n =", "function. We resolved the LHS and RHS to their full names. Then: -", "('global', self.global_scope)] if self.in_func: scopes.append(('function', self.func_scope)) if self.in_class: scopes.append(('class', self.class_scope)) scopes.reverse() for scopeName,", "RHS is a literal, we add it into scope bound to the provided", "scope[lhs] = self._resolve_in(rhs, scope) # Check rhs in class scope elif self.in_class and", "n.value # If we actually ended up at an ast.Name node, we have", "in node.names: full_import = node.module + '.' + im.name if im.asname is None:", "they are chained together as a series of # ast.Attribute nodes. 
Extract them", "'' def _process_lambda(node): try: gen = ASTCodeGenerator() gen.visit(node) return gen.line except Exception as", "return \"__builtin__.None\" if type(node) == ast.Lambda: return \"LambdaType\" return \"unknown\" class TypeScope(object): def", "len(parts)): im = '.'.join(parts[:-i]) if im in scope: return scope[im] + name[len(im):] return", "for im in node.names: if im.asname is None: self.imports[im.name] = im.name else: self.imports[im.asname]", "int: \"int\", float: \"float\", long: \"long\", complex: \"complex\", } def is_literal(node): if type(node)", "return node_to_type(node) n = node if isinstance(node, ast.Call): n = node.func parts =", "We just have to reverse the parts we added above. if isinstance(n, ast.Name):", "if the RHS is a known assignment type if not self._known_assignment_type(node.value): return #", "a # all the components of the selector expression that make up the", "== 'None': return \"__builtin__.None\" if type(node) == ast.Lambda: return \"LambdaType\" return \"unknown\" class", "[('types', self.type_scope), ('builtin', self.builtin_scope), ('imports', self.imports), ('global', self.global_scope)] if self.in_func: scopes.append(('function', self.func_scope)) if", "return '' if isinstance(node, ast.Num): return str(node.n) if isinstance(node, ast.Str): return node.s if", "elif rhs in LITERAL_NODES.values(): scope[lhs] = rhs # Remove re-assignments that were unrecognized", "ast.Num: \"number\", ast.Str: \"str\", ast.List: \"list\", ast.Tuple: \"tuple\", ast.Set: \"set\", ast.Dict: \"dict\", }", "are in a function. scope = self.global_scope if self.in_func: scope = self.func_scope #", "ASTCodeGenerator() gen.visit(node) return gen.line except Exception as e: return \"\" def node_to_type(node): if", "right-hand side of assignment expression lhs = self._get_name(node.targets[0]) rhs = self._get_name(node.value) if lhs", "scope if we are in a class lhs starts with \"self.\" if self.in_class", "if isinstance(node, ast.Num): return str(node.n) if isinstance(node, ast.Str): return node.s if (isinstance(node, ast.Name)", "ast.Set: \"set\", ast.Dict: \"dict\", } NUMBER_TYPES = { int: \"int\", float: \"float\", long:", "into scope bound to the provided LHS variable - Otherwise, if the LHS", "name[len(im):] return None def _known_assignment_type(self, target): \"\"\" List of types we support for", "import node_to_code LITERAL_NODES = { ast.Num: \"number\", ast.Str: \"str\", ast.List: \"list\", ast.Tuple: \"tuple\",", "isinstance(node.n, t): return name return LITERAL_NODES[type(node)] else: return LITERAL_NODES[type(node)] if type(node) == ast.Name:", "% key raise KeyError(\"%s not a builtin\" % key) class Context(object): def __init__(self):", "self.in_func = False def start_ClassDef(self): self.class_scope = {} self.in_class = True def end_ClassDef(self):", "with \"self.\" if self.in_class and lhs.startswith(\"self.\"): scope = self.class_scope # Check imports if", "ast.Call): return self._get_name(n) if is_literal(n): nodeType = node_to_type(n) parts.append(nodeType) parts.reverse() return '.'.join(parts) return", "for now if len(node.targets) != 1: return # Resolve left-hand and right-hand side", "im = '.'.join(parts[:-i]) if im in scope: return True return False def _resolve_in(self,", "def __contains__(self, name): return hasattr(types, name) def __getitem__(self, key): if key in self:", "self._resolve_in(rhs, self.imports) # Check current scope (global, func or class) elif self._check_in(rhs, scope):", "ast.List: \"list\", ast.Tuple: \"tuple\", ast.Set: 
\"set\", ast.Dict: \"dict\", } NUMBER_TYPES = { int:", "node_to_type(node): if type(node) in LITERAL_NODES: if type(node) == ast.Num: for t, name in", "self.imports[im.name] = im.name else: self.imports[im.asname] = im.name def visit_ImportFrom(self, node): \"\"\" Handle from", "len(parts)): im = '.'.join(parts[:-i]) if im in scope: return True return False def", "imports. \"\"\" for im in node.names: if im.asname is None: self.imports[im.name] = im.name", "self.class_scope): scope[lhs] = self._resolve_in(rhs, self.class_scope) # Check builtins elif self._check_in(rhs, self.builtin_scope): scope[lhs] =", "from pprint import pprint from utils import node_to_code LITERAL_NODES = { ast.Num: \"number\",", "On an assignment expression, we add variables to local or global scope based", "already in scope, we remove it. \"\"\" # Check to see if we", "rhs.startswith(\"self.\"): if self._check_in(rhs, self.class_scope): scope[lhs] = self._resolve_in(rhs, self.class_scope) # Check builtins elif self._check_in(rhs,", "on whether we are in a function. We resolved the LHS and RHS", "self.imports = {} self.func_scope = {} self.class_scope = {} self.global_scope = {} self.type_scope", "side of assignment expression lhs = self._get_name(node.targets[0]) rhs = self._get_name(node.value) if lhs is", "consider single-assignments for now if len(node.targets) != 1: return # Resolve left-hand and", "# Only consider single-assignments for now if len(node.targets) != 1: return # Resolve", "if isinstance(node, ast.Call): n = node.func parts = [] while isinstance(n, ast.Attribute): #", "to the provided LHS variable - If the RHS is a literal, we", "False def start_ClassDef(self): self.class_scope = {} self.in_class = True def end_ClassDef(self): self.class_scope =", "for scopeName, scope in scopes: if self._check_in(name, scope): return scopeName, self._resolve_in(name, scope) return", "self.in_class = True def end_ClassDef(self): self.class_scope = {} self.in_class = False def visit_Import(self,", "if isinstance(n, ast.Name): parts.append(n.id) parts.reverse() return '.'.join(parts) if isinstance(n, ast.Call): return self._get_name(n) if", "ast from contextlib import contextmanager from pprint import pprint from utils import node_to_code", "= False self.in_class = False def resolve_node(self, node): name = self._get_name(node) if not", "elif node.id == 'None': return \"__builtin__.None\" if type(node) == ast.Lambda: return \"LambdaType\" return", "in scope: del scope[lhs] def _check_in(self, name, scope): if name in scope: return", "scope: return True parts = name.split(\".\") for i in range(1, len(parts)): im =", "+ name[len(im):] return None def _known_assignment_type(self, target): \"\"\" List of types we support", "not a builtin\" % key) class Context(object): def __init__(self): self.imports = {} self.func_scope", "= {} self.class_scope = {} self.global_scope = {} self.type_scope = TypeScope() self.builtin_scope =", "NUMBER_TYPES = { int: \"int\", float: \"float\", long: \"long\", complex: \"complex\", } def", "node_to_type(node) n = node if isinstance(node, ast.Call): n = node.func parts = []", "or isinstance(target, (ast.Call, ast.Attribute))) def _get_name(self, node): \"\"\" Resolve a node to its", "parts.append(n.id) parts.reverse() return '.'.join(parts) if isinstance(n, ast.Call): return self._get_name(n) if is_literal(n): nodeType =", "pprint import pprint from utils import node_to_code LITERAL_NODES = { ast.Num: \"number\", ast.Str:", "'None': return \"__builtin__.None\" if type(node) == ast.Lambda: 
return \"LambdaType\" return \"unknown\" class TypeScope(object):", "raise KeyError(\"%s not a builtin\" % key) class Context(object): def __init__(self): self.imports =", "scope: return scope[name] parts = name.split(\".\") for i in range(1, len(parts)): im =", "as e: return \"\" def node_to_type(node): if type(node) in LITERAL_NODES: if type(node) ==", "is ast.Lambda: return True return False def literal_value(node): if not is_literal(node): return ''", "(is_literal(target) or isinstance(target, (ast.Call, ast.Attribute))) def _get_name(self, node): \"\"\" Resolve a node to", "= False def visit_Import(self, node): \"\"\" Handle imports. \"\"\" for im in node.names:", "in self: return \"__builtin__.%s\" % key raise KeyError(\"%s not a builtin\" % key)", "node.names: full_import = node.module + '.' + im.name if im.asname is None: self.imports[im.name]", "scope: del scope[lhs] def _check_in(self, name, scope): if name in scope: return True", "in range(1, len(parts)): im = '.'.join(parts[:-i]) if im in scope: return scope[im] +", "try: gen = ASTCodeGenerator() gen.visit(node) return gen.line except Exception as e: return \"\"", "'None'): return True if type(node) is ast.Lambda: return True return False def literal_value(node):", "as a series of # ast.Attribute nodes. Extract them one by one. parts.append(n.attr)", "True return False def literal_value(node): if not is_literal(node): return '' if isinstance(node, ast.Num):", "self.imports) # Check current scope (global, func or class) elif self._check_in(rhs, scope): scope[lhs]", "= {} self.in_func = True def end_FunctionDef(self): self.func_scope = {} self.in_func = False", "node_to_code(node) return '' def _process_lambda(node): try: gen = ASTCodeGenerator() gen.visit(node) return gen.line except", "if isinstance(node, ast.Lambda): return node_to_code(node) return '' def _process_lambda(node): try: gen = ASTCodeGenerator()", "is_literal(node): return '' if isinstance(node, ast.Num): return str(node.n) if isinstance(node, ast.Str): return node.s", "float: \"float\", long: \"long\", complex: \"complex\", } def is_literal(node): if type(node) in LITERAL_NODES:", "i in range(1, len(parts)): im = '.'.join(parts[:-i]) if im in scope: return scope[im]", "{} self.class_scope = {} self.global_scope = {} self.type_scope = TypeScope() self.builtin_scope = BuiltinScope()", "scope = self.func_scope # Check to see if the RHS is a known", "def __getitem__(self, key): if key in self: return \"types.%s\" % key raise KeyError(\"%s", "None def in_class_scope(self, node): name = self._get_name(node) if not name: return False return", "scopeName, self._resolve_in(name, scope) return None, None def in_class_scope(self, node): name = self._get_name(node) if", "if is_literal(node): return node_to_type(node) n = node if isinstance(node, ast.Call): n = node.func", "if lhs is None or rhs is None: return # Use class scope", "\"self.\" if self.in_class and lhs.startswith(\"self.\"): scope = self.class_scope # Check imports if self._check_in(rhs,", "it into scope bound to the provided LHS variable - If the RHS", "self.func_scope = {} self.in_func = True def end_FunctionDef(self): self.func_scope = {} self.in_func =", "im in node.names: if im.asname is None: self.imports[im.name] = im.name else: self.imports[im.asname] =", "node): \"\"\" On an assignment expression, we add variables to local or global", "= node if isinstance(node, ast.Call): n = node.func parts = [] while isinstance(n,", "name): return hasattr(__builtin__, name) def __getitem__(self, key): if key in self: 
return \"__builtin__.%s\"", "that are nested in selector expressions, # e.g os.path.join, they are chained together", "Check imports if self._check_in(rhs, self.imports): scope[lhs] = self._resolve_in(rhs, self.imports) # Check current scope", "ast.Num: for t, name in NUMBER_TYPES.items(): if isinstance(node.n, t): return name return LITERAL_NODES[type(node)]", "hasattr(types, name) def __getitem__(self, key): if key in self: return \"types.%s\" % key", "self: return \"types.%s\" % key raise KeyError(\"%s is not a type\" % key)", "rhs is None: return # Use class scope if we are in a", "= self._get_name(node.targets[0]) rhs = self._get_name(node.value) if lhs is None or rhs is None:", "\"tuple\", ast.Set: \"set\", ast.Dict: \"dict\", } NUMBER_TYPES = { int: \"int\", float: \"float\",", "in LITERAL_NODES: if type(node) == ast.Num: for t, name in NUMBER_TYPES.items(): if isinstance(node.n,", "ast.Str: \"str\", ast.List: \"list\", ast.Tuple: \"tuple\", ast.Set: \"set\", ast.Dict: \"dict\", } NUMBER_TYPES =", "elif self._check_in(rhs, scope): scope[lhs] = self._resolve_in(rhs, scope) # Check rhs in class scope", "return node.id if isinstance(node, ast.Lambda): return node_to_code(node) return '' def _process_lambda(node): try: gen", "self._get_name(node) if not name: return False return self._check_in(name, self.class_scope) @contextmanager def function_context(self): self.start_FunctionDef()", "expressions, # e.g os.path.join, they are chained together as a series of #", "{} self.type_scope = TypeScope() self.builtin_scope = BuiltinScope() self.in_func = False self.in_class = False", "node): name = self._get_name(node) if not name: return None, None scopes = [('types',", "key raise KeyError(\"%s not a builtin\" % key) class Context(object): def __init__(self): self.imports", "class Context(object): def __init__(self): self.imports = {} self.func_scope = {} self.class_scope = {}", "scope[lhs] = self._resolve_in(rhs, self.imports) # Check current scope (global, func or class) elif", "False def _resolve_in(self, name, scope): if name in scope: return scope[name] parts =", "We resolved the LHS and RHS to their full names. Then: - If", "= self._resolve_in(rhs, self.imports) # Check current scope (global, func or class) elif self._check_in(rhs,", "class scope if we are in a class lhs starts with \"self.\" if", "ast.Tuple: \"tuple\", ast.Set: \"set\", ast.Dict: \"dict\", } NUMBER_TYPES = { int: \"int\", float:", "def visit_ImportFrom(self, node): \"\"\" Handle from imports. 
\"\"\" if node.module is None: return", "self._check_in(rhs, self.imports): scope[lhs] = self._resolve_in(rhs, self.imports) # Check current scope (global, func or", "= self._resolve_in(rhs, scope) # Check rhs in class scope elif self.in_class and rhs.startswith(\"self.\"):", "scope): if name in scope: return True parts = name.split(\".\") for i in", "if self.in_func: scope = self.func_scope # Check to see if the RHS is", "scope): if name in scope: return scope[name] parts = name.split(\".\") for i in", "name, scope): if name in scope: return scope[name] parts = name.split(\".\") for i", "isinstance(target, (ast.Call, ast.Attribute))) def _get_name(self, node): \"\"\" Resolve a node to its full", "ast.Lambda: return True return False def literal_value(node): if not is_literal(node): return '' if", "= { int: \"int\", float: \"float\", long: \"long\", complex: \"complex\", } def is_literal(node):", "if self._check_in(rhs, self.imports): scope[lhs] = self._resolve_in(rhs, self.imports) # Check current scope (global, func", "str(node.n) if isinstance(node, ast.Str): return node.s if (isinstance(node, ast.Name) and node.id in ('True',", "the RHS in assignment expressions. \"\"\" return (is_literal(target) or isinstance(target, (ast.Call, ast.Attribute))) def", "= full_import def visit_Assign(self, node): \"\"\" On an assignment expression, we add variables", "\"int\", float: \"float\", long: \"long\", complex: \"complex\", } def is_literal(node): if type(node) in", "in class scope elif self.in_class and rhs.startswith(\"self.\"): if self._check_in(rhs, self.class_scope): scope[lhs] = self._resolve_in(rhs,", "and lhs.startswith(\"self.\"): scope = self.class_scope # Check imports if self._check_in(rhs, self.imports): scope[lhs] =", "= '.'.join(parts[:-i]) if im in scope: return scope[im] + name[len(im):] return None def", "= True def end_ClassDef(self): self.class_scope = {} self.in_class = False def visit_Import(self, node):", "{} self.in_func = False def start_ClassDef(self): self.class_scope = {} self.in_class = True def", "KeyError(\"%s not a builtin\" % key) class Context(object): def __init__(self): self.imports = {}", "from an imported statement, we add it into scope bound to the provided", "(ast.Call, ast.Attribute))) def _get_name(self, node): \"\"\" Resolve a node to its full name.", "and node.id in ('True', 'False', 'None')): return node.id if isinstance(node, ast.Lambda): return node_to_code(node)", "= [('types', self.type_scope), ('builtin', self.builtin_scope), ('imports', self.imports), ('global', self.global_scope)] if self.in_func: scopes.append(('function', self.func_scope))", "RHS to their full names. Then: - If the RHS is from an", "im.asname is None: self.imports[im.name] = full_import else: self.imports[im.asname] = full_import def visit_Assign(self, node):", "return None, None scopes = [('types', self.type_scope), ('builtin', self.builtin_scope), ('imports', self.imports), ('global', self.global_scope)]", "= '.'.join(parts[:-i]) if im in scope: return True return False def _resolve_in(self, name,", "LITERAL_NODES[type(node)] if type(node) == ast.Name: if (node.id == 'True' or node.id == 'False'):", "provided LHS variable - If the RHS is a literal, we add it", "type(node) == ast.Num: for t, name in NUMBER_TYPES.items(): if isinstance(node.n, t): return name", "'.'.join(parts[:-i]) if im in scope: return True return False def _resolve_in(self, name, scope):", "types import ast from contextlib import contextmanager from pprint import pprint from utils", "name. 
\"\"\" if is_literal(node): return node_to_type(node) n = node if isinstance(node, ast.Call): n", "= rhs # Remove re-assignments that were unrecognized elif lhs in scope: del", "n = n.value # If we actually ended up at an ast.Name node,", "global scope based on whether we are in a function. We resolved the", "if im in scope: return True return False def _resolve_in(self, name, scope): if", "('True', 'False', 'None')): return node.id if isinstance(node, ast.Lambda): return node_to_code(node) return '' def", "known assignment type if not self._known_assignment_type(node.value): return # Only consider single-assignments for now", "scope[im] + name[len(im):] return None def _known_assignment_type(self, target): \"\"\" List of types we", "self._resolve_in(rhs, scope) # Check rhs in class scope elif self.in_class and rhs.startswith(\"self.\"): if", "node.id if isinstance(node, ast.Lambda): return node_to_code(node) return '' def _process_lambda(node): try: gen =", "to the provided LHS variable - Otherwise, if the LHS is already in", "variable - If the RHS is a literal, we add it into scope", "nodes. Extract them one by one. parts.append(n.attr) n = n.value # If we", "expression that make up the call. # We just have to reverse the", "if (node.id == 'True' or node.id == 'False'): return \"__builtin__.bool\" elif node.id ==", "key) class BuiltinScope(object): def __contains__(self, name): return hasattr(__builtin__, name) def __getitem__(self, key): if", "self._check_in(rhs, self.builtin_scope): scope[lhs] = self._resolve_in(rhs, self.builtin_scope) # Check literals elif rhs in LITERAL_NODES.values():", "\"str\", ast.List: \"list\", ast.Tuple: \"tuple\", ast.Set: \"set\", ast.Dict: \"dict\", } NUMBER_TYPES = {", "rhs = self._get_name(node.value) if lhs is None or rhs is None: return #", "class TypeScope(object): def __contains__(self, name): return hasattr(types, name) def __getitem__(self, key): if key", "True if type(node) is ast.Lambda: return True return False def literal_value(node): if not", "{} self.func_scope = {} self.class_scope = {} self.global_scope = {} self.type_scope = TypeScope()", "os.path.join, they are chained together as a series of # ast.Attribute nodes. Extract", "or node.id == 'False'): return \"__builtin__.bool\" elif node.id == 'None': return \"__builtin__.None\" if", "in scopes: if self._check_in(name, scope): return scopeName, self._resolve_in(name, scope) return None, None def", "@contextmanager def function_context(self): self.start_FunctionDef() yield self.end_FunctionDef() @contextmanager def class_context(self): self.start_ClassDef() yield self.end_ClassDef() def", "im.name def visit_ImportFrom(self, node): \"\"\" Handle from imports. 
\"\"\" if node.module is None:", "func or class) elif self._check_in(rhs, scope): scope[lhs] = self._resolve_in(rhs, scope) # Check rhs", "gen.visit(node) return gen.line except Exception as e: return \"\" def node_to_type(node): if type(node)", "if type(node) in LITERAL_NODES: return True if isinstance(node, ast.Name): if node.id in ('True',", "def _process_lambda(node): try: gen = ASTCodeGenerator() gen.visit(node) return gen.line except Exception as e:", "# For function calls that are nested in selector expressions, # e.g os.path.join,", "im = '.'.join(parts[:-i]) if im in scope: return scope[im] + name[len(im):] return None", "None def _known_assignment_type(self, target): \"\"\" List of types we support for the RHS", "__builtin__ import types import ast from contextlib import contextmanager from pprint import pprint", "\"__builtin__.bool\" elif node.id == 'None': return \"__builtin__.None\" if type(node) == ast.Lambda: return \"LambdaType\"", "return hasattr(types, name) def __getitem__(self, key): if key in self: return \"types.%s\" %", "whether we are in a function. We resolved the LHS and RHS to", "# Check to see if the RHS is a known assignment type if", "isinstance(n, ast.Name): parts.append(n.id) parts.reverse() return '.'.join(parts) if isinstance(n, ast.Call): return self._get_name(n) if is_literal(n):", "key in self: return \"types.%s\" % key raise KeyError(\"%s is not a type\"", "complex: \"complex\", } def is_literal(node): if type(node) in LITERAL_NODES: return True if isinstance(node,", "scope in scopes: if self._check_in(name, scope): return scopeName, self._resolve_in(name, scope) return None, None", "utils import node_to_code LITERAL_NODES = { ast.Num: \"number\", ast.Str: \"str\", ast.List: \"list\", ast.Tuple:", "True if isinstance(node, ast.Name): if node.id in ('True', 'False', 'None'): return True if", "t): return name return LITERAL_NODES[type(node)] else: return LITERAL_NODES[type(node)] if type(node) == ast.Name: if", "self.in_func = True def end_FunctionDef(self): self.func_scope = {} self.in_func = False def start_ClassDef(self):", "= full_import else: self.imports[im.asname] = full_import def visit_Assign(self, node): \"\"\" On an assignment", "'False'): return \"__builtin__.bool\" elif node.id == 'None': return \"__builtin__.None\" if type(node) == ast.Lambda:", "self.in_class and rhs.startswith(\"self.\"): if self._check_in(rhs, self.class_scope): scope[lhs] = self._resolve_in(rhs, self.class_scope) # Check builtins", "in a class lhs starts with \"self.\" if self.in_class and lhs.startswith(\"self.\"): scope =", "\"unknown\" class TypeScope(object): def __contains__(self, name): return hasattr(types, name) def __getitem__(self, key): if", "return scope[im] + name[len(im):] return None def _known_assignment_type(self, target): \"\"\" List of types", "{} self.in_class = False def visit_Import(self, node): \"\"\" Handle imports. \"\"\" for im", "in selector expressions, # e.g os.path.join, they are chained together as a series", "return True if type(node) is ast.Lambda: return True return False def literal_value(node): if", "LITERAL_NODES: if type(node) == ast.Num: for t, name in NUMBER_TYPES.items(): if isinstance(node.n, t):", "\"\"\" if is_literal(node): return node_to_type(node) n = node if isinstance(node, ast.Call): n =", "\"\"\" # Check to see if we are in a function. scope =", "together as a series of # ast.Attribute nodes. 
Extract them one by one.", "('True', 'False', 'None'): return True if type(node) is ast.Lambda: return True return False", "t, name in NUMBER_TYPES.items(): if isinstance(node.n, t): return name return LITERAL_NODES[type(node)] else: return", "a known assignment type if not self._known_assignment_type(node.value): return # Only consider single-assignments for", "contextlib import contextmanager from pprint import pprint from utils import node_to_code LITERAL_NODES =", "key raise KeyError(\"%s is not a type\" % key) class BuiltinScope(object): def __contains__(self,", "see if we are in a function. scope = self.global_scope if self.in_func: scope", "are chained together as a series of # ast.Attribute nodes. Extract them one", "def start_ClassDef(self): self.class_scope = {} self.in_class = True def end_ClassDef(self): self.class_scope = {}", "or global scope based on whether we are in a function. We resolved", "in scope: return scope[name] parts = name.split(\".\") for i in range(1, len(parts)): im", "Remove re-assignments that were unrecognized elif lhs in scope: del scope[lhs] def _check_in(self,", "= ASTCodeGenerator() gen.visit(node) return gen.line except Exception as e: return \"\" def node_to_type(node):", "= False def start_ClassDef(self): self.class_scope = {} self.in_class = True def end_ClassDef(self): self.class_scope", "Check current scope (global, func or class) elif self._check_in(rhs, scope): scope[lhs] = self._resolve_in(rhs,", "if key in self: return \"__builtin__.%s\" % key raise KeyError(\"%s not a builtin\"", "expression, we add variables to local or global scope based on whether we", "'False', 'None'): return True if type(node) is ast.Lambda: return True return False def", "visit_ImportFrom(self, node): \"\"\" Handle from imports. \"\"\" if node.module is None: return for", "e.g os.path.join, they are chained together as a series of # ast.Attribute nodes.", "add variables to local or global scope based on whether we are in", "TypeScope(object): def __contains__(self, name): return hasattr(types, name) def __getitem__(self, key): if key in", "range(1, len(parts)): im = '.'.join(parts[:-i]) if im in scope: return True return False", "full name. \"\"\" if is_literal(node): return node_to_type(node) n = node if isinstance(node, ast.Call):", "self.builtin_scope): scope[lhs] = self._resolve_in(rhs, self.builtin_scope) # Check literals elif rhs in LITERAL_NODES.values(): scope[lhs]", "scope[lhs] = rhs # Remove re-assignments that were unrecognized elif lhs in scope:", "# Check literals elif rhs in LITERAL_NODES.values(): scope[lhs] = rhs # Remove re-assignments", "self.class_scope = {} self.in_class = True def end_ClassDef(self): self.class_scope = {} self.in_class =", "name.split(\".\") for i in range(1, len(parts)): im = '.'.join(parts[:-i]) if im in scope:", "return True return False def _resolve_in(self, name, scope): if name in scope: return", "assignment expressions. 
\"\"\" return (is_literal(target) or isinstance(target, (ast.Call, ast.Attribute))) def _get_name(self, node): \"\"\"", "in LITERAL_NODES: return True if isinstance(node, ast.Name): if node.id in ('True', 'False', 'None'):", "ast.Str): return node.s if (isinstance(node, ast.Name) and node.id in ('True', 'False', 'None')): return", "not name: return None, None scopes = [('types', self.type_scope), ('builtin', self.builtin_scope), ('imports', self.imports),", "Check literals elif rhs in LITERAL_NODES.values(): scope[lhs] = rhs # Remove re-assignments that", "of types we support for the RHS in assignment expressions. \"\"\" return (is_literal(target)", "reverse the parts we added above. if isinstance(n, ast.Name): parts.append(n.id) parts.reverse() return '.'.join(parts)", "if the LHS is already in scope, we remove it. \"\"\" # Check", "ast.Num): return str(node.n) if isinstance(node, ast.Str): return node.s if (isinstance(node, ast.Name) and node.id", "self.start_FunctionDef() yield self.end_FunctionDef() @contextmanager def class_context(self): self.start_ClassDef() yield self.end_ClassDef() def start_FunctionDef(self): self.func_scope =", "node_to_code LITERAL_NODES = { ast.Num: \"number\", ast.Str: \"str\", ast.List: \"list\", ast.Tuple: \"tuple\", ast.Set:", "self.class_scope) @contextmanager def function_context(self): self.start_FunctionDef() yield self.end_FunctionDef() @contextmanager def class_context(self): self.start_ClassDef() yield self.end_ClassDef()", "and rhs.startswith(\"self.\"): if self._check_in(rhs, self.class_scope): scope[lhs] = self._resolve_in(rhs, self.class_scope) # Check builtins elif", "(node.id == 'True' or node.id == 'False'): return \"__builtin__.bool\" elif node.id == 'None':", "the call. # We just have to reverse the parts we added above.", "class scope elif self.in_class and rhs.startswith(\"self.\"): if self._check_in(rhs, self.class_scope): scope[lhs] = self._resolve_in(rhs, self.class_scope)", "class BuiltinScope(object): def __contains__(self, name): return hasattr(__builtin__, name) def __getitem__(self, key): if key", "= self.class_scope # Check imports if self._check_in(rhs, self.imports): scope[lhs] = self._resolve_in(rhs, self.imports) #", "None, None def in_class_scope(self, node): name = self._get_name(node) if not name: return False", "= BuiltinScope() self.in_func = False self.in_class = False def resolve_node(self, node): name =", "def is_literal(node): if type(node) in LITERAL_NODES: return True if isinstance(node, ast.Name): if node.id", "pprint from utils import node_to_code LITERAL_NODES = { ast.Num: \"number\", ast.Str: \"str\", ast.List:", "self: return \"__builtin__.%s\" % key raise KeyError(\"%s not a builtin\" % key) class", "scopes.reverse() for scopeName, scope in scopes: if self._check_in(name, scope): return scopeName, self._resolve_in(name, scope)", "if isinstance(n, ast.Call): return self._get_name(n) if is_literal(n): nodeType = node_to_type(n) parts.append(nodeType) parts.reverse() return", "imports. \"\"\" if node.module is None: return for im in node.names: full_import =", "RHS is from an imported statement, we add it into scope bound to", "node to its full name. 
\"\"\" if is_literal(node): return node_to_type(node) n = node", "is_literal(node): if type(node) in LITERAL_NODES: return True if isinstance(node, ast.Name): if node.id in", "in self: return \"types.%s\" % key raise KeyError(\"%s is not a type\" %", "return # Only consider single-assignments for now if len(node.targets) != 1: return #", "== ast.Name: if (node.id == 'True' or node.id == 'False'): return \"__builtin__.bool\" elif", "name return LITERAL_NODES[type(node)] else: return LITERAL_NODES[type(node)] if type(node) == ast.Name: if (node.id ==", "\"list\", ast.Tuple: \"tuple\", ast.Set: \"set\", ast.Dict: \"dict\", } NUMBER_TYPES = { int: \"int\",", "function_context(self): self.start_FunctionDef() yield self.end_FunctionDef() @contextmanager def class_context(self): self.start_ClassDef() yield self.end_ClassDef() def start_FunctionDef(self): self.func_scope", "is already in scope, we remove it. \"\"\" # Check to see if", "# Check imports if self._check_in(rhs, self.imports): scope[lhs] = self._resolve_in(rhs, self.imports) # Check current", "have a # all the components of the selector expression that make up", "that make up the call. # We just have to reverse the parts", "== 'False'): return \"__builtin__.bool\" elif node.id == 'None': return \"__builtin__.None\" if type(node) ==", "self._get_name(node.targets[0]) rhs = self._get_name(node.value) if lhs is None or rhs is None: return", "\"number\", ast.Str: \"str\", ast.List: \"list\", ast.Tuple: \"tuple\", ast.Set: \"set\", ast.Dict: \"dict\", } NUMBER_TYPES", "def visit_Import(self, node): \"\"\" Handle imports. \"\"\" for im in node.names: if im.asname", "in node.names: if im.asname is None: self.imports[im.name] = im.name else: self.imports[im.asname] = im.name", "[] while isinstance(n, ast.Attribute): # For function calls that are nested in selector", "calls that are nested in selector expressions, # e.g os.path.join, they are chained", "scope): scope[lhs] = self._resolve_in(rhs, scope) # Check rhs in class scope elif self.in_class", "class) elif self._check_in(rhs, scope): scope[lhs] = self._resolve_in(rhs, scope) # Check rhs in class", "in scope: return True parts = name.split(\".\") for i in range(1, len(parts)): im", "yield self.end_FunctionDef() @contextmanager def class_context(self): self.start_ClassDef() yield self.end_ClassDef() def start_FunctionDef(self): self.func_scope = {}", "def __contains__(self, name): return hasattr(__builtin__, name) def __getitem__(self, key): if key in self:", "self._check_in(name, self.class_scope) @contextmanager def function_context(self): self.start_FunctionDef() yield self.end_FunctionDef() @contextmanager def class_context(self): self.start_ClassDef() yield", "\"\"\" Handle from imports. 
\"\"\" if node.module is None: return for im in", "if im.asname is None: self.imports[im.name] = full_import else: self.imports[im.asname] = full_import def visit_Assign(self,", "Check to see if the RHS is a known assignment type if not", "# Check current scope (global, func or class) elif self._check_in(rhs, scope): scope[lhs] =", "= self._resolve_in(rhs, self.builtin_scope) # Check literals elif rhs in LITERAL_NODES.values(): scope[lhs] = rhs", "re-assignments that were unrecognized elif lhs in scope: del scope[lhs] def _check_in(self, name,", "self.type_scope = TypeScope() self.builtin_scope = BuiltinScope() self.in_func = False self.in_class = False def", "\"float\", long: \"long\", complex: \"complex\", } def is_literal(node): if type(node) in LITERAL_NODES: return", "True def end_ClassDef(self): self.class_scope = {} self.in_class = False def visit_Import(self, node): \"\"\"", "def _resolve_in(self, name, scope): if name in scope: return scope[name] parts = name.split(\".\")", "node if isinstance(node, ast.Call): n = node.func parts = [] while isinstance(n, ast.Attribute):", "LHS variable - If the RHS is a literal, we add it into", "gen.line except Exception as e: return \"\" def node_to_type(node): if type(node) in LITERAL_NODES:", "% key) class Context(object): def __init__(self): self.imports = {} self.func_scope = {} self.class_scope", "return name return LITERAL_NODES[type(node)] else: return LITERAL_NODES[type(node)] if type(node) == ast.Name: if (node.id", "name) def __getitem__(self, key): if key in self: return \"__builtin__.%s\" % key raise", "= name.split(\".\") for i in range(1, len(parts)): im = '.'.join(parts[:-i]) if im in", "isinstance(n, ast.Call): return self._get_name(n) if is_literal(n): nodeType = node_to_type(n) parts.append(nodeType) parts.reverse() return '.'.join(parts)", "actually ended up at an ast.Name node, we have a # all the", "one by one. parts.append(n.attr) n = n.value # If we actually ended up", "# e.g os.path.join, they are chained together as a series of # ast.Attribute", "ast.Name): if node.id in ('True', 'False', 'None'): return True if type(node) is ast.Lambda:", "= {} self.global_scope = {} self.type_scope = TypeScope() self.builtin_scope = BuiltinScope() self.in_func =", "def literal_value(node): if not is_literal(node): return '' if isinstance(node, ast.Num): return str(node.n) if", "parts.reverse() return '.'.join(parts) if isinstance(n, ast.Call): return self._get_name(n) if is_literal(n): nodeType = node_to_type(n)", "name in scope: return scope[name] parts = name.split(\".\") for i in range(1, len(parts)):", "make up the call. # We just have to reverse the parts we", "long: \"long\", complex: \"complex\", } def is_literal(node): if type(node) in LITERAL_NODES: return True", "\"LambdaType\" return \"unknown\" class TypeScope(object): def __contains__(self, name): return hasattr(types, name) def __getitem__(self,", "if key in self: return \"types.%s\" % key raise KeyError(\"%s is not a", "add it into scope bound to the provided LHS variable - Otherwise, if", "ast.Attribute))) def _get_name(self, node): \"\"\" Resolve a node to its full name. \"\"\"", "LHS is already in scope, we remove it. \"\"\" # Check to see", "self.in_func: scope = self.func_scope # Check to see if the RHS is a", "return node_to_code(node) return '' def _process_lambda(node): try: gen = ASTCodeGenerator() gen.visit(node) return gen.line", "node): \"\"\" Resolve a node to its full name. 
\"\"\" if is_literal(node): return", "ast.Name): parts.append(n.id) parts.reverse() return '.'.join(parts) if isinstance(n, ast.Call): return self._get_name(n) if is_literal(n): nodeType", "name = self._get_name(node) if not name: return None, None scopes = [('types', self.type_scope),", "def _known_assignment_type(self, target): \"\"\" List of types we support for the RHS in", "into scope bound to the provided LHS variable - If the RHS is", "# ast.Attribute nodes. Extract them one by one. parts.append(n.attr) n = n.value #", "{} self.global_scope = {} self.type_scope = TypeScope() self.builtin_scope = BuiltinScope() self.in_func = False", "function calls that are nested in selector expressions, # e.g os.path.join, they are", "current scope (global, func or class) elif self._check_in(rhs, scope): scope[lhs] = self._resolve_in(rhs, scope)", "NUMBER_TYPES.items(): if isinstance(node.n, t): return name return LITERAL_NODES[type(node)] else: return LITERAL_NODES[type(node)] if type(node)", "gen = ASTCodeGenerator() gen.visit(node) return gen.line except Exception as e: return \"\" def", "im in scope: return True return False def _resolve_in(self, name, scope): if name", "Exception as e: return \"\" def node_to_type(node): if type(node) in LITERAL_NODES: if type(node)", "scope = self.class_scope # Check imports if self._check_in(rhs, self.imports): scope[lhs] = self._resolve_in(rhs, self.imports)", "if type(node) in LITERAL_NODES: if type(node) == ast.Num: for t, name in NUMBER_TYPES.items():", "literal_value(node): if not is_literal(node): return '' if isinstance(node, ast.Num): return str(node.n) if isinstance(node,", "support for the RHS in assignment expressions. \"\"\" return (is_literal(target) or isinstance(target, (ast.Call,", "For function calls that are nested in selector expressions, # e.g os.path.join, they", "('builtin', self.builtin_scope), ('imports', self.imports), ('global', self.global_scope)] if self.in_func: scopes.append(('function', self.func_scope)) if self.in_class: scopes.append(('class',", "or rhs is None: return # Use class scope if we are in", "and RHS to their full names. Then: - If the RHS is from", "scopes.append(('function', self.func_scope)) if self.in_class: scopes.append(('class', self.class_scope)) scopes.reverse() for scopeName, scope in scopes: if", "if not self._known_assignment_type(node.value): return # Only consider single-assignments for now if len(node.targets) !=", "we actually ended up at an ast.Name node, we have a # all", "a literal, we add it into scope bound to the provided LHS variable", "return False def _resolve_in(self, name, scope): if name in scope: return scope[name] parts", "for t, name in NUMBER_TYPES.items(): if isinstance(node.n, t): return name return LITERAL_NODES[type(node)] else:", "if type(node) is ast.Lambda: return True return False def literal_value(node): if not is_literal(node):", "__getitem__(self, key): if key in self: return \"__builtin__.%s\" % key raise KeyError(\"%s not", "chained together as a series of # ast.Attribute nodes. 
Extract them one by", "\"dict\", } NUMBER_TYPES = { int: \"int\", float: \"float\", long: \"long\", complex: \"complex\",", "name in scope: return True parts = name.split(\".\") for i in range(1, len(parts)):", "None, None scopes = [('types', self.type_scope), ('builtin', self.builtin_scope), ('imports', self.imports), ('global', self.global_scope)] if", "type(node) in LITERAL_NODES: return True if isinstance(node, ast.Name): if node.id in ('True', 'False',", "__getitem__(self, key): if key in self: return \"types.%s\" % key raise KeyError(\"%s is", "self.imports), ('global', self.global_scope)] if self.in_func: scopes.append(('function', self.func_scope)) if self.in_class: scopes.append(('class', self.class_scope)) scopes.reverse() for", "assignment expression, we add variables to local or global scope based on whether", "lhs starts with \"self.\" if self.in_class and lhs.startswith(\"self.\"): scope = self.class_scope # Check", "def _check_in(self, name, scope): if name in scope: return True parts = name.split(\".\")", "= n.value # If we actually ended up at an ast.Name node, we", "return node.s if (isinstance(node, ast.Name) and node.id in ('True', 'False', 'None')): return node.id", "= self._resolve_in(rhs, self.class_scope) # Check builtins elif self._check_in(rhs, self.builtin_scope): scope[lhs] = self._resolve_in(rhs, self.builtin_scope)", "if type(node) == ast.Name: if (node.id == 'True' or node.id == 'False'): return", "self.in_class and lhs.startswith(\"self.\"): scope = self.class_scope # Check imports if self._check_in(rhs, self.imports): scope[lhs]", "self.in_class: scopes.append(('class', self.class_scope)) scopes.reverse() for scopeName, scope in scopes: if self._check_in(name, scope): return", "return scopeName, self._resolve_in(name, scope) return None, None def in_class_scope(self, node): name = self._get_name(node)", "are in a class lhs starts with \"self.\" if self.in_class and lhs.startswith(\"self.\"): scope", "builtins elif self._check_in(rhs, self.builtin_scope): scope[lhs] = self._resolve_in(rhs, self.builtin_scope) # Check literals elif rhs", "contextmanager from pprint import pprint from utils import node_to_code LITERAL_NODES = { ast.Num:", "node.id == 'False'): return \"__builtin__.bool\" elif node.id == 'None': return \"__builtin__.None\" if type(node)", "is None: self.imports[im.name] = im.name else: self.imports[im.asname] = im.name def visit_ImportFrom(self, node): \"\"\"", "(global, func or class) elif self._check_in(rhs, scope): scope[lhs] = self._resolve_in(rhs, scope) # Check", "else: self.imports[im.asname] = im.name def visit_ImportFrom(self, node): \"\"\" Handle from imports. 
\"\"\" if", "literals elif rhs in LITERAL_NODES.values(): scope[lhs] = rhs # Remove re-assignments that were", "import types import ast from contextlib import contextmanager from pprint import pprint from", "if im in scope: return scope[im] + name[len(im):] return None def _known_assignment_type(self, target):", "node.names: if im.asname is None: self.imports[im.name] = im.name else: self.imports[im.asname] = im.name def", "if type(node) == ast.Num: for t, name in NUMBER_TYPES.items(): if isinstance(node.n, t): return", "rhs in LITERAL_NODES.values(): scope[lhs] = rhs # Remove re-assignments that were unrecognized elif", "scope[name] parts = name.split(\".\") for i in range(1, len(parts)): im = '.'.join(parts[:-i]) if", "# all the components of the selector expression that make up the call.", "scope) return None, None def in_class_scope(self, node): name = self._get_name(node) if not name:", "key): if key in self: return \"types.%s\" % key raise KeyError(\"%s is not", "self._check_in(rhs, scope): scope[lhs] = self._resolve_in(rhs, scope) # Check rhs in class scope elif", "scopes = [('types', self.type_scope), ('builtin', self.builtin_scope), ('imports', self.imports), ('global', self.global_scope)] if self.in_func: scopes.append(('function',", "return # Use class scope if we are in a class lhs starts", "parts = [] while isinstance(n, ast.Attribute): # For function calls that are nested", "the RHS is from an imported statement, we add it into scope bound", "scope[lhs] = self._resolve_in(rhs, self.builtin_scope) # Check literals elif rhs in LITERAL_NODES.values(): scope[lhs] =", "self.imports[im.name] = full_import else: self.imports[im.asname] = full_import def visit_Assign(self, node): \"\"\" On an", "it. \"\"\" # Check to see if we are in a function. scope", "them one by one. parts.append(n.attr) n = n.value # If we actually ended", "ast.Name) and node.id in ('True', 'False', 'None')): return node.id if isinstance(node, ast.Lambda): return", "is_literal(node): return node_to_type(node) n = node if isinstance(node, ast.Call): n = node.func parts", "if self._check_in(name, scope): return scopeName, self._resolve_in(name, scope) return None, None def in_class_scope(self, node):", "TypeScope() self.builtin_scope = BuiltinScope() self.in_func = False self.in_class = False def resolve_node(self, node):", "from contextlib import contextmanager from pprint import pprint from utils import node_to_code LITERAL_NODES", "False self.in_class = False def resolve_node(self, node): name = self._get_name(node) if not name:", "= {} self.in_class = True def end_ClassDef(self): self.class_scope = {} self.in_class = False", "Check rhs in class scope elif self.in_class and rhs.startswith(\"self.\"): if self._check_in(rhs, self.class_scope): scope[lhs]", "node.func parts = [] while isinstance(n, ast.Attribute): # For function calls that are", "ast.Call): n = node.func parts = [] while isinstance(n, ast.Attribute): # For function", "return for im in node.names: full_import = node.module + '.' 
+ im.name if", "name = self._get_name(node) if not name: return False return self._check_in(name, self.class_scope) @contextmanager def", "type(node) in LITERAL_NODES: if type(node) == ast.Num: for t, name in NUMBER_TYPES.items(): if", "name) def __getitem__(self, key): if key in self: return \"types.%s\" % key raise", "in scope: return scope[im] + name[len(im):] return None def _known_assignment_type(self, target): \"\"\" List", "type(node) is ast.Lambda: return True return False def literal_value(node): if not is_literal(node): return", "__contains__(self, name): return hasattr(__builtin__, name) def __getitem__(self, key): if key in self: return", "an assignment expression, we add variables to local or global scope based on", "# Check builtins elif self._check_in(rhs, self.builtin_scope): scope[lhs] = self._resolve_in(rhs, self.builtin_scope) # Check literals", "unrecognized elif lhs in scope: del scope[lhs] def _check_in(self, name, scope): if name", "elif lhs in scope: del scope[lhs] def _check_in(self, name, scope): if name in", "types we support for the RHS in assignment expressions. \"\"\" return (is_literal(target) or", "\"types.%s\" % key raise KeyError(\"%s is not a type\" % key) class BuiltinScope(object):", "is None: return for im in node.names: full_import = node.module + '.' +", "= {} self.in_func = False def start_ClassDef(self): self.class_scope = {} self.in_class = True", "if (isinstance(node, ast.Name) and node.id in ('True', 'False', 'None')): return node.id if isinstance(node,", "= self._get_name(node) if not name: return None, None scopes = [('types', self.type_scope), ('builtin',", "variable - Otherwise, if the LHS is already in scope, we remove it.", "isinstance(node, ast.Name): if node.id in ('True', 'False', 'None'): return True if type(node) is", "full_import = node.module + '.' + im.name if im.asname is None: self.imports[im.name] =", "self._resolve_in(rhs, self.class_scope) # Check builtins elif self._check_in(rhs, self.builtin_scope): scope[lhs] = self._resolve_in(rhs, self.builtin_scope) #", "of the selector expression that make up the call. # We just have", "node): name = self._get_name(node) if not name: return False return self._check_in(name, self.class_scope) @contextmanager", "imported statement, we add it into scope bound to the provided LHS variable", "series of # ast.Attribute nodes. Extract them one by one. parts.append(n.attr) n =", "im.name else: self.imports[im.asname] = im.name def visit_ImportFrom(self, node): \"\"\" Handle from imports. \"\"\"", "remove it. 
\"\"\" # Check to see if we are in a function.", "hasattr(__builtin__, name) def __getitem__(self, key): if key in self: return \"__builtin__.%s\" % key", "that were unrecognized elif lhs in scope: del scope[lhs] def _check_in(self, name, scope):", "RHS is a known assignment type if not self._known_assignment_type(node.value): return # Only consider", "__init__(self): self.imports = {} self.func_scope = {} self.class_scope = {} self.global_scope = {}", "def visit_Assign(self, node): \"\"\" On an assignment expression, we add variables to local", "in range(1, len(parts)): im = '.'.join(parts[:-i]) if im in scope: return True return", "= { ast.Num: \"number\", ast.Str: \"str\", ast.List: \"list\", ast.Tuple: \"tuple\", ast.Set: \"set\", ast.Dict:", "== ast.Lambda: return \"LambdaType\" return \"unknown\" class TypeScope(object): def __contains__(self, name): return hasattr(types,", "variables to local or global scope based on whether we are in a", "self.class_scope # Check imports if self._check_in(rhs, self.imports): scope[lhs] = self._resolve_in(rhs, self.imports) # Check", "provided LHS variable - Otherwise, if the LHS is already in scope, we", "self._get_name(node) if not name: return None, None scopes = [('types', self.type_scope), ('builtin', self.builtin_scope),", "node.id in ('True', 'False', 'None'): return True if type(node) is ast.Lambda: return True", "n = node.func parts = [] while isinstance(n, ast.Attribute): # For function calls", "{ ast.Num: \"number\", ast.Str: \"str\", ast.List: \"list\", ast.Tuple: \"tuple\", ast.Set: \"set\", ast.Dict: \"dict\",", "if not is_literal(node): return '' if isinstance(node, ast.Num): return str(node.n) if isinstance(node, ast.Str):", "scope elif self.in_class and rhs.startswith(\"self.\"): if self._check_in(rhs, self.class_scope): scope[lhs] = self._resolve_in(rhs, self.class_scope) #", "isinstance(node, ast.Str): return node.s if (isinstance(node, ast.Name) and node.id in ('True', 'False', 'None')):", "return \"unknown\" class TypeScope(object): def __contains__(self, name): return hasattr(types, name) def __getitem__(self, key):", "if type(node) == ast.Lambda: return \"LambdaType\" return \"unknown\" class TypeScope(object): def __contains__(self, name):", "raise KeyError(\"%s is not a type\" % key) class BuiltinScope(object): def __contains__(self, name):", "Only consider single-assignments for now if len(node.targets) != 1: return # Resolve left-hand", "Otherwise, if the LHS is already in scope, we remove it. \"\"\" #", "# Remove re-assignments that were unrecognized elif lhs in scope: del scope[lhs] def", "ast.Attribute): # For function calls that are nested in selector expressions, # e.g", "isinstance(node, ast.Lambda): return node_to_code(node) return '' def _process_lambda(node): try: gen = ASTCodeGenerator() gen.visit(node)", "def resolve_node(self, node): name = self._get_name(node) if not name: return None, None scopes", "node): \"\"\" Handle imports. \"\"\" for im in node.names: if im.asname is None:", "+ im.name if im.asname is None: self.imports[im.name] = full_import else: self.imports[im.asname] = full_import", "by one. parts.append(n.attr) n = n.value # If we actually ended up at", "\"__builtin__.%s\" % key raise KeyError(\"%s not a builtin\" % key) class Context(object): def", "resolved the LHS and RHS to their full names. 
Then: - If the", "parts.append(n.attr) n = n.value # If we actually ended up at an ast.Name", "node.id in ('True', 'False', 'None')): return node.id if isinstance(node, ast.Lambda): return node_to_code(node) return", "if isinstance(node.n, t): return name return LITERAL_NODES[type(node)] else: return LITERAL_NODES[type(node)] if type(node) ==", "im.name if im.asname is None: self.imports[im.name] = full_import else: self.imports[im.asname] = full_import def", "to local or global scope based on whether we are in a function.", "return False def literal_value(node): if not is_literal(node): return '' if isinstance(node, ast.Num): return", "if self.in_func: scopes.append(('function', self.func_scope)) if self.in_class: scopes.append(('class', self.class_scope)) scopes.reverse() for scopeName, scope in", "the LHS and RHS to their full names. Then: - If the RHS", "i in range(1, len(parts)): im = '.'.join(parts[:-i]) if im in scope: return True", "return \"types.%s\" % key raise KeyError(\"%s is not a type\" % key) class", "is None or rhs is None: return # Use class scope if we", "return LITERAL_NODES[type(node)] else: return LITERAL_NODES[type(node)] if type(node) == ast.Name: if (node.id == 'True'", "\"\"\" Resolve a node to its full name. \"\"\" if is_literal(node): return node_to_type(node)", "we add it into scope bound to the provided LHS variable - Otherwise,", "'.'.join(parts) if isinstance(n, ast.Call): return self._get_name(n) if is_literal(n): nodeType = node_to_type(n) parts.append(nodeType) parts.reverse()", "\"\"\" if node.module is None: return for im in node.names: full_import = node.module", "if self.in_class: scopes.append(('class', self.class_scope)) scopes.reverse() for scopeName, scope in scopes: if self._check_in(name, scope):", "rhs in class scope elif self.in_class and rhs.startswith(\"self.\"): if self._check_in(rhs, self.class_scope): scope[lhs] =", "self.builtin_scope) # Check literals elif rhs in LITERAL_NODES.values(): scope[lhs] = rhs # Remove", "if self.in_class and lhs.startswith(\"self.\"): scope = self.class_scope # Check imports if self._check_in(rhs, self.imports):", "self.class_scope = {} self.in_class = False def visit_Import(self, node): \"\"\" Handle imports. \"\"\"", "= node.module + '.' + im.name if im.asname is None: self.imports[im.name] = full_import", "False def resolve_node(self, node): name = self._get_name(node) if not name: return None, None", "= False def resolve_node(self, node): name = self._get_name(node) if not name: return None,", "expression lhs = self._get_name(node.targets[0]) rhs = self._get_name(node.value) if lhs is None or rhs", "_process_lambda(node): try: gen = ASTCodeGenerator() gen.visit(node) return gen.line except Exception as e: return", "return \"__builtin__.%s\" % key raise KeyError(\"%s not a builtin\" % key) class Context(object):", "we are in a function. scope = self.global_scope if self.in_func: scope = self.func_scope", "left-hand and right-hand side of assignment expression lhs = self._get_name(node.targets[0]) rhs = self._get_name(node.value)", "to its full name. \"\"\" if is_literal(node): return node_to_type(node) n = node if", "is not a type\" % key) class BuiltinScope(object): def __contains__(self, name): return hasattr(__builtin__,", "one. 
parts.append(n.attr) n = n.value # If we actually ended up at an", "= self.global_scope if self.in_func: scope = self.func_scope # Check to see if the", "return True return False def literal_value(node): if not is_literal(node): return '' if isinstance(node,", "node.id == 'None': return \"__builtin__.None\" if type(node) == ast.Lambda: return \"LambdaType\" return \"unknown\"", "full_import def visit_Assign(self, node): \"\"\" On an assignment expression, we add variables to", "the provided LHS variable - If the RHS is a literal, we add", "if we are in a class lhs starts with \"self.\" if self.in_class and", "KeyError(\"%s is not a type\" % key) class BuiltinScope(object): def __contains__(self, name): return", "self._check_in(name, scope): return scopeName, self._resolve_in(name, scope) return None, None def in_class_scope(self, node): name", "local or global scope based on whether we are in a function. We", "if len(node.targets) != 1: return # Resolve left-hand and right-hand side of assignment", "from utils import node_to_code LITERAL_NODES = { ast.Num: \"number\", ast.Str: \"str\", ast.List: \"list\",", "= self.func_scope # Check to see if the RHS is a known assignment", "LITERAL_NODES = { ast.Num: \"number\", ast.Str: \"str\", ast.List: \"list\", ast.Tuple: \"tuple\", ast.Set: \"set\",", "If the RHS is from an imported statement, we add it into scope", "class lhs starts with \"self.\" if self.in_class and lhs.startswith(\"self.\"): scope = self.class_scope #", "n = node if isinstance(node, ast.Call): n = node.func parts = [] while", "return # Resolve left-hand and right-hand side of assignment expression lhs = self._get_name(node.targets[0])", "in LITERAL_NODES.values(): scope[lhs] = rhs # Remove re-assignments that were unrecognized elif lhs", "del scope[lhs] def _check_in(self, name, scope): if name in scope: return True parts", "scopeName, scope in scopes: if self._check_in(name, scope): return scopeName, self._resolve_in(name, scope) return None,", "} NUMBER_TYPES = { int: \"int\", float: \"float\", long: \"long\", complex: \"complex\", }", "import __builtin__ import types import ast from contextlib import contextmanager from pprint import", "in NUMBER_TYPES.items(): if isinstance(node.n, t): return name return LITERAL_NODES[type(node)] else: return LITERAL_NODES[type(node)] if", "= True def end_FunctionDef(self): self.func_scope = {} self.in_func = False def start_ClassDef(self): self.class_scope", "# We just have to reverse the parts we added above. if isinstance(n,", "just have to reverse the parts we added above. if isinstance(n, ast.Name): parts.append(n.id)", "None: return for im in node.names: full_import = node.module + '.' + im.name", "Extract them one by one. 
parts.append(n.attr) n = n.value # If we actually", "type(node) == ast.Lambda: return \"LambdaType\" return \"unknown\" class TypeScope(object): def __contains__(self, name): return", "('imports', self.imports), ('global', self.global_scope)] if self.in_func: scopes.append(('function', self.func_scope)) if self.in_class: scopes.append(('class', self.class_scope)) scopes.reverse()", "def node_to_type(node): if type(node) in LITERAL_NODES: if type(node) == ast.Num: for t, name", "\"long\", complex: \"complex\", } def is_literal(node): if type(node) in LITERAL_NODES: return True if", "key): if key in self: return \"__builtin__.%s\" % key raise KeyError(\"%s not a", "self._check_in(rhs, self.class_scope): scope[lhs] = self._resolve_in(rhs, self.class_scope) # Check builtins elif self._check_in(rhs, self.builtin_scope): scope[lhs]", "lhs.startswith(\"self.\"): scope = self.class_scope # Check imports if self._check_in(rhs, self.imports): scope[lhs] = self._resolve_in(rhs,", "Resolve left-hand and right-hand side of assignment expression lhs = self._get_name(node.targets[0]) rhs =", "\"\"\" Handle imports. \"\"\" for im in node.names: if im.asname is None: self.imports[im.name]", "the LHS is already in scope, we remove it. \"\"\" # Check to", "ast.Dict: \"dict\", } NUMBER_TYPES = { int: \"int\", float: \"float\", long: \"long\", complex:", "builtin\" % key) class Context(object): def __init__(self): self.imports = {} self.func_scope = {}", "self._known_assignment_type(node.value): return # Only consider single-assignments for now if len(node.targets) != 1: return", "not is_literal(node): return '' if isinstance(node, ast.Num): return str(node.n) if isinstance(node, ast.Str): return", "None: self.imports[im.name] = full_import else: self.imports[im.asname] = full_import def visit_Assign(self, node): \"\"\" On", "\"complex\", } def is_literal(node): if type(node) in LITERAL_NODES: return True if isinstance(node, ast.Name):", "False def visit_Import(self, node): \"\"\" Handle imports. \"\"\" for im in node.names: if", "self._resolve_in(rhs, self.builtin_scope) # Check literals elif rhs in LITERAL_NODES.values(): scope[lhs] = rhs #", "for the RHS in assignment expressions. \"\"\" return (is_literal(target) or isinstance(target, (ast.Call, ast.Attribute)))", "If the RHS is a literal, we add it into scope bound to", "of # ast.Attribute nodes. Extract them one by one. parts.append(n.attr) n = n.value", "is a known assignment type if not self._known_assignment_type(node.value): return # Only consider single-assignments", "node, we have a # all the components of the selector expression that", "visit_Import(self, node): \"\"\" Handle imports. \"\"\" for im in node.names: if im.asname is", "not self._known_assignment_type(node.value): return # Only consider single-assignments for now if len(node.targets) != 1:", "LHS variable - Otherwise, if the LHS is already in scope, we remove", "if node.id in ('True', 'False', 'None'): return True if type(node) is ast.Lambda: return", "== 'True' or node.id == 'False'): return \"__builtin__.bool\" elif node.id == 'None': return", "were unrecognized elif lhs in scope: del scope[lhs] def _check_in(self, name, scope): if", "def end_FunctionDef(self): self.func_scope = {} self.in_func = False def start_ClassDef(self): self.class_scope = {}", "a function. scope = self.global_scope if self.in_func: scope = self.func_scope # Check to", "if we are in a function. scope = self.global_scope if self.in_func: scope =", "_get_name(self, node): \"\"\" Resolve a node to its full name. 
\"\"\" if is_literal(node):", "name in NUMBER_TYPES.items(): if isinstance(node.n, t): return name return LITERAL_NODES[type(node)] else: return LITERAL_NODES[type(node)]", "self.class_scope)) scopes.reverse() for scopeName, scope in scopes: if self._check_in(name, scope): return scopeName, self._resolve_in(name,", "bound to the provided LHS variable - Otherwise, if the LHS is already", "self.global_scope if self.in_func: scope = self.func_scope # Check to see if the RHS", "have to reverse the parts we added above. if isinstance(n, ast.Name): parts.append(n.id) parts.reverse()", "the components of the selector expression that make up the call. # We", "self.func_scope # Check to see if the RHS is a known assignment type", "'False', 'None')): return node.id if isinstance(node, ast.Lambda): return node_to_code(node) return '' def _process_lambda(node):", "we are in a function. We resolved the LHS and RHS to their", "BuiltinScope(object): def __contains__(self, name): return hasattr(__builtin__, name) def __getitem__(self, key): if key in", "an imported statement, we add it into scope bound to the provided LHS", "in ('True', 'False', 'None'): return True if type(node) is ast.Lambda: return True return", "scopes: if self._check_in(name, scope): return scopeName, self._resolve_in(name, scope) return None, None def in_class_scope(self,", "self.builtin_scope = BuiltinScope() self.in_func = False self.in_class = False def resolve_node(self, node): name", "@contextmanager def class_context(self): self.start_ClassDef() yield self.end_ClassDef() def start_FunctionDef(self): self.func_scope = {} self.in_func =", "# Use class scope if we are in a class lhs starts with", "return \"\" def node_to_type(node): if type(node) in LITERAL_NODES: if type(node) == ast.Num: for", "visit_Assign(self, node): \"\"\" On an assignment expression, we add variables to local or", "self.in_class = False def visit_Import(self, node): \"\"\" Handle imports. \"\"\" for im in", "isinstance(n, ast.Attribute): # For function calls that are nested in selector expressions, #", "1: return # Resolve left-hand and right-hand side of assignment expression lhs =", "self.class_scope = {} self.global_scope = {} self.type_scope = TypeScope() self.builtin_scope = BuiltinScope() self.in_func", "scope[lhs] = self._resolve_in(rhs, self.class_scope) # Check builtins elif self._check_in(rhs, self.builtin_scope): scope[lhs] = self._resolve_in(rhs,", "start_FunctionDef(self): self.func_scope = {} self.in_func = True def end_FunctionDef(self): self.func_scope = {} self.in_func", "start_ClassDef(self): self.class_scope = {} self.in_class = True def end_ClassDef(self): self.class_scope = {} self.in_class", "if name in scope: return True parts = name.split(\".\") for i in range(1,", "return \"__builtin__.bool\" elif node.id == 'None': return \"__builtin__.None\" if type(node) == ast.Lambda: return", "node.module is None: return for im in node.names: full_import = node.module + '.'", "isinstance(node, ast.Num): return str(node.n) if isinstance(node, ast.Str): return node.s if (isinstance(node, ast.Name) and", "self.in_func: scopes.append(('function', self.func_scope)) if self.in_class: scopes.append(('class', self.class_scope)) scopes.reverse() for scopeName, scope in scopes:", "return scope[name] parts = name.split(\".\") for i in range(1, len(parts)): im = '.'.join(parts[:-i])", "scope based on whether we are in a function. 
We resolved the LHS", "nested in selector expressions, # e.g os.path.join, they are chained together as a", "return \"LambdaType\" return \"unknown\" class TypeScope(object): def __contains__(self, name): return hasattr(types, name) def", "elif self.in_class and rhs.startswith(\"self.\"): if self._check_in(rhs, self.class_scope): scope[lhs] = self._resolve_in(rhs, self.class_scope) # Check", "while isinstance(n, ast.Attribute): # For function calls that are nested in selector expressions,", "== ast.Num: for t, name in NUMBER_TYPES.items(): if isinstance(node.n, t): return name return", "= {} self.in_class = False def visit_Import(self, node): \"\"\" Handle imports. \"\"\" for", "and right-hand side of assignment expression lhs = self._get_name(node.targets[0]) rhs = self._get_name(node.value) if", "ast.Name node, we have a # all the components of the selector expression", "\"\" def node_to_type(node): if type(node) in LITERAL_NODES: if type(node) == ast.Num: for t,", "% key) class BuiltinScope(object): def __contains__(self, name): return hasattr(__builtin__, name) def __getitem__(self, key):", "_check_in(self, name, scope): if name in scope: return True parts = name.split(\".\") for", "# If we actually ended up at an ast.Name node, we have a", "selector expression that make up the call. # We just have to reverse", "\"\"\" for im in node.names: if im.asname is None: self.imports[im.name] = im.name else:", "the RHS is a literal, we add it into scope bound to the", "name: return None, None scopes = [('types', self.type_scope), ('builtin', self.builtin_scope), ('imports', self.imports), ('global',", "return None def _known_assignment_type(self, target): \"\"\" List of types we support for the", "not a type\" % key) class BuiltinScope(object): def __contains__(self, name): return hasattr(__builtin__, name)", "elif self._check_in(rhs, self.builtin_scope): scope[lhs] = self._resolve_in(rhs, self.builtin_scope) # Check literals elif rhs in", "True return False def _resolve_in(self, name, scope): if name in scope: return scope[name]", "we are in a class lhs starts with \"self.\" if self.in_class and lhs.startswith(\"self.\"):", "Then: - If the RHS is from an imported statement, we add it", "Check to see if we are in a function. scope = self.global_scope if", "scope) # Check rhs in class scope elif self.in_class and rhs.startswith(\"self.\"): if self._check_in(rhs,", "def __getitem__(self, key): if key in self: return \"__builtin__.%s\" % key raise KeyError(\"%s", "\"set\", ast.Dict: \"dict\", } NUMBER_TYPES = { int: \"int\", float: \"float\", long: \"long\",", "'None')): return node.id if isinstance(node, ast.Lambda): return node_to_code(node) return '' def _process_lambda(node): try:", "= {} self.func_scope = {} self.class_scope = {} self.global_scope = {} self.type_scope =", "+ '.' + im.name if im.asname is None: self.imports[im.name] = full_import else: self.imports[im.asname]", "scope bound to the provided LHS variable - If the RHS is a", "the parts we added above. 
if isinstance(n, ast.Name): parts.append(n.id) parts.reverse() return '.'.join(parts) if", "for i in range(1, len(parts)): im = '.'.join(parts[:-i]) if im in scope: return", "True parts = name.split(\".\") for i in range(1, len(parts)): im = '.'.join(parts[:-i]) if", "self.class_scope) # Check builtins elif self._check_in(rhs, self.builtin_scope): scope[lhs] = self._resolve_in(rhs, self.builtin_scope) # Check", "_known_assignment_type(self, target): \"\"\" List of types we support for the RHS in assignment", "up at an ast.Name node, we have a # all the components of", "scope: return scope[im] + name[len(im):] return None def _known_assignment_type(self, target): \"\"\" List of", "Resolve a node to its full name. \"\"\" if is_literal(node): return node_to_type(node) n", "in a function. scope = self.global_scope if self.in_func: scope = self.func_scope # Check", "{} self.in_func = True def end_FunctionDef(self): self.func_scope = {} self.in_func = False def", "lhs is None or rhs is None: return # Use class scope if", "selector expressions, # e.g os.path.join, they are chained together as a series of", "node): \"\"\" Handle from imports. \"\"\" if node.module is None: return for im", "e: return \"\" def node_to_type(node): if type(node) in LITERAL_NODES: if type(node) == ast.Num:", "def function_context(self): self.start_FunctionDef() yield self.end_FunctionDef() @contextmanager def class_context(self): self.start_ClassDef() yield self.end_ClassDef() def start_FunctionDef(self):", "'.' + im.name if im.asname is None: self.imports[im.name] = full_import else: self.imports[im.asname] =", "a class lhs starts with \"self.\" if self.in_class and lhs.startswith(\"self.\"): scope = self.class_scope", "'.'.join(parts[:-i]) if im in scope: return scope[im] + name[len(im):] return None def _known_assignment_type(self,", "we support for the RHS in assignment expressions. \"\"\" return (is_literal(target) or isinstance(target,", "single-assignments for now if len(node.targets) != 1: return # Resolve left-hand and right-hand", "expressions. \"\"\" return (is_literal(target) or isinstance(target, (ast.Call, ast.Attribute))) def _get_name(self, node): \"\"\" Resolve", "def _get_name(self, node): \"\"\" Resolve a node to its full name. \"\"\" if", "lhs = self._get_name(node.targets[0]) rhs = self._get_name(node.value) if lhs is None or rhs is", "we have a # all the components of the selector expression that make", "# Check rhs in class scope elif self.in_class and rhs.startswith(\"self.\"): if self._check_in(rhs, self.class_scope):", "type(node) == ast.Name: if (node.id == 'True' or node.id == 'False'): return \"__builtin__.bool\"", "yield self.end_ClassDef() def start_FunctionDef(self): self.func_scope = {} self.in_func = True def end_FunctionDef(self): self.func_scope", "are in a function. We resolved the LHS and RHS to their full", "if name in scope: return scope[name] parts = name.split(\".\") for i in range(1,", "def in_class_scope(self, node): name = self._get_name(node) if not name: return False return self._check_in(name,", "- If the RHS is from an imported statement, we add it into", "RHS in assignment expressions. \"\"\" return (is_literal(target) or isinstance(target, (ast.Call, ast.Attribute))) def _get_name(self,", "if isinstance(node, ast.Str): return node.s if (isinstance(node, ast.Name) and node.id in ('True', 'False',", "their full names. 
Then: - If the RHS is from an imported statement,", "key) class Context(object): def __init__(self): self.imports = {} self.func_scope = {} self.class_scope =", "def __init__(self): self.imports = {} self.func_scope = {} self.class_scope = {} self.global_scope =", "not name: return False return self._check_in(name, self.class_scope) @contextmanager def function_context(self): self.start_FunctionDef() yield self.end_FunctionDef()", "an ast.Name node, we have a # all the components of the selector", "= im.name def visit_ImportFrom(self, node): \"\"\" Handle from imports. \"\"\" if node.module is", "return gen.line except Exception as e: return \"\" def node_to_type(node): if type(node) in", "import contextmanager from pprint import pprint from utils import node_to_code LITERAL_NODES = {", "self.global_scope = {} self.type_scope = TypeScope() self.builtin_scope = BuiltinScope() self.in_func = False self.in_class", "= im.name else: self.imports[im.asname] = im.name def visit_ImportFrom(self, node): \"\"\" Handle from imports.", "= {} self.type_scope = TypeScope() self.builtin_scope = BuiltinScope() self.in_func = False self.in_class =", "= node.func parts = [] while isinstance(n, ast.Attribute): # For function calls that", "True def end_FunctionDef(self): self.func_scope = {} self.in_func = False def start_ClassDef(self): self.class_scope =", "import pprint from utils import node_to_code LITERAL_NODES = { ast.Num: \"number\", ast.Str: \"str\",", "added above. if isinstance(n, ast.Name): parts.append(n.id) parts.reverse() return '.'.join(parts) if isinstance(n, ast.Call): return", "self.imports): scope[lhs] = self._resolve_in(rhs, self.imports) # Check current scope (global, func or class)", "type\" % key) class BuiltinScope(object): def __contains__(self, name): return hasattr(__builtin__, name) def __getitem__(self,", "None: self.imports[im.name] = im.name else: self.imports[im.asname] = im.name def visit_ImportFrom(self, node): \"\"\" Handle", "import ast from contextlib import contextmanager from pprint import pprint from utils import", "return (is_literal(target) or isinstance(target, (ast.Call, ast.Attribute))) def _get_name(self, node): \"\"\" Resolve a node", "None: return # Use class scope if we are in a class lhs", "node.s if (isinstance(node, ast.Name) and node.id in ('True', 'False', 'None')): return node.id if", "% key raise KeyError(\"%s is not a type\" % key) class BuiltinScope(object): def", "<reponame>kiteco/kiteco-public import __builtin__ import types import ast from contextlib import contextmanager from pprint", "scopes.append(('class', self.class_scope)) scopes.reverse() for scopeName, scope in scopes: if self._check_in(name, scope): return scopeName,", "scope (global, func or class) elif self._check_in(rhs, scope): scope[lhs] = self._resolve_in(rhs, scope) #", "ast.Attribute nodes. Extract them one by one. parts.append(n.attr) n = n.value # If", "if im.asname is None: self.imports[im.name] = im.name else: self.imports[im.asname] = im.name def visit_ImportFrom(self,", "is None: return # Use class scope if we are in a class", "if isinstance(node, ast.Name): if node.id in ('True', 'False', 'None'): return True if type(node)", "the RHS is a known assignment type if not self._known_assignment_type(node.value): return # Only", "to see if we are in a function. scope = self.global_scope if self.in_func:", "components of the selector expression that make up the call. 
# We just", "resolve_node(self, node): name = self._get_name(node) if not name: return None, None scopes =", "self.func_scope = {} self.in_func = False def start_ClassDef(self): self.class_scope = {} self.in_class =", "None scopes = [('types', self.type_scope), ('builtin', self.builtin_scope), ('imports', self.imports), ('global', self.global_scope)] if self.in_func:", "type if not self._known_assignment_type(node.value): return # Only consider single-assignments for now if len(node.targets)", "self.in_func = False self.in_class = False def resolve_node(self, node): name = self._get_name(node) if", "a builtin\" % key) class Context(object): def __init__(self): self.imports = {} self.func_scope =", "isinstance(node, ast.Call): n = node.func parts = [] while isinstance(n, ast.Attribute): # For", "of assignment expression lhs = self._get_name(node.targets[0]) rhs = self._get_name(node.value) if lhs is None", "'' if isinstance(node, ast.Num): return str(node.n) if isinstance(node, ast.Str): return node.s if (isinstance(node,", "is a literal, we add it into scope bound to the provided LHS", "based on whether we are in a function. We resolved the LHS and", "assignment expression lhs = self._get_name(node.targets[0]) rhs = self._get_name(node.value) if lhs is None or", "names. Then: - If the RHS is from an imported statement, we add", "\"\"\" return (is_literal(target) or isinstance(target, (ast.Call, ast.Attribute))) def _get_name(self, node): \"\"\" Resolve a", "{} self.in_class = True def end_ClassDef(self): self.class_scope = {} self.in_class = False def", "function. scope = self.global_scope if self.in_func: scope = self.func_scope # Check to see", "# Resolve left-hand and right-hand side of assignment expression lhs = self._get_name(node.targets[0]) rhs", "full_import else: self.imports[im.asname] = full_import def visit_Assign(self, node): \"\"\" On an assignment expression,", "LITERAL_NODES.values(): scope[lhs] = rhs # Remove re-assignments that were unrecognized elif lhs in", "\"__builtin__.None\" if type(node) == ast.Lambda: return \"LambdaType\" return \"unknown\" class TypeScope(object): def __contains__(self,", "len(node.targets) != 1: return # Resolve left-hand and right-hand side of assignment expression", "- If the RHS is a literal, we add it into scope bound", "it into scope bound to the provided LHS variable - Otherwise, if the", "we add variables to local or global scope based on whether we are", "{ int: \"int\", float: \"float\", long: \"long\", complex: \"complex\", } def is_literal(node): if", "im in node.names: full_import = node.module + '.' + im.name if im.asname is", "lhs in scope: del scope[lhs] def _check_in(self, name, scope): if name in scope:", "self.end_ClassDef() def start_FunctionDef(self): self.func_scope = {} self.in_func = True def end_FunctionDef(self): self.func_scope =", "range(1, len(parts)): im = '.'.join(parts[:-i]) if im in scope: return scope[im] + name[len(im):]", "scope, we remove it. 
\"\"\" # Check to see if we are in", "} def is_literal(node): if type(node) in LITERAL_NODES: return True if isinstance(node, ast.Name): if", "return self._check_in(name, self.class_scope) @contextmanager def function_context(self): self.start_FunctionDef() yield self.end_FunctionDef() @contextmanager def class_context(self): self.start_ClassDef()", "name, scope): if name in scope: return True parts = name.split(\".\") for i", "!= 1: return # Resolve left-hand and right-hand side of assignment expression lhs", "ast.Lambda: return \"LambdaType\" return \"unknown\" class TypeScope(object): def __contains__(self, name): return hasattr(types, name)", "self.in_class = False def resolve_node(self, node): name = self._get_name(node) if not name: return", "a series of # ast.Attribute nodes. Extract them one by one. parts.append(n.attr) n", "Handle imports. \"\"\" for im in node.names: if im.asname is None: self.imports[im.name] =", "BuiltinScope() self.in_func = False self.in_class = False def resolve_node(self, node): name = self._get_name(node)", "target): \"\"\" List of types we support for the RHS in assignment expressions.", "to their full names. Then: - If the RHS is from an imported", "return str(node.n) if isinstance(node, ast.Str): return node.s if (isinstance(node, ast.Name) and node.id in", "is from an imported statement, we add it into scope bound to the", "except Exception as e: return \"\" def node_to_type(node): if type(node) in LITERAL_NODES: if", "to see if the RHS is a known assignment type if not self._known_assignment_type(node.value):", "# Check to see if we are in a function. scope = self.global_scope", "- Otherwise, if the LHS is already in scope, we remove it. \"\"\"", "(isinstance(node, ast.Name) and node.id in ('True', 'False', 'None')): return node.id if isinstance(node, ast.Lambda):", "self.func_scope)) if self.in_class: scopes.append(('class', self.class_scope)) scopes.reverse() for scopeName, scope in scopes: if self._check_in(name,", "Handle from imports. \"\"\" if node.module is None: return for im in node.names:", "rhs # Remove re-assignments that were unrecognized elif lhs in scope: del scope[lhs]", "self.start_ClassDef() yield self.end_ClassDef() def start_FunctionDef(self): self.func_scope = {} self.in_func = True def end_FunctionDef(self):", "now if len(node.targets) != 1: return # Resolve left-hand and right-hand side of", "if node.module is None: return for im in node.names: full_import = node.module +", "= [] while isinstance(n, ast.Attribute): # For function calls that are nested in", "self._get_name(node.value) if lhs is None or rhs is None: return # Use class", "Use class scope if we are in a class lhs starts with \"self.\"", "we added above. 
if isinstance(n, ast.Name): parts.append(n.id) parts.reverse() return '.'.join(parts) if isinstance(n, ast.Call):", "name): return hasattr(types, name) def __getitem__(self, key): if key in self: return \"types.%s\"", "scope[lhs] def _check_in(self, name, scope): if name in scope: return True parts =", "is None: self.imports[im.name] = full_import else: self.imports[im.asname] = full_import def visit_Assign(self, node): \"\"\"", "def class_context(self): self.start_ClassDef() yield self.end_ClassDef() def start_FunctionDef(self): self.func_scope = {} self.in_func = True", "False def literal_value(node): if not is_literal(node): return '' if isinstance(node, ast.Num): return str(node.n)", "Check builtins elif self._check_in(rhs, self.builtin_scope): scope[lhs] = self._resolve_in(rhs, self.builtin_scope) # Check literals elif", "call. # We just have to reverse the parts we added above. if", "LHS and RHS to their full names. Then: - If the RHS is", "name: return False return self._check_in(name, self.class_scope) @contextmanager def function_context(self): self.start_FunctionDef() yield self.end_FunctionDef() @contextmanager", "literal, we add it into scope bound to the provided LHS variable -", "False return self._check_in(name, self.class_scope) @contextmanager def function_context(self): self.start_FunctionDef() yield self.end_FunctionDef() @contextmanager def class_context(self):", "node.module + '.' + im.name if im.asname is None: self.imports[im.name] = full_import else:", "scope: return True return False def _resolve_in(self, name, scope): if name in scope:", "statement, we add it into scope bound to the provided LHS variable -", "starts with \"self.\" if self.in_class and lhs.startswith(\"self.\"): scope = self.class_scope # Check imports", "ended up at an ast.Name node, we have a # all the components", "return '' def _process_lambda(node): try: gen = ASTCodeGenerator() gen.visit(node) return gen.line except Exception", "assignment type if not self._known_assignment_type(node.value): return # Only consider single-assignments for now if", "at an ast.Name node, we have a # all the components of the", "return False return self._check_in(name, self.class_scope) @contextmanager def function_context(self): self.start_FunctionDef() yield self.end_FunctionDef() @contextmanager def", "in ('True', 'False', 'None')): return node.id if isinstance(node, ast.Lambda): return node_to_code(node) return ''", "full names. Then: - If the RHS is from an imported statement, we", "self.func_scope = {} self.class_scope = {} self.global_scope = {} self.type_scope = TypeScope() self.builtin_scope", "in scope: return True return False def _resolve_in(self, name, scope): if name in", "self.type_scope), ('builtin', self.builtin_scope), ('imports', self.imports), ('global', self.global_scope)] if self.in_func: scopes.append(('function', self.func_scope)) if self.in_class:", "self.imports[im.asname] = full_import def visit_Assign(self, node): \"\"\" On an assignment expression, we add", "see if the RHS is a known assignment type if not self._known_assignment_type(node.value): return", "in assignment expressions. 
\"\"\" return (is_literal(target) or isinstance(target, (ast.Call, ast.Attribute))) def _get_name(self, node):", "LITERAL_NODES[type(node)] else: return LITERAL_NODES[type(node)] if type(node) == ast.Name: if (node.id == 'True' or", "LITERAL_NODES: return True if isinstance(node, ast.Name): if node.id in ('True', 'False', 'None'): return", "return True parts = name.split(\".\") for i in range(1, len(parts)): im = '.'.join(parts[:-i])", "List of types we support for the RHS in assignment expressions. \"\"\" return", "self._resolve_in(name, scope) return None, None def in_class_scope(self, node): name = self._get_name(node) if not", "in a function. We resolved the LHS and RHS to their full names.", "scope): return scopeName, self._resolve_in(name, scope) return None, None def in_class_scope(self, node): name =", "'True' or node.id == 'False'): return \"__builtin__.bool\" elif node.id == 'None': return \"__builtin__.None\"", "return '.'.join(parts) if isinstance(n, ast.Call): return self._get_name(n) if is_literal(n): nodeType = node_to_type(n) parts.append(nodeType)", "\"\"\" On an assignment expression, we add variables to local or global scope", "scope = self.global_scope if self.in_func: scope = self.func_scope # Check to see if" ]
[ "def assert_functor_identity(fa): assert identity |fmap| fa == fa def assert_functor_composition(f, g, fa): assert", "def test_functor_identity(): assert_functor_identity([1,2,3]) def test_functor_composition(): f = lambda x: x+1 g = lambda", "x: x+1 g = lambda x: x+2 fa = [1,2,3] assert_functor_composition(f, g, fa)", "hypothesis import given import hypothesis.strategies as st from pyzeta.typeclasses.functor import fmap ## TODO:", "import fmap ## TODO: use hypothesis def test_functor_identity(): assert_functor_identity([1,2,3]) def test_functor_composition(): f =", "use hypothesis def test_functor_identity(): assert_functor_identity([1,2,3]) def test_functor_composition(): f = lambda x: x+1 g", "fa == fa def assert_functor_composition(f, g, fa): assert g |fmap| (f |fmap| fa)", "= lambda x: x+1 g = lambda x: x+2 fa = [1,2,3] assert_functor_composition(f,", "x+1 g = lambda x: x+2 fa = [1,2,3] assert_functor_composition(f, g, fa) def", "test_functor_as_func(): assert fmap(lambda x: x+1, [1,2,3]) == [2,3,4] ## Laws ## def assert_functor_identity(fa):", "assert fmap(lambda x: x+1, [1,2,3]) == [2,3,4] ## Laws ## def assert_functor_identity(fa): assert", "identity, compose, curry from hypothesis import given import hypothesis.strategies as st from pyzeta.typeclasses.functor", "x+2 fa = [1,2,3] assert_functor_composition(f, g, fa) def test_functor_curried(): f = curry(fmap) assert", "TODO: use hypothesis def test_functor_identity(): assert_functor_identity([1,2,3]) def test_functor_composition(): f = lambda x: x+1", "|fmap| fa == fa def assert_functor_composition(f, g, fa): assert g |fmap| (f |fmap|", "<reponame>victoradan/pythonZeta<filename>test/typeclasses/test_functor.py<gh_stars>0 from toolz import identity, compose, curry from hypothesis import given import hypothesis.strategies", "fa) def test_functor_curried(): f = curry(fmap) assert f(lambda x: x+1)([1,2,3]) == [2,3,4] def", "curry(fmap) assert f(lambda x: x+1)([1,2,3]) == [2,3,4] def test_functor_as_func(): assert fmap(lambda x: x+1,", "## TODO: use hypothesis def test_functor_identity(): assert_functor_identity([1,2,3]) def test_functor_composition(): f = lambda x:", "assert_functor_identity(fa): assert identity |fmap| fa == fa def assert_functor_composition(f, g, fa): assert g", "lambda x: x+2 fa = [1,2,3] assert_functor_composition(f, g, fa) def test_functor_curried(): f =", "lambda x: x+1 g = lambda x: x+2 fa = [1,2,3] assert_functor_composition(f, g,", "[1,2,3]) == [2,3,4] ## Laws ## def assert_functor_identity(fa): assert identity |fmap| fa ==", "## def assert_functor_identity(fa): assert identity |fmap| fa == fa def assert_functor_composition(f, g, fa):", "st from pyzeta.typeclasses.functor import fmap ## TODO: use hypothesis def test_functor_identity(): assert_functor_identity([1,2,3]) def", "g = lambda x: x+2 fa = [1,2,3] assert_functor_composition(f, g, fa) def test_functor_curried():", "g, fa): assert g |fmap| (f |fmap| fa) == compose(g, f) |fmap| fa", "hypothesis.strategies as st from pyzeta.typeclasses.functor import fmap ## TODO: use hypothesis def test_functor_identity():", "[2,3,4] def test_functor_as_func(): assert fmap(lambda x: x+1, [1,2,3]) == [2,3,4] ## Laws ##", "assert identity |fmap| fa == fa def assert_functor_composition(f, g, fa): assert g |fmap|", "def test_functor_curried(): f = curry(fmap) assert f(lambda x: x+1)([1,2,3]) == [2,3,4] def test_functor_as_func():", "== [2,3,4] ## Laws ## def assert_functor_identity(fa): assert identity |fmap| fa == fa", "import hypothesis.strategies as st from 
pyzeta.typeclasses.functor import fmap ## TODO: use hypothesis def", "x+1)([1,2,3]) == [2,3,4] def test_functor_as_func(): assert fmap(lambda x: x+1, [1,2,3]) == [2,3,4] ##", "curry from hypothesis import given import hypothesis.strategies as st from pyzeta.typeclasses.functor import fmap", "f(lambda x: x+1)([1,2,3]) == [2,3,4] def test_functor_as_func(): assert fmap(lambda x: x+1, [1,2,3]) ==", "assert f(lambda x: x+1)([1,2,3]) == [2,3,4] def test_functor_as_func(): assert fmap(lambda x: x+1, [1,2,3])", "Laws ## def assert_functor_identity(fa): assert identity |fmap| fa == fa def assert_functor_composition(f, g,", "x: x+2 fa = [1,2,3] assert_functor_composition(f, g, fa) def test_functor_curried(): f = curry(fmap)", "assert_functor_identity([1,2,3]) def test_functor_composition(): f = lambda x: x+1 g = lambda x: x+2", "from toolz import identity, compose, curry from hypothesis import given import hypothesis.strategies as", "hypothesis def test_functor_identity(): assert_functor_identity([1,2,3]) def test_functor_composition(): f = lambda x: x+1 g =", "f = curry(fmap) assert f(lambda x: x+1)([1,2,3]) == [2,3,4] def test_functor_as_func(): assert fmap(lambda", "assert_functor_composition(f, g, fa) def test_functor_curried(): f = curry(fmap) assert f(lambda x: x+1)([1,2,3]) ==", "def test_functor_as_func(): assert fmap(lambda x: x+1, [1,2,3]) == [2,3,4] ## Laws ## def", "test_functor_composition(): f = lambda x: x+1 g = lambda x: x+2 fa =", "x: x+1, [1,2,3]) == [2,3,4] ## Laws ## def assert_functor_identity(fa): assert identity |fmap|", "## Laws ## def assert_functor_identity(fa): assert identity |fmap| fa == fa def assert_functor_composition(f,", "fmap(lambda x: x+1, [1,2,3]) == [2,3,4] ## Laws ## def assert_functor_identity(fa): assert identity", "from pyzeta.typeclasses.functor import fmap ## TODO: use hypothesis def test_functor_identity(): assert_functor_identity([1,2,3]) def test_functor_composition():", "test_functor_curried(): f = curry(fmap) assert f(lambda x: x+1)([1,2,3]) == [2,3,4] def test_functor_as_func(): assert", "fa = [1,2,3] assert_functor_composition(f, g, fa) def test_functor_curried(): f = curry(fmap) assert f(lambda", "import given import hypothesis.strategies as st from pyzeta.typeclasses.functor import fmap ## TODO: use", "def assert_functor_composition(f, g, fa): assert g |fmap| (f |fmap| fa) == compose(g, f)", "g, fa) def test_functor_curried(): f = curry(fmap) assert f(lambda x: x+1)([1,2,3]) == [2,3,4]", "as st from pyzeta.typeclasses.functor import fmap ## TODO: use hypothesis def test_functor_identity(): assert_functor_identity([1,2,3])", "x+1, [1,2,3]) == [2,3,4] ## Laws ## def assert_functor_identity(fa): assert identity |fmap| fa", "fmap ## TODO: use hypothesis def test_functor_identity(): assert_functor_identity([1,2,3]) def test_functor_composition(): f = lambda", "from hypothesis import given import hypothesis.strategies as st from pyzeta.typeclasses.functor import fmap ##", "f = lambda x: x+1 g = lambda x: x+2 fa = [1,2,3]", "def test_functor_composition(): f = lambda x: x+1 g = lambda x: x+2 fa", "given import hypothesis.strategies as st from pyzeta.typeclasses.functor import fmap ## TODO: use hypothesis", "= curry(fmap) assert f(lambda x: x+1)([1,2,3]) == [2,3,4] def test_functor_as_func(): assert fmap(lambda x:", "[1,2,3] assert_functor_composition(f, g, fa) def test_functor_curried(): f = curry(fmap) assert f(lambda x: x+1)([1,2,3])", "= [1,2,3] assert_functor_composition(f, g, fa) def test_functor_curried(): f = curry(fmap) assert f(lambda x:", 
"assert_functor_composition(f, g, fa): assert g |fmap| (f |fmap| fa) == compose(g, f) |fmap|", "= lambda x: x+2 fa = [1,2,3] assert_functor_composition(f, g, fa) def test_functor_curried(): f", "== [2,3,4] def test_functor_as_func(): assert fmap(lambda x: x+1, [1,2,3]) == [2,3,4] ## Laws", "[2,3,4] ## Laws ## def assert_functor_identity(fa): assert identity |fmap| fa == fa def", "compose, curry from hypothesis import given import hypothesis.strategies as st from pyzeta.typeclasses.functor import", "import identity, compose, curry from hypothesis import given import hypothesis.strategies as st from", "fa def assert_functor_composition(f, g, fa): assert g |fmap| (f |fmap| fa) == compose(g,", "toolz import identity, compose, curry from hypothesis import given import hypothesis.strategies as st", "x: x+1)([1,2,3]) == [2,3,4] def test_functor_as_func(): assert fmap(lambda x: x+1, [1,2,3]) == [2,3,4]", "test_functor_identity(): assert_functor_identity([1,2,3]) def test_functor_composition(): f = lambda x: x+1 g = lambda x:", "pyzeta.typeclasses.functor import fmap ## TODO: use hypothesis def test_functor_identity(): assert_functor_identity([1,2,3]) def test_functor_composition(): f", "== fa def assert_functor_composition(f, g, fa): assert g |fmap| (f |fmap| fa) ==", "identity |fmap| fa == fa def assert_functor_composition(f, g, fa): assert g |fmap| (f" ]
[ "= self.sa.GetJson lastDate = self.getLastDate() self.starttime = lastDate + 'T00:00:00' self.endtime = lastDate", "'1Min' url = AlpacaHistorical.ALPACA_URL % ( symbol, start, end, tf) data = requests.get(url,", "All(): filter = FilterVolumeProfileDailySetup() AllStocks.Run(filter.Run, False) filter.sa.WriteJson(filter.jsonData) if __name__ == '__main__': FilterVolumeProfileDailySetup.All() print('----------", "vpros): for vpro in vpros: if (abs(close - vpro) / close < self.nearMargin):", "headers=self.custom_header) return data def volumeProfiles(self, df): df.rename(columns={'Close': 'close'}, inplace=True) df.rename(columns={'Volume': 'volume'}, inplace=True) data", "e: print(e) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpro', False) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpros', 0) return False @staticmethod", "0.05 num_samples = len(df) kde = stats.gaussian_kde(close, weights=volume, bw_method=kde_factor) xr = np.linspace(close.min(), close.max(),", "is None: symbol = self.symbol else: self.symbol = symbol isLoaded, tp = AllStocks.GetDailyStockData(symbol)", "np import pandas as pd from util import StockAnalysis, AllStocks from alpaca import", "return False @staticmethod def All(): filter = FilterVolumeProfileDailySetup() AllStocks.Run(filter.Run, False) filter.sa.WriteJson(filter.jsonData) if __name__", "0 def Run(self, symbol=None): if symbol is None: symbol = self.symbol else: self.symbol", "close, vpros): for vpro in vpros: if (abs(close - vpro) / close <", "= FilterVolumeProfileDailySetup() AllStocks.Run(filter.Run, False) filter.sa.WriteJson(filter.jsonData) if __name__ == '__main__': FilterVolumeProfileDailySetup.All() print('---------- done ----------')", "self.isNearVP(price, volProfiles) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpro', isNear) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpros', round(float(vpro), 2)) except Exception", "logging from scipy import stats, signal import numpy as np import pandas as", "symbol, start, end, tf) data = requests.get(url, headers=self.custom_header) return data def volumeProfiles(self, df):", "pandas as pd from util import StockAnalysis, AllStocks from alpaca import AlpacaHistorical, AlpacaSnapshots", "num_samples) kdy = kde(xr) ticks_per_sample = (xr.max() - xr.min()) / num_samples peaks, _", "= data['close'] kde_factor = 0.05 num_samples = len(df) kde = stats.gaussian_kde(close, weights=volume, bw_method=kde_factor)", "= AllStocks.GetDailyStockData(symbol) if isLoaded: try: price = tp.Close[0] volProfiles = self.volumeProfiles(tp) isNear, vpro", "None: symbol = self.symbol else: self.symbol = symbol isLoaded, tp = AllStocks.GetDailyStockData(symbol) if", "( symbol, start, end, tf) data = requests.get(url, headers=self.custom_header) return data def volumeProfiles(self,", "class FilterVolumeProfileDailySetup: def __init__(self): self.sa = StockAnalysis() self.jsonData = self.sa.GetJson lastDate = self.getLastDate()", "url = AlpacaHistorical.ALPACA_URL % ( symbol, start, end, tf) data = requests.get(url, headers=self.custom_header)", "peaks, _ = signal.find_peaks(kdy) pkx = xr[peaks] pky = kdy[peaks] min_prom = kdy.max()", "pkx = xr[peaks] pky = kdy[peaks] min_prom = kdy.max() * 0.3 peaks, peak_props", "self.nearMargin): return True, vpro return False, 0 def Run(self, symbol=None): if symbol is", "round(float(vpro), 2)) except Exception as e: print(e) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpro', False) self.sa.UpdateFilter(self.jsonData, self.symbol,", "= lastDate + 'T23:59:59' def 
getLastDate(self): app = AlpacaSnapshots(favorites={}, minPrice=0, maxPrice=0, minVolume=0, maxVolume=0)", "= df volume = data['volume'] close = data['close'] kde_factor = 0.05 num_samples =", "Run(self, symbol=None): if symbol is None: symbol = self.symbol else: self.symbol = symbol", "True, vpro return False, 0 def Run(self, symbol=None): if symbol is None: symbol", "symbol=None): if symbol is None: symbol = self.symbol else: self.symbol = symbol isLoaded,", "return date except Exception as e: logging.error(f'AlpacaSnapshot.getSnapshot(). ERROR: {e}') return None def HistoricalPrices(self,", "* 0.3 peaks, peak_props = signal.find_peaks(kdy, prominence=min_prom) pkx = xr[peaks] pky = kdy[peaks]", "for symbol in snapshots: try: dailyBar = snapshots[symbol]['dailyBar'] date = dailyBar['t'].split('T')[0] return date", "import logging from scipy import stats, signal import numpy as np import pandas", "close < self.nearMargin): return True, vpro return False, 0 def Run(self, symbol=None): if", "self.sa = StockAnalysis() self.jsonData = self.sa.GetJson lastDate = self.getLastDate() self.starttime = lastDate +", "alpaca import AlpacaHistorical, AlpacaSnapshots class FilterVolumeProfileDailySetup: def __init__(self): self.sa = StockAnalysis() self.jsonData =", "data = requests.get(url, headers=self.custom_header) return data def volumeProfiles(self, df): df.rename(columns={'Close': 'close'}, inplace=True) df.rename(columns={'Volume':", "isNear) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpros', round(float(vpro), 2)) except Exception as e: print(e) self.sa.UpdateFilter(self.jsonData, self.symbol,", "symbol in snapshots: try: dailyBar = snapshots[symbol]['dailyBar'] date = dailyBar['t'].split('T')[0] return date except", "'vpro', False) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpros', 0) return False @staticmethod def All(): filter =", "json.loads(data.text) symbols = '' for symbol in snapshots: try: dailyBar = snapshots[symbol]['dailyBar'] date", "kde_factor = 0.05 num_samples = len(df) kde = stats.gaussian_kde(close, weights=volume, bw_method=kde_factor) xr =", "num_samples peaks, _ = signal.find_peaks(kdy) pkx = xr[peaks] pky = kdy[peaks] min_prom =", "from util import StockAnalysis, AllStocks from alpaca import AlpacaHistorical, AlpacaSnapshots class FilterVolumeProfileDailySetup: def", "kdy = kde(xr) ticks_per_sample = (xr.max() - xr.min()) / num_samples peaks, _ =", "= kdy.max() * 0.3 peaks, peak_props = signal.find_peaks(kdy, prominence=min_prom) pkx = xr[peaks] pky", "snapshots = json.loads(data.text) symbols = '' for symbol in snapshots: try: dailyBar =", "pkx = xr[peaks] pky = kdy[peaks] return pkx def isNearVP(self, close, vpros): for", "print(e) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpro', False) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpros', 0) return False @staticmethod def", "self.symbol, 'vpros', 0) return False @staticmethod def All(): filter = FilterVolumeProfileDailySetup() AllStocks.Run(filter.Run, False)", "return data def volumeProfiles(self, df): df.rename(columns={'Close': 'close'}, inplace=True) df.rename(columns={'Volume': 'volume'}, inplace=True) data =", "False) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpros', 0) return False @staticmethod def All(): filter = FilterVolumeProfileDailySetup()", "df volume = data['volume'] close = data['close'] kde_factor = 0.05 num_samples = len(df)", "= signal.find_peaks(kdy) pkx = xr[peaks] pky = kdy[peaks] min_prom = kdy.max() * 0.3", "'T23:59:59' def getLastDate(self): app = 
AlpacaSnapshots(favorites={}, minPrice=0, maxPrice=0, minVolume=0, maxVolume=0) data = app.HistoricalSnapshots(['AAPL'])", "timeframe, datatype=None, starttime=None, endtime=None): start = self.startime end = self.endtime tf = '1Min'", "len(df) kde = stats.gaussian_kde(close, weights=volume, bw_method=kde_factor) xr = np.linspace(close.min(), close.max(), num_samples) kdy =", "AlpacaSnapshots(favorites={}, minPrice=0, maxPrice=0, minVolume=0, maxVolume=0) data = app.HistoricalSnapshots(['AAPL']) snapshots = json.loads(data.text) symbols =", "= self.volumeProfiles(tp) isNear, vpro = self.isNearVP(price, volProfiles) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpro', isNear) self.sa.UpdateFilter(self.jsonData, self.symbol,", "xr.min()) / num_samples peaks, _ = signal.find_peaks(kdy) pkx = xr[peaks] pky = kdy[peaks]", "e: logging.error(f'AlpacaSnapshot.getSnapshot(). ERROR: {e}') return None def HistoricalPrices(self, symbol, timeframe, datatype=None, starttime=None, endtime=None):", "as np import pandas as pd from util import StockAnalysis, AllStocks from alpaca", "vpros: if (abs(close - vpro) / close < self.nearMargin): return True, vpro return", "inplace=True) data = df volume = data['volume'] close = data['close'] kde_factor = 0.05", "self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpro', isNear) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpros', round(float(vpro), 2)) except Exception as e:", "lastDate + 'T23:59:59' def getLastDate(self): app = AlpacaSnapshots(favorites={}, minPrice=0, maxPrice=0, minVolume=0, maxVolume=0) data", "price = tp.Close[0] volProfiles = self.volumeProfiles(tp) isNear, vpro = self.isNearVP(price, volProfiles) self.sa.UpdateFilter(self.jsonData, self.symbol,", "signal.find_peaks(kdy) pkx = xr[peaks] pky = kdy[peaks] min_prom = kdy.max() * 0.3 peaks,", "tf) data = requests.get(url, headers=self.custom_header) return data def volumeProfiles(self, df): df.rename(columns={'Close': 'close'}, inplace=True)", "import json import logging from scipy import stats, signal import numpy as np", "= xr[peaks] pky = kdy[peaks] return pkx def isNearVP(self, close, vpros): for vpro", "= self.symbol else: self.symbol = symbol isLoaded, tp = AllStocks.GetDailyStockData(symbol) if isLoaded: try:", "isLoaded: try: price = tp.Close[0] volProfiles = self.volumeProfiles(tp) isNear, vpro = self.isNearVP(price, volProfiles)", "'vpros', round(float(vpro), 2)) except Exception as e: print(e) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpro', False) self.sa.UpdateFilter(self.jsonData,", "tf = '1Min' url = AlpacaHistorical.ALPACA_URL % ( symbol, start, end, tf) data", "isNearVP(self, close, vpros): for vpro in vpros: if (abs(close - vpro) / close", "/ close < self.nearMargin): return True, vpro return False, 0 def Run(self, symbol=None):", "self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpros', 0) return False @staticmethod def All(): filter = FilterVolumeProfileDailySetup() AllStocks.Run(filter.Run,", "requests import json import logging from scipy import stats, signal import numpy as", "data = app.HistoricalSnapshots(['AAPL']) snapshots = json.loads(data.text) symbols = '' for symbol in snapshots:", "ERROR: {e}') return None def HistoricalPrices(self, symbol, timeframe, datatype=None, starttime=None, endtime=None): start =", "lastDate + 'T00:00:00' self.endtime = lastDate + 'T23:59:59' def getLastDate(self): app = AlpacaSnapshots(favorites={},", "pky = kdy[peaks] return pkx def isNearVP(self, close, vpros): for vpro in vpros:", "/ num_samples peaks, _ = 
signal.find_peaks(kdy) pkx = xr[peaks] pky = kdy[peaks] min_prom", "stats, signal import numpy as np import pandas as pd from util import", "StockAnalysis() self.jsonData = self.sa.GetJson lastDate = self.getLastDate() self.starttime = lastDate + 'T00:00:00' self.endtime", "def Run(self, symbol=None): if symbol is None: symbol = self.symbol else: self.symbol =", "in vpros: if (abs(close - vpro) / close < self.nearMargin): return True, vpro", "2)) except Exception as e: print(e) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpro', False) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpros',", "self.symbol, 'vpro', False) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpros', 0) return False @staticmethod def All(): filter", "= signal.find_peaks(kdy, prominence=min_prom) pkx = xr[peaks] pky = kdy[peaks] return pkx def isNearVP(self,", "= xr[peaks] pky = kdy[peaks] min_prom = kdy.max() * 0.3 peaks, peak_props =", "df.rename(columns={'Close': 'close'}, inplace=True) df.rename(columns={'Volume': 'volume'}, inplace=True) data = df volume = data['volume'] close", "'' for symbol in snapshots: try: dailyBar = snapshots[symbol]['dailyBar'] date = dailyBar['t'].split('T')[0] return", "= kdy[peaks] return pkx def isNearVP(self, close, vpros): for vpro in vpros: if", "AllStocks.GetDailyStockData(symbol) if isLoaded: try: price = tp.Close[0] volProfiles = self.volumeProfiles(tp) isNear, vpro =", "= tp.Close[0] volProfiles = self.volumeProfiles(tp) isNear, vpro = self.isNearVP(price, volProfiles) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpro',", "snapshots[symbol]['dailyBar'] date = dailyBar['t'].split('T')[0] return date except Exception as e: logging.error(f'AlpacaSnapshot.getSnapshot(). ERROR: {e}')", "self.symbol = symbol isLoaded, tp = AllStocks.GetDailyStockData(symbol) if isLoaded: try: price = tp.Close[0]", "import numpy as np import pandas as pd from util import StockAnalysis, AllStocks", "if isLoaded: try: price = tp.Close[0] volProfiles = self.volumeProfiles(tp) isNear, vpro = self.isNearVP(price,", "self.startime end = self.endtime tf = '1Min' url = AlpacaHistorical.ALPACA_URL % ( symbol,", "df): df.rename(columns={'Close': 'close'}, inplace=True) df.rename(columns={'Volume': 'volume'}, inplace=True) data = df volume = data['volume']", "= snapshots[symbol]['dailyBar'] date = dailyBar['t'].split('T')[0] return date except Exception as e: logging.error(f'AlpacaSnapshot.getSnapshot(). 
ERROR:", "AlpacaHistorical, AlpacaSnapshots class FilterVolumeProfileDailySetup: def __init__(self): self.sa = StockAnalysis() self.jsonData = self.sa.GetJson lastDate", "minVolume=0, maxVolume=0) data = app.HistoricalSnapshots(['AAPL']) snapshots = json.loads(data.text) symbols = '' for symbol", "False) filter.sa.WriteJson(filter.jsonData) if __name__ == '__main__': FilterVolumeProfileDailySetup.All() print('---------- done ----------') # filter =", "FilterVolumeProfileDailySetup: def __init__(self): self.sa = StockAnalysis() self.jsonData = self.sa.GetJson lastDate = self.getLastDate() self.starttime", "weights=volume, bw_method=kde_factor) xr = np.linspace(close.min(), close.max(), num_samples) kdy = kde(xr) ticks_per_sample = (xr.max()", "self.getLastDate() self.starttime = lastDate + 'T00:00:00' self.endtime = lastDate + 'T23:59:59' def getLastDate(self):", "vpro return False, 0 def Run(self, symbol=None): if symbol is None: symbol =", "tp = AllStocks.GetDailyStockData(symbol) if isLoaded: try: price = tp.Close[0] volProfiles = self.volumeProfiles(tp) isNear,", "__init__(self): self.sa = StockAnalysis() self.jsonData = self.sa.GetJson lastDate = self.getLastDate() self.starttime = lastDate", "= (xr.max() - xr.min()) / num_samples peaks, _ = signal.find_peaks(kdy) pkx = xr[peaks]", "HistoricalPrices(self, symbol, timeframe, datatype=None, starttime=None, endtime=None): start = self.startime end = self.endtime tf", "AlpacaSnapshots class FilterVolumeProfileDailySetup: def __init__(self): self.sa = StockAnalysis() self.jsonData = self.sa.GetJson lastDate =", "getLastDate(self): app = AlpacaSnapshots(favorites={}, minPrice=0, maxPrice=0, minVolume=0, maxVolume=0) data = app.HistoricalSnapshots(['AAPL']) snapshots =", "tp.Close[0] volProfiles = self.volumeProfiles(tp) isNear, vpro = self.isNearVP(price, volProfiles) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpro', isNear)", "end, tf) data = requests.get(url, headers=self.custom_header) return data def volumeProfiles(self, df): df.rename(columns={'Close': 'close'},", "(xr.max() - xr.min()) / num_samples peaks, _ = signal.find_peaks(kdy) pkx = xr[peaks] pky", "if symbol is None: symbol = self.symbol else: self.symbol = symbol isLoaded, tp", "vpro = self.isNearVP(price, volProfiles) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpro', isNear) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpros', round(float(vpro), 2))", "= self.isNearVP(price, volProfiles) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpro', isNear) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpros', round(float(vpro), 2)) except", "app.HistoricalSnapshots(['AAPL']) snapshots = json.loads(data.text) symbols = '' for symbol in snapshots: try: dailyBar", "vpro) / close < self.nearMargin): return True, vpro return False, 0 def Run(self,", "0.3 peaks, peak_props = signal.find_peaks(kdy, prominence=min_prom) pkx = xr[peaks] pky = kdy[peaks] return", "except Exception as e: logging.error(f'AlpacaSnapshot.getSnapshot(). 
ERROR: {e}') return None def HistoricalPrices(self, symbol, timeframe,", "self.symbol else: self.symbol = symbol isLoaded, tp = AllStocks.GetDailyStockData(symbol) if isLoaded: try: price", "def getLastDate(self): app = AlpacaSnapshots(favorites={}, minPrice=0, maxPrice=0, minVolume=0, maxVolume=0) data = app.HistoricalSnapshots(['AAPL']) snapshots", "@staticmethod def All(): filter = FilterVolumeProfileDailySetup() AllStocks.Run(filter.Run, False) filter.sa.WriteJson(filter.jsonData) if __name__ == '__main__':", "symbol, timeframe, datatype=None, starttime=None, endtime=None): start = self.startime end = self.endtime tf =", "minPrice=0, maxPrice=0, minVolume=0, maxVolume=0) data = app.HistoricalSnapshots(['AAPL']) snapshots = json.loads(data.text) symbols = ''", "Exception as e: logging.error(f'AlpacaSnapshot.getSnapshot(). ERROR: {e}') return None def HistoricalPrices(self, symbol, timeframe, datatype=None,", "isNear, vpro = self.isNearVP(price, volProfiles) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpro', isNear) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpros', round(float(vpro),", "__name__ == '__main__': FilterVolumeProfileDailySetup.All() print('---------- done ----------') # filter = FilterVolumeProfileDailySetup() # filter.Run('AAPL')", "= AlpacaHistorical.ALPACA_URL % ( symbol, start, end, tf) data = requests.get(url, headers=self.custom_header) return", "else: self.symbol = symbol isLoaded, tp = AllStocks.GetDailyStockData(symbol) if isLoaded: try: price =", "volProfiles) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpro', isNear) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpros', round(float(vpro), 2)) except Exception as", "self.jsonData = self.sa.GetJson lastDate = self.getLastDate() self.starttime = lastDate + 'T00:00:00' self.endtime =", "= '' for symbol in snapshots: try: dailyBar = snapshots[symbol]['dailyBar'] date = dailyBar['t'].split('T')[0]", "logging.error(f'AlpacaSnapshot.getSnapshot(). 
ERROR: {e}') return None def HistoricalPrices(self, symbol, timeframe, datatype=None, starttime=None, endtime=None): start", "symbol is None: symbol = self.symbol else: self.symbol = symbol isLoaded, tp =", "volProfiles = self.volumeProfiles(tp) isNear, vpro = self.isNearVP(price, volProfiles) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpro', isNear) self.sa.UpdateFilter(self.jsonData,", "= self.getLastDate() self.starttime = lastDate + 'T00:00:00' self.endtime = lastDate + 'T23:59:59' def", "import requests import json import logging from scipy import stats, signal import numpy", "'volume'}, inplace=True) data = df volume = data['volume'] close = data['close'] kde_factor =", "+ 'T23:59:59' def getLastDate(self): app = AlpacaSnapshots(favorites={}, minPrice=0, maxPrice=0, minVolume=0, maxVolume=0) data =", "= kde(xr) ticks_per_sample = (xr.max() - xr.min()) / num_samples peaks, _ = signal.find_peaks(kdy)", "self.symbol, 'vpro', isNear) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpros', round(float(vpro), 2)) except Exception as e: print(e)", "kdy.max() * 0.3 peaks, peak_props = signal.find_peaks(kdy, prominence=min_prom) pkx = xr[peaks] pky =", "= symbol isLoaded, tp = AllStocks.GetDailyStockData(symbol) if isLoaded: try: price = tp.Close[0] volProfiles", "self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpro', False) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpros', 0) return False @staticmethod def All():", "data = df volume = data['volume'] close = data['close'] kde_factor = 0.05 num_samples", "peaks, peak_props = signal.find_peaks(kdy, prominence=min_prom) pkx = xr[peaks] pky = kdy[peaks] return pkx", "end = self.endtime tf = '1Min' url = AlpacaHistorical.ALPACA_URL % ( symbol, start,", "= self.endtime tf = '1Min' url = AlpacaHistorical.ALPACA_URL % ( symbol, start, end,", "AlpacaHistorical.ALPACA_URL % ( symbol, start, end, tf) data = requests.get(url, headers=self.custom_header) return data", "try: dailyBar = snapshots[symbol]['dailyBar'] date = dailyBar['t'].split('T')[0] return date except Exception as e:", "json import logging from scipy import stats, signal import numpy as np import", "symbols = '' for symbol in snapshots: try: dailyBar = snapshots[symbol]['dailyBar'] date =", "inplace=True) df.rename(columns={'Volume': 'volume'}, inplace=True) data = df volume = data['volume'] close = data['close']", "min_prom = kdy.max() * 0.3 peaks, peak_props = signal.find_peaks(kdy, prominence=min_prom) pkx = xr[peaks]", "kdy[peaks] return pkx def isNearVP(self, close, vpros): for vpro in vpros: if (abs(close", "return pkx def isNearVP(self, close, vpros): for vpro in vpros: if (abs(close -", "self.sa.GetJson lastDate = self.getLastDate() self.starttime = lastDate + 'T00:00:00' self.endtime = lastDate +", "None def HistoricalPrices(self, symbol, timeframe, datatype=None, starttime=None, endtime=None): start = self.startime end =", "AllStocks.Run(filter.Run, False) filter.sa.WriteJson(filter.jsonData) if __name__ == '__main__': FilterVolumeProfileDailySetup.All() print('---------- done ----------') # filter", "data['volume'] close = data['close'] kde_factor = 0.05 num_samples = len(df) kde = stats.gaussian_kde(close,", "= StockAnalysis() self.jsonData = self.sa.GetJson lastDate = self.getLastDate() self.starttime = lastDate + 'T00:00:00'", "kde(xr) ticks_per_sample = (xr.max() - xr.min()) / num_samples peaks, _ = signal.find_peaks(kdy) pkx", "pkx def isNearVP(self, close, vpros): for vpro in vpros: if (abs(close - vpro)", "= lastDate + 'T00:00:00' self.endtime = 
lastDate + 'T23:59:59' def getLastDate(self): app =", "if (abs(close - vpro) / close < self.nearMargin): return True, vpro return False,", "'__main__': FilterVolumeProfileDailySetup.All() print('---------- done ----------') # filter = FilterVolumeProfileDailySetup() # filter.Run('AAPL') # print('----------", "import pandas as pd from util import StockAnalysis, AllStocks from alpaca import AlpacaHistorical,", "from alpaca import AlpacaHistorical, AlpacaSnapshots class FilterVolumeProfileDailySetup: def __init__(self): self.sa = StockAnalysis() self.jsonData", "= AlpacaSnapshots(favorites={}, minPrice=0, maxPrice=0, minVolume=0, maxVolume=0) data = app.HistoricalSnapshots(['AAPL']) snapshots = json.loads(data.text) symbols", "FilterVolumeProfileDailySetup() AllStocks.Run(filter.Run, False) filter.sa.WriteJson(filter.jsonData) if __name__ == '__main__': FilterVolumeProfileDailySetup.All() print('---------- done ----------') #", "= kdy[peaks] min_prom = kdy.max() * 0.3 peaks, peak_props = signal.find_peaks(kdy, prominence=min_prom) pkx", "for vpro in vpros: if (abs(close - vpro) / close < self.nearMargin): return", "import StockAnalysis, AllStocks from alpaca import AlpacaHistorical, AlpacaSnapshots class FilterVolumeProfileDailySetup: def __init__(self): self.sa", "def volumeProfiles(self, df): df.rename(columns={'Close': 'close'}, inplace=True) df.rename(columns={'Volume': 'volume'}, inplace=True) data = df volume", "isLoaded, tp = AllStocks.GetDailyStockData(symbol) if isLoaded: try: price = tp.Close[0] volProfiles = self.volumeProfiles(tp)", "xr = np.linspace(close.min(), close.max(), num_samples) kdy = kde(xr) ticks_per_sample = (xr.max() - xr.min())", "return None def HistoricalPrices(self, symbol, timeframe, datatype=None, starttime=None, endtime=None): start = self.startime end", "FilterVolumeProfileDailySetup.All() print('---------- done ----------') # filter = FilterVolumeProfileDailySetup() # filter.Run('AAPL') # print('---------- done", "self.endtime tf = '1Min' url = AlpacaHistorical.ALPACA_URL % ( symbol, start, end, tf)", "numpy as np import pandas as pd from util import StockAnalysis, AllStocks from", "np.linspace(close.min(), close.max(), num_samples) kdy = kde(xr) ticks_per_sample = (xr.max() - xr.min()) / num_samples", "maxVolume=0) data = app.HistoricalSnapshots(['AAPL']) snapshots = json.loads(data.text) symbols = '' for symbol in", "prominence=min_prom) pkx = xr[peaks] pky = kdy[peaks] return pkx def isNearVP(self, close, vpros):", "requests.get(url, headers=self.custom_header) return data def volumeProfiles(self, df): df.rename(columns={'Close': 'close'}, inplace=True) df.rename(columns={'Volume': 'volume'}, inplace=True)", "def isNearVP(self, close, vpros): for vpro in vpros: if (abs(close - vpro) /", "start = self.startime end = self.endtime tf = '1Min' url = AlpacaHistorical.ALPACA_URL %", "filter = FilterVolumeProfileDailySetup() AllStocks.Run(filter.Run, False) filter.sa.WriteJson(filter.jsonData) if __name__ == '__main__': FilterVolumeProfileDailySetup.All() print('---------- done", "num_samples = len(df) kde = stats.gaussian_kde(close, weights=volume, bw_method=kde_factor) xr = np.linspace(close.min(), close.max(), num_samples)", "in snapshots: try: dailyBar = snapshots[symbol]['dailyBar'] date = dailyBar['t'].split('T')[0] return date except Exception", "AllStocks from alpaca import AlpacaHistorical, AlpacaSnapshots class FilterVolumeProfileDailySetup: def __init__(self): self.sa = StockAnalysis()", "starttime=None, endtime=None): start = self.startime end = 
self.endtime tf = '1Min' url =", "< self.nearMargin): return True, vpro return False, 0 def Run(self, symbol=None): if symbol", "pky = kdy[peaks] min_prom = kdy.max() * 0.3 peaks, peak_props = signal.find_peaks(kdy, prominence=min_prom)", "xr[peaks] pky = kdy[peaks] return pkx def isNearVP(self, close, vpros): for vpro in", "_ = signal.find_peaks(kdy) pkx = xr[peaks] pky = kdy[peaks] min_prom = kdy.max() *", "xr[peaks] pky = kdy[peaks] min_prom = kdy.max() * 0.3 peaks, peak_props = signal.find_peaks(kdy,", "def HistoricalPrices(self, symbol, timeframe, datatype=None, starttime=None, endtime=None): start = self.startime end = self.endtime", "'vpro', isNear) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpros', round(float(vpro), 2)) except Exception as e: print(e) self.sa.UpdateFilter(self.jsonData,", "app = AlpacaSnapshots(favorites={}, minPrice=0, maxPrice=0, minVolume=0, maxVolume=0) data = app.HistoricalSnapshots(['AAPL']) snapshots = json.loads(data.text)", "= json.loads(data.text) symbols = '' for symbol in snapshots: try: dailyBar = snapshots[symbol]['dailyBar']", "symbol = self.symbol else: self.symbol = symbol isLoaded, tp = AllStocks.GetDailyStockData(symbol) if isLoaded:", "ticks_per_sample = (xr.max() - xr.min()) / num_samples peaks, _ = signal.find_peaks(kdy) pkx =", "kdy[peaks] min_prom = kdy.max() * 0.3 peaks, peak_props = signal.find_peaks(kdy, prominence=min_prom) pkx =", "as pd from util import StockAnalysis, AllStocks from alpaca import AlpacaHistorical, AlpacaSnapshots class", "+ 'T00:00:00' self.endtime = lastDate + 'T23:59:59' def getLastDate(self): app = AlpacaSnapshots(favorites={}, minPrice=0,", "print('---------- done ----------') # filter = FilterVolumeProfileDailySetup() # filter.Run('AAPL') # print('---------- done ----------')", "date except Exception as e: logging.error(f'AlpacaSnapshot.getSnapshot(). ERROR: {e}') return None def HistoricalPrices(self, symbol,", "= dailyBar['t'].split('T')[0] return date except Exception as e: logging.error(f'AlpacaSnapshot.getSnapshot(). ERROR: {e}') return None", "peak_props = signal.find_peaks(kdy, prominence=min_prom) pkx = xr[peaks] pky = kdy[peaks] return pkx def", "dailyBar['t'].split('T')[0] return date except Exception as e: logging.error(f'AlpacaSnapshot.getSnapshot(). 
ERROR: {e}') return None def", "Exception as e: print(e) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpro', False) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpros', 0) return", "{e}') return None def HistoricalPrices(self, symbol, timeframe, datatype=None, starttime=None, endtime=None): start = self.startime", "endtime=None): start = self.startime end = self.endtime tf = '1Min' url = AlpacaHistorical.ALPACA_URL", "lastDate = self.getLastDate() self.starttime = lastDate + 'T00:00:00' self.endtime = lastDate + 'T23:59:59'", "pd from util import StockAnalysis, AllStocks from alpaca import AlpacaHistorical, AlpacaSnapshots class FilterVolumeProfileDailySetup:", "maxPrice=0, minVolume=0, maxVolume=0) data = app.HistoricalSnapshots(['AAPL']) snapshots = json.loads(data.text) symbols = '' for", "snapshots: try: dailyBar = snapshots[symbol]['dailyBar'] date = dailyBar['t'].split('T')[0] return date except Exception as", "False @staticmethod def All(): filter = FilterVolumeProfileDailySetup() AllStocks.Run(filter.Run, False) filter.sa.WriteJson(filter.jsonData) if __name__ ==", "= len(df) kde = stats.gaussian_kde(close, weights=volume, bw_method=kde_factor) xr = np.linspace(close.min(), close.max(), num_samples) kdy", "dailyBar = snapshots[symbol]['dailyBar'] date = dailyBar['t'].split('T')[0] return date except Exception as e: logging.error(f'AlpacaSnapshot.getSnapshot().", "= app.HistoricalSnapshots(['AAPL']) snapshots = json.loads(data.text) symbols = '' for symbol in snapshots: try:", "volumeProfiles(self, df): df.rename(columns={'Close': 'close'}, inplace=True) df.rename(columns={'Volume': 'volume'}, inplace=True) data = df volume =", "= requests.get(url, headers=self.custom_header) return data def volumeProfiles(self, df): df.rename(columns={'Close': 'close'}, inplace=True) df.rename(columns={'Volume': 'volume'},", "datatype=None, starttime=None, endtime=None): start = self.startime end = self.endtime tf = '1Min' url", "= data['volume'] close = data['close'] kde_factor = 0.05 num_samples = len(df) kde =", "vpro in vpros: if (abs(close - vpro) / close < self.nearMargin): return True,", "self.volumeProfiles(tp) isNear, vpro = self.isNearVP(price, volProfiles) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpro', isNear) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpros',", "import AlpacaHistorical, AlpacaSnapshots class FilterVolumeProfileDailySetup: def __init__(self): self.sa = StockAnalysis() self.jsonData = self.sa.GetJson", "scipy import stats, signal import numpy as np import pandas as pd from", "return False, 0 def Run(self, symbol=None): if symbol is None: symbol = self.symbol", "- xr.min()) / num_samples peaks, _ = signal.find_peaks(kdy) pkx = xr[peaks] pky =", "as e: logging.error(f'AlpacaSnapshot.getSnapshot(). 
ERROR: {e}') return None def HistoricalPrices(self, symbol, timeframe, datatype=None, starttime=None,", "stats.gaussian_kde(close, weights=volume, bw_method=kde_factor) xr = np.linspace(close.min(), close.max(), num_samples) kdy = kde(xr) ticks_per_sample =", "= 0.05 num_samples = len(df) kde = stats.gaussian_kde(close, weights=volume, bw_method=kde_factor) xr = np.linspace(close.min(),", "bw_method=kde_factor) xr = np.linspace(close.min(), close.max(), num_samples) kdy = kde(xr) ticks_per_sample = (xr.max() -", "signal import numpy as np import pandas as pd from util import StockAnalysis,", "kde = stats.gaussian_kde(close, weights=volume, bw_method=kde_factor) xr = np.linspace(close.min(), close.max(), num_samples) kdy = kde(xr)", "data['close'] kde_factor = 0.05 num_samples = len(df) kde = stats.gaussian_kde(close, weights=volume, bw_method=kde_factor) xr", "(abs(close - vpro) / close < self.nearMargin): return True, vpro return False, 0", "if __name__ == '__main__': FilterVolumeProfileDailySetup.All() print('---------- done ----------') # filter = FilterVolumeProfileDailySetup() #", "StockAnalysis, AllStocks from alpaca import AlpacaHistorical, AlpacaSnapshots class FilterVolumeProfileDailySetup: def __init__(self): self.sa =", "% ( symbol, start, end, tf) data = requests.get(url, headers=self.custom_header) return data def", "- vpro) / close < self.nearMargin): return True, vpro return False, 0 def", "try: price = tp.Close[0] volProfiles = self.volumeProfiles(tp) isNear, vpro = self.isNearVP(price, volProfiles) self.sa.UpdateFilter(self.jsonData,", "symbol isLoaded, tp = AllStocks.GetDailyStockData(symbol) if isLoaded: try: price = tp.Close[0] volProfiles =", "return True, vpro return False, 0 def Run(self, symbol=None): if symbol is None:", "def All(): filter = FilterVolumeProfileDailySetup() AllStocks.Run(filter.Run, False) filter.sa.WriteJson(filter.jsonData) if __name__ == '__main__': FilterVolumeProfileDailySetup.All()", "'T00:00:00' self.endtime = lastDate + 'T23:59:59' def getLastDate(self): app = AlpacaSnapshots(favorites={}, minPrice=0, maxPrice=0,", "= '1Min' url = AlpacaHistorical.ALPACA_URL % ( symbol, start, end, tf) data =", "'close'}, inplace=True) df.rename(columns={'Volume': 'volume'}, inplace=True) data = df volume = data['volume'] close =", "<gh_stars>0 import requests import json import logging from scipy import stats, signal import", "= self.startime end = self.endtime tf = '1Min' url = AlpacaHistorical.ALPACA_URL % (", "self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpros', round(float(vpro), 2)) except Exception as e: print(e) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpro',", "self.starttime = lastDate + 'T00:00:00' self.endtime = lastDate + 'T23:59:59' def getLastDate(self): app", "from scipy import stats, signal import numpy as np import pandas as pd", "df.rename(columns={'Volume': 'volume'}, inplace=True) data = df volume = data['volume'] close = data['close'] kde_factor", "except Exception as e: print(e) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpro', False) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpros', 0)", "= stats.gaussian_kde(close, weights=volume, bw_method=kde_factor) xr = np.linspace(close.min(), close.max(), num_samples) kdy = kde(xr) ticks_per_sample", "import stats, signal import numpy as np import pandas as pd from util", "start, end, tf) data = requests.get(url, headers=self.custom_header) return data def volumeProfiles(self, df): df.rename(columns={'Close':", "close = data['close'] kde_factor = 0.05 num_samples = 
len(df) kde = stats.gaussian_kde(close, weights=volume,", "= np.linspace(close.min(), close.max(), num_samples) kdy = kde(xr) ticks_per_sample = (xr.max() - xr.min()) /", "self.endtime = lastDate + 'T23:59:59' def getLastDate(self): app = AlpacaSnapshots(favorites={}, minPrice=0, maxPrice=0, minVolume=0,", "close.max(), num_samples) kdy = kde(xr) ticks_per_sample = (xr.max() - xr.min()) / num_samples peaks,", "util import StockAnalysis, AllStocks from alpaca import AlpacaHistorical, AlpacaSnapshots class FilterVolumeProfileDailySetup: def __init__(self):", "signal.find_peaks(kdy, prominence=min_prom) pkx = xr[peaks] pky = kdy[peaks] return pkx def isNearVP(self, close,", "self.symbol, 'vpros', round(float(vpro), 2)) except Exception as e: print(e) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpro', False)", "'vpros', 0) return False @staticmethod def All(): filter = FilterVolumeProfileDailySetup() AllStocks.Run(filter.Run, False) filter.sa.WriteJson(filter.jsonData)", "0) return False @staticmethod def All(): filter = FilterVolumeProfileDailySetup() AllStocks.Run(filter.Run, False) filter.sa.WriteJson(filter.jsonData) if", "== '__main__': FilterVolumeProfileDailySetup.All() print('---------- done ----------') # filter = FilterVolumeProfileDailySetup() # filter.Run('AAPL') #", "def __init__(self): self.sa = StockAnalysis() self.jsonData = self.sa.GetJson lastDate = self.getLastDate() self.starttime =", "data def volumeProfiles(self, df): df.rename(columns={'Close': 'close'}, inplace=True) df.rename(columns={'Volume': 'volume'}, inplace=True) data = df", "date = dailyBar['t'].split('T')[0] return date except Exception as e: logging.error(f'AlpacaSnapshot.getSnapshot(). ERROR: {e}') return", "False, 0 def Run(self, symbol=None): if symbol is None: symbol = self.symbol else:", "volume = data['volume'] close = data['close'] kde_factor = 0.05 num_samples = len(df) kde", "filter.sa.WriteJson(filter.jsonData) if __name__ == '__main__': FilterVolumeProfileDailySetup.All() print('---------- done ----------') # filter = FilterVolumeProfileDailySetup()", "as e: print(e) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpro', False) self.sa.UpdateFilter(self.jsonData, self.symbol, 'vpros', 0) return False" ]
[ "will have [GraphNode { val = 'A' edges = { 'B': 4 'C':", "}, \"\"\" def __init__(self, val: str): self.edges: Dict[UndirectedGraphNode, list] = {} self.val =", "= { 'A': 1 'B': 7 } }, GraphNode { val = 'D'", "(cost 2) We will have [GraphNode { val = 'A' edges = {", "GraphNode { val = 'D' edges = { 'B': 2 } }, \"\"\"", "1) B <-> C (cost 7) B <-> D (cost 2) We will", "'B': 2 } }, \"\"\" def __init__(self, val: str): self.edges: Dict[UndirectedGraphNode, list] =", "{ 'A': 4 'C': 7 'D': 2 } }, GraphNode { val =", "[GraphNode { val = 'A' edges = { 'B': 4 'C': 1 }", "= { 'B': 4 'C': 1 } }, GraphNode { val = 'B'", "have [GraphNode { val = 'A' edges = { 'B': 4 'C': 1", "'C': 1 } }, GraphNode { val = 'B' edges = { 'A':", "2 } }, GraphNode { val = 'C' edges = { 'A': 1", "= { 'A': 4 'C': 7 'D': 2 } }, GraphNode { val", "= 'C' edges = { 'A': 1 'B': 7 } }, GraphNode {", "'C' edges = { 'A': 1 'B': 7 } }, GraphNode { val", "'A': 1 'B': 7 } }, GraphNode { val = 'D' edges =", "'A': 4 'C': 7 'D': 2 } }, GraphNode { val = 'C'", "{ 'A': 1 'B': 7 } }, GraphNode { val = 'D' edges", "GraphNode { val = 'C' edges = { 'A': 1 'B': 7 }", "2 } }, \"\"\" def __init__(self, val: str): self.edges: Dict[UndirectedGraphNode, list] = {}", "4) A <-> C (cost 1) B <-> C (cost 7) B <->", "UndirectedGraphNode: \"\"\" Definition of GraphNode For the weighted undirected graph: A <-> B", "{ 'B': 2 } }, \"\"\" def __init__(self, val: str): self.edges: Dict[UndirectedGraphNode, list]", "graph: A <-> B (cost 4) A <-> C (cost 1) B <->", "(cost 4) A <-> C (cost 1) B <-> C (cost 7) B", "import Dict, List class UndirectedGraphNode: \"\"\" Definition of GraphNode For the weighted undirected", "D (cost 2) We will have [GraphNode { val = 'A' edges =", "List class UndirectedGraphNode: \"\"\" Definition of GraphNode For the weighted undirected graph: A", "1 } }, GraphNode { val = 'B' edges = { 'A': 4", "<-> C (cost 7) B <-> D (cost 2) We will have [GraphNode", "val = 'C' edges = { 'A': 1 'B': 7 } }, GraphNode", "the weighted undirected graph: A <-> B (cost 4) A <-> C (cost", "B <-> C (cost 7) B <-> D (cost 2) We will have", "\"\"\" Definition of GraphNode For the weighted undirected graph: A <-> B (cost", "'C': 7 'D': 2 } }, GraphNode { val = 'C' edges =", "edges = { 'B': 4 'C': 1 } }, GraphNode { val =", "{ val = 'B' edges = { 'A': 4 'C': 7 'D': 2", "'B': 4 'C': 1 } }, GraphNode { val = 'B' edges =", "B (cost 4) A <-> C (cost 1) B <-> C (cost 7)", "C (cost 7) B <-> D (cost 2) We will have [GraphNode {", "<-> B (cost 4) A <-> C (cost 1) B <-> C (cost", "}, GraphNode { val = 'D' edges = { 'B': 2 } },", "4 'C': 1 } }, GraphNode { val = 'B' edges = {", "<-> C (cost 1) B <-> C (cost 7) B <-> D (cost", "2) We will have [GraphNode { val = 'A' edges = { 'B':", "}, GraphNode { val = 'C' edges = { 'A': 1 'B': 7", "\"\"\" def __init__(self, val: str): self.edges: Dict[UndirectedGraphNode, list] = {} self.val = val", "Definition of GraphNode For the weighted undirected graph: A <-> B (cost 4)", "7) B <-> D (cost 2) We will have [GraphNode { val =", "{ val = 'C' edges = { 'A': 1 'B': 7 } },", "{ val = 'D' edges = { 'B': 2 } }, \"\"\" def", "= { 'B': 2 } }, \"\"\" def __init__(self, val: str): self.edges: Dict[UndirectedGraphNode,", "For the weighted undirected graph: A <-> B (cost 4) A <-> C", "typing import Dict, List class UndirectedGraphNode: \"\"\" Definition of GraphNode For the weighted", "1 'B': 7 } }, GraphNode { val = 'D' edges = {", "'A' edges = { 'B': 4 'C': 1 } }, GraphNode { val", "'D' edges = { 'B': 2 } }, \"\"\" def __init__(self, val: 
str):", "} }, \"\"\" def __init__(self, val: str): self.edges: Dict[UndirectedGraphNode, list] = {} self.val", "class UndirectedGraphNode: \"\"\" Definition of GraphNode For the weighted undirected graph: A <->", "We will have [GraphNode { val = 'A' edges = { 'B': 4", "val = 'B' edges = { 'A': 4 'C': 7 'D': 2 }", "'B': 7 } }, GraphNode { val = 'D' edges = { 'B':", "edges = { 'A': 4 'C': 7 'D': 2 } }, GraphNode {", "val = 'D' edges = { 'B': 2 } }, \"\"\" def __init__(self,", "A <-> B (cost 4) A <-> C (cost 1) B <-> C", "(cost 1) B <-> C (cost 7) B <-> D (cost 2) We", "GraphNode { val = 'B' edges = { 'A': 4 'C': 7 'D':", "of GraphNode For the weighted undirected graph: A <-> B (cost 4) A", "Dict, List class UndirectedGraphNode: \"\"\" Definition of GraphNode For the weighted undirected graph:", "A <-> C (cost 1) B <-> C (cost 7) B <-> D", "(cost 7) B <-> D (cost 2) We will have [GraphNode { val", "'D': 2 } }, GraphNode { val = 'C' edges = { 'A':", "val = 'A' edges = { 'B': 4 'C': 1 } }, GraphNode", "= 'A' edges = { 'B': 4 'C': 1 } }, GraphNode {", "weighted undirected graph: A <-> B (cost 4) A <-> C (cost 1)", "{ 'B': 4 'C': 1 } }, GraphNode { val = 'B' edges", "{ val = 'A' edges = { 'B': 4 'C': 1 } },", "} }, GraphNode { val = 'C' edges = { 'A': 1 'B':", "B <-> D (cost 2) We will have [GraphNode { val = 'A'", "} }, GraphNode { val = 'B' edges = { 'A': 4 'C':", "undirected graph: A <-> B (cost 4) A <-> C (cost 1) B", "<-> D (cost 2) We will have [GraphNode { val = 'A' edges", "= 'D' edges = { 'B': 2 } }, \"\"\" def __init__(self, val:", "edges = { 'B': 2 } }, \"\"\" def __init__(self, val: str): self.edges:", "edges = { 'A': 1 'B': 7 } }, GraphNode { val =", "} }, GraphNode { val = 'D' edges = { 'B': 2 }", "GraphNode For the weighted undirected graph: A <-> B (cost 4) A <->", "= 'B' edges = { 'A': 4 'C': 7 'D': 2 } },", "7 'D': 2 } }, GraphNode { val = 'C' edges = {", "4 'C': 7 'D': 2 } }, GraphNode { val = 'C' edges", "from typing import Dict, List class UndirectedGraphNode: \"\"\" Definition of GraphNode For the", "7 } }, GraphNode { val = 'D' edges = { 'B': 2", "'B' edges = { 'A': 4 'C': 7 'D': 2 } }, GraphNode", "C (cost 1) B <-> C (cost 7) B <-> D (cost 2)", "}, GraphNode { val = 'B' edges = { 'A': 4 'C': 7" ]
[ "decision_steps, terminal_steps): for obs, a_id in zip(decision_steps.obs[0], decision_steps.agent_id): if decision_steps.reward[a_id] == 0: #", "self.episode_length).to(self.device) self.episode_step = torch.zeros(num_workers, dtype=torch.long).to(self.device) self.gt = torch.zeros(Config.batch_size + 1).to(self.device) self.advantages = torch.zeros(Config.batch_size", "logprob): cnt = 0 actionsTensor = torch.Tensor(actions).to(self.device) for obs, a_id in zip(decision_steps.obs[0], decision_steps.agent_id):", "= torch.zeros(num_workers, self.episode_length, action_shape).to(self.device) self.rewards_episode = torch.zeros(num_workers, self.episode_length).to(self.device) self.new_states_episode = torch.zeros(num_workers, self.episode_length, state_shape).to(self.device)", "self.gt[i] = gt self.advantages[i] = gt - state_values[i] def gae_advantage(self, state_values, new_state_values): self.full", "self.dones[i]) def reset(self, full=False): if full: self.buffer_index = 0 self.episode_step[self.episode_step != 0] =", "its episode. When episode for certain agent ends, whole episode buffer is inserted", "obs, a_id in zip(decision_steps.obs[0], decision_steps.agent_id): self.states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs) self.actions_episode[a_id, self.episode_step[a_id]] = actionsTensor[cnt]", "in which will store each # step of only its episode. When episode", "inserted to the main buffer def __init__(self, num_workers, state_shape, action_shape, episode_length): self.device =", "torch.zeros(num_workers, self.episode_length).to(self.device) self.episode_step = torch.zeros(num_workers, dtype=torch.long).to(self.device) self.gt = torch.zeros(Config.batch_size + 1).to(self.device) self.advantages =", "OVO continue self.rewards_episode[a_id, self.episode_step[a_id]] = 0.1 self.new_states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs) self.dones_episode[a_id, self.episode_step[a_id]] =", "is inserted to the main buffer def __init__(self, num_workers, state_shape, action_shape, episode_length): self.device", "torch.zeros(Config.batch_size).to(self.device) self.new_states = torch.zeros(Config.batch_size, state_shape).to(self.device) self.dones = torch.zeros(Config.batch_size).to(self.device) #----------------------------------------------- EPISODE BUFFER ------------------------------------------------ self.states_episode", "last_index] = self.dones_episode[a_id, : last_index - self.buffer_index] self.buffer_index = last_index % Config.batch_size if", "1).to(self.device) self.advantages = torch.zeros(Config.batch_size + 1).to(self.device) self.full = False def add_old(self, decision_steps, actions,", "= False self.gt[Config.batch_size] = new_state_values[-1] for i in reversed(range(Config.batch_size)): delta = self.rewards[i] +", "+= 1 for obs, a_id in zip(terminal_steps.obs[0], terminal_steps.agent_id): self.rewards_episode[a_id, self.episode_step[a_id]] = -1 self.new_states_episode[a_id,", "= torch.zeros(num_workers, self.episode_length).to(self.device) self.episode_step = torch.zeros(num_workers, dtype=torch.long).to(self.device) self.gt = torch.zeros(Config.batch_size + 1).to(self.device) self.advantages", "for certain agent ends, whole episode buffer is inserted to the main buffer", "episodes in # buffer so the advantage can be calculated, each agent will", "whole episode buffer is inserted to the main buffer def __init__(self, num_workers, state_shape,", "EPISODE BUFFER ------------------------------------------------ self.states_episode = 
torch.zeros(num_workers, self.episode_length, state_shape).to(self.device) self.actions_episode = torch.zeros(num_workers, self.episode_length, action_shape).to(self.device)", "the enviroment we use has multiple agents that work in parallel and PPO", "if torch.cuda.is_available() else 'cpu' self.buffer_index = 0 self.episode_length = episode_length #------------------------------------------------- MAIN BUFFER", "self.dones = torch.zeros(Config.batch_size).to(self.device) #----------------------------------------------- EPISODE BUFFER ------------------------------------------------ self.states_episode = torch.zeros(num_workers, self.episode_length, state_shape).to(self.device) self.actions_episode", "self.buffer_index] self.buffer_index = last_index % Config.batch_size if self.buffer_index == 0: self.full = True", "advantage(self, state_values, last_state_value): self.full = False gt = last_state_value for i in reversed(range(Config.batch_size)):", "self.episode_step[a_id] += 1 for obs, a_id in zip(terminal_steps.obs[0], terminal_steps.agent_id): self.rewards_episode[a_id, self.episode_step[a_id]] = -1", "Since the enviroment we use has multiple agents that work in parallel and", "parallel and PPO requires to store whole episodes in # buffer so the", "each agent will have separate episode buffer in which will store each #", "self.new_states = torch.zeros(Config.batch_size, state_shape).to(self.device) self.dones = torch.zeros(Config.batch_size).to(self.device) #----------------------------------------------- EPISODE BUFFER ------------------------------------------------ self.states_episode =", "= torch.zeros(Config.batch_size).to(self.device) #----------------------------------------------- EPISODE BUFFER ------------------------------------------------ self.states_episode = torch.zeros(num_workers, self.episode_length, state_shape).to(self.device) self.actions_episode =", "#----------------------------------------------- EPISODE BUFFER ------------------------------------------------ self.states_episode = torch.zeros(num_workers, self.episode_length, state_shape).to(self.device) self.actions_episode = torch.zeros(num_workers, self.episode_length,", "dtype=torch.long).to(self.device) self.gt = torch.zeros(Config.batch_size + 1).to(self.device) self.advantages = torch.zeros(Config.batch_size + 1).to(self.device) self.full =", "num_workers, state_shape, action_shape, episode_length): self.device = 'cuda' if torch.cuda.is_available() else 'cpu' self.buffer_index =", "torch.Tensor(actions).to(self.device) for obs, a_id in zip(decision_steps.obs[0], decision_steps.agent_id): self.states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs) self.actions_episode[a_id, self.episode_step[a_id]]", "self.new_states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs) self.dones_episode[a_id, self.episode_step[a_id]] = 1 self.episode_step[a_id] += 1 if not", "+ Config.gamma * gt * (1 - self.dones[i]) self.gt[i] = gt self.advantages[i] =", "self.dones[self.buffer_index: last_index] = self.dones_episode[a_id, : last_index - self.buffer_index] self.buffer_index = last_index % Config.batch_size", "= last_state_value for i in reversed(range(Config.batch_size)): gt = self.rewards[i] + Config.gamma * gt", "------------------------------------------------- self.states = torch.zeros(Config.batch_size, state_shape).to(self.device) self.actions = torch.zeros(Config.batch_size, action_shape).to(self.device) self.logprob = torch.zeros(Config.batch_size, action_shape).to(self.device)", 
"state_shape).to(self.device) self.dones_episode = torch.zeros(num_workers, self.episode_length).to(self.device) self.episode_step = torch.zeros(num_workers, dtype=torch.long).to(self.device) self.gt = torch.zeros(Config.batch_size +", "last_index] = self.rewards_episode[a_id, : last_index - self.buffer_index] self.new_states[self.buffer_index: last_index] = self.new_states_episode[a_id, : last_index", "SKIPUJ OVO continue self.rewards_episode[a_id, self.episode_step[a_id]] = 0.1 self.new_states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs) self.dones_episode[a_id, self.episode_step[a_id]]", "in zip(terminal_steps.obs[0], terminal_steps.agent_id): self.rewards_episode[a_id, self.episode_step[a_id]] = -1 self.new_states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs) self.dones_episode[a_id, self.episode_step[a_id]]", "= 0 self.episode_step[a_id] += 1 for obs, a_id in zip(terminal_steps.obs[0], terminal_steps.agent_id): self.rewards_episode[a_id, self.episode_step[a_id]]", "self.episode_step[a_id]] = 0.1 self.new_states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs) self.dones_episode[a_id, self.episode_step[a_id]] = 0 self.episode_step[a_id] +=", "= 0 self.episode_length = episode_length #------------------------------------------------- MAIN BUFFER ------------------------------------------------- self.states = torch.zeros(Config.batch_size, state_shape).to(self.device)", "- self.dones[i]) # For critic self.gt[i] = self.rewards[i] + Config.gamma * self.gt[i+1] *", "self.actions_episode = torch.zeros(num_workers, self.episode_length, action_shape).to(self.device) self.logprob_episode = torch.zeros(num_workers, self.episode_length, action_shape).to(self.device) self.rewards_episode = torch.zeros(num_workers,", "decision_steps.agent_id): self.states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs) self.actions_episode[a_id, self.episode_step[a_id]] = actionsTensor[cnt] self.logprob_episode[a_id, self.episode_step[a_id]] = logprob[cnt]", "self.rewards[i] + Config.gamma * new_state_values[i] * (1 - self.dones[i]) - state_values[i] self.advantages[i] =", "and PPO requires to store whole episodes in # buffer so the advantage", "* Config.gamma * self.advantages[i+1] * (1 - self.dones[i]) # For critic self.gt[i] =", "def advantage(self, state_values, last_state_value): self.full = False gt = last_state_value for i in", "store each # step of only its episode. 
When episode for certain agent", "obs, a_id in zip(terminal_steps.obs[0], terminal_steps.agent_id): self.rewards_episode[a_id, self.episode_step[a_id]] = -1 self.new_states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs)", "delta = self.rewards[i] + Config.gamma * new_state_values[i] * (1 - self.dones[i]) - state_values[i]", "self.buffer_index = 0 self.episode_length = episode_length #------------------------------------------------- MAIN BUFFER ------------------------------------------------- self.states = torch.zeros(Config.batch_size,", "(1 - self.dones[i]) - state_values[i] self.advantages[i] = delta + Config.gae_lambda * Config.gamma *", "self.episode_step[a_id], Config.batch_size) self.states[self.buffer_index: last_index] = self.states_episode[a_id, : last_index - self.buffer_index] self.actions[self.buffer_index: last_index] =", "self.states = torch.zeros(Config.batch_size, state_shape).to(self.device) self.actions = torch.zeros(Config.batch_size, action_shape).to(self.device) self.logprob = torch.zeros(Config.batch_size, action_shape).to(self.device) self.rewards", "= torch.from_numpy(obs) self.actions_episode[a_id, self.episode_step[a_id]] = actionsTensor[cnt] self.logprob_episode[a_id, self.episode_step[a_id]] = logprob[cnt] cnt += 1", "delta + Config.gae_lambda * Config.gamma * self.advantages[i+1] * (1 - self.dones[i]) # For", "'cpu' self.buffer_index = 0 self.episode_length = episode_length #------------------------------------------------- MAIN BUFFER ------------------------------------------------- self.states =", "class Buffer: # Since the enviroment we use has multiple agents that work", "self.actions = torch.zeros(Config.batch_size, action_shape).to(self.device) self.logprob = torch.zeros(Config.batch_size, action_shape).to(self.device) self.rewards = torch.zeros(Config.batch_size).to(self.device) self.new_states =", "- self.buffer_index] self.buffer_index = last_index % Config.batch_size if self.buffer_index == 0: self.full =", "self.advantages[i] = gt - state_values[i] def gae_advantage(self, state_values, new_state_values): self.full = False self.gt[Config.batch_size]", "the main buffer def __init__(self, num_workers, state_shape, action_shape, episode_length): self.device = 'cuda' if", "= torch.Tensor(actions).to(self.device) for obs, a_id in zip(decision_steps.obs[0], decision_steps.agent_id): self.states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs) self.actions_episode[a_id,", "state_values[i] self.advantages[i] = delta + Config.gae_lambda * Config.gamma * self.advantages[i+1] * (1 -", "actionsTensor[cnt] self.logprob_episode[a_id, self.episode_step[a_id]] = logprob[cnt] cnt += 1 def add(self, decision_steps, terminal_steps): for", "gt self.advantages[i] = gt - state_values[i] def gae_advantage(self, state_values, new_state_values): self.full = False", "zip(decision_steps.obs[0], decision_steps.agent_id): if decision_steps.reward[a_id] == 0: # TERMINALNI JE KORAK, SKIPUJ OVO continue", "self.dones[i]) - state_values[i] self.advantages[i] = delta + Config.gae_lambda * Config.gamma * self.advantages[i+1] *", "self.gt = torch.zeros(Config.batch_size + 1).to(self.device) self.advantages = torch.zeros(Config.batch_size + 1).to(self.device) self.full = False", "= new_state_values[-1] for i in reversed(range(Config.batch_size)): delta = self.rewards[i] + Config.gamma * new_state_values[i]", "0 self.episode_length = episode_length #------------------------------------------------- MAIN BUFFER 
------------------------------------------------- self.states = torch.zeros(Config.batch_size, state_shape).to(self.device) self.actions", "last_index] = self.states_episode[a_id, : last_index - self.buffer_index] self.actions[self.buffer_index: last_index] = self.actions_episode[a_id, : last_index", "self.buffer_index] self.actions[self.buffer_index: last_index] = self.actions_episode[a_id, : last_index - self.buffer_index] self.logprob[self.buffer_index: last_index] = self.logprob_episode[a_id,", "agent ends, whole episode buffer is inserted to the main buffer def __init__(self,", "self.rewards_episode[a_id, self.episode_step[a_id]] = -1 self.new_states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs) self.dones_episode[a_id, self.episode_step[a_id]] = 1 self.episode_step[a_id]", "1 for obs, a_id in zip(terminal_steps.obs[0], terminal_steps.agent_id): self.rewards_episode[a_id, self.episode_step[a_id]] = -1 self.new_states_episode[a_id, self.episode_step[a_id]]", "torch.from_numpy(obs) self.dones_episode[a_id, self.episode_step[a_id]] = 0 self.episode_step[a_id] += 1 for obs, a_id in zip(terminal_steps.obs[0],", "'cuda' if torch.cuda.is_available() else 'cpu' self.buffer_index = 0 self.episode_length = episode_length #------------------------------------------------- MAIN", "last_index - self.buffer_index] self.buffer_index = last_index % Config.batch_size if self.buffer_index == 0: self.full", "False def add_old(self, decision_steps, actions, logprob): cnt = 0 actionsTensor = torch.Tensor(actions).to(self.device) for", "for obs, a_id in zip(terminal_steps.obs[0], terminal_steps.agent_id): self.rewards_episode[a_id, self.episode_step[a_id]] = -1 self.new_states_episode[a_id, self.episode_step[a_id]] =", "last_index] = self.actions_episode[a_id, : last_index - self.buffer_index] self.logprob[self.buffer_index: last_index] = self.logprob_episode[a_id, : last_index", "self.buffer_index] self.dones[self.buffer_index: last_index] = self.dones_episode[a_id, : last_index - self.buffer_index] self.buffer_index = last_index %", "a_id in zip(decision_steps.obs[0], decision_steps.agent_id): if decision_steps.reward[a_id] == 0: # TERMINALNI JE KORAK, SKIPUJ", "= self.rewards[i] + Config.gamma * self.gt[i+1] * (1 - self.dones[i]) def reset(self, full=False):", "self.actions[self.buffer_index: last_index] = self.actions_episode[a_id, : last_index - self.buffer_index] self.logprob[self.buffer_index: last_index] = self.logprob_episode[a_id, :", "will store each # step of only its episode. 
When episode for certain", "for obs, a_id in zip(decision_steps.obs[0], decision_steps.agent_id): if decision_steps.reward[a_id] == 0: # TERMINALNI JE", "= 0 actionsTensor = torch.Tensor(actions).to(self.device) for obs, a_id in zip(decision_steps.obs[0], decision_steps.agent_id): self.states_episode[a_id, self.episode_step[a_id]]", "will have separate episode buffer in which will store each # step of", "= last_index % Config.batch_size if self.buffer_index == 0: self.full = True self.episode_step[a_id] =", "torch.zeros(Config.batch_size).to(self.device) #----------------------------------------------- EPISODE BUFFER ------------------------------------------------ self.states_episode = torch.zeros(num_workers, self.episode_length, state_shape).to(self.device) self.actions_episode = torch.zeros(num_workers,", "certain agent ends, whole episode buffer is inserted to the main buffer def", "in reversed(range(Config.batch_size)): gt = self.rewards[i] + Config.gamma * gt * (1 - self.dones[i])", "to store whole episodes in # buffer so the advantage can be calculated,", "__init__(self, num_workers, state_shape, action_shape, episode_length): self.device = 'cuda' if torch.cuda.is_available() else 'cpu' self.buffer_index", "gt - state_values[i] def gae_advantage(self, state_values, new_state_values): self.full = False self.gt[Config.batch_size] = new_state_values[-1]", "MAIN BUFFER ------------------------------------------------- self.states = torch.zeros(Config.batch_size, state_shape).to(self.device) self.actions = torch.zeros(Config.batch_size, action_shape).to(self.device) self.logprob =", "ends, whole episode buffer is inserted to the main buffer def __init__(self, num_workers,", "new_state_values): self.full = False self.gt[Config.batch_size] = new_state_values[-1] for i in reversed(range(Config.batch_size)): delta =", "new_state_values[i] * (1 - self.dones[i]) - state_values[i] self.advantages[i] = delta + Config.gae_lambda *", "torch.zeros(Config.batch_size + 1).to(self.device) self.full = False def add_old(self, decision_steps, actions, logprob): cnt =", "self.buffer_index = last_index % Config.batch_size if self.buffer_index == 0: self.full = True self.episode_step[a_id]", ": last_index - self.buffer_index] self.buffer_index = last_index % Config.batch_size if self.buffer_index == 0:", "= torch.zeros(num_workers, self.episode_length, action_shape).to(self.device) self.logprob_episode = torch.zeros(num_workers, self.episode_length, action_shape).to(self.device) self.rewards_episode = torch.zeros(num_workers, self.episode_length).to(self.device)", "+ 1).to(self.device) self.full = False def add_old(self, decision_steps, actions, logprob): cnt = 0", "self.dones_episode[a_id, : last_index - self.buffer_index] self.buffer_index = last_index % Config.batch_size if self.buffer_index ==", "= torch.from_numpy(obs) self.dones_episode[a_id, self.episode_step[a_id]] = 0 self.episode_step[a_id] += 1 for obs, a_id in", "= self.new_states_episode[a_id, : last_index - self.buffer_index] self.dones[self.buffer_index: last_index] = self.dones_episode[a_id, : last_index -", "self.buffer_index == 0: self.full = True self.episode_step[a_id] = 0 def advantage(self, state_values, last_state_value):", "if not self.full: last_index = min(self.buffer_index + self.episode_step[a_id], Config.batch_size) self.states[self.buffer_index: last_index] = self.states_episode[a_id,", "self.dones[i]) # For critic self.gt[i] = self.rewards[i] + Config.gamma * self.gt[i+1] * (1", "self.rewards_episode = 
torch.zeros(num_workers, self.episode_length).to(self.device) self.new_states_episode = torch.zeros(num_workers, self.episode_length, state_shape).to(self.device) self.dones_episode = torch.zeros(num_workers, self.episode_length).to(self.device)", "self.new_states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs) self.dones_episode[a_id, self.episode_step[a_id]] = 0 self.episode_step[a_id] += 1 for obs,", "import Config class Buffer: # Since the enviroment we use has multiple agents", "= 1 self.episode_step[a_id] += 1 if not self.full: last_index = min(self.buffer_index + self.episode_step[a_id],", "last_state_value): self.full = False gt = last_state_value for i in reversed(range(Config.batch_size)): gt =", "= torch.zeros(Config.batch_size, action_shape).to(self.device) self.rewards = torch.zeros(Config.batch_size).to(self.device) self.new_states = torch.zeros(Config.batch_size, state_shape).to(self.device) self.dones = torch.zeros(Config.batch_size).to(self.device)", "episode. When episode for certain agent ends, whole episode buffer is inserted to", "in zip(decision_steps.obs[0], decision_steps.agent_id): self.states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs) self.actions_episode[a_id, self.episode_step[a_id]] = actionsTensor[cnt] self.logprob_episode[a_id, self.episode_step[a_id]]", "- self.dones[i]) self.gt[i] = gt self.advantages[i] = gt - state_values[i] def gae_advantage(self, state_values,", "else 'cpu' self.buffer_index = 0 self.episode_length = episode_length #------------------------------------------------- MAIN BUFFER ------------------------------------------------- self.states", "#------------------------------------------------- MAIN BUFFER ------------------------------------------------- self.states = torch.zeros(Config.batch_size, state_shape).to(self.device) self.actions = torch.zeros(Config.batch_size, action_shape).to(self.device) self.logprob", "1).to(self.device) self.full = False def add_old(self, decision_steps, actions, logprob): cnt = 0 actionsTensor", "= self.rewards_episode[a_id, : last_index - self.buffer_index] self.new_states[self.buffer_index: last_index] = self.new_states_episode[a_id, : last_index -", "self.states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs) self.actions_episode[a_id, self.episode_step[a_id]] = actionsTensor[cnt] self.logprob_episode[a_id, self.episode_step[a_id]] = logprob[cnt] cnt", "decision_steps, actions, logprob): cnt = 0 actionsTensor = torch.Tensor(actions).to(self.device) for obs, a_id in", "last_index - self.buffer_index] self.actions[self.buffer_index: last_index] = self.actions_episode[a_id, : last_index - self.buffer_index] self.logprob[self.buffer_index: last_index]", "last_index % Config.batch_size if self.buffer_index == 0: self.full = True self.episode_step[a_id] = 0", "torch.zeros(Config.batch_size, state_shape).to(self.device) self.dones = torch.zeros(Config.batch_size).to(self.device) #----------------------------------------------- EPISODE BUFFER ------------------------------------------------ self.states_episode = torch.zeros(num_workers, self.episode_length,", "self.logprob[self.buffer_index: last_index] = self.logprob_episode[a_id, : last_index - self.buffer_index] self.rewards[self.buffer_index: last_index] = self.rewards_episode[a_id, :", "= 'cuda' if torch.cuda.is_available() else 'cpu' self.buffer_index = 0 self.episode_length = episode_length #-------------------------------------------------", "Config.gae_lambda * Config.gamma * self.advantages[i+1] * (1 - 
self.dones[i]) # For critic self.gt[i]", "i in reversed(range(Config.batch_size)): delta = self.rewards[i] + Config.gamma * new_state_values[i] * (1 -", "for obs, a_id in zip(decision_steps.obs[0], decision_steps.agent_id): self.states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs) self.actions_episode[a_id, self.episode_step[a_id]] =", "= torch.zeros(Config.batch_size + 1).to(self.device) self.advantages = torch.zeros(Config.batch_size + 1).to(self.device) self.full = False def", "torch.zeros(Config.batch_size, action_shape).to(self.device) self.logprob = torch.zeros(Config.batch_size, action_shape).to(self.device) self.rewards = torch.zeros(Config.batch_size).to(self.device) self.new_states = torch.zeros(Config.batch_size, state_shape).to(self.device)", "agent will have separate episode buffer in which will store each # step", "+ Config.gamma * new_state_values[i] * (1 - self.dones[i]) - state_values[i] self.advantages[i] = delta", "not self.full: last_index = min(self.buffer_index + self.episode_step[a_id], Config.batch_size) self.states[self.buffer_index: last_index] = self.states_episode[a_id, :", "-1 self.new_states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs) self.dones_episode[a_id, self.episode_step[a_id]] = 1 self.episode_step[a_id] += 1 if", "1 if not self.full: last_index = min(self.buffer_index + self.episode_step[a_id], Config.batch_size) self.states[self.buffer_index: last_index] =", "0 def advantage(self, state_values, last_state_value): self.full = False gt = last_state_value for i", "= True self.episode_step[a_id] = 0 def advantage(self, state_values, last_state_value): self.full = False gt", "= min(self.buffer_index + self.episode_step[a_id], Config.batch_size) self.states[self.buffer_index: last_index] = self.states_episode[a_id, : last_index - self.buffer_index]", "that work in parallel and PPO requires to store whole episodes in #", "last_index = min(self.buffer_index + self.episode_step[a_id], Config.batch_size) self.states[self.buffer_index: last_index] = self.states_episode[a_id, : last_index -", "agents that work in parallel and PPO requires to store whole episodes in", "use has multiple agents that work in parallel and PPO requires to store", "enviroment we use has multiple agents that work in parallel and PPO requires", "self.buffer_index] self.rewards[self.buffer_index: last_index] = self.rewards_episode[a_id, : last_index - self.buffer_index] self.new_states[self.buffer_index: last_index] = self.new_states_episode[a_id,", "self.rewards[self.buffer_index: last_index] = self.rewards_episode[a_id, : last_index - self.buffer_index] self.new_states[self.buffer_index: last_index] = self.new_states_episode[a_id, :", "last_index] = self.logprob_episode[a_id, : last_index - self.buffer_index] self.rewards[self.buffer_index: last_index] = self.rewards_episode[a_id, : last_index", "state_shape).to(self.device) self.dones = torch.zeros(Config.batch_size).to(self.device) #----------------------------------------------- EPISODE BUFFER ------------------------------------------------ self.states_episode = torch.zeros(num_workers, self.episode_length, state_shape).to(self.device)", "torch.cuda.is_available() else 'cpu' self.buffer_index = 0 self.episode_length = episode_length #------------------------------------------------- MAIN BUFFER -------------------------------------------------", "self.advantages = torch.zeros(Config.batch_size + 1).to(self.device) self.full = False def add_old(self, decision_steps, actions, logprob):", 
"self.episode_step[a_id]] = 0 self.episode_step[a_id] += 1 for obs, a_id in zip(terminal_steps.obs[0], terminal_steps.agent_id): self.rewards_episode[a_id,", "= delta + Config.gae_lambda * Config.gamma * self.advantages[i+1] * (1 - self.dones[i]) #", "= self.rewards[i] + Config.gamma * gt * (1 - self.dones[i]) self.gt[i] = gt", "1 self.episode_step[a_id] += 1 if not self.full: last_index = min(self.buffer_index + self.episode_step[a_id], Config.batch_size)", "episode_length): self.device = 'cuda' if torch.cuda.is_available() else 'cpu' self.buffer_index = 0 self.episode_length =", "self.states_episode = torch.zeros(num_workers, self.episode_length, state_shape).to(self.device) self.actions_episode = torch.zeros(num_workers, self.episode_length, action_shape).to(self.device) self.logprob_episode = torch.zeros(num_workers,", ": last_index - self.buffer_index] self.rewards[self.buffer_index: last_index] = self.rewards_episode[a_id, : last_index - self.buffer_index] self.new_states[self.buffer_index:", "Config.gamma * self.gt[i+1] * (1 - self.dones[i]) def reset(self, full=False): if full: self.buffer_index", "self.new_states[self.buffer_index: last_index] = self.new_states_episode[a_id, : last_index - self.buffer_index] self.dones[self.buffer_index: last_index] = self.dones_episode[a_id, :", "= gt - state_values[i] def gae_advantage(self, state_values, new_state_values): self.full = False self.gt[Config.batch_size] =", "= torch.zeros(num_workers, self.episode_length, state_shape).to(self.device) self.dones_episode = torch.zeros(num_workers, self.episode_length).to(self.device) self.episode_step = torch.zeros(num_workers, dtype=torch.long).to(self.device) self.gt", "self.logprob_episode[a_id, self.episode_step[a_id]] = logprob[cnt] cnt += 1 def add(self, decision_steps, terminal_steps): for obs,", "self.rewards = torch.zeros(Config.batch_size).to(self.device) self.new_states = torch.zeros(Config.batch_size, state_shape).to(self.device) self.dones = torch.zeros(Config.batch_size).to(self.device) #----------------------------------------------- EPISODE BUFFER", "torch import Config class Buffer: # Since the enviroment we use has multiple", "def __init__(self, num_workers, state_shape, action_shape, episode_length): self.device = 'cuda' if torch.cuda.is_available() else 'cpu'", "store whole episodes in # buffer so the advantage can be calculated, each", "last_index] = self.new_states_episode[a_id, : last_index - self.buffer_index] self.dones[self.buffer_index: last_index] = self.dones_episode[a_id, : last_index", "= gt self.advantages[i] = gt - state_values[i] def gae_advantage(self, state_values, new_state_values): self.full =", "+= 1 if not self.full: last_index = min(self.buffer_index + self.episode_step[a_id], Config.batch_size) self.states[self.buffer_index: last_index]", "torch.zeros(Config.batch_size, state_shape).to(self.device) self.actions = torch.zeros(Config.batch_size, action_shape).to(self.device) self.logprob = torch.zeros(Config.batch_size, action_shape).to(self.device) self.rewards = torch.zeros(Config.batch_size).to(self.device)", "self.buffer_index] self.logprob[self.buffer_index: last_index] = self.logprob_episode[a_id, : last_index - self.buffer_index] self.rewards[self.buffer_index: last_index] = self.rewards_episode[a_id,", "obs, a_id in zip(decision_steps.obs[0], decision_steps.agent_id): if decision_steps.reward[a_id] == 0: # TERMINALNI JE KORAK,", "buffer so the advantage can be calculated, each agent will have separate episode", "+ Config.gamma * self.gt[i+1] * (1 - 
self.dones[i]) def reset(self, full=False): if full:", "Config.batch_size if self.buffer_index == 0: self.full = True self.episode_step[a_id] = 0 def advantage(self,", "True self.episode_step[a_id] = 0 def advantage(self, state_values, last_state_value): self.full = False gt =", "# buffer so the advantage can be calculated, each agent will have separate", "* (1 - self.dones[i]) self.gt[i] = gt self.advantages[i] = gt - state_values[i] def", "reversed(range(Config.batch_size)): delta = self.rewards[i] + Config.gamma * new_state_values[i] * (1 - self.dones[i]) -", "def gae_advantage(self, state_values, new_state_values): self.full = False self.gt[Config.batch_size] = new_state_values[-1] for i in", "0.1 self.new_states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs) self.dones_episode[a_id, self.episode_step[a_id]] = 0 self.episode_step[a_id] += 1 for", "# step of only its episode. When episode for certain agent ends, whole", "- self.buffer_index] self.rewards[self.buffer_index: last_index] = self.rewards_episode[a_id, : last_index - self.buffer_index] self.new_states[self.buffer_index: last_index] =", "For critic self.gt[i] = self.rewards[i] + Config.gamma * self.gt[i+1] * (1 - self.dones[i])", "the advantage can be calculated, each agent will have separate episode buffer in", "work in parallel and PPO requires to store whole episodes in # buffer", ": last_index - self.buffer_index] self.new_states[self.buffer_index: last_index] = self.new_states_episode[a_id, : last_index - self.buffer_index] self.dones[self.buffer_index:", "= torch.zeros(Config.batch_size, state_shape).to(self.device) self.actions = torch.zeros(Config.batch_size, action_shape).to(self.device) self.logprob = torch.zeros(Config.batch_size, action_shape).to(self.device) self.rewards =", "self.rewards_episode[a_id, self.episode_step[a_id]] = 0.1 self.new_states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs) self.dones_episode[a_id, self.episode_step[a_id]] = 0 self.episode_step[a_id]", "self.episode_step[a_id]] = 1 self.episode_step[a_id] += 1 if not self.full: last_index = min(self.buffer_index +", "import torch import Config class Buffer: # Since the enviroment we use has", "requires to store whole episodes in # buffer so the advantage can be", "has multiple agents that work in parallel and PPO requires to store whole", "episode buffer is inserted to the main buffer def __init__(self, num_workers, state_shape, action_shape,", "- self.buffer_index] self.actions[self.buffer_index: last_index] = self.actions_episode[a_id, : last_index - self.buffer_index] self.logprob[self.buffer_index: last_index] =", "action_shape).to(self.device) self.logprob = torch.zeros(Config.batch_size, action_shape).to(self.device) self.rewards = torch.zeros(Config.batch_size).to(self.device) self.new_states = torch.zeros(Config.batch_size, state_shape).to(self.device) self.dones", "action_shape).to(self.device) self.logprob_episode = torch.zeros(num_workers, self.episode_length, action_shape).to(self.device) self.rewards_episode = torch.zeros(num_workers, self.episode_length).to(self.device) self.new_states_episode = torch.zeros(num_workers,", "multiple agents that work in parallel and PPO requires to store whole episodes", ": last_index - self.buffer_index] self.dones[self.buffer_index: last_index] = self.dones_episode[a_id, : last_index - self.buffer_index] self.buffer_index", "we use has multiple agents that work in parallel and PPO requires to", "TERMINALNI JE KORAK, SKIPUJ OVO continue self.rewards_episode[a_id, 
self.episode_step[a_id]] = 0.1 self.new_states_episode[a_id, self.episode_step[a_id]] =", "episode_length #------------------------------------------------- MAIN BUFFER ------------------------------------------------- self.states = torch.zeros(Config.batch_size, state_shape).to(self.device) self.actions = torch.zeros(Config.batch_size, action_shape).to(self.device)", "if decision_steps.reward[a_id] == 0: # TERMINALNI JE KORAK, SKIPUJ OVO continue self.rewards_episode[a_id, self.episode_step[a_id]]", "self.episode_step[a_id]] = torch.from_numpy(obs) self.actions_episode[a_id, self.episode_step[a_id]] = actionsTensor[cnt] self.logprob_episode[a_id, self.episode_step[a_id]] = logprob[cnt] cnt +=", "def reset(self, full=False): if full: self.buffer_index = 0 self.episode_step[self.episode_step != 0] = 0", "(1 - self.dones[i]) def reset(self, full=False): if full: self.buffer_index = 0 self.episode_step[self.episode_step !=", "0 actionsTensor = torch.Tensor(actions).to(self.device) for obs, a_id in zip(decision_steps.obs[0], decision_steps.agent_id): self.states_episode[a_id, self.episode_step[a_id]] =", "main buffer def __init__(self, num_workers, state_shape, action_shape, episode_length): self.device = 'cuda' if torch.cuda.is_available()", "self.logprob_episode[a_id, : last_index - self.buffer_index] self.rewards[self.buffer_index: last_index] = self.rewards_episode[a_id, : last_index - self.buffer_index]", "- self.dones[i]) - state_values[i] self.advantages[i] = delta + Config.gae_lambda * Config.gamma * self.advantages[i+1]", "state_shape, action_shape, episode_length): self.device = 'cuda' if torch.cuda.is_available() else 'cpu' self.buffer_index = 0", "min(self.buffer_index + self.episode_step[a_id], Config.batch_size) self.states[self.buffer_index: last_index] = self.states_episode[a_id, : last_index - self.buffer_index] self.actions[self.buffer_index:", "= False gt = last_state_value for i in reversed(range(Config.batch_size)): gt = self.rewards[i] +", "False gt = last_state_value for i in reversed(range(Config.batch_size)): gt = self.rewards[i] + Config.gamma", "self.full: last_index = min(self.buffer_index + self.episode_step[a_id], Config.batch_size) self.states[self.buffer_index: last_index] = self.states_episode[a_id, : last_index", "self.episode_step = torch.zeros(num_workers, dtype=torch.long).to(self.device) self.gt = torch.zeros(Config.batch_size + 1).to(self.device) self.advantages = torch.zeros(Config.batch_size +", "self.episode_length, action_shape).to(self.device) self.logprob_episode = torch.zeros(num_workers, self.episode_length, action_shape).to(self.device) self.rewards_episode = torch.zeros(num_workers, self.episode_length).to(self.device) self.new_states_episode =", "BUFFER ------------------------------------------------ self.states_episode = torch.zeros(num_workers, self.episode_length, state_shape).to(self.device) self.actions_episode = torch.zeros(num_workers, self.episode_length, action_shape).to(self.device) self.logprob_episode", ": last_index - self.buffer_index] self.logprob[self.buffer_index: last_index] = self.logprob_episode[a_id, : last_index - self.buffer_index] self.rewards[self.buffer_index:", "self.new_states_episode = torch.zeros(num_workers, self.episode_length, state_shape).to(self.device) self.dones_episode = torch.zeros(num_workers, self.episode_length).to(self.device) self.episode_step = torch.zeros(num_workers, dtype=torch.long).to(self.device)", "# Since the enviroment we use has multiple agents that work in parallel", "only its episode. 
When episode for certain agent ends, whole episode buffer is", "* new_state_values[i] * (1 - self.dones[i]) - state_values[i] self.advantages[i] = delta + Config.gae_lambda", "each # step of only its episode. When episode for certain agent ends,", "logprob[cnt] cnt += 1 def add(self, decision_steps, terminal_steps): for obs, a_id in zip(decision_steps.obs[0],", "self.buffer_index] self.new_states[self.buffer_index: last_index] = self.new_states_episode[a_id, : last_index - self.buffer_index] self.dones[self.buffer_index: last_index] = self.dones_episode[a_id,", "whole episodes in # buffer so the advantage can be calculated, each agent", "torch.from_numpy(obs) self.dones_episode[a_id, self.episode_step[a_id]] = 1 self.episode_step[a_id] += 1 if not self.full: last_index =", "of only its episode. When episode for certain agent ends, whole episode buffer", "= self.actions_episode[a_id, : last_index - self.buffer_index] self.logprob[self.buffer_index: last_index] = self.logprob_episode[a_id, : last_index -", "self.actions_episode[a_id, self.episode_step[a_id]] = actionsTensor[cnt] self.logprob_episode[a_id, self.episode_step[a_id]] = logprob[cnt] cnt += 1 def add(self,", "self.logprob_episode = torch.zeros(num_workers, self.episode_length, action_shape).to(self.device) self.rewards_episode = torch.zeros(num_workers, self.episode_length).to(self.device) self.new_states_episode = torch.zeros(num_workers, self.episode_length,", "Config.gamma * new_state_values[i] * (1 - self.dones[i]) - state_values[i] self.advantages[i] = delta +", "cnt = 0 actionsTensor = torch.Tensor(actions).to(self.device) for obs, a_id in zip(decision_steps.obs[0], decision_steps.agent_id): self.states_episode[a_id,", "self.gt[i] = self.rewards[i] + Config.gamma * self.gt[i+1] * (1 - self.dones[i]) def reset(self,", "= torch.zeros(Config.batch_size, action_shape).to(self.device) self.logprob = torch.zeros(Config.batch_size, action_shape).to(self.device) self.rewards = torch.zeros(Config.batch_size).to(self.device) self.new_states = torch.zeros(Config.batch_size,", "== 0: # TERMINALNI JE KORAK, SKIPUJ OVO continue self.rewards_episode[a_id, self.episode_step[a_id]] = 0.1", "separate episode buffer in which will store each # step of only its", "which will store each # step of only its episode. 
When episode for", "so the advantage can be calculated, each agent will have separate episode buffer", "# For critic self.gt[i] = self.rewards[i] + Config.gamma * self.gt[i+1] * (1 -", "gt * (1 - self.dones[i]) self.gt[i] = gt self.advantages[i] = gt - state_values[i]", "# TERMINALNI JE KORAK, SKIPUJ OVO continue self.rewards_episode[a_id, self.episode_step[a_id]] = 0.1 self.new_states_episode[a_id, self.episode_step[a_id]]", "add_old(self, decision_steps, actions, logprob): cnt = 0 actionsTensor = torch.Tensor(actions).to(self.device) for obs, a_id", "torch.zeros(num_workers, self.episode_length, state_shape).to(self.device) self.dones_episode = torch.zeros(num_workers, self.episode_length).to(self.device) self.episode_step = torch.zeros(num_workers, dtype=torch.long).to(self.device) self.gt =", "self.episode_step[a_id]] = torch.from_numpy(obs) self.dones_episode[a_id, self.episode_step[a_id]] = 0 self.episode_step[a_id] += 1 for obs, a_id", "self.logprob = torch.zeros(Config.batch_size, action_shape).to(self.device) self.rewards = torch.zeros(Config.batch_size).to(self.device) self.new_states = torch.zeros(Config.batch_size, state_shape).to(self.device) self.dones =", "self.rewards[i] + Config.gamma * self.gt[i+1] * (1 - self.dones[i]) def reset(self, full=False): if", "= self.states_episode[a_id, : last_index - self.buffer_index] self.actions[self.buffer_index: last_index] = self.actions_episode[a_id, : last_index -", "self.dones[i]) self.gt[i] = gt self.advantages[i] = gt - state_values[i] def gae_advantage(self, state_values, new_state_values):", "torch.from_numpy(obs) self.actions_episode[a_id, self.episode_step[a_id]] = actionsTensor[cnt] self.logprob_episode[a_id, self.episode_step[a_id]] = logprob[cnt] cnt += 1 def", "(1 - self.dones[i]) # For critic self.gt[i] = self.rewards[i] + Config.gamma * self.gt[i+1]", "self.rewards_episode[a_id, : last_index - self.buffer_index] self.new_states[self.buffer_index: last_index] = self.new_states_episode[a_id, : last_index - self.buffer_index]", "+ Config.gae_lambda * Config.gamma * self.advantages[i+1] * (1 - self.dones[i]) # For critic", "= 0.1 self.new_states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs) self.dones_episode[a_id, self.episode_step[a_id]] = 0 self.episode_step[a_id] += 1", "self.episode_step[a_id]] = actionsTensor[cnt] self.logprob_episode[a_id, self.episode_step[a_id]] = logprob[cnt] cnt += 1 def add(self, decision_steps,", "+= 1 def add(self, decision_steps, terminal_steps): for obs, a_id in zip(decision_steps.obs[0], decision_steps.agent_id): if", "decision_steps.reward[a_id] == 0: # TERMINALNI JE KORAK, SKIPUJ OVO continue self.rewards_episode[a_id, self.episode_step[a_id]] =", "zip(terminal_steps.obs[0], terminal_steps.agent_id): self.rewards_episode[a_id, self.episode_step[a_id]] = -1 self.new_states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs) self.dones_episode[a_id, self.episode_step[a_id]] =", "self.dones_episode[a_id, self.episode_step[a_id]] = 1 self.episode_step[a_id] += 1 if not self.full: last_index = min(self.buffer_index", ": last_index - self.buffer_index] self.actions[self.buffer_index: last_index] = self.actions_episode[a_id, : last_index - self.buffer_index] self.logprob[self.buffer_index:", "False self.gt[Config.batch_size] = new_state_values[-1] for i in reversed(range(Config.batch_size)): delta = self.rewards[i] + Config.gamma", "self.full = False def add_old(self, decision_steps, actions, logprob): cnt = 0 actionsTensor =", "- state_values[i] self.advantages[i] = 
delta + Config.gae_lambda * Config.gamma * self.advantages[i+1] * (1", "state_shape).to(self.device) self.actions = torch.zeros(Config.batch_size, action_shape).to(self.device) self.logprob = torch.zeros(Config.batch_size, action_shape).to(self.device) self.rewards = torch.zeros(Config.batch_size).to(self.device) self.new_states", "BUFFER ------------------------------------------------- self.states = torch.zeros(Config.batch_size, state_shape).to(self.device) self.actions = torch.zeros(Config.batch_size, action_shape).to(self.device) self.logprob = torch.zeros(Config.batch_size,", "= torch.zeros(Config.batch_size, state_shape).to(self.device) self.dones = torch.zeros(Config.batch_size).to(self.device) #----------------------------------------------- EPISODE BUFFER ------------------------------------------------ self.states_episode = torch.zeros(num_workers,", "last_index - self.buffer_index] self.rewards[self.buffer_index: last_index] = self.rewards_episode[a_id, : last_index - self.buffer_index] self.new_states[self.buffer_index: last_index]", "0 self.episode_step[a_id] += 1 for obs, a_id in zip(terminal_steps.obs[0], terminal_steps.agent_id): self.rewards_episode[a_id, self.episode_step[a_id]] =", "cnt += 1 def add(self, decision_steps, terminal_steps): for obs, a_id in zip(decision_steps.obs[0], decision_steps.agent_id):", "step of only its episode. When episode for certain agent ends, whole episode", "- self.buffer_index] self.new_states[self.buffer_index: last_index] = self.new_states_episode[a_id, : last_index - self.buffer_index] self.dones[self.buffer_index: last_index] =", "self.dones_episode[a_id, self.episode_step[a_id]] = 0 self.episode_step[a_id] += 1 for obs, a_id in zip(terminal_steps.obs[0], terminal_steps.agent_id):", "def add(self, decision_steps, terminal_steps): for obs, a_id in zip(decision_steps.obs[0], decision_steps.agent_id): if decision_steps.reward[a_id] ==", "= torch.zeros(num_workers, self.episode_length, state_shape).to(self.device) self.actions_episode = torch.zeros(num_workers, self.episode_length, action_shape).to(self.device) self.logprob_episode = torch.zeros(num_workers, self.episode_length,", "* gt * (1 - self.dones[i]) self.gt[i] = gt self.advantages[i] = gt -", "PPO requires to store whole episodes in # buffer so the advantage can", "= torch.zeros(Config.batch_size + 1).to(self.device) self.full = False def add_old(self, decision_steps, actions, logprob): cnt", "self.episode_step[a_id]] = -1 self.new_states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs) self.dones_episode[a_id, self.episode_step[a_id]] = 1 self.episode_step[a_id] +=", "- self.dones[i]) def reset(self, full=False): if full: self.buffer_index = 0 self.episode_step[self.episode_step != 0]", "= self.logprob_episode[a_id, : last_index - self.buffer_index] self.rewards[self.buffer_index: last_index] = self.rewards_episode[a_id, : last_index -", "When episode for certain agent ends, whole episode buffer is inserted to the", "to the main buffer def __init__(self, num_workers, state_shape, action_shape, episode_length): self.device = 'cuda'", "last_state_value for i in reversed(range(Config.batch_size)): gt = self.rewards[i] + Config.gamma * gt *", "self.episode_length = episode_length #------------------------------------------------- MAIN BUFFER ------------------------------------------------- self.states = torch.zeros(Config.batch_size, state_shape).to(self.device) self.actions =", "0: self.full = True self.episode_step[a_id] = 0 def advantage(self, state_values, 
last_state_value): self.full =", "KORAK, SKIPUJ OVO continue self.rewards_episode[a_id, self.episode_step[a_id]] = 0.1 self.new_states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs) self.dones_episode[a_id,", "in # buffer so the advantage can be calculated, each agent will have", "buffer in which will store each # step of only its episode. When", "action_shape, episode_length): self.device = 'cuda' if torch.cuda.is_available() else 'cpu' self.buffer_index = 0 self.episode_length", "for i in reversed(range(Config.batch_size)): gt = self.rewards[i] + Config.gamma * gt * (1", "episode for certain agent ends, whole episode buffer is inserted to the main", "= torch.zeros(num_workers, dtype=torch.long).to(self.device) self.gt = torch.zeros(Config.batch_size + 1).to(self.device) self.advantages = torch.zeros(Config.batch_size + 1).to(self.device)", "critic self.gt[i] = self.rewards[i] + Config.gamma * self.gt[i+1] * (1 - self.dones[i]) def", "- self.buffer_index] self.logprob[self.buffer_index: last_index] = self.logprob_episode[a_id, : last_index - self.buffer_index] self.rewards[self.buffer_index: last_index] =", "self.gt[i+1] * (1 - self.dones[i]) def reset(self, full=False): if full: self.buffer_index = 0", "i in reversed(range(Config.batch_size)): gt = self.rewards[i] + Config.gamma * gt * (1 -", "gae_advantage(self, state_values, new_state_values): self.full = False self.gt[Config.batch_size] = new_state_values[-1] for i in reversed(range(Config.batch_size)):", "= episode_length #------------------------------------------------- MAIN BUFFER ------------------------------------------------- self.states = torch.zeros(Config.batch_size, state_shape).to(self.device) self.actions = torch.zeros(Config.batch_size,", "action_shape).to(self.device) self.rewards_episode = torch.zeros(num_workers, self.episode_length).to(self.device) self.new_states_episode = torch.zeros(num_workers, self.episode_length, state_shape).to(self.device) self.dones_episode = torch.zeros(num_workers,", "torch.zeros(num_workers, self.episode_length, action_shape).to(self.device) self.logprob_episode = torch.zeros(num_workers, self.episode_length, action_shape).to(self.device) self.rewards_episode = torch.zeros(num_workers, self.episode_length).to(self.device) self.new_states_episode", "self.dones_episode = torch.zeros(num_workers, self.episode_length).to(self.device) self.episode_step = torch.zeros(num_workers, dtype=torch.long).to(self.device) self.gt = torch.zeros(Config.batch_size + 1).to(self.device)", "------------------------------------------------ self.states_episode = torch.zeros(num_workers, self.episode_length, state_shape).to(self.device) self.actions_episode = torch.zeros(num_workers, self.episode_length, action_shape).to(self.device) self.logprob_episode =", "self.rewards[i] + Config.gamma * gt * (1 - self.dones[i]) self.gt[i] = gt self.advantages[i]", "gt = self.rewards[i] + Config.gamma * gt * (1 - self.dones[i]) self.gt[i] =", "self.advantages[i] = delta + Config.gae_lambda * Config.gamma * self.advantages[i+1] * (1 - self.dones[i])", "= logprob[cnt] cnt += 1 def add(self, decision_steps, terminal_steps): for obs, a_id in", "self.states[self.buffer_index: last_index] = self.states_episode[a_id, : last_index - self.buffer_index] self.actions[self.buffer_index: last_index] = self.actions_episode[a_id, :", "self.full = True self.episode_step[a_id] = 0 def advantage(self, state_values, last_state_value): self.full = False", "* (1 - self.dones[i]) # For critic self.gt[i] = self.rewards[i] + 
Config.gamma *", "terminal_steps): for obs, a_id in zip(decision_steps.obs[0], decision_steps.agent_id): if decision_steps.reward[a_id] == 0: # TERMINALNI", "self.episode_length, action_shape).to(self.device) self.rewards_episode = torch.zeros(num_workers, self.episode_length).to(self.device) self.new_states_episode = torch.zeros(num_workers, self.episode_length, state_shape).to(self.device) self.dones_episode =", "+ self.episode_step[a_id], Config.batch_size) self.states[self.buffer_index: last_index] = self.states_episode[a_id, : last_index - self.buffer_index] self.actions[self.buffer_index: last_index]", "self.episode_step[a_id] = 0 def advantage(self, state_values, last_state_value): self.full = False gt = last_state_value", "episode buffer in which will store each # step of only its episode.", "= torch.zeros(Config.batch_size).to(self.device) self.new_states = torch.zeros(Config.batch_size, state_shape).to(self.device) self.dones = torch.zeros(Config.batch_size).to(self.device) #----------------------------------------------- EPISODE BUFFER ------------------------------------------------", "self.new_states_episode[a_id, : last_index - self.buffer_index] self.dones[self.buffer_index: last_index] = self.dones_episode[a_id, : last_index - self.buffer_index]", "self.gt[Config.batch_size] = new_state_values[-1] for i in reversed(range(Config.batch_size)): delta = self.rewards[i] + Config.gamma *", "(1 - self.dones[i]) self.gt[i] = gt self.advantages[i] = gt - state_values[i] def gae_advantage(self,", "decision_steps.agent_id): if decision_steps.reward[a_id] == 0: # TERMINALNI JE KORAK, SKIPUJ OVO continue self.rewards_episode[a_id,", "buffer is inserted to the main buffer def __init__(self, num_workers, state_shape, action_shape, episode_length):", "- state_values[i] def gae_advantage(self, state_values, new_state_values): self.full = False self.gt[Config.batch_size] = new_state_values[-1] for", "actions, logprob): cnt = 0 actionsTensor = torch.Tensor(actions).to(self.device) for obs, a_id in zip(decision_steps.obs[0],", "- self.buffer_index] self.dones[self.buffer_index: last_index] = self.dones_episode[a_id, : last_index - self.buffer_index] self.buffer_index = last_index", "self.episode_length).to(self.device) self.new_states_episode = torch.zeros(num_workers, self.episode_length, state_shape).to(self.device) self.dones_episode = torch.zeros(num_workers, self.episode_length).to(self.device) self.episode_step = torch.zeros(num_workers,", "torch.zeros(num_workers, self.episode_length, action_shape).to(self.device) self.rewards_episode = torch.zeros(num_workers, self.episode_length).to(self.device) self.new_states_episode = torch.zeros(num_workers, self.episode_length, state_shape).to(self.device) self.dones_episode", "have separate episode buffer in which will store each # step of only", "last_index - self.buffer_index] self.dones[self.buffer_index: last_index] = self.dones_episode[a_id, : last_index - self.buffer_index] self.buffer_index =", "zip(decision_steps.obs[0], decision_steps.agent_id): self.states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs) self.actions_episode[a_id, self.episode_step[a_id]] = actionsTensor[cnt] self.logprob_episode[a_id, self.episode_step[a_id]] =", "Config.gamma * gt * (1 - self.dones[i]) self.gt[i] = gt self.advantages[i] = gt", "state_values, last_state_value): self.full = False gt = last_state_value for i in reversed(range(Config.batch_size)): gt", "torch.zeros(Config.batch_size, action_shape).to(self.device) self.rewards = 
torch.zeros(Config.batch_size).to(self.device) self.new_states = torch.zeros(Config.batch_size, state_shape).to(self.device) self.dones = torch.zeros(Config.batch_size).to(self.device) #-----------------------------------------------", "self.episode_length, state_shape).to(self.device) self.dones_episode = torch.zeros(num_workers, self.episode_length).to(self.device) self.episode_step = torch.zeros(num_workers, dtype=torch.long).to(self.device) self.gt = torch.zeros(Config.batch_size", "last_index - self.buffer_index] self.logprob[self.buffer_index: last_index] = self.logprob_episode[a_id, : last_index - self.buffer_index] self.rewards[self.buffer_index: last_index]", "JE KORAK, SKIPUJ OVO continue self.rewards_episode[a_id, self.episode_step[a_id]] = 0.1 self.new_states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs)", "can be calculated, each agent will have separate episode buffer in which will", "= actionsTensor[cnt] self.logprob_episode[a_id, self.episode_step[a_id]] = logprob[cnt] cnt += 1 def add(self, decision_steps, terminal_steps):", "in parallel and PPO requires to store whole episodes in # buffer so", "last_index - self.buffer_index] self.new_states[self.buffer_index: last_index] = self.new_states_episode[a_id, : last_index - self.buffer_index] self.dones[self.buffer_index: last_index]", "* self.advantages[i+1] * (1 - self.dones[i]) # For critic self.gt[i] = self.rewards[i] +", "1 def add(self, decision_steps, terminal_steps): for obs, a_id in zip(decision_steps.obs[0], decision_steps.agent_id): if decision_steps.reward[a_id]", "= self.dones_episode[a_id, : last_index - self.buffer_index] self.buffer_index = last_index % Config.batch_size if self.buffer_index", "continue self.rewards_episode[a_id, self.episode_step[a_id]] = 0.1 self.new_states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs) self.dones_episode[a_id, self.episode_step[a_id]] = 0", "in reversed(range(Config.batch_size)): delta = self.rewards[i] + Config.gamma * new_state_values[i] * (1 - self.dones[i])", "self.episode_step[a_id]] = logprob[cnt] cnt += 1 def add(self, decision_steps, terminal_steps): for obs, a_id", "state_values[i] def gae_advantage(self, state_values, new_state_values): self.full = False self.gt[Config.batch_size] = new_state_values[-1] for i", "add(self, decision_steps, terminal_steps): for obs, a_id in zip(decision_steps.obs[0], decision_steps.agent_id): if decision_steps.reward[a_id] == 0:", "gt = last_state_value for i in reversed(range(Config.batch_size)): gt = self.rewards[i] + Config.gamma *", "== 0: self.full = True self.episode_step[a_id] = 0 def advantage(self, state_values, last_state_value): self.full", "* self.gt[i+1] * (1 - self.dones[i]) def reset(self, full=False): if full: self.buffer_index =", "= 0 def advantage(self, state_values, last_state_value): self.full = False gt = last_state_value for", "in zip(decision_steps.obs[0], decision_steps.agent_id): if decision_steps.reward[a_id] == 0: # TERMINALNI JE KORAK, SKIPUJ OVO", "% Config.batch_size if self.buffer_index == 0: self.full = True self.episode_step[a_id] = 0 def", "torch.zeros(num_workers, dtype=torch.long).to(self.device) self.gt = torch.zeros(Config.batch_size + 1).to(self.device) self.advantages = torch.zeros(Config.batch_size + 1).to(self.device) self.full", "self.states_episode[a_id, : last_index - self.buffer_index] self.actions[self.buffer_index: last_index] = self.actions_episode[a_id, : last_index - self.buffer_index]", "Buffer: # Since the enviroment we use has multiple agents that 
work in", "state_shape).to(self.device) self.actions_episode = torch.zeros(num_workers, self.episode_length, action_shape).to(self.device) self.logprob_episode = torch.zeros(num_workers, self.episode_length, action_shape).to(self.device) self.rewards_episode =", "a_id in zip(terminal_steps.obs[0], terminal_steps.agent_id): self.rewards_episode[a_id, self.episode_step[a_id]] = -1 self.new_states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs) self.dones_episode[a_id,", "new_state_values[-1] for i in reversed(range(Config.batch_size)): delta = self.rewards[i] + Config.gamma * new_state_values[i] *", "torch.zeros(Config.batch_size + 1).to(self.device) self.advantages = torch.zeros(Config.batch_size + 1).to(self.device) self.full = False def add_old(self,", "terminal_steps.agent_id): self.rewards_episode[a_id, self.episode_step[a_id]] = -1 self.new_states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs) self.dones_episode[a_id, self.episode_step[a_id]] = 1", "self.actions_episode[a_id, : last_index - self.buffer_index] self.logprob[self.buffer_index: last_index] = self.logprob_episode[a_id, : last_index - self.buffer_index]", "self.device = 'cuda' if torch.cuda.is_available() else 'cpu' self.buffer_index = 0 self.episode_length = episode_length", "* (1 - self.dones[i]) - state_values[i] self.advantages[i] = delta + Config.gae_lambda * Config.gamma", "self.episode_length, state_shape).to(self.device) self.actions_episode = torch.zeros(num_workers, self.episode_length, action_shape).to(self.device) self.logprob_episode = torch.zeros(num_workers, self.episode_length, action_shape).to(self.device) self.rewards_episode", "actionsTensor = torch.Tensor(actions).to(self.device) for obs, a_id in zip(decision_steps.obs[0], decision_steps.agent_id): self.states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs)", "state_values, new_state_values): self.full = False self.gt[Config.batch_size] = new_state_values[-1] for i in reversed(range(Config.batch_size)): delta", "for i in reversed(range(Config.batch_size)): delta = self.rewards[i] + Config.gamma * new_state_values[i] * (1", "= -1 self.new_states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs) self.dones_episode[a_id, self.episode_step[a_id]] = 1 self.episode_step[a_id] += 1", "torch.zeros(num_workers, self.episode_length, state_shape).to(self.device) self.actions_episode = torch.zeros(num_workers, self.episode_length, action_shape).to(self.device) self.logprob_episode = torch.zeros(num_workers, self.episode_length, action_shape).to(self.device)", "calculated, each agent will have separate episode buffer in which will store each", "* (1 - self.dones[i]) def reset(self, full=False): if full: self.buffer_index = 0 self.episode_step[self.episode_step", "+ 1).to(self.device) self.advantages = torch.zeros(Config.batch_size + 1).to(self.device) self.full = False def add_old(self, decision_steps,", "Config.batch_size) self.states[self.buffer_index: last_index] = self.states_episode[a_id, : last_index - self.buffer_index] self.actions[self.buffer_index: last_index] = self.actions_episode[a_id,", "if self.buffer_index == 0: self.full = True self.episode_step[a_id] = 0 def advantage(self, state_values,", "0: # TERMINALNI JE KORAK, SKIPUJ OVO continue self.rewards_episode[a_id, self.episode_step[a_id]] = 0.1 self.new_states_episode[a_id,", "be calculated, each agent will have separate episode buffer in which will store", "= self.rewards[i] + Config.gamma * new_state_values[i] * (1 - self.dones[i]) - 
state_values[i] self.advantages[i]", "self.episode_step[a_id]] = torch.from_numpy(obs) self.dones_episode[a_id, self.episode_step[a_id]] = 1 self.episode_step[a_id] += 1 if not self.full:", "Config class Buffer: # Since the enviroment we use has multiple agents that", "a_id in zip(decision_steps.obs[0], decision_steps.agent_id): self.states_episode[a_id, self.episode_step[a_id]] = torch.from_numpy(obs) self.actions_episode[a_id, self.episode_step[a_id]] = actionsTensor[cnt] self.logprob_episode[a_id,", "action_shape).to(self.device) self.rewards = torch.zeros(Config.batch_size).to(self.device) self.new_states = torch.zeros(Config.batch_size, state_shape).to(self.device) self.dones = torch.zeros(Config.batch_size).to(self.device) #----------------------------------------------- EPISODE", "self.episode_step[a_id] += 1 if not self.full: last_index = min(self.buffer_index + self.episode_step[a_id], Config.batch_size) self.states[self.buffer_index:", "= False def add_old(self, decision_steps, actions, logprob): cnt = 0 actionsTensor = torch.Tensor(actions).to(self.device)", "= torch.zeros(num_workers, self.episode_length).to(self.device) self.new_states_episode = torch.zeros(num_workers, self.episode_length, state_shape).to(self.device) self.dones_episode = torch.zeros(num_workers, self.episode_length).to(self.device) self.episode_step", "Config.gamma * self.advantages[i+1] * (1 - self.dones[i]) # For critic self.gt[i] = self.rewards[i]", "reversed(range(Config.batch_size)): gt = self.rewards[i] + Config.gamma * gt * (1 - self.dones[i]) self.gt[i]", "buffer def __init__(self, num_workers, state_shape, action_shape, episode_length): self.device = 'cuda' if torch.cuda.is_available() else", "self.full = False self.gt[Config.batch_size] = new_state_values[-1] for i in reversed(range(Config.batch_size)): delta = self.rewards[i]", "def add_old(self, decision_steps, actions, logprob): cnt = 0 actionsTensor = torch.Tensor(actions).to(self.device) for obs,", "= torch.from_numpy(obs) self.dones_episode[a_id, self.episode_step[a_id]] = 1 self.episode_step[a_id] += 1 if not self.full: last_index", "self.full = False gt = last_state_value for i in reversed(range(Config.batch_size)): gt = self.rewards[i]", "advantage can be calculated, each agent will have separate episode buffer in which", "self.advantages[i+1] * (1 - self.dones[i]) # For critic self.gt[i] = self.rewards[i] + Config.gamma", "torch.zeros(num_workers, self.episode_length).to(self.device) self.new_states_episode = torch.zeros(num_workers, self.episode_length, state_shape).to(self.device) self.dones_episode = torch.zeros(num_workers, self.episode_length).to(self.device) self.episode_step =" ]
[ "'ndg-httpsclient', # urllib3 InsecurePlatformWarning (#304) 'pyasn1', # urllib3 InsecurePlatformWarning (#304) # Connection.set_tlsext_host_name (>=0.13),", "< (2, 7): # only some distros recognize stdlib argparse as already satisfying", "# Connection.set_tlsext_host_name (>=0.13), X509Req.get_extensions (>=0.15) 'PyOpenSSL>=0.15', 'pytz', 'requests', 'six', 'werkzeug', ] # env", "extras_require={ 'testing': testing_extras, }, entry_points={ 'console_scripts': [ 'jws = acme.jose.jws:CLI.run', ], }, test_suite='acme',", "'mock<1.1.0', # py26 'pyrfc3339', 'ndg-httpsclient', # urllib3 InsecurePlatformWarning (#304) 'pyasn1', # urllib3 InsecurePlatformWarning", "'argparse', # load_pem_private/public_key (>=0.6) # rsa_recover_prime_factors (>=0.8) 'cryptography>=0.8', 'mock<1.1.0', # py26 'pyrfc3339', 'ndg-httpsclient',", "env markers in extras_require cause problems with older pip: #517 if sys.version_info <", "satisfying install_requires.append('argparse') testing_extras = [ 'nose', 'tox', ] setup( name='acme', packages=find_packages(), install_requires=install_requires, extras_require={", "import sys from setuptools import setup from setuptools import find_packages install_requires = [", "'werkzeug', ] # env markers in extras_require cause problems with older pip: #517", "extras_require cause problems with older pip: #517 if sys.version_info < (2, 7): #", "some distros recognize stdlib argparse as already satisfying install_requires.append('argparse') testing_extras = [ 'nose',", "older pip: #517 if sys.version_info < (2, 7): # only some distros recognize", "if sys.version_info < (2, 7): # only some distros recognize stdlib argparse as", "'nose', 'tox', ] setup( name='acme', packages=find_packages(), install_requires=install_requires, extras_require={ 'testing': testing_extras, }, entry_points={ 'console_scripts':", "# rsa_recover_prime_factors (>=0.8) 'cryptography>=0.8', 'mock<1.1.0', # py26 'pyrfc3339', 'ndg-httpsclient', # urllib3 InsecurePlatformWarning (#304)", "setup( name='acme', packages=find_packages(), install_requires=install_requires, extras_require={ 'testing': testing_extras, }, entry_points={ 'console_scripts': [ 'jws =", "'testing': testing_extras, }, entry_points={ 'console_scripts': [ 'jws = acme.jose.jws:CLI.run', ], }, test_suite='acme', )", "problems with older pip: #517 if sys.version_info < (2, 7): # only some", "import setup from setuptools import find_packages install_requires = [ 'argparse', # load_pem_private/public_key (>=0.6)", "'pyasn1', # urllib3 InsecurePlatformWarning (#304) # Connection.set_tlsext_host_name (>=0.13), X509Req.get_extensions (>=0.15) 'PyOpenSSL>=0.15', 'pytz', 'requests',", "import find_packages install_requires = [ 'argparse', # load_pem_private/public_key (>=0.6) # rsa_recover_prime_factors (>=0.8) 'cryptography>=0.8',", "with older pip: #517 if sys.version_info < (2, 7): # only some distros", "urllib3 InsecurePlatformWarning (#304) # Connection.set_tlsext_host_name (>=0.13), X509Req.get_extensions (>=0.15) 'PyOpenSSL>=0.15', 'pytz', 'requests', 'six', 'werkzeug',", "(#304) # Connection.set_tlsext_host_name (>=0.13), X509Req.get_extensions (>=0.15) 'PyOpenSSL>=0.15', 'pytz', 'requests', 'six', 'werkzeug', ] #", "cause problems with older pip: #517 if sys.version_info < (2, 7): # only", "rsa_recover_prime_factors (>=0.8) 'cryptography>=0.8', 'mock<1.1.0', # py26 'pyrfc3339', 'ndg-httpsclient', # urllib3 InsecurePlatformWarning (#304) 'pyasn1',", "py26 'pyrfc3339', 'ndg-httpsclient', # urllib3 InsecurePlatformWarning (#304) 'pyasn1', # 
urllib3 InsecurePlatformWarning (#304) #", "name='acme', packages=find_packages(), install_requires=install_requires, extras_require={ 'testing': testing_extras, }, entry_points={ 'console_scripts': [ 'jws = acme.jose.jws:CLI.run',", "'cryptography>=0.8', 'mock<1.1.0', # py26 'pyrfc3339', 'ndg-httpsclient', # urllib3 InsecurePlatformWarning (#304) 'pyasn1', # urllib3", "# urllib3 InsecurePlatformWarning (#304) 'pyasn1', # urllib3 InsecurePlatformWarning (#304) # Connection.set_tlsext_host_name (>=0.13), X509Req.get_extensions", "distros recognize stdlib argparse as already satisfying install_requires.append('argparse') testing_extras = [ 'nose', 'tox',", "(>=0.13), X509Req.get_extensions (>=0.15) 'PyOpenSSL>=0.15', 'pytz', 'requests', 'six', 'werkzeug', ] # env markers in", "(>=0.15) 'PyOpenSSL>=0.15', 'pytz', 'requests', 'six', 'werkzeug', ] # env markers in extras_require cause", "# py26 'pyrfc3339', 'ndg-httpsclient', # urllib3 InsecurePlatformWarning (#304) 'pyasn1', # urllib3 InsecurePlatformWarning (#304)", "] setup( name='acme', packages=find_packages(), install_requires=install_requires, extras_require={ 'testing': testing_extras, }, entry_points={ 'console_scripts': [ 'jws", "] # env markers in extras_require cause problems with older pip: #517 if", "(2, 7): # only some distros recognize stdlib argparse as already satisfying install_requires.append('argparse')", "as already satisfying install_requires.append('argparse') testing_extras = [ 'nose', 'tox', ] setup( name='acme', packages=find_packages(),", "from setuptools import find_packages install_requires = [ 'argparse', # load_pem_private/public_key (>=0.6) # rsa_recover_prime_factors", "'pyrfc3339', 'ndg-httpsclient', # urllib3 InsecurePlatformWarning (#304) 'pyasn1', # urllib3 InsecurePlatformWarning (#304) # Connection.set_tlsext_host_name", "load_pem_private/public_key (>=0.6) # rsa_recover_prime_factors (>=0.8) 'cryptography>=0.8', 'mock<1.1.0', # py26 'pyrfc3339', 'ndg-httpsclient', # urllib3", "(>=0.8) 'cryptography>=0.8', 'mock<1.1.0', # py26 'pyrfc3339', 'ndg-httpsclient', # urllib3 InsecurePlatformWarning (#304) 'pyasn1', #", "<reponame>stewnorriss/letsencrypt import sys from setuptools import setup from setuptools import find_packages install_requires =", "urllib3 InsecurePlatformWarning (#304) 'pyasn1', # urllib3 InsecurePlatformWarning (#304) # Connection.set_tlsext_host_name (>=0.13), X509Req.get_extensions (>=0.15)", "recognize stdlib argparse as already satisfying install_requires.append('argparse') testing_extras = [ 'nose', 'tox', ]", "[ 'nose', 'tox', ] setup( name='acme', packages=find_packages(), install_requires=install_requires, extras_require={ 'testing': testing_extras, }, entry_points={", "setup from setuptools import find_packages install_requires = [ 'argparse', # load_pem_private/public_key (>=0.6) #", "(>=0.6) # rsa_recover_prime_factors (>=0.8) 'cryptography>=0.8', 'mock<1.1.0', # py26 'pyrfc3339', 'ndg-httpsclient', # urllib3 InsecurePlatformWarning", "sys.version_info < (2, 7): # only some distros recognize stdlib argparse as already", "sys from setuptools import setup from setuptools import find_packages install_requires = [ 'argparse',", "pip: #517 if sys.version_info < (2, 7): # only some distros recognize stdlib", "# env markers in extras_require cause problems with older pip: #517 if sys.version_info", "setuptools import find_packages install_requires = [ 'argparse', # load_pem_private/public_key (>=0.6) # rsa_recover_prime_factors (>=0.8)", "X509Req.get_extensions (>=0.15) 
'PyOpenSSL>=0.15', 'pytz', 'requests', 'six', 'werkzeug', ] # env markers in extras_require", "already satisfying install_requires.append('argparse') testing_extras = [ 'nose', 'tox', ] setup( name='acme', packages=find_packages(), install_requires=install_requires,", "install_requires=install_requires, extras_require={ 'testing': testing_extras, }, entry_points={ 'console_scripts': [ 'jws = acme.jose.jws:CLI.run', ], },", "'requests', 'six', 'werkzeug', ] # env markers in extras_require cause problems with older", "'PyOpenSSL>=0.15', 'pytz', 'requests', 'six', 'werkzeug', ] # env markers in extras_require cause problems", "argparse as already satisfying install_requires.append('argparse') testing_extras = [ 'nose', 'tox', ] setup( name='acme',", "[ 'argparse', # load_pem_private/public_key (>=0.6) # rsa_recover_prime_factors (>=0.8) 'cryptography>=0.8', 'mock<1.1.0', # py26 'pyrfc3339',", "(#304) 'pyasn1', # urllib3 InsecurePlatformWarning (#304) # Connection.set_tlsext_host_name (>=0.13), X509Req.get_extensions (>=0.15) 'PyOpenSSL>=0.15', 'pytz',", "= [ 'nose', 'tox', ] setup( name='acme', packages=find_packages(), install_requires=install_requires, extras_require={ 'testing': testing_extras, },", "Connection.set_tlsext_host_name (>=0.13), X509Req.get_extensions (>=0.15) 'PyOpenSSL>=0.15', 'pytz', 'requests', 'six', 'werkzeug', ] # env markers", "= [ 'argparse', # load_pem_private/public_key (>=0.6) # rsa_recover_prime_factors (>=0.8) 'cryptography>=0.8', 'mock<1.1.0', # py26", "setuptools import setup from setuptools import find_packages install_requires = [ 'argparse', # load_pem_private/public_key", "# load_pem_private/public_key (>=0.6) # rsa_recover_prime_factors (>=0.8) 'cryptography>=0.8', 'mock<1.1.0', # py26 'pyrfc3339', 'ndg-httpsclient', #", "'six', 'werkzeug', ] # env markers in extras_require cause problems with older pip:", "testing_extras = [ 'nose', 'tox', ] setup( name='acme', packages=find_packages(), install_requires=install_requires, extras_require={ 'testing': testing_extras,", "stdlib argparse as already satisfying install_requires.append('argparse') testing_extras = [ 'nose', 'tox', ] setup(", "'pytz', 'requests', 'six', 'werkzeug', ] # env markers in extras_require cause problems with", "install_requires.append('argparse') testing_extras = [ 'nose', 'tox', ] setup( name='acme', packages=find_packages(), install_requires=install_requires, extras_require={ 'testing':", "# only some distros recognize stdlib argparse as already satisfying install_requires.append('argparse') testing_extras =", "#517 if sys.version_info < (2, 7): # only some distros recognize stdlib argparse", "from setuptools import setup from setuptools import find_packages install_requires = [ 'argparse', #", "7): # only some distros recognize stdlib argparse as already satisfying install_requires.append('argparse') testing_extras", "install_requires = [ 'argparse', # load_pem_private/public_key (>=0.6) # rsa_recover_prime_factors (>=0.8) 'cryptography>=0.8', 'mock<1.1.0', #", "InsecurePlatformWarning (#304) # Connection.set_tlsext_host_name (>=0.13), X509Req.get_extensions (>=0.15) 'PyOpenSSL>=0.15', 'pytz', 'requests', 'six', 'werkzeug', ]", "markers in extras_require cause problems with older pip: #517 if sys.version_info < (2,", "# urllib3 InsecurePlatformWarning (#304) # Connection.set_tlsext_host_name (>=0.13), X509Req.get_extensions (>=0.15) 'PyOpenSSL>=0.15', 'pytz', 'requests', 'six',", "in extras_require cause problems with older pip: #517 if sys.version_info < (2, 7):", "'tox', ] setup( 
name='acme', packages=find_packages(), install_requires=install_requires, extras_require={ 'testing': testing_extras, }, entry_points={ 'console_scripts': [", "InsecurePlatformWarning (#304) 'pyasn1', # urllib3 InsecurePlatformWarning (#304) # Connection.set_tlsext_host_name (>=0.13), X509Req.get_extensions (>=0.15) 'PyOpenSSL>=0.15',", "packages=find_packages(), install_requires=install_requires, extras_require={ 'testing': testing_extras, }, entry_points={ 'console_scripts': [ 'jws = acme.jose.jws:CLI.run', ],", "only some distros recognize stdlib argparse as already satisfying install_requires.append('argparse') testing_extras = [", "find_packages install_requires = [ 'argparse', # load_pem_private/public_key (>=0.6) # rsa_recover_prime_factors (>=0.8) 'cryptography>=0.8', 'mock<1.1.0'," ]
[ "from periskop_client.models import HTTPContext @pytest.fixture def collector(): return ExceptionCollector() @pytest.fixture def exporter(collector): return", "collector(): return ExceptionCollector() @pytest.fixture def exporter(collector): return ExceptionExporter(collector) @pytest.fixture def sample_http_context(): return HTTPContext(request_method=\"GET\",", "@pytest.fixture def collector(): return ExceptionCollector() @pytest.fixture def exporter(collector): return ExceptionExporter(collector) @pytest.fixture def sample_http_context():", "ExceptionExporter(collector) @pytest.fixture def sample_http_context(): return HTTPContext(request_method=\"GET\", request_url=\"http://example.com\", request_headers={\"Cache-Control\": \"no-cache\"}) def get_exception_with_context(collector): return list(collector._aggregated_exceptions.values())[0].latest_errors[0]", "import HTTPContext @pytest.fixture def collector(): return ExceptionCollector() @pytest.fixture def exporter(collector): return ExceptionExporter(collector) @pytest.fixture", "ExceptionCollector() @pytest.fixture def exporter(collector): return ExceptionExporter(collector) @pytest.fixture def sample_http_context(): return HTTPContext(request_method=\"GET\", request_url=\"http://example.com\", request_headers={\"Cache-Control\":", "periskop_client.exporter import ExceptionExporter from periskop_client.models import HTTPContext @pytest.fixture def collector(): return ExceptionCollector() @pytest.fixture", "pytest from periskop_client.collector import ExceptionCollector from periskop_client.exporter import ExceptionExporter from periskop_client.models import HTTPContext", "ExceptionExporter from periskop_client.models import HTTPContext @pytest.fixture def collector(): return ExceptionCollector() @pytest.fixture def exporter(collector):", "@pytest.fixture def exporter(collector): return ExceptionExporter(collector) @pytest.fixture def sample_http_context(): return HTTPContext(request_method=\"GET\", request_url=\"http://example.com\", request_headers={\"Cache-Control\": \"no-cache\"})", "def exporter(collector): return ExceptionExporter(collector) @pytest.fixture def sample_http_context(): return HTTPContext(request_method=\"GET\", request_url=\"http://example.com\", request_headers={\"Cache-Control\": \"no-cache\"}) def", "return ExceptionExporter(collector) @pytest.fixture def sample_http_context(): return HTTPContext(request_method=\"GET\", request_url=\"http://example.com\", request_headers={\"Cache-Control\": \"no-cache\"}) def get_exception_with_context(collector): return", "HTTPContext @pytest.fixture def collector(): return ExceptionCollector() @pytest.fixture def exporter(collector): return ExceptionExporter(collector) @pytest.fixture def", "ExceptionCollector from periskop_client.exporter import ExceptionExporter from periskop_client.models import HTTPContext @pytest.fixture def collector(): return", "exporter(collector): return ExceptionExporter(collector) @pytest.fixture def sample_http_context(): return HTTPContext(request_method=\"GET\", request_url=\"http://example.com\", request_headers={\"Cache-Control\": \"no-cache\"}) def get_exception_with_context(collector):", "def collector(): return ExceptionCollector() @pytest.fixture def exporter(collector): return ExceptionExporter(collector) @pytest.fixture def sample_http_context(): return", "import ExceptionExporter from periskop_client.models import HTTPContext @pytest.fixture def collector(): return ExceptionCollector() @pytest.fixture def", "periskop_client.collector 
import ExceptionCollector from periskop_client.exporter import ExceptionExporter from periskop_client.models import HTTPContext @pytest.fixture def", "return ExceptionCollector() @pytest.fixture def exporter(collector): return ExceptionExporter(collector) @pytest.fixture def sample_http_context(): return HTTPContext(request_method=\"GET\", request_url=\"http://example.com\",", "import ExceptionCollector from periskop_client.exporter import ExceptionExporter from periskop_client.models import HTTPContext @pytest.fixture def collector():", "from periskop_client.collector import ExceptionCollector from periskop_client.exporter import ExceptionExporter from periskop_client.models import HTTPContext @pytest.fixture", "periskop_client.models import HTTPContext @pytest.fixture def collector(): return ExceptionCollector() @pytest.fixture def exporter(collector): return ExceptionExporter(collector)", "import pytest from periskop_client.collector import ExceptionCollector from periskop_client.exporter import ExceptionExporter from periskop_client.models import", "from periskop_client.exporter import ExceptionExporter from periskop_client.models import HTTPContext @pytest.fixture def collector(): return ExceptionCollector()", "<gh_stars>0 import pytest from periskop_client.collector import ExceptionCollector from periskop_client.exporter import ExceptionExporter from periskop_client.models" ]
[ "# Model to test the basics def test_model_1(): A, B, C = BaseSpecies(3)", "B | C) MySim.level = 0 MySim.compile() return False except SystemExit: print('Dimensional inconsistency", "0 results = MySim.compile() assert compare_model(results, 'test_tools/model_5.txt') def test_model_6(): # This model tests", "C.c1, C.c2 Zero >> 2 * A[1] MySim = Simulation(B | C) results", "results = MySim.compile() assert compare_model(results, 'test_tools/model_2.txt') # Model to test species multiplication def", "creation and compilation # It also test the calculation capabilities # It uses", "= Simulation(Cat | Dog | Herbivore) MySim.level = 0 MySim.volume = 1 *", "'') if r != line: return False return True # Model to test", "line.replace('\\n', '') if r != line: return False return True # Model to", ">> Blue_Oyster_Cult.reaper[1] The_Smiths.stop_me >> The_Smiths.charming_man[1] Music = MGMT * Blue_Oyster_Cult * The_Smiths MySim", "test well defined orthogonal spaces def orthogonal_spaces(): try: A, B = BaseSpecies(2) A.a,", "be low Zero >> Creator[leaky] MySim = Simulation(mRNA | Protein) return MySim.compile() assert", "* u.meter ** 2 results = MySim.compile() assert compare_model(results, 'test_tools/model_2.txt') # Model to", "* u.mol / u.meter ** 3) + B(1 * u.mol / u.meter **", "def compare_model(comp_results, file_name): with open(file_name, 'r') as file: for r, line in zip(comp_results.split('\\n'),", "r1: gamma_p if r1.is_a(Protein) else gamma_m] # This is the leaky mRNA expression,", "= Simulation(B | C) MySim.level = 0 results = MySim.compile() assert compare_model(results, 'test_tools/model_5.txt')", "gamma_m=1, gamma_p=0.01, k=1, n=4, leaky=0.0001): Mortal, Creator = BaseSpecies(2) mRNA = Mortal *", "| Protein) return MySim.compile() assert compare_model(oscillator(), 'test_tools/model_7.txt') # Model to test well defined", "| C) MySim.level = 0 MySim.compile() return False except SystemExit: print('Dimensional inconsistency model", "the calculation capabilities # It uses some simple model and assertions import pytest", "as file: for r, line in zip(comp_results.split('\\n'), file.readlines()): line = line.replace('\\n', '') if", "2 * A >> 3 * A[1] MySim = Simulation(B | C) MySim.level", "| C) MySim.level = 0 results = MySim.compile() assert compare_model(results, 'test_tools/model_1.txt') # Model", "referenced in the reactants (we call them Born Species) A = BaseSpecies(1) B", "The_Smiths.stop_me >> The_Smiths.charming_man[1] Music = MGMT * Blue_Oyster_Cult * The_Smiths MySim = Simulation(Music)", "BaseSpecies(2) Cat, Dog = New(Carnivore, 2) Carnivore + Herbivore(1 * u.mol) >> Carnivore[1]", "BaseSpecies(1) B, C = New(A, 2) A >> 2 * A[1] 2 *", "in zip(comp_results.split('\\n'), file.readlines()): line = line.replace('\\n', '') if r != line: return False", "B1, B2 = New(Bacteria, 2) V1, V2 = New(Virus, 2) Bacteria.not_infected + Virus", "New(Mortal) # Repression reactions for m, p in zip(['m1', 'm2', 'm3'], ['x2', 'x3',", "A >> 2 * A[1] 2 * A >> 3 * A[1] MySim", "Model to test inheritance queries # All bacterias are infected by any virus", "import sys # TODO Plot has random order for species names # Compare", "MySim.compile() assert compare_model(results, 'test_tools/model_6.txt') def test_model_7(): def oscillator(beta_m=5, beta_p=10, gamma_m=1, gamma_p=0.01, k=1, n=4,", ">> Zero[lambda r1: gamma_p if r1.is_a(Protein) else gamma_m] # This is the leaky", "MySim.compile() assert compare_model(results, 'test_tools/model_1.txt') # Model to test basic inheritance def test_model_2(): Carnivore,", 
"mRNA expression, it needs to be low Zero >> Creator[leaky] MySim = Simulation(mRNA", "| C) MySim.level = 0 MySim.compile() return False except SystemExit: return True def", "/ u.meter ** 2) >> C[1] MySim = Simulation(A | B | C)", ">> C[1] MySim = Simulation(A | B | C) MySim.level = 0 MySim.compile()", "zip(['m1', 'm2', 'm3'], ['x2', 'x3', 'x1']): Protein.c(p) >> Protein.c(p) + mRNA.c(m)[lambda pro: f'{beta_m}/(1", "New(A, 2) A >> 2 * A[1] 2 * A >> 3 *", "leaky mRNA expression, it needs to be low Zero >> Creator[leaky] MySim =", "multiplication def test_model_3(): MGMT, Blue_Oyster_Cult, The_Smiths = BaseSpecies(3) MGMT.eletric_fell, MGMT.little_dark_age, MGMT.kids Blue_Oyster_Cult.burning_for_you >>", "random order for species names # Compare results with expected file def compare_model(comp_results,", "dimensional inconsistency def dimensional_inconsistency(): try: A, B, C = BaseSpecies(3) A(1 * u.mol", "MySim = Simulation(mRNA | Protein) return MySim.compile() assert compare_model(oscillator(), 'test_tools/model_7.txt') # Model to", "= Simulation(A | B | C) MySim.level = 0 MySim.compile() return False except", "MySim.compile() assert compare_model(oscillator(), 'test_tools/model_7.txt') # Model to test well defined orthogonal spaces def", "V1, V2 = New(Virus, 2) Bacteria.not_infected + Virus >> Bacteria.infected[1] MySim = Simulation(B1", "= MySim.compile() assert compare_model(results, 'test_tools/model_2.txt') # Model to test species multiplication def test_model_3():", "to test basic inheritance def test_model_2(): Carnivore, Herbivore = BaseSpecies(2) Cat, Dog =", "compare_model(results, 'test_tools/model_4.txt') # Model to test round-robin and stoichiometry def test_model_5(): A =", "except SystemExit: return True def test_orthogonal(): assert orthogonal_spaces() # Model to test dimensional", "A.a, A.b C = New(B) C.a, C.b MySim = Simulation(A | C) MySim.level", "for species names # Compare results with expected file def compare_model(comp_results, file_name): with", "+ Virus >> Bacteria.infected[1] MySim = Simulation(B1 | B2 | V1 | V2)", "return False except SystemExit: print('Dimensional inconsistency model Ok') return True def test_dimensional_inconsistency(): assert", "orthogonal spaces def orthogonal_spaces(): try: A, B = BaseSpecies(2) A.a, A.b C =", "u.meter ** 2 results = MySim.compile() assert compare_model(results, 'test_tools/model_2.txt') # Model to test", "u.mol) >> Carnivore[1] Cat(1 * u.mol), Dog(1 * u.mol) MySim = Simulation(Cat |", "= Simulation(Music) MySim.level = 0 results = MySim.compile() assert compare_model(results, 'test_tools/model_3.txt') # Model", "MySim.level = 0 results = MySim.compile() assert compare_model(results, 'test_tools/model_3.txt') # Model to test", "Zero >> 2 * A[1] MySim = Simulation(B | C) results = MySim.compile()", "All bacterias are infected by any virus here def test_model_4(): Bacteria, Virus =", "* A[1] MySim = Simulation(B | C) results = MySim.compile() assert compare_model(results, 'test_tools/model_6.txt')", "inheritance queries # All bacterias are infected by any virus here def test_model_4():", "A[1] 2 * A >> 3 * A[1] MySim = Simulation(B | C)", "def test_model_5(): A = BaseSpecies(1) B, C = New(A, 2) A >> 2", "test_model_3(): MGMT, Blue_Oyster_Cult, The_Smiths = BaseSpecies(3) MGMT.eletric_fell, MGMT.little_dark_age, MGMT.kids Blue_Oyster_Cult.burning_for_you >> Blue_Oyster_Cult.reaper[1] The_Smiths.stop_me", "Model to test round-robin and stoichiometry def test_model_5(): A = BaseSpecies(1) B, C", "line: return False return 
True # Model to test the basics def test_model_1():", "assert compare_model(results, 'test_tools/model_5.txt') def test_model_6(): # This model tests species that are not", "def test_model_6(): # This model tests species that are not referenced in the", "file def compare_model(comp_results, file_name): with open(file_name, 'r') as file: for r, line in", "def test_model_2(): Carnivore, Herbivore = BaseSpecies(2) Cat, Dog = New(Carnivore, 2) Carnivore +", "results = MySim.compile() assert compare_model(results, 'test_tools/model_3.txt') # Model to test inheritance queries #", "= New(Virus, 2) Bacteria.not_infected + Virus >> Bacteria.infected[1] MySim = Simulation(B1 | B2", "u.meter ** 2) >> C[1] MySim = Simulation(A | B | C) MySim.level", "leaky=0.0001): Mortal, Creator = BaseSpecies(2) mRNA = Mortal * Creator Protein = New(Mortal)", "r, line in zip(comp_results.split('\\n'), file.readlines()): line = line.replace('\\n', '') if r != line:", "Creator[leaky] MySim = Simulation(mRNA | Protein) return MySim.compile() assert compare_model(oscillator(), 'test_tools/model_7.txt') # Model", "False except SystemExit: return True def test_orthogonal(): assert orthogonal_spaces() # Model to test", "= MySim.compile() assert compare_model(results, 'test_tools/model_6.txt') def test_model_7(): def oscillator(beta_m=5, beta_p=10, gamma_m=1, gamma_p=0.01, k=1,", "New(B) C.a, C.b MySim = Simulation(A | C) MySim.level = 0 MySim.compile() return", "= 0 results = MySim.compile() assert compare_model(results, 'test_tools/model_5.txt') def test_model_6(): # This model", "2 results = MySim.compile() assert compare_model(results, 'test_tools/model_2.txt') # Model to test species multiplication", "test_model_2(): Carnivore, Herbivore = BaseSpecies(2) Cat, Dog = New(Carnivore, 2) Carnivore + Herbivore(1", "def test_model_3(): MGMT, Blue_Oyster_Cult, The_Smiths = BaseSpecies(3) MGMT.eletric_fell, MGMT.little_dark_age, MGMT.kids Blue_Oyster_Cult.burning_for_you >> Blue_Oyster_Cult.reaper[1]", "This model tests species that are not referenced in the reactants (we call", "True # Model to test the basics def test_model_1(): A, B, C =", "results = MySim.compile() assert compare_model(results, 'test_tools/model_5.txt') def test_model_6(): # This model tests species", "= 0 results = MySim.compile() assert compare_model(results, 'test_tools/model_1.txt') # Model to test basic", "to be low Zero >> Creator[leaky] MySim = Simulation(mRNA | Protein) return MySim.compile()", "oscillator(beta_m=5, beta_p=10, gamma_m=1, gamma_p=0.01, k=1, n=4, leaky=0.0001): Mortal, Creator = BaseSpecies(2) mRNA =", "* u.mol / u.meter ** 2) >> C[1] MySim = Simulation(A | B", "has random order for species names # Compare results with expected file def", "* Blue_Oyster_Cult * The_Smiths MySim = Simulation(Music) MySim.level = 0 results = MySim.compile()", "n=4, leaky=0.0001): Mortal, Creator = BaseSpecies(2) mRNA = Mortal * Creator Protein =", "BaseSpecies(2) A.a, A.b C = New(B) C.a, C.b MySim = Simulation(A | C)", "C = New(B) C.a, C.b MySim = Simulation(A | C) MySim.level = 0", "return True def test_orthogonal(): assert orthogonal_spaces() # Model to test dimensional inconsistency def", "compare_model(results, 'test_tools/model_5.txt') def test_model_6(): # This model tests species that are not referenced", "C.b MySim = Simulation(A | C) MySim.level = 0 MySim.compile() return False except", ">> Protein.c(p) + mRNA.c(m)[lambda pro: f'{beta_m}/(1 + ({pro}/{k})**{n}'] # Production reactions for m,", "return False except SystemExit: return True def 
test_orthogonal(): assert orthogonal_spaces() # Model to", "C) MySim.level = 0 results = MySim.compile() assert compare_model(results, 'test_tools/model_5.txt') def test_model_6(): #", "= MGMT * Blue_Oyster_Cult * The_Smiths MySim = Simulation(Music) MySim.level = 0 results", "them Born Species) A = BaseSpecies(1) B = New(A) C = New(A) B.b1,", "= Mortal * Creator Protein = New(Mortal) # Repression reactions for m, p", "test_orthogonal(): assert orthogonal_spaces() # Model to test dimensional inconsistency def dimensional_inconsistency(): try: A,", "u.mol), Dog(1 * u.mol) MySim = Simulation(Cat | Dog | Herbivore) MySim.level =", "MySim.volume = 1 * u.meter ** 2 results = MySim.compile() assert compare_model(results, 'test_tools/model_2.txt')", "C = BaseSpecies(3) A(1 * u.mol / u.meter ** 3) + B(1 *", "| C) MySim.level = 0 results = MySim.compile() assert compare_model(results, 'test_tools/model_5.txt') def test_model_6():", "open(file_name, 'r') as file: for r, line in zip(comp_results.split('\\n'), file.readlines()): line = line.replace('\\n',", "Carnivore + Herbivore(1 * u.mol) >> Carnivore[1] Cat(1 * u.mol), Dog(1 * u.mol)", ">> Carnivore[1] Cat(1 * u.mol), Dog(1 * u.mol) MySim = Simulation(Cat | Dog", "u.meter ** 3) + B(1 * u.mol / u.meter ** 2) >> C[1]", "compare_model(results, 'test_tools/model_3.txt') # Model to test inheritance queries # All bacterias are infected", "assert compare_model(results, 'test_tools/model_1.txt') # Model to test basic inheritance def test_model_2(): Carnivore, Herbivore", "| B2 | V1 | V2) MySim.level = 0 results = MySim.compile() assert", "# Model to test species multiplication def test_model_3(): MGMT, Blue_Oyster_Cult, The_Smiths = BaseSpecies(3)", "This script tests the model creation and compilation # It also test the", "+ Protein.c(p)[beta_p] # We need the rate of degradation to be different from", "B = BaseSpecies(2) A.a, A.b C = New(B) C.a, C.b MySim = Simulation(A", "Bacteria.infected[1] MySim = Simulation(B1 | B2 | V1 | V2) MySim.level = 0", "u.mol / u.meter ** 2) >> C[1] MySim = Simulation(A | B |", "Plot has random order for species names # Compare results with expected file", "p in zip(['m1', 'm2', 'm3'], ['x2', 'x3', 'x1']): Protein.c(p) >> Protein.c(p) + mRNA.c(m)[lambda", "= Simulation(B1 | B2 | V1 | V2) MySim.level = 0 results =", "Species) A = BaseSpecies(1) B = New(A) C = New(A) B.b1, B.b2, C.c1,", "mRNA.c(m)[lambda pro: f'{beta_m}/(1 + ({pro}/{k})**{n}'] # Production reactions for m, p in zip(['m1',", "Simulation(A | B | C) MySim.level = 0 MySim.compile() return False except SystemExit:", "MySim.compile() assert compare_model(results, 'test_tools/model_3.txt') # Model to test inheritance queries # All bacterias", "Simulation(A | C) MySim.level = 0 MySim.compile() return False except SystemExit: return True", "virus here def test_model_4(): Bacteria, Virus = BaseSpecies(2) B1, B2 = New(Bacteria, 2)", "defined orthogonal spaces def orthogonal_spaces(): try: A, B = BaseSpecies(2) A.a, A.b C", "* A >> 3 * A[1] MySim = Simulation(B | C) MySim.level =", "test basic inheritance def test_model_2(): Carnivore, Herbivore = BaseSpecies(2) Cat, Dog = New(Carnivore,", "B(1 * u.mol / u.meter ** 2) >> C[1] MySim = Simulation(A |", "'r') as file: for r, line in zip(comp_results.split('\\n'), file.readlines()): line = line.replace('\\n', '')", "that are not referenced in the reactants (we call them Born Species) A", "| B | C) MySim.level = 0 results = MySim.compile() assert compare_model(results, 'test_tools/model_1.txt')", "test_model_4(): Bacteria, 
Virus = BaseSpecies(2) B1, B2 = New(Bacteria, 2) V1, V2 =", "expected file def compare_model(comp_results, file_name): with open(file_name, 'r') as file: for r, line", "dimensional_inconsistency(): try: A, B, C = BaseSpecies(3) A(1 * u.mol / u.meter **", "# All bacterias are infected by any virus here def test_model_4(): Bacteria, Virus", "MySim.level = 0 MySim.compile() return False except SystemExit: return True def test_orthogonal(): assert", "def test_model_7(): def oscillator(beta_m=5, beta_p=10, gamma_m=1, gamma_p=0.01, k=1, n=4, leaky=0.0001): Mortal, Creator =", "the leaky mRNA expression, it needs to be low Zero >> Creator[leaky] MySim", "sys # TODO Plot has random order for species names # Compare results", "pytest from mobspy import * import sys # TODO Plot has random order", "uses some simple model and assertions import pytest from mobspy import * import", "# It uses some simple model and assertions import pytest from mobspy import", "Protein.c(p) + mRNA.c(m)[lambda pro: f'{beta_m}/(1 + ({pro}/{k})**{n}'] # Production reactions for m, p", "assertions import pytest from mobspy import * import sys # TODO Plot has", "def dimensional_inconsistency(): try: A, B, C = BaseSpecies(3) A(1 * u.mol / u.meter", "Simulation(B | C) MySim.level = 0 results = MySim.compile() assert compare_model(results, 'test_tools/model_5.txt') def", "test_model_6(): # This model tests species that are not referenced in the reactants", "2) >> C[1] MySim = Simulation(A | B | C) MySim.level = 0", "False return True # Model to test the basics def test_model_1(): A, B,", "def test_orthogonal(): assert orthogonal_spaces() # Model to test dimensional inconsistency def dimensional_inconsistency(): try:", "'x2', 'x3']): mRNA.c(m) >> mRNA.c(m) + Protein.c(p)[beta_p] # We need the rate of", "+ B >> C[1] MySim = Simulation(A | B | C) MySim.level =", "Cat(1 * u.mol), Dog(1 * u.mol) MySim = Simulation(Cat | Dog | Herbivore)", "Model to test dimensional inconsistency def dimensional_inconsistency(): try: A, B, C = BaseSpecies(3)", ">> 3 * A[1] MySim = Simulation(B | C) MySim.level = 0 results", "rate of degradation to be different from proteins and mRNA Mortal >> Zero[lambda", "Blue_Oyster_Cult * The_Smiths MySim = Simulation(Music) MySim.level = 0 results = MySim.compile() assert", "def oscillator(beta_m=5, beta_p=10, gamma_m=1, gamma_p=0.01, k=1, n=4, leaky=0.0001): Mortal, Creator = BaseSpecies(2) mRNA", "by any virus here def test_model_4(): Bacteria, Virus = BaseSpecies(2) B1, B2 =", "Virus >> Bacteria.infected[1] MySim = Simulation(B1 | B2 | V1 | V2) MySim.level", "= New(Mortal) # Repression reactions for m, p in zip(['m1', 'm2', 'm3'], ['x2',", "gamma_m] # This is the leaky mRNA expression, it needs to be low", "= line.replace('\\n', '') if r != line: return False return True # Model", "the reactants (we call them Born Species) A = BaseSpecies(1) B = New(A)", "A.b C = New(B) C.a, C.b MySim = Simulation(A | C) MySim.level =", "for m, p in zip(['m1', 'm2', 'm3'], ['x2', 'x3', 'x1']): Protein.c(p) >> Protein.c(p)", "assert orthogonal_spaces() # Model to test dimensional inconsistency def dimensional_inconsistency(): try: A, B,", "import * import sys # TODO Plot has random order for species names", "file.readlines()): line = line.replace('\\n', '') if r != line: return False return True", "Bacteria.not_infected + Virus >> Bacteria.infected[1] MySim = Simulation(B1 | B2 | V1 |", "# It also test the calculation capabilities # It uses some simple model", "Mortal * Creator Protein = New(Mortal) # Repression reactions for 
<reponame>ROBACON/mobspy<filename>test_script.py
# This script tests the model creation and compilation
# It also tests the calculation capabilities
# It uses some simple models and assertions
import pytest
from mobspy import *
import sys

# TODO Plot has random order for species names


# Compare results with expected file
def compare_model(comp_results, file_name):
    with open(file_name, 'r') as file:
        for r, line in zip(comp_results.split('\n'), file.readlines()):
            line = line.replace('\n', '')
            if r != line:
                return False
    return True


# Model to test the basics
def test_model_1():
    A, B, C = BaseSpecies(3)
    A + B >> C[1]
    MySim = Simulation(A | B | C)
    MySim.level = 0
    results = MySim.compile()
    assert compare_model(results, 'test_tools/model_1.txt')


# Model to test basic inheritance
def test_model_2():
    Carnivore, Herbivore = BaseSpecies(2)
    Cat, Dog = New(Carnivore, 2)
    Carnivore + Herbivore(1 * u.mol) >> Carnivore[1]
    Cat(1 * u.mol), Dog(1 * u.mol)
    MySim = Simulation(Cat | Dog | Herbivore)
    MySim.level = 0
    MySim.volume = 1 * u.meter ** 2
    results = MySim.compile()
    assert compare_model(results, 'test_tools/model_2.txt')


# Model to test species multiplication
def test_model_3():
    MGMT, Blue_Oyster_Cult, The_Smiths = BaseSpecies(3)
    MGMT.eletric_fell, MGMT.little_dark_age, MGMT.kids
    Blue_Oyster_Cult.burning_for_you >> Blue_Oyster_Cult.reaper[1]
    The_Smiths.stop_me >> The_Smiths.charming_man[1]
    Music = MGMT * Blue_Oyster_Cult * The_Smiths
    MySim = Simulation(Music)
    MySim.level = 0
    results = MySim.compile()
    assert compare_model(results, 'test_tools/model_3.txt')


# Model to test inheritance queries
# All bacteria are infected by any virus here
def test_model_4():
    Bacteria, Virus = BaseSpecies(2)
    B1, B2 = New(Bacteria, 2)
    V1, V2 = New(Virus, 2)
    Bacteria.not_infected + Virus >> Bacteria.infected[1]
    MySim = Simulation(B1 | B2 | V1 | V2)
    MySim.level = 0
    results = MySim.compile()
    assert compare_model(results, 'test_tools/model_4.txt')


# Model to test round-robin and stoichiometry
def test_model_5():
    A = BaseSpecies(1)
    B, C = New(A, 2)
    A >> 2 * A[1]
    2 * A >> 3 * A[1]
    MySim = Simulation(B | C)
    MySim.level = 0
    results = MySim.compile()
    assert compare_model(results, 'test_tools/model_5.txt')


def test_model_6():
    # This model tests species that are not referenced in the reactants (we call them Born Species)
    A = BaseSpecies(1)
    B = New(A)
    C = New(A)
    B.b1, B.b2, C.c1, C.c2
    Zero >> 2 * A[1]
    MySim = Simulation(B | C)
    results = MySim.compile()
    assert compare_model(results, 'test_tools/model_6.txt')


def test_model_7():
    def oscillator(beta_m=5, beta_p=10, gamma_m=1, gamma_p=0.01, k=1, n=4, leaky=0.0001):
        Mortal, Creator = BaseSpecies(2)
        mRNA = Mortal * Creator
        Protein = New(Mortal)
        # Repression reactions
        for m, p in zip(['m1', 'm2', 'm3'], ['x2', 'x3', 'x1']):
            Protein.c(p) >> Protein.c(p) + mRNA.c(m)[lambda pro: f'{beta_m}/(1 + ({pro}/{k})**{n})']
        # Production reactions
        for m, p in zip(['m1', 'm2', 'm3'], ['x1', 'x2', 'x3']):
            mRNA.c(m) >> mRNA.c(m) + Protein.c(p)[beta_p]
        # We need the rate of degradation to be different for proteins and mRNA
        Mortal >> Zero[lambda r1: gamma_p if r1.is_a(Protein) else gamma_m]
        # This is the leaky mRNA expression, it needs to be low
        Zero >> Creator[leaky]
        MySim = Simulation(mRNA | Protein)
        return MySim.compile()
    assert compare_model(oscillator(), 'test_tools/model_7.txt')


# Model to test well defined orthogonal spaces
def orthogonal_spaces():
    try:
        A, B = BaseSpecies(2)
        A.a, A.b
        C = New(B)
        C.a, C.b
        MySim = Simulation(A | C)
        MySim.level = 0
        MySim.compile()
        return False
    except SystemExit:
        return True


def test_orthogonal():
    assert orthogonal_spaces()


# Model to test dimensional inconsistency
def dimensional_inconsistency():
    try:
        A, B, C = BaseSpecies(3)
        A(1 * u.mol / u.meter ** 3) + B(1 * u.mol / u.meter ** 2) >> C[1]
        MySim = Simulation(A | B | C)
        MySim.level = 0
        MySim.compile()
        return False
    except SystemExit:
        print('Dimensional inconsistency model Ok')
        return True


def test_dimensional_inconsistency():
    assert dimensional_inconsistency()
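Every test above follows the same golden-file pattern: compile() returns the model as text and compare_model checks it line by line against a stored reference under test_tools/. A minimal, self-contained illustration of that pattern, assuming the compare_model helper defined above is in scope; the file name 'model_demo.txt' and its contents are invented for this example only:

# Hypothetical illustration of the golden-file check used by the tests above.
compiled_output = "Species A\nSpecies B\nReaction A + B -> C, rate 1"
with open('model_demo.txt', 'w') as fh:
    fh.write(compiled_output + '\n')
assert compare_model(compiled_output, 'model_demo.txt')              # identical text passes
assert not compare_model("Species A\nSpecies X", 'model_demo.txt')   # any differing line fails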
<filename>mkauthlist/__init__.py<gh_stars>1-10
#!/usr/bin/env python
"""
Nothing to see here.
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"

from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
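This looks like the standard versioneer boilerplate: the generated _version module computes the version string at import time, and the temporary name is deleted so only __version__ (plus __author__ and __email__) remains in the package namespace. A short usage sketch, assuming the package is installed:

# Assumes mkauthlist is installed in the current environment.
import mkauthlist
print(mkauthlist.__version__)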
# Grid search over AntColony hyperparameters; aggregates per-combination results into a LaTeX table.
import os
import re
import argparse
from concurrent.futures import ProcessPoolExecutor
import itertools
import yaml
import sys
import copy
import numpy as np
import pandas as pd
from lib.constants import *
from lib.utils import *
from lib import utils
# AntColony and NUM_EXECUTIONS are expected to be provided by the lib.* imports above.

TOP_N = 15

# Teach the YAML loader to recognise scientific notation and inf/nan as floats.
loader = yaml.SafeLoader
loader.add_implicit_resolver(
    u'tag:yaml.org,2002:float',
    re.compile(u'''^(?:
     [-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
    |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
    |\\.[0-9_]+(?:[eE][-+][0-9]+)?
    |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*
    |[-+]?\\.(?:inf|Inf|INF)
    |\\.(?:nan|NaN|NAN))$''', re.X),
    list(u'-+0123456789.'))

parser = argparse.ArgumentParser()
parser.add_argument('--config_file', '-c', default="config.yaml", type=str, help="Configuration file.")
args = parser.parse_args()

f = open(args.config_file)
config = yaml.load(f, Loader=loader)

# Hyperparameter grid to search over.
to_search = {
    'pheromony_policies': {'AntSystem': {"rho": [0.3, 0.5, 0.7], "Q": [75, 100, 125]}},
    "selection": {"beta": [3, 5, 7]},
    'parameters': {
        # "instance_name": ['lau15','sgb128'],
        "eid": list(range(1, NUM_EXECUTIONS + 1))},
}

# parameters_names=['rho','Q','betas','eid']
keys_to_value, combinations = utils.get_names_combinations(config, to_search)
result_df = pd.DataFrame(columns=[keys[-1] for keys in keys_to_value])
parameters_names = [i[-1] for i in keys_to_value]
i = 0
for combination in combinations:
    # Write the current combination of values back into the nested config dict.
    for keys, v in zip(keys_to_value, combination):
        tmp = config
        for k in keys[:-1]:
            tmp = tmp[k]
        tmp[keys[-1]] = v
        result_df.loc[i, keys[-1]] = v
    ac = AntColony(pheromony_kwargs=config['pheromony_policies'][config['parameters']['pheromony_policy']],
                   selection_policy_kwargs=config['selection'],
                   **config['parameters'])
    df = ac.load_results()
    result_df.loc[i, parameters_names] = combination
    result_df.loc[i, 'Best fitness global'] = df.iloc[-1]['Best fitness global']
    result_df.loc[i, 'Best fitness'] = df.iloc[-1]['Best fitness']
    result_df.loc[i, 'Mean fitness'] = df.iloc[-1]['Mean fitness']
    result_df.loc[i, 'Median fitness'] = df.iloc[-1]['Median fitness']
    result_df.loc[i, 'Worst fitness'] = df.iloc[-1]['Worst fitness']
    i += 1

result_df['eid'] = pd.to_numeric(result_df['eid'])

# print('Top best fitness')
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
    # print(result_df)
    pd.set_option('display.expand_frame_repr', False)
    tmp = copy.copy(parameters_names)
    tmp.remove('eid')
    a = result_df.groupby(list(set(result_df.columns) - {'Best fitness global', 'Best fitness', 'Mean fitness', 'Median fitness', 'Worst fitness', 'eid'})).\
        agg({i: ['mean', 'std'] for i in {'Best fitness global', 'Best fitness', 'Mean fitness', 'Median fitness', 'Worst fitness', 'eid'}}).\
        sort_values(by=[('Best fitness global', 'mean')], ascending=True).reset_index()[tmp + ['Best fitness global', 'Best fitness', 'Mean fitness', 'Median fitness', 'Worst fitness', ]].head(TOP_N)
    open(f"../doc/{config['parameters']['instance_name']}_output.tex", 'w').write(a.to_latex())
    # print('Top mean fitness')
    # print(result_df.groupby(list(set(result_df.columns)-{'Best fitness','Mean fitness', 'eid'})).\
    #     agg({i: ['mean','median','std'] for i in {'Best fitness','Mean fitness', 'eid'}}).\
    #     sort_values(by=[('Mean fitness','mean')],ascending=True).reset_index()[list(set(to_update.keys())-{'eid'})+['Best fitness','Mean fitness']].head(TOP_N))
|[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)", "[75, 100, 125]}}, \"selection\":{\"beta\": [3,5,7]}, 'parameters':{ # \"instance_name\": ['lau15','sgb128'], \"eid\": list(range(1,NUM_EXECUTIONS+1))}, } #", "= 0 for combination in combinations: for keys, v in zip(keys_to_value,combination): tmp =", "result_df.loc[i,'Median fitness'] = df.iloc[-1]['Median fitness'] result_df.loc[i,'Worst fitness'] = df.iloc[-1]['Worst fitness'] i += 1", "v in zip(keys_to_value,combination): tmp = config for k in keys[:-1]: tmp = tmp[k]", "= open(args.config_file) config = yaml.load(f,Loader=loader) to_search = { 'pheromony_policies': {'AntSystem':{\"rho\": [0.3,0.5,0.7], \"Q\": [75,", "fitness','Worst fitness', 'eid'})).\\ agg({i: ['mean','std'] for i in {'Best fitness global', 'Best fitness','Mean", "print('Top mean fitness') # print(result_df.groupby(list(set(result_df.columns)-{'Best fitness','Mean fitness', 'eid'})).\\ # agg({i: ['mean','median','std'] for i", "15 loader = yaml.SafeLoader loader.add_implicit_resolver( u'tag:yaml.org,2002:float', re.compile(u'''^(?: [-+]?(?:[0-9][0-9_]*)\\\\.[0-9_]*(?:[eE][-+]?[0-9]+)? |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+) |\\\\.[0-9_]+(?:[eE][-+][0-9]+)? |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\\\.[0-9_]* |[-+]?\\\\.(?:inf|Inf|INF) |\\\\.(?:nan|NaN|NAN))$''',", "= yaml.load(f,Loader=loader) to_search = { 'pheromony_policies': {'AntSystem':{\"rho\": [0.3,0.5,0.7], \"Q\": [75, 100, 125]}}, \"selection\":{\"beta\":", "0 for combination in combinations: for keys, v in zip(keys_to_value,combination): tmp = config", "keys_to_value, combinations=utils.get_names_combinations(config,to_search) result_df = pd.DataFrame(columns= [keys[-1] for keys in keys_to_value]) parameters_names = [i[-1]", "[0.3,0.5,0.7], \"Q\": [75, 100, 125]}}, \"selection\":{\"beta\": [3,5,7]}, 'parameters':{ # \"instance_name\": ['lau15','sgb128'], \"eid\": list(range(1,NUM_EXECUTIONS+1))},", "'eid'})).\\ agg({i: ['mean','std'] for i in {'Best fitness global', 'Best fitness','Mean fitness','Median fitness','Worst", "# print(result_df) pd.set_option('display.expand_frame_repr', False) tmp = copy.copy(parameters_names) tmp.remove('eid') a=result_df.groupby(list(set(result_df.columns)-{'Best fitness global','Best fitness','Mean fitness','Median", "\"selection\":{\"beta\": [3,5,7]}, 'parameters':{ # \"instance_name\": ['lau15','sgb128'], \"eid\": list(range(1,NUM_EXECUTIONS+1))}, } # parameters_names=['rho','Q','betas','eid'] keys_to_value, combinations=utils.get_names_combinations(config,to_search)", "['mean','std'] for i in {'Best fitness global', 'Best fitness','Mean fitness','Median fitness','Worst fitness', 'eid'}}).\\", "fitness global','Best fitness','Mean fitness','Median fitness','Worst fitness',]].head(TOP_N) open(f\"../doc/{config['parameters']['instance_name']}_output.tex\",'w').write(a.to_latex()) # print('Top mean fitness') # print(result_df.groupby(list(set(result_df.columns)-{'Best", "100, 125]}}, \"selection\":{\"beta\": [3,5,7]}, 'parameters':{ # \"instance_name\": ['lau15','sgb128'], \"eid\": list(range(1,NUM_EXECUTIONS+1))}, } # parameters_names=['rho','Q','betas','eid']", "= config for k in keys[:-1]: tmp = tmp[k] tmp[keys[-1]] = v result_df.loc[i,keys[-1]]", "parser = argparse.ArgumentParser() parser.add_argument('--config_file','-c', default=\"config.yaml\", type=str, help=\"Configuration file.\") args = parser.parse_args() f =", "for keys, v in zip(keys_to_value,combination): tmp = config for k in keys[:-1]: tmp", "'eid'})).\\ # agg({i: ['mean','median','std'] for i in {'Best fitness','Mean 
fitness', 'eid'}}).\\ # sort_values(by=[('Mean", "fitness','Mean fitness','Median fitness','Worst fitness', 'eid'}}).\\ sort_values(by=[('Best fitness global','mean')],ascending=True).reset_index()[tmp+['Best fitness global','Best fitness','Mean fitness','Median fitness','Worst" ]
[ "This file is auto-generated by the root Makefile. Do not edit manually. version", "# This file is auto-generated by the root Makefile. Do not edit manually.", "is auto-generated by the root Makefile. Do not edit manually. version = \"0.4.2\"", "file is auto-generated by the root Makefile. Do not edit manually. version =" ]
[ "from pedrec.models.validation.env_position_validation_results import EnvPositionValidationResults from pedrec.models.validation.orientation_validation_results import OrientationValidationResults from pedrec.models.validation.pose_2d_validation_conf_results import Pose2DValidationConfResults from", "Pose2DValidationPCKResults from pedrec.models.validation.pose_3d_validation_results import Pose3DValidationResults @dataclass() class ValidationResults(object): loss: float val_duration: float pose2d_pck:", "from dataclasses import dataclass from pedrec.models.validation.env_position_validation_results import EnvPositionValidationResults from pedrec.models.validation.orientation_validation_results import OrientationValidationResults from", "float pose2d_pck: Pose2DValidationPCKResults = None pose2d_conf: Pose2DValidationConfResults = None pose3d: Pose3DValidationResults = None", "val_duration: float pose2d_pck: Pose2DValidationPCKResults = None pose2d_conf: Pose2DValidationConfResults = None pose3d: Pose3DValidationResults =", "Pose3DValidationResults @dataclass() class ValidationResults(object): loss: float val_duration: float pose2d_pck: Pose2DValidationPCKResults = None pose2d_conf:", "= None pose3d: Pose3DValidationResults = None orientation: OrientationValidationResults = None env_position: EnvPositionValidationResults =", "from pedrec.models.validation.pose_3d_validation_results import Pose3DValidationResults @dataclass() class ValidationResults(object): loss: float val_duration: float pose2d_pck: Pose2DValidationPCKResults", "import EnvPositionValidationResults from pedrec.models.validation.orientation_validation_results import OrientationValidationResults from pedrec.models.validation.pose_2d_validation_conf_results import Pose2DValidationConfResults from pedrec.models.validation.pose_2d_validation_pck_results import", "pedrec.models.validation.env_position_validation_results import EnvPositionValidationResults from pedrec.models.validation.orientation_validation_results import OrientationValidationResults from pedrec.models.validation.pose_2d_validation_conf_results import Pose2DValidationConfResults from pedrec.models.validation.pose_2d_validation_pck_results", "OrientationValidationResults from pedrec.models.validation.pose_2d_validation_conf_results import Pose2DValidationConfResults from pedrec.models.validation.pose_2d_validation_pck_results import Pose2DValidationPCKResults from pedrec.models.validation.pose_3d_validation_results import Pose3DValidationResults", "pose2d_conf: Pose2DValidationConfResults = None pose3d: Pose3DValidationResults = None orientation: OrientationValidationResults = None env_position:", "from pedrec.models.validation.pose_2d_validation_pck_results import Pose2DValidationPCKResults from pedrec.models.validation.pose_3d_validation_results import Pose3DValidationResults @dataclass() class ValidationResults(object): loss: float", "import Pose2DValidationPCKResults from pedrec.models.validation.pose_3d_validation_results import Pose3DValidationResults @dataclass() class ValidationResults(object): loss: float val_duration: float", "dataclasses import dataclass from pedrec.models.validation.env_position_validation_results import EnvPositionValidationResults from pedrec.models.validation.orientation_validation_results import OrientationValidationResults from pedrec.models.validation.pose_2d_validation_conf_results", "import dataclass from pedrec.models.validation.env_position_validation_results import EnvPositionValidationResults from 
pedrec.models.validation.orientation_validation_results import OrientationValidationResults from pedrec.models.validation.pose_2d_validation_conf_results import", "Pose2DValidationConfResults = None pose3d: Pose3DValidationResults = None orientation: OrientationValidationResults = None env_position: EnvPositionValidationResults", "from pedrec.models.validation.pose_2d_validation_conf_results import Pose2DValidationConfResults from pedrec.models.validation.pose_2d_validation_pck_results import Pose2DValidationPCKResults from pedrec.models.validation.pose_3d_validation_results import Pose3DValidationResults @dataclass()", "pedrec.models.validation.pose_2d_validation_conf_results import Pose2DValidationConfResults from pedrec.models.validation.pose_2d_validation_pck_results import Pose2DValidationPCKResults from pedrec.models.validation.pose_3d_validation_results import Pose3DValidationResults @dataclass() class", "Pose2DValidationPCKResults = None pose2d_conf: Pose2DValidationConfResults = None pose3d: Pose3DValidationResults = None orientation: OrientationValidationResults", "from pedrec.models.validation.orientation_validation_results import OrientationValidationResults from pedrec.models.validation.pose_2d_validation_conf_results import Pose2DValidationConfResults from pedrec.models.validation.pose_2d_validation_pck_results import Pose2DValidationPCKResults from", "ValidationResults(object): loss: float val_duration: float pose2d_pck: Pose2DValidationPCKResults = None pose2d_conf: Pose2DValidationConfResults = None", "= None pose2d_conf: Pose2DValidationConfResults = None pose3d: Pose3DValidationResults = None orientation: OrientationValidationResults =", "Pose2DValidationConfResults from pedrec.models.validation.pose_2d_validation_pck_results import Pose2DValidationPCKResults from pedrec.models.validation.pose_3d_validation_results import Pose3DValidationResults @dataclass() class ValidationResults(object): loss:", "<gh_stars>1-10 from dataclasses import dataclass from pedrec.models.validation.env_position_validation_results import EnvPositionValidationResults from pedrec.models.validation.orientation_validation_results import OrientationValidationResults", "import Pose3DValidationResults @dataclass() class ValidationResults(object): loss: float val_duration: float pose2d_pck: Pose2DValidationPCKResults = None", "dataclass from pedrec.models.validation.env_position_validation_results import EnvPositionValidationResults from pedrec.models.validation.orientation_validation_results import OrientationValidationResults from pedrec.models.validation.pose_2d_validation_conf_results import Pose2DValidationConfResults", "loss: float val_duration: float pose2d_pck: Pose2DValidationPCKResults = None pose2d_conf: Pose2DValidationConfResults = None pose3d:", "pose2d_pck: Pose2DValidationPCKResults = None pose2d_conf: Pose2DValidationConfResults = None pose3d: Pose3DValidationResults = None orientation:", "float val_duration: float pose2d_pck: Pose2DValidationPCKResults = None pose2d_conf: Pose2DValidationConfResults = None pose3d: Pose3DValidationResults", "class ValidationResults(object): loss: float val_duration: float pose2d_pck: Pose2DValidationPCKResults = None pose2d_conf: Pose2DValidationConfResults =", "EnvPositionValidationResults from pedrec.models.validation.orientation_validation_results import OrientationValidationResults from pedrec.models.validation.pose_2d_validation_conf_results import Pose2DValidationConfResults from pedrec.models.validation.pose_2d_validation_pck_results import 
Pose2DValidationPCKResults", "pedrec.models.validation.pose_3d_validation_results import Pose3DValidationResults @dataclass() class ValidationResults(object): loss: float val_duration: float pose2d_pck: Pose2DValidationPCKResults =", "import Pose2DValidationConfResults from pedrec.models.validation.pose_2d_validation_pck_results import Pose2DValidationPCKResults from pedrec.models.validation.pose_3d_validation_results import Pose3DValidationResults @dataclass() class ValidationResults(object):", "pedrec.models.validation.orientation_validation_results import OrientationValidationResults from pedrec.models.validation.pose_2d_validation_conf_results import Pose2DValidationConfResults from pedrec.models.validation.pose_2d_validation_pck_results import Pose2DValidationPCKResults from pedrec.models.validation.pose_3d_validation_results", "pedrec.models.validation.pose_2d_validation_pck_results import Pose2DValidationPCKResults from pedrec.models.validation.pose_3d_validation_results import Pose3DValidationResults @dataclass() class ValidationResults(object): loss: float val_duration:", "@dataclass() class ValidationResults(object): loss: float val_duration: float pose2d_pck: Pose2DValidationPCKResults = None pose2d_conf: Pose2DValidationConfResults", "None pose3d: Pose3DValidationResults = None orientation: OrientationValidationResults = None env_position: EnvPositionValidationResults = None", "None pose2d_conf: Pose2DValidationConfResults = None pose3d: Pose3DValidationResults = None orientation: OrientationValidationResults = None", "import OrientationValidationResults from pedrec.models.validation.pose_2d_validation_conf_results import Pose2DValidationConfResults from pedrec.models.validation.pose_2d_validation_pck_results import Pose2DValidationPCKResults from pedrec.models.validation.pose_3d_validation_results import" ]
[ "None self.remove_old = rem mixer.init() def to_mp3(self, txt, language=None): if not language: language", "Wed May 9 2018 @author: Glu(<NAME> \"\"\" import os from random import randint", "DetectorFactory DetectorFactory.seed = 0 class Voice_Synth: def __init__(self, rem=False): self.tts = None self.last_path", "synth = Voice_Synth(True) synth.to_mp3('Привет, человек! Как у тебя дела?') synth.save_mp3(\"1.mp3\") synth.play_mp3() synth.to_mp3('Пока, человек!", "synth.save_mp3(\"2.mp3\") synth.play_mp3() synth.kill() synth.play_mp3() else: synth = Voice_Synth() while True: print(\"Если хотите выйти", "Voice_Synth() while True: print(\"Если хотите выйти из программы - введите 'q'. Иначе -", "mixer import time from gtts import gTTS from langdetect import detect_langs from langdetect", "дела?') synth.save_mp3(\"1.mp3\") synth.play_mp3() synth.to_mp3('Пока, человек! До скорой встречи!') synth.save_mp3(\"2.mp3\") synth.play_mp3() synth.kill() synth.play_mp3() else:", "rem=False): self.tts = None self.last_path = None self.remove_old = rem mixer.init() def to_mp3(self,", "def play_mp3(self, path=None): if not path: path = self.last_path mixer.music.load(path) mixer.music.play() while mixer.music.get_busy():", "key=lambda x: x.prob).lang self.tts = gTTS(text=txt, lang=language) def save_mp3(self, path): if self.tts: self.tts.save(path)", "пустую строку\") if input(\">>\").lower() == \"q\": synth.kill() synth.play_mp3() break print(\"Введите текст:\") txt =", "= None self.last_path = None self.remove_old = rem mixer.init() def to_mp3(self, txt, language=None):", "Glu(<NAME> \"\"\" import os from random import randint from pygame import mixer import", "__init__(self, rem=False): self.tts = None self.last_path = None self.remove_old = rem mixer.init() def", "path): if self.tts: self.tts.save(path) if self.remove_old: if self.last_path: mixer.music.load(path) mixer.stop mixer.quit if(os.path.exists(self.last_path)): os.remove(self.last_path)", "class Voice_Synth: def __init__(self, rem=False): self.tts = None self.last_path = None self.remove_old =", "print(\"Что-то пошло не так:\", e) print(\"Введите другой путь, по которому я сохраню mp3\")", "print(\"Хотите просмотреть возможности программы?\") answer = input(\"[y/n]:\") if \"y\" in answer.lower(): synth =", "import time from gtts import gTTS from langdetect import detect_langs from langdetect import", "if \"y\" in answer.lower(): synth = Voice_Synth(True) synth.to_mp3('Привет, человек! Как у тебя дела?')", "путь, по которому я сохраню mp3\") path = input(\">>\") print(\"Проиграть Вам mp3 файл?\")", "on Wed May 9 2018 @author: Glu(<NAME> \"\"\" import os from random import", "= max(detect_langs(txt), key=lambda x: x.prob).lang self.tts = gTTS(text=txt, lang=language) def save_mp3(self, path): if", "in answer.lower(): synth = Voice_Synth(True) synth.to_mp3('Привет, человек! 
Как у тебя дела?') synth.save_mp3(\"1.mp3\") synth.play_mp3()", "сохраню mp3\") path = input(\">>\") print(\"Проиграть Вам mp3 файл?\") answer = input(\"[y/n]:\") if", "'*'- я не знаю)?\") language = input(\">>\") if \"*\" in language: language =", "0 class Voice_Synth: def __init__(self, rem=False): self.tts = None self.last_path = None self.remove_old", "self.tts = gTTS(text=txt, lang=language) def save_mp3(self, path): if self.tts: self.tts.save(path) if self.remove_old: if", "= input(\">>\") while True: try: synth.save_mp3(path) break except BaseException as e: print(\"Что-то пошло", "\"*\" in language: language = None synth.to_mp3(txt, language) print(\"Введите путь, по которому я", "in language: language = None synth.to_mp3(txt, language) print(\"Введите путь, по которому я сохраню", "== \"q\": synth.kill() synth.play_mp3() break print(\"Введите текст:\") txt = input(\">>\") print(\"Какой это язык", "to_mp3(self, txt, language=None): if not language: language = max(detect_langs(txt), key=lambda x: x.prob).lang self.tts", "synth.save_mp3(\"1.mp3\") synth.play_mp3() synth.to_mp3('Пока, человек! До скорой встречи!') synth.save_mp3(\"2.mp3\") synth.play_mp3() synth.kill() synth.play_mp3() else: synth", "synth.kill() synth.play_mp3() break print(\"Введите текст:\") txt = input(\">>\") print(\"Какой это язык ('ru'-русский, 'en'-", "self.remove_old = rem mixer.init() def to_mp3(self, txt, language=None): if not language: language =", "time.sleep(0.1) def kill(self): self.to_mp3(\"Надеюсь, что вам понравилась сия программа\", language=\"ru\") name = \"Bye\"+str(randint(0,999999))+\".sn.mp3\"", "path = self.last_path mixer.music.load(path) mixer.music.play() while mixer.music.get_busy(): time.sleep(0.1) def kill(self): self.to_mp3(\"Надеюсь, что вам", "\"q\": synth.kill() synth.play_mp3() break print(\"Введите текст:\") txt = input(\">>\") print(\"Какой это язык ('ru'-русский,", "if not language: language = max(detect_langs(txt), key=lambda x: x.prob).lang self.tts = gTTS(text=txt, lang=language)", "while True: try: synth.save_mp3(path) break except BaseException as e: print(\"Что-то пошло не так:\",", "path: path = self.last_path mixer.music.load(path) mixer.music.play() while mixer.music.get_busy(): time.sleep(0.1) def kill(self): self.to_mp3(\"Надеюсь, что", "import mixer import time from gtts import gTTS from langdetect import detect_langs from", "name = \"Bye\"+str(randint(0,999999))+\".sn.mp3\" self.save_mp3(name) pass def main(): print(\"Хотите просмотреть возможности программы?\") answer =", "скорой встречи!') synth.save_mp3(\"2.mp3\") synth.play_mp3() synth.kill() synth.play_mp3() else: synth = Voice_Synth() while True: print(\"Если", "txt = input(\">>\") print(\"Какой это язык ('ru'-русский, 'en'- ангийский, '*'- я не знаю)?\")", "программа\", language=\"ru\") name = \"Bye\"+str(randint(0,999999))+\".sn.mp3\" self.save_mp3(name) pass def main(): print(\"Хотите просмотреть возможности программы?\")", "break print(\"Введите текст:\") txt = input(\">>\") print(\"Какой это язык ('ru'-русский, 'en'- ангийский, '*'-", "print(\"Какой это язык ('ru'-русский, 'en'- ангийский, '*'- я не знаю)?\") language = input(\">>\")", "print(\"Если хотите выйти из программы - введите 'q'. 
Иначе - пустую строку\") if", "Voice_Synth: def __init__(self, rem=False): self.tts = None self.last_path = None self.remove_old = rem", "язык ('ru'-русский, 'en'- ангийский, '*'- я не знаю)?\") language = input(\">>\") if \"*\"", "import DetectorFactory DetectorFactory.seed = 0 class Voice_Synth: def __init__(self, rem=False): self.tts = None", "randint from pygame import mixer import time from gtts import gTTS from langdetect", "которому я сохраню mp3\") path = input(\">>\") print(\"Проиграть Вам mp3 файл?\") answer =", "mixer.music.play() while mixer.music.get_busy(): time.sleep(0.1) def kill(self): self.to_mp3(\"Надеюсь, что вам понравилась сия программа\", language=\"ru\")", "программы?\") answer = input(\"[y/n]:\") if \"y\" in answer.lower(): synth = Voice_Synth(True) synth.to_mp3('Привет, человек!", "= None synth.to_mp3(txt, language) print(\"Введите путь, по которому я сохраню mp3\") path =", "gTTS from langdetect import detect_langs from langdetect import DetectorFactory DetectorFactory.seed = 0 class", "self.tts.save(path) if self.remove_old: if self.last_path: mixer.music.load(path) mixer.stop mixer.quit if(os.path.exists(self.last_path)): os.remove(self.last_path) self.last_path = path", "synth.play_mp3() break print(\"Введите текст:\") txt = input(\">>\") print(\"Какой это язык ('ru'-русский, 'en'- ангийский,", "while True: print(\"Если хотите выйти из программы - введите 'q'. Иначе - пустую", "input(\">>\") if \"*\" in language: language = None synth.to_mp3(txt, language) print(\"Введите путь, по", "я сохраню mp3\") path = input(\">>\") print(\"Проиграть Вам mp3 файл?\") answer = input(\"[y/n]:\")", "mp3\") path = input(\">>\") while True: try: synth.save_mp3(path) break except BaseException as e:", "другой путь, по которому я сохраню mp3\") path = input(\">>\") print(\"Проиграть Вам mp3", "self.save_mp3(name) pass def main(): print(\"Хотите просмотреть возможности программы?\") answer = input(\"[y/n]:\") if \"y\"", "= Voice_Synth(True) synth.to_mp3('Привет, человек! Как у тебя дела?') synth.save_mp3(\"1.mp3\") synth.play_mp3() synth.to_mp3('Пока, человек! До", "import os from random import randint from pygame import mixer import time from", "x.prob).lang self.tts = gTTS(text=txt, lang=language) def save_mp3(self, path): if self.tts: self.tts.save(path) if self.remove_old:", "x: x.prob).lang self.tts = gTTS(text=txt, lang=language) def save_mp3(self, path): if self.tts: self.tts.save(path) if", "mixer.music.get_busy(): time.sleep(0.1) def kill(self): self.to_mp3(\"Надеюсь, что вам понравилась сия программа\", language=\"ru\") name =", "import detect_langs from langdetect import DetectorFactory DetectorFactory.seed = 0 class Voice_Synth: def __init__(self,", "kill(self): self.to_mp3(\"Надеюсь, что вам понравилась сия программа\", language=\"ru\") name = \"Bye\"+str(randint(0,999999))+\".sn.mp3\" self.save_mp3(name) pass", "= path def play_mp3(self, path=None): if not path: path = self.last_path mixer.music.load(path) mixer.music.play()", "synth.kill() synth.play_mp3() else: synth = Voice_Synth() while True: print(\"Если хотите выйти из программы", "<reponame>2017Kirill2017/Python_Audio_Synth<gh_stars>0 # -*- coding: utf-8 -*- \"\"\" Created on Wed May 9 2018", "def save_mp3(self, path): if self.tts: self.tts.save(path) if self.remove_old: if self.last_path: mixer.music.load(path) mixer.stop mixer.quit", "= input(\"[y/n]:\") if \"y\" in answer.lower(): synth = Voice_Synth(True) synth.to_mp3('Привет, человек! 
Как у", "from langdetect import detect_langs from langdetect import DetectorFactory DetectorFactory.seed = 0 class Voice_Synth:", "if not path: path = self.last_path mixer.music.load(path) mixer.music.play() while mixer.music.get_busy(): time.sleep(0.1) def kill(self):", "возможности программы?\") answer = input(\"[y/n]:\") if \"y\" in answer.lower(): synth = Voice_Synth(True) synth.to_mp3('Привет,", "gTTS(text=txt, lang=language) def save_mp3(self, path): if self.tts: self.tts.save(path) if self.remove_old: if self.last_path: mixer.music.load(path)", "сохраню mp3\") path = input(\">>\") while True: try: synth.save_mp3(path) break except BaseException as", "self.tts: self.tts.save(path) if self.remove_old: if self.last_path: mixer.music.load(path) mixer.stop mixer.quit if(os.path.exists(self.last_path)): os.remove(self.last_path) self.last_path =", "по которому я сохраню mp3\") path = input(\">>\") print(\"Проиграть Вам mp3 файл?\") answer", "= rem mixer.init() def to_mp3(self, txt, language=None): if not language: language = max(detect_langs(txt),", "not language: language = max(detect_langs(txt), key=lambda x: x.prob).lang self.tts = gTTS(text=txt, lang=language) def", "time from gtts import gTTS from langdetect import detect_langs from langdetect import DetectorFactory", "pygame import mixer import time from gtts import gTTS from langdetect import detect_langs", "тебя дела?') synth.save_mp3(\"1.mp3\") synth.play_mp3() synth.to_mp3('Пока, человек! До скорой встречи!') synth.save_mp3(\"2.mp3\") synth.play_mp3() synth.kill() synth.play_mp3()", "из программы - введите 'q'. Иначе - пустую строку\") if input(\">>\").lower() == \"q\":", "понравилась сия программа\", language=\"ru\") name = \"Bye\"+str(randint(0,999999))+\".sn.mp3\" self.save_mp3(name) pass def main(): print(\"Хотите просмотреть", "synth.to_mp3('Пока, человек! 
До скорой встречи!') synth.save_mp3(\"2.mp3\") synth.play_mp3() synth.kill() synth.play_mp3() else: synth = Voice_Synth()", "break except BaseException as e: print(\"Что-то пошло не так:\", e) print(\"Введите другой путь,", "- пустую строку\") if input(\">>\").lower() == \"q\": synth.kill() synth.play_mp3() break print(\"Введите текст:\") txt", "= input(\">>\") print(\"Проиграть Вам mp3 файл?\") answer = input(\"[y/n]:\") if \"y\" in answer.lower():", "synth.play_mp3() synth.kill() synth.play_mp3() else: synth = Voice_Synth() while True: print(\"Если хотите выйти из", "print(\"Проиграть Вам mp3 файл?\") answer = input(\"[y/n]:\") if \"y\" in answer.lower(): synth.play_mp3() pass", "main(): print(\"Хотите просмотреть возможности программы?\") answer = input(\"[y/n]:\") if \"y\" in answer.lower(): synth", "строку\") if input(\">>\").lower() == \"q\": synth.kill() synth.play_mp3() break print(\"Введите текст:\") txt = input(\">>\")", "я сохраню mp3\") path = input(\">>\") while True: try: synth.save_mp3(path) break except BaseException", "путь, по которому я сохраню mp3\") path = input(\">>\") while True: try: synth.save_mp3(path)", "# -*- coding: utf-8 -*- \"\"\" Created on Wed May 9 2018 @author:", "language=None): if not language: language = max(detect_langs(txt), key=lambda x: x.prob).lang self.tts = gTTS(text=txt,", "input(\">>\") print(\"Проиграть Вам mp3 файл?\") answer = input(\"[y/n]:\") if \"y\" in answer.lower(): synth.play_mp3()", "os from random import randint from pygame import mixer import time from gtts", "True: try: synth.save_mp3(path) break except BaseException as e: print(\"Что-то пошло не так:\", e)", "May 9 2018 @author: Glu(<NAME> \"\"\" import os from random import randint from", "2018 @author: Glu(<NAME> \"\"\" import os from random import randint from pygame import", "введите 'q'. 
Иначе - пустую строку\") if input(\">>\").lower() == \"q\": synth.kill() synth.play_mp3() break", "e: print(\"Что-то пошло не так:\", e) print(\"Введите другой путь, по которому я сохраню", "просмотреть возможности программы?\") answer = input(\"[y/n]:\") if \"y\" in answer.lower(): synth = Voice_Synth(True)", "это язык ('ru'-русский, 'en'- ангийский, '*'- я не знаю)?\") language = input(\">>\") if", "ангийский, '*'- я не знаю)?\") language = input(\">>\") if \"*\" in language: language", "print(\"Введите другой путь, по которому я сохраню mp3\") path = input(\">>\") print(\"Проиграть Вам", "answer = input(\"[y/n]:\") if \"y\" in answer.lower(): synth.play_mp3() pass if __name__ == \"__main__\":", "mp3 файл?\") answer = input(\"[y/n]:\") if \"y\" in answer.lower(): synth.play_mp3() pass if __name__", "synth.save_mp3(path) break except BaseException as e: print(\"Что-то пошло не так:\", e) print(\"Введите другой", "-*- \"\"\" Created on Wed May 9 2018 @author: Glu(<NAME> \"\"\" import os", "synth = Voice_Synth() while True: print(\"Если хотите выйти из программы - введите 'q'.", "if self.last_path: mixer.music.load(path) mixer.stop mixer.quit if(os.path.exists(self.last_path)): os.remove(self.last_path) self.last_path = path def play_mp3(self, path=None):", "coding: utf-8 -*- \"\"\" Created on Wed May 9 2018 @author: Glu(<NAME> \"\"\"", "random import randint from pygame import mixer import time from gtts import gTTS", "('ru'-русский, 'en'- ангийский, '*'- я не знаю)?\") language = input(\">>\") if \"*\" in", "None synth.to_mp3(txt, language) print(\"Введите путь, по которому я сохраню mp3\") path = input(\">>\")", "@author: Glu(<NAME> \"\"\" import os from random import randint from pygame import mixer", "что вам понравилась сия программа\", language=\"ru\") name = \"Bye\"+str(randint(0,999999))+\".sn.mp3\" self.save_mp3(name) pass def main():", "language) print(\"Введите путь, по которому я сохраню mp3\") path = input(\">>\") while True:", "сия программа\", language=\"ru\") name = \"Bye\"+str(randint(0,999999))+\".sn.mp3\" self.save_mp3(name) pass def main(): print(\"Хотите просмотреть возможности", "не так:\", e) print(\"Введите другой путь, по которому я сохраню mp3\") path =", "так:\", e) print(\"Введите другой путь, по которому я сохраню mp3\") path = input(\">>\")", "встречи!') synth.save_mp3(\"2.mp3\") synth.play_mp3() synth.kill() synth.play_mp3() else: synth = Voice_Synth() while True: print(\"Если хотите", "= \"Bye\"+str(randint(0,999999))+\".sn.mp3\" self.save_mp3(name) pass def main(): print(\"Хотите просмотреть возможности программы?\") answer = input(\"[y/n]:\")", "path def play_mp3(self, path=None): if not path: path = self.last_path mixer.music.load(path) mixer.music.play() while", "= Voice_Synth() while True: print(\"Если хотите выйти из программы - введите 'q'. 
Иначе", "gtts import gTTS from langdetect import detect_langs from langdetect import DetectorFactory DetectorFactory.seed =", "self.last_path: mixer.music.load(path) mixer.stop mixer.quit if(os.path.exists(self.last_path)): os.remove(self.last_path) self.last_path = path def play_mp3(self, path=None): if", "DetectorFactory.seed = 0 class Voice_Synth: def __init__(self, rem=False): self.tts = None self.last_path =", "synth.to_mp3(txt, language) print(\"Введите путь, по которому я сохраню mp3\") path = input(\">>\") while", "self.remove_old: if self.last_path: mixer.music.load(path) mixer.stop mixer.quit if(os.path.exists(self.last_path)): os.remove(self.last_path) self.last_path = path def play_mp3(self,", "До скорой встречи!') synth.save_mp3(\"2.mp3\") synth.play_mp3() synth.kill() synth.play_mp3() else: synth = Voice_Synth() while True:", "import gTTS from langdetect import detect_langs from langdetect import DetectorFactory DetectorFactory.seed = 0", "else: synth = Voice_Synth() while True: print(\"Если хотите выйти из программы - введите", "Created on Wed May 9 2018 @author: Glu(<NAME> \"\"\" import os from random", "mixer.music.load(path) mixer.music.play() while mixer.music.get_busy(): time.sleep(0.1) def kill(self): self.to_mp3(\"Надеюсь, что вам понравилась сия программа\",", "import randint from pygame import mixer import time from gtts import gTTS from", "language=\"ru\") name = \"Bye\"+str(randint(0,999999))+\".sn.mp3\" self.save_mp3(name) pass def main(): print(\"Хотите просмотреть возможности программы?\") answer", "текст:\") txt = input(\">>\") print(\"Какой это язык ('ru'-русский, 'en'- ангийский, '*'- я не", "\"y\" in answer.lower(): synth = Voice_Synth(True) synth.to_mp3('Привет, человек! Как у тебя дела?') synth.save_mp3(\"1.mp3\")", "path = input(\">>\") while True: try: synth.save_mp3(path) break except BaseException as e: print(\"Что-то", "txt, language=None): if not language: language = max(detect_langs(txt), key=lambda x: x.prob).lang self.tts =", "= gTTS(text=txt, lang=language) def save_mp3(self, path): if self.tts: self.tts.save(path) if self.remove_old: if self.last_path:", "True: print(\"Если хотите выйти из программы - введите 'q'. 
Иначе - пустую строку\")", "mixer.init() def to_mp3(self, txt, language=None): if not language: language = max(detect_langs(txt), key=lambda x:", "def kill(self): self.to_mp3(\"Надеюсь, что вам понравилась сия программа\", language=\"ru\") name = \"Bye\"+str(randint(0,999999))+\".sn.mp3\" self.save_mp3(name)", "synth.play_mp3() else: synth = Voice_Synth() while True: print(\"Если хотите выйти из программы -", "if(os.path.exists(self.last_path)): os.remove(self.last_path) self.last_path = path def play_mp3(self, path=None): if not path: path =", "def to_mp3(self, txt, language=None): if not language: language = max(detect_langs(txt), key=lambda x: x.prob).lang", "я не знаю)?\") language = input(\">>\") if \"*\" in language: language = None", "\"Bye\"+str(randint(0,999999))+\".sn.mp3\" self.save_mp3(name) pass def main(): print(\"Хотите просмотреть возможности программы?\") answer = input(\"[y/n]:\") if", "Иначе - пустую строку\") if input(\">>\").lower() == \"q\": synth.kill() synth.play_mp3() break print(\"Введите текст:\")", "max(detect_langs(txt), key=lambda x: x.prob).lang self.tts = gTTS(text=txt, lang=language) def save_mp3(self, path): if self.tts:", "= input(\"[y/n]:\") if \"y\" in answer.lower(): synth.play_mp3() pass if __name__ == \"__main__\": main()", "print(\"Введите текст:\") txt = input(\">>\") print(\"Какой это язык ('ru'-русский, 'en'- ангийский, '*'- я", "input(\"[y/n]:\") if \"y\" in answer.lower(): synth = Voice_Synth(True) synth.to_mp3('Привет, человек! Как у тебя", "path=None): if not path: path = self.last_path mixer.music.load(path) mixer.music.play() while mixer.music.get_busy(): time.sleep(0.1) def", "e) print(\"Введите другой путь, по которому я сохраню mp3\") path = input(\">>\") print(\"Проиграть", "'q'. Иначе - пустую строку\") if input(\">>\").lower() == \"q\": synth.kill() synth.play_mp3() break print(\"Введите", "lang=language) def save_mp3(self, path): if self.tts: self.tts.save(path) if self.remove_old: if self.last_path: mixer.music.load(path) mixer.stop", "try: synth.save_mp3(path) break except BaseException as e: print(\"Что-то пошло не так:\", e) print(\"Введите", "self.last_path mixer.music.load(path) mixer.music.play() while mixer.music.get_busy(): time.sleep(0.1) def kill(self): self.to_mp3(\"Надеюсь, что вам понравилась сия", "language = None synth.to_mp3(txt, language) print(\"Введите путь, по которому я сохраню mp3\") path", "программы - введите 'q'. Иначе - пустую строку\") if input(\">>\").lower() == \"q\": synth.kill()", "language = max(detect_langs(txt), key=lambda x: x.prob).lang self.tts = gTTS(text=txt, lang=language) def save_mp3(self, path):", "detect_langs from langdetect import DetectorFactory DetectorFactory.seed = 0 class Voice_Synth: def __init__(self, rem=False):", "None self.last_path = None self.remove_old = rem mixer.init() def to_mp3(self, txt, language=None): if", "language: language = max(detect_langs(txt), key=lambda x: x.prob).lang self.tts = gTTS(text=txt, lang=language) def save_mp3(self,", "человек! Как у тебя дела?') synth.save_mp3(\"1.mp3\") synth.play_mp3() synth.to_mp3('Пока, человек! До скорой встречи!') synth.save_mp3(\"2.mp3\")", "человек! 
До скорой встречи!') synth.save_mp3(\"2.mp3\") synth.play_mp3() synth.kill() synth.play_mp3() else: synth = Voice_Synth() while", "которому я сохраню mp3\") path = input(\">>\") while True: try: synth.save_mp3(path) break except", "def __init__(self, rem=False): self.tts = None self.last_path = None self.remove_old = rem mixer.init()", "langdetect import detect_langs from langdetect import DetectorFactory DetectorFactory.seed = 0 class Voice_Synth: def", "Вам mp3 файл?\") answer = input(\"[y/n]:\") if \"y\" in answer.lower(): synth.play_mp3() pass if", "= None self.remove_old = rem mixer.init() def to_mp3(self, txt, language=None): if not language:", "-*- coding: utf-8 -*- \"\"\" Created on Wed May 9 2018 @author: Glu(<NAME>", "from random import randint from pygame import mixer import time from gtts import", "rem mixer.init() def to_mp3(self, txt, language=None): if not language: language = max(detect_langs(txt), key=lambda", "synth.to_mp3('Привет, человек! Как у тебя дела?') synth.save_mp3(\"1.mp3\") synth.play_mp3() synth.to_mp3('Пока, человек! До скорой встречи!')", "выйти из программы - введите 'q'. Иначе - пустую строку\") if input(\">>\").lower() ==", "знаю)?\") language = input(\">>\") if \"*\" in language: language = None synth.to_mp3(txt, language)", "if input(\">>\").lower() == \"q\": synth.kill() synth.play_mp3() break print(\"Введите текст:\") txt = input(\">>\") print(\"Какой", "хотите выйти из программы - введите 'q'. Иначе - пустую строку\") if input(\">>\").lower()", "langdetect import DetectorFactory DetectorFactory.seed = 0 class Voice_Synth: def __init__(self, rem=False): self.tts =", "if self.remove_old: if self.last_path: mixer.music.load(path) mixer.stop mixer.quit if(os.path.exists(self.last_path)): os.remove(self.last_path) self.last_path = path def", "def main(): print(\"Хотите просмотреть возможности программы?\") answer = input(\"[y/n]:\") if \"y\" in answer.lower():", "не знаю)?\") language = input(\">>\") if \"*\" in language: language = None synth.to_mp3(txt,", "language = input(\">>\") if \"*\" in language: language = None synth.to_mp3(txt, language) print(\"Введите", "os.remove(self.last_path) self.last_path = path def play_mp3(self, path=None): if not path: path = self.last_path", "utf-8 -*- \"\"\" Created on Wed May 9 2018 @author: Glu(<NAME> \"\"\" import", "mixer.stop mixer.quit if(os.path.exists(self.last_path)): os.remove(self.last_path) self.last_path = path def play_mp3(self, path=None): if not path:", "9 2018 @author: Glu(<NAME> \"\"\" import os from random import randint from pygame", "path = input(\">>\") print(\"Проиграть Вам mp3 файл?\") answer = input(\"[y/n]:\") if \"y\" in", "\"\"\" import os from random import randint from pygame import mixer import time", "\"\"\" Created on Wed May 9 2018 @author: Glu(<NAME> \"\"\" import os from", "input(\">>\") while True: try: synth.save_mp3(path) break except BaseException as e: print(\"Что-то пошло не", "self.last_path = None self.remove_old = rem mixer.init() def to_mp3(self, txt, language=None): if not", "save_mp3(self, path): if self.tts: self.tts.save(path) if self.remove_old: if self.last_path: mixer.music.load(path) mixer.stop mixer.quit if(os.path.exists(self.last_path)):", "play_mp3(self, path=None): if not path: path = self.last_path mixer.music.load(path) mixer.music.play() while mixer.music.get_busy(): time.sleep(0.1)", "self.last_path = path def play_mp3(self, path=None): if not path: path = self.last_path mixer.music.load(path)", "while mixer.music.get_busy(): time.sleep(0.1) def 
kill(self): self.to_mp3(\"Надеюсь, что вам понравилась сия программа\", language=\"ru\") name", "print(\"Введите путь, по которому я сохраню mp3\") path = input(\">>\") while True: try:", "as e: print(\"Что-то пошло не так:\", e) print(\"Введите другой путь, по которому я", "пошло не так:\", e) print(\"Введите другой путь, по которому я сохраню mp3\") path", "файл?\") answer = input(\"[y/n]:\") if \"y\" in answer.lower(): synth.play_mp3() pass if __name__ ==", "self.to_mp3(\"Надеюсь, что вам понравилась сия программа\", language=\"ru\") name = \"Bye\"+str(randint(0,999999))+\".sn.mp3\" self.save_mp3(name) pass def", "from gtts import gTTS from langdetect import detect_langs from langdetect import DetectorFactory DetectorFactory.seed", "= 0 class Voice_Synth: def __init__(self, rem=False): self.tts = None self.last_path = None", "'en'- ангийский, '*'- я не знаю)?\") language = input(\">>\") if \"*\" in language:", "answer = input(\"[y/n]:\") if \"y\" in answer.lower(): synth = Voice_Synth(True) synth.to_mp3('Привет, человек! Как", "= self.last_path mixer.music.load(path) mixer.music.play() while mixer.music.get_busy(): time.sleep(0.1) def kill(self): self.to_mp3(\"Надеюсь, что вам понравилась", "по которому я сохраню mp3\") path = input(\">>\") while True: try: synth.save_mp3(path) break", "input(\">>\") print(\"Какой это язык ('ru'-русский, 'en'- ангийский, '*'- я не знаю)?\") language =", "if self.tts: self.tts.save(path) if self.remove_old: if self.last_path: mixer.music.load(path) mixer.stop mixer.quit if(os.path.exists(self.last_path)): os.remove(self.last_path) self.last_path", "mp3\") path = input(\">>\") print(\"Проиграть Вам mp3 файл?\") answer = input(\"[y/n]:\") if \"y\"", "answer.lower(): synth = Voice_Synth(True) synth.to_mp3('Привет, человек! Как у тебя дела?') synth.save_mp3(\"1.mp3\") synth.play_mp3() synth.to_mp3('Пока,", "Voice_Synth(True) synth.to_mp3('Привет, человек! Как у тебя дела?') synth.save_mp3(\"1.mp3\") synth.play_mp3() synth.to_mp3('Пока, человек! До скорой", "from pygame import mixer import time from gtts import gTTS from langdetect import", "from langdetect import DetectorFactory DetectorFactory.seed = 0 class Voice_Synth: def __init__(self, rem=False): self.tts", "mixer.music.load(path) mixer.stop mixer.quit if(os.path.exists(self.last_path)): os.remove(self.last_path) self.last_path = path def play_mp3(self, path=None): if not", "synth.play_mp3() synth.to_mp3('Пока, человек! До скорой встречи!') synth.save_mp3(\"2.mp3\") synth.play_mp3() synth.kill() synth.play_mp3() else: synth =", "= input(\">>\") print(\"Какой это язык ('ru'-русский, 'en'- ангийский, '*'- я не знаю)?\") language", "= input(\">>\") if \"*\" in language: language = None synth.to_mp3(txt, language) print(\"Введите путь,", "BaseException as e: print(\"Что-то пошло не так:\", e) print(\"Введите другой путь, по которому", "у тебя дела?') synth.save_mp3(\"1.mp3\") synth.play_mp3() synth.to_mp3('Пока, человек! 
До скорой встречи!') synth.save_mp3(\"2.mp3\") synth.play_mp3() synth.kill()", "вам понравилась сия программа\", language=\"ru\") name = \"Bye\"+str(randint(0,999999))+\".sn.mp3\" self.save_mp3(name) pass def main(): print(\"Хотите", "language: language = None synth.to_mp3(txt, language) print(\"Введите путь, по которому я сохраню mp3\")", "except BaseException as e: print(\"Что-то пошло не так:\", e) print(\"Введите другой путь, по", "mixer.quit if(os.path.exists(self.last_path)): os.remove(self.last_path) self.last_path = path def play_mp3(self, path=None): if not path: path", "if \"*\" in language: language = None synth.to_mp3(txt, language) print(\"Введите путь, по которому", "- введите 'q'. Иначе - пустую строку\") if input(\">>\").lower() == \"q\": synth.kill() synth.play_mp3()", "not path: path = self.last_path mixer.music.load(path) mixer.music.play() while mixer.music.get_busy(): time.sleep(0.1) def kill(self): self.to_mp3(\"Надеюсь,", "Как у тебя дела?') synth.save_mp3(\"1.mp3\") synth.play_mp3() synth.to_mp3('Пока, человек! До скорой встречи!') synth.save_mp3(\"2.mp3\") synth.play_mp3()", "pass def main(): print(\"Хотите просмотреть возможности программы?\") answer = input(\"[y/n]:\") if \"y\" in", "self.tts = None self.last_path = None self.remove_old = rem mixer.init() def to_mp3(self, txt,", "input(\">>\").lower() == \"q\": synth.kill() synth.play_mp3() break print(\"Введите текст:\") txt = input(\">>\") print(\"Какой это" ]
[]
[ "exam = Exam.objects.get(pk=exam_pk) qs = NoExam.objects.filter(course=exam.offering.course, period=exam.period) if qs.count() > 0: raise ValidationError(", "NoExam.objects.filter(course=exam.offering.course, period=exam.period) if qs.count() > 0: raise ValidationError( f\"There is a NoExam record", "def validate_noexam(exam_pk): from .models import Exam, NoExam exam = Exam.objects.get(pk=exam_pk) qs = NoExam.objects.filter(course=exam.offering.course,", "from .models import Exam, NoExam exam = Exam.objects.get(pk=exam_pk) qs = NoExam.objects.filter(course=exam.offering.course, period=exam.period) if", "if qs.count() > 0: raise ValidationError( f\"There is a NoExam record for {exam}.\",", "validate_noexam(exam_pk): from .models import Exam, NoExam exam = Exam.objects.get(pk=exam_pk) qs = NoExam.objects.filter(course=exam.offering.course, period=exam.period)", "NoExam exam = Exam.objects.get(pk=exam_pk) qs = NoExam.objects.filter(course=exam.offering.course, period=exam.period) if qs.count() > 0: raise", "import ValidationError def validate_noexam(exam_pk): from .models import Exam, NoExam exam = Exam.objects.get(pk=exam_pk) qs", "import Exam, NoExam exam = Exam.objects.get(pk=exam_pk) qs = NoExam.objects.filter(course=exam.offering.course, period=exam.period) if qs.count() >", "period=exam.period) if qs.count() > 0: raise ValidationError( f\"There is a NoExam record for", "qs.count() > 0: raise ValidationError( f\"There is a NoExam record for {exam}.\", params={\"exam\":", "> 0: raise ValidationError( f\"There is a NoExam record for {exam}.\", params={\"exam\": exam}", "Exam.objects.get(pk=exam_pk) qs = NoExam.objects.filter(course=exam.offering.course, period=exam.period) if qs.count() > 0: raise ValidationError( f\"There is", ".models import Exam, NoExam exam = Exam.objects.get(pk=exam_pk) qs = NoExam.objects.filter(course=exam.offering.course, period=exam.period) if qs.count()", "Exam, NoExam exam = Exam.objects.get(pk=exam_pk) qs = NoExam.objects.filter(course=exam.offering.course, period=exam.period) if qs.count() > 0:", "django.core.exceptions import ValidationError def validate_noexam(exam_pk): from .models import Exam, NoExam exam = Exam.objects.get(pk=exam_pk)", "0: raise ValidationError( f\"There is a NoExam record for {exam}.\", params={\"exam\": exam} )", "= NoExam.objects.filter(course=exam.offering.course, period=exam.period) if qs.count() > 0: raise ValidationError( f\"There is a NoExam", "<gh_stars>0 from django.core.exceptions import ValidationError def validate_noexam(exam_pk): from .models import Exam, NoExam exam", "= Exam.objects.get(pk=exam_pk) qs = NoExam.objects.filter(course=exam.offering.course, period=exam.period) if qs.count() > 0: raise ValidationError( f\"There", "qs = NoExam.objects.filter(course=exam.offering.course, period=exam.period) if qs.count() > 0: raise ValidationError( f\"There is a", "from django.core.exceptions import ValidationError def validate_noexam(exam_pk): from .models import Exam, NoExam exam =", "ValidationError def validate_noexam(exam_pk): from .models import Exam, NoExam exam = Exam.objects.get(pk=exam_pk) qs =" ]
[ "'<PASSWORD>')] RODSADMIN_NAME_PASSWORD_LIST = [] IRODS_DIR = '/var/lib/irods/iRODS' LOCAL_ZONE = 'dev' REMOTE_ZONE = 'buntest'", "TOPOLOGY_FROM_RESOURCE_SERVER = False HOSTNAME_1 = HOSTNAME_2 = HOSTNAME_3 = socket.gethostname() USE_SSL = False", "ICAT_HOSTNAME = socket.gethostname() PREEXISTING_ADMIN_PASSWORD = '<PASSWORD>' # TODO: allow for arbitrary number of", "= '<PASSWORD>' # TODO: allow for arbitrary number of remote zones class FEDERATION(object):", "'dev' REMOTE_ZONE = 'buntest' REMOTE_HOST = 'buntest' REMOTE_RESOURCE = 'demoResc' REMOTE_VAULT = '/var/lib/irods/iRODS/Vault'", "HOSTNAME_2 = HOSTNAME_3 = socket.gethostname() USE_SSL = False ICAT_HOSTNAME = socket.gethostname() PREEXISTING_ADMIN_PASSWORD =", "LOCAL_ZONE = 'dev' REMOTE_ZONE = 'buntest' REMOTE_HOST = 'buntest' REMOTE_RESOURCE = 'demoResc' REMOTE_VAULT", "False ICAT_HOSTNAME = socket.gethostname() PREEXISTING_ADMIN_PASSWORD = '<PASSWORD>' # TODO: allow for arbitrary number", "= socket.gethostname() USE_SSL = False ICAT_HOSTNAME = socket.gethostname() PREEXISTING_ADMIN_PASSWORD = '<PASSWORD>' # TODO:", "USE_SSL = False ICAT_HOSTNAME = socket.gethostname() PREEXISTING_ADMIN_PASSWORD = '<PASSWORD>' # TODO: allow for", "of remote zones class FEDERATION(object): LOCAL_IRODS_VERSION = (4, 2, 0) REMOTE_IRODS_VERSION = (4,", "REMOTE_RESOURCE = 'demoResc' REMOTE_VAULT = '/var/lib/irods/iRODS/Vault' TEST_FILE_SIZE = 4*1024*1024 LARGE_FILE_SIZE = 64*1024*1024 TEST_FILE_COUNT", "= 'buntest' REMOTE_RESOURCE = 'demoResc' REMOTE_VAULT = '/var/lib/irods/iRODS/Vault' TEST_FILE_SIZE = 4*1024*1024 LARGE_FILE_SIZE =", "arbitrary number of remote zones class FEDERATION(object): LOCAL_IRODS_VERSION = (4, 2, 0) REMOTE_IRODS_VERSION", "False HOSTNAME_1 = HOSTNAME_2 = HOSTNAME_3 = socket.gethostname() USE_SSL = False ICAT_HOSTNAME =", "= (4, 2, 0) RODSUSER_NAME_PASSWORD_LIST = [('zonehopper', '<PASSWORD>')] RODSADMIN_NAME_PASSWORD_LIST = [] IRODS_DIR =", "= '/var/lib/irods/iRODS' LOCAL_ZONE = 'dev' REMOTE_ZONE = 'buntest' REMOTE_HOST = 'buntest' REMOTE_RESOURCE =", "'<PASSWORD>' # TODO: allow for arbitrary number of remote zones class FEDERATION(object): LOCAL_IRODS_VERSION", "HOSTNAME_1 = HOSTNAME_2 = HOSTNAME_3 = socket.gethostname() USE_SSL = False ICAT_HOSTNAME = socket.gethostname()", "0) RODSUSER_NAME_PASSWORD_LIST = [('zonehopper', '<PASSWORD>')] RODSADMIN_NAME_PASSWORD_LIST = [] IRODS_DIR = '/var/lib/irods/iRODS' LOCAL_ZONE =", "= 'dev' REMOTE_ZONE = 'buntest' REMOTE_HOST = 'buntest' REMOTE_RESOURCE = 'demoResc' REMOTE_VAULT =", "= HOSTNAME_2 = HOSTNAME_3 = socket.gethostname() USE_SSL = False ICAT_HOSTNAME = socket.gethostname() PREEXISTING_ADMIN_PASSWORD", "2, 0) REMOTE_IRODS_VERSION = (4, 2, 0) RODSUSER_NAME_PASSWORD_LIST = [('zonehopper', '<PASSWORD>')] RODSADMIN_NAME_PASSWORD_LIST =", "socket.gethostname() USE_SSL = False ICAT_HOSTNAME = socket.gethostname() PREEXISTING_ADMIN_PASSWORD = '<PASSWORD>' # TODO: allow", "IRODS_DIR = '/var/lib/irods/iRODS' LOCAL_ZONE = 'dev' REMOTE_ZONE = 'buntest' REMOTE_HOST = 'buntest' REMOTE_RESOURCE", "import socket import os RUN_IN_TOPOLOGY = False TOPOLOGY_FROM_RESOURCE_SERVER = False HOSTNAME_1 = HOSTNAME_2", "False TOPOLOGY_FROM_RESOURCE_SERVER = False HOSTNAME_1 = HOSTNAME_2 = HOSTNAME_3 = socket.gethostname() USE_SSL =", "socket.gethostname() PREEXISTING_ADMIN_PASSWORD = '<PASSWORD>' # TODO: allow for arbitrary number of remote zones", "'buntest' REMOTE_HOST = 'buntest' REMOTE_RESOURCE = 'demoResc' REMOTE_VAULT = '/var/lib/irods/iRODS/Vault' TEST_FILE_SIZE = 4*1024*1024", "0) 
REMOTE_IRODS_VERSION = (4, 2, 0) RODSUSER_NAME_PASSWORD_LIST = [('zonehopper', '<PASSWORD>')] RODSADMIN_NAME_PASSWORD_LIST = []", "HOSTNAME_3 = socket.gethostname() USE_SSL = False ICAT_HOSTNAME = socket.gethostname() PREEXISTING_ADMIN_PASSWORD = '<PASSWORD>' #", "REMOTE_VAULT = '/var/lib/irods/iRODS/Vault' TEST_FILE_SIZE = 4*1024*1024 LARGE_FILE_SIZE = 64*1024*1024 TEST_FILE_COUNT = 300 MAX_THREADS", "= HOSTNAME_3 = socket.gethostname() USE_SSL = False ICAT_HOSTNAME = socket.gethostname() PREEXISTING_ADMIN_PASSWORD = '<PASSWORD>'", "REMOTE_HOST = 'buntest' REMOTE_RESOURCE = 'demoResc' REMOTE_VAULT = '/var/lib/irods/iRODS/Vault' TEST_FILE_SIZE = 4*1024*1024 LARGE_FILE_SIZE", "[('zonehopper', '<PASSWORD>')] RODSADMIN_NAME_PASSWORD_LIST = [] IRODS_DIR = '/var/lib/irods/iRODS' LOCAL_ZONE = 'dev' REMOTE_ZONE =", "socket import os RUN_IN_TOPOLOGY = False TOPOLOGY_FROM_RESOURCE_SERVER = False HOSTNAME_1 = HOSTNAME_2 =", "FEDERATION(object): LOCAL_IRODS_VERSION = (4, 2, 0) REMOTE_IRODS_VERSION = (4, 2, 0) RODSUSER_NAME_PASSWORD_LIST =", "TODO: allow for arbitrary number of remote zones class FEDERATION(object): LOCAL_IRODS_VERSION = (4,", "2, 0) RODSUSER_NAME_PASSWORD_LIST = [('zonehopper', '<PASSWORD>')] RODSADMIN_NAME_PASSWORD_LIST = [] IRODS_DIR = '/var/lib/irods/iRODS' LOCAL_ZONE", "number of remote zones class FEDERATION(object): LOCAL_IRODS_VERSION = (4, 2, 0) REMOTE_IRODS_VERSION =", "= socket.gethostname() PREEXISTING_ADMIN_PASSWORD = '<PASSWORD>' # TODO: allow for arbitrary number of remote", "<filename>tests/pydevtest/configuration.py import socket import os RUN_IN_TOPOLOGY = False TOPOLOGY_FROM_RESOURCE_SERVER = False HOSTNAME_1 =", "RUN_IN_TOPOLOGY = False TOPOLOGY_FROM_RESOURCE_SERVER = False HOSTNAME_1 = HOSTNAME_2 = HOSTNAME_3 = socket.gethostname()", "for arbitrary number of remote zones class FEDERATION(object): LOCAL_IRODS_VERSION = (4, 2, 0)", "(4, 2, 0) REMOTE_IRODS_VERSION = (4, 2, 0) RODSUSER_NAME_PASSWORD_LIST = [('zonehopper', '<PASSWORD>')] RODSADMIN_NAME_PASSWORD_LIST", "RODSUSER_NAME_PASSWORD_LIST = [('zonehopper', '<PASSWORD>')] RODSADMIN_NAME_PASSWORD_LIST = [] IRODS_DIR = '/var/lib/irods/iRODS' LOCAL_ZONE = 'dev'", "'/var/lib/irods/iRODS/Vault' TEST_FILE_SIZE = 4*1024*1024 LARGE_FILE_SIZE = 64*1024*1024 TEST_FILE_COUNT = 300 MAX_THREADS = 16", "PREEXISTING_ADMIN_PASSWORD = '<PASSWORD>' # TODO: allow for arbitrary number of remote zones class", "allow for arbitrary number of remote zones class FEDERATION(object): LOCAL_IRODS_VERSION = (4, 2,", "zones class FEDERATION(object): LOCAL_IRODS_VERSION = (4, 2, 0) REMOTE_IRODS_VERSION = (4, 2, 0)", "class FEDERATION(object): LOCAL_IRODS_VERSION = (4, 2, 0) REMOTE_IRODS_VERSION = (4, 2, 0) RODSUSER_NAME_PASSWORD_LIST", "'/var/lib/irods/iRODS' LOCAL_ZONE = 'dev' REMOTE_ZONE = 'buntest' REMOTE_HOST = 'buntest' REMOTE_RESOURCE = 'demoResc'", "= False ICAT_HOSTNAME = socket.gethostname() PREEXISTING_ADMIN_PASSWORD = '<PASSWORD>' # TODO: allow for arbitrary", "= '/var/lib/irods/iRODS/Vault' TEST_FILE_SIZE = 4*1024*1024 LARGE_FILE_SIZE = 64*1024*1024 TEST_FILE_COUNT = 300 MAX_THREADS =", "[] IRODS_DIR = '/var/lib/irods/iRODS' LOCAL_ZONE = 'dev' REMOTE_ZONE = 'buntest' REMOTE_HOST = 'buntest'", "remote zones class FEDERATION(object): LOCAL_IRODS_VERSION = (4, 2, 0) REMOTE_IRODS_VERSION = (4, 2,", "= [] IRODS_DIR = '/var/lib/irods/iRODS' LOCAL_ZONE = 'dev' REMOTE_ZONE = 'buntest' REMOTE_HOST =", "os RUN_IN_TOPOLOGY = False TOPOLOGY_FROM_RESOURCE_SERVER = False HOSTNAME_1 = HOSTNAME_2 = HOSTNAME_3 =", "= False 
TOPOLOGY_FROM_RESOURCE_SERVER = False HOSTNAME_1 = HOSTNAME_2 = HOSTNAME_3 = socket.gethostname() USE_SSL", "# TODO: allow for arbitrary number of remote zones class FEDERATION(object): LOCAL_IRODS_VERSION =", "= 'demoResc' REMOTE_VAULT = '/var/lib/irods/iRODS/Vault' TEST_FILE_SIZE = 4*1024*1024 LARGE_FILE_SIZE = 64*1024*1024 TEST_FILE_COUNT =", "'demoResc' REMOTE_VAULT = '/var/lib/irods/iRODS/Vault' TEST_FILE_SIZE = 4*1024*1024 LARGE_FILE_SIZE = 64*1024*1024 TEST_FILE_COUNT = 300", "= 'buntest' REMOTE_HOST = 'buntest' REMOTE_RESOURCE = 'demoResc' REMOTE_VAULT = '/var/lib/irods/iRODS/Vault' TEST_FILE_SIZE =", "= False HOSTNAME_1 = HOSTNAME_2 = HOSTNAME_3 = socket.gethostname() USE_SSL = False ICAT_HOSTNAME", "= (4, 2, 0) REMOTE_IRODS_VERSION = (4, 2, 0) RODSUSER_NAME_PASSWORD_LIST = [('zonehopper', '<PASSWORD>')]", "(4, 2, 0) RODSUSER_NAME_PASSWORD_LIST = [('zonehopper', '<PASSWORD>')] RODSADMIN_NAME_PASSWORD_LIST = [] IRODS_DIR = '/var/lib/irods/iRODS'", "REMOTE_ZONE = 'buntest' REMOTE_HOST = 'buntest' REMOTE_RESOURCE = 'demoResc' REMOTE_VAULT = '/var/lib/irods/iRODS/Vault' TEST_FILE_SIZE", "LOCAL_IRODS_VERSION = (4, 2, 0) REMOTE_IRODS_VERSION = (4, 2, 0) RODSUSER_NAME_PASSWORD_LIST = [('zonehopper',", "REMOTE_IRODS_VERSION = (4, 2, 0) RODSUSER_NAME_PASSWORD_LIST = [('zonehopper', '<PASSWORD>')] RODSADMIN_NAME_PASSWORD_LIST = [] IRODS_DIR", "= [('zonehopper', '<PASSWORD>')] RODSADMIN_NAME_PASSWORD_LIST = [] IRODS_DIR = '/var/lib/irods/iRODS' LOCAL_ZONE = 'dev' REMOTE_ZONE", "RODSADMIN_NAME_PASSWORD_LIST = [] IRODS_DIR = '/var/lib/irods/iRODS' LOCAL_ZONE = 'dev' REMOTE_ZONE = 'buntest' REMOTE_HOST", "import os RUN_IN_TOPOLOGY = False TOPOLOGY_FROM_RESOURCE_SERVER = False HOSTNAME_1 = HOSTNAME_2 = HOSTNAME_3", "'buntest' REMOTE_RESOURCE = 'demoResc' REMOTE_VAULT = '/var/lib/irods/iRODS/Vault' TEST_FILE_SIZE = 4*1024*1024 LARGE_FILE_SIZE = 64*1024*1024" ]
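# A minimal sketch of how a federation test might consume the settings above,
# assuming the module is importable as `configuration`; the helper name and the
# zone#zone path layout below are illustrative, not part of the original file.
import configuration

def remote_home_collection(user_name):
    # Home collection of a local user inside the federated remote zone,
    # e.g. /buntest/home/zonehopper#dev with the defaults shown above.
    return '/{0}/home/{1}#{2}'.format(configuration.FEDERATION.REMOTE_ZONE,
                                      user_name,
                                      configuration.FEDERATION.LOCAL_ZONE)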
[ "as pd with codecs.open('E:/Demo/python/enum.csv', 'r') as csvfile: jsonfile = open('E:/Demo/python/enum.json','w', encoding='utf-8') key =", "in reader: json.dump(row, jsonfile, sort_keys=True, indent=2, separators=(',', ': '), ensure_ascii=False) jsonfile.write('\\n') jsonfile.close() csvfile.close()", "key = pd.read_csv('E:/Demo/python/enum.csv', encoding='gbk') fieldnames1 = key.columns keys = tuple(fieldnames1) reader = csv.DictReader(csvfile,", "pd with codecs.open('E:/Demo/python/enum.csv', 'r') as csvfile: jsonfile = open('E:/Demo/python/enum.json','w', encoding='utf-8') key = pd.read_csv('E:/Demo/python/enum.csv',", "pd.read_csv('E:/Demo/python/enum.csv', encoding='gbk') fieldnames1 = key.columns keys = tuple(fieldnames1) reader = csv.DictReader(csvfile, keys) for", "import json import codecs import pandas as pd with codecs.open('E:/Demo/python/enum.csv', 'r') as csvfile:", "json import codecs import pandas as pd with codecs.open('E:/Demo/python/enum.csv', 'r') as csvfile: jsonfile", "csvfile: jsonfile = open('E:/Demo/python/enum.json','w', encoding='utf-8') key = pd.read_csv('E:/Demo/python/enum.csv', encoding='gbk') fieldnames1 = key.columns keys", "row in reader: json.dump(row, jsonfile, sort_keys=True, indent=2, separators=(',', ': '), ensure_ascii=False) jsonfile.write('\\n') jsonfile.close()", "csv.DictReader(csvfile, keys) for row in reader: json.dump(row, jsonfile, sort_keys=True, indent=2, separators=(',', ': '),", "keys) for row in reader: json.dump(row, jsonfile, sort_keys=True, indent=2, separators=(',', ': '), ensure_ascii=False)", "reader = csv.DictReader(csvfile, keys) for row in reader: json.dump(row, jsonfile, sort_keys=True, indent=2, separators=(',',", "as csvfile: jsonfile = open('E:/Demo/python/enum.json','w', encoding='utf-8') key = pd.read_csv('E:/Demo/python/enum.csv', encoding='gbk') fieldnames1 = key.columns", "encoding='utf-8') key = pd.read_csv('E:/Demo/python/enum.csv', encoding='gbk') fieldnames1 = key.columns keys = tuple(fieldnames1) reader =", "for row in reader: json.dump(row, jsonfile, sort_keys=True, indent=2, separators=(',', ': '), ensure_ascii=False) jsonfile.write('\\n')", "codecs import pandas as pd with codecs.open('E:/Demo/python/enum.csv', 'r') as csvfile: jsonfile = open('E:/Demo/python/enum.json','w',", "<filename>python/csvToJson.py import csv import json import codecs import pandas as pd with codecs.open('E:/Demo/python/enum.csv',", "= csv.DictReader(csvfile, keys) for row in reader: json.dump(row, jsonfile, sort_keys=True, indent=2, separators=(',', ':", "= tuple(fieldnames1) reader = csv.DictReader(csvfile, keys) for row in reader: json.dump(row, jsonfile, sort_keys=True,", "= key.columns keys = tuple(fieldnames1) reader = csv.DictReader(csvfile, keys) for row in reader:", "fieldnames1 = key.columns keys = tuple(fieldnames1) reader = csv.DictReader(csvfile, keys) for row in", "import codecs import pandas as pd with codecs.open('E:/Demo/python/enum.csv', 'r') as csvfile: jsonfile =", "keys = tuple(fieldnames1) reader = csv.DictReader(csvfile, keys) for row in reader: json.dump(row, jsonfile,", "import pandas as pd with codecs.open('E:/Demo/python/enum.csv', 'r') as csvfile: jsonfile = open('E:/Demo/python/enum.json','w', encoding='utf-8')", "with codecs.open('E:/Demo/python/enum.csv', 'r') as csvfile: jsonfile = open('E:/Demo/python/enum.json','w', encoding='utf-8') key = pd.read_csv('E:/Demo/python/enum.csv', encoding='gbk')", "csv import json import codecs import pandas as pd with codecs.open('E:/Demo/python/enum.csv', 'r') as", 
"key.columns keys = tuple(fieldnames1) reader = csv.DictReader(csvfile, keys) for row in reader: json.dump(row,", "codecs.open('E:/Demo/python/enum.csv', 'r') as csvfile: jsonfile = open('E:/Demo/python/enum.json','w', encoding='utf-8') key = pd.read_csv('E:/Demo/python/enum.csv', encoding='gbk') fieldnames1", "open('E:/Demo/python/enum.json','w', encoding='utf-8') key = pd.read_csv('E:/Demo/python/enum.csv', encoding='gbk') fieldnames1 = key.columns keys = tuple(fieldnames1) reader", "= open('E:/Demo/python/enum.json','w', encoding='utf-8') key = pd.read_csv('E:/Demo/python/enum.csv', encoding='gbk') fieldnames1 = key.columns keys = tuple(fieldnames1)", "= pd.read_csv('E:/Demo/python/enum.csv', encoding='gbk') fieldnames1 = key.columns keys = tuple(fieldnames1) reader = csv.DictReader(csvfile, keys)", "jsonfile = open('E:/Demo/python/enum.json','w', encoding='utf-8') key = pd.read_csv('E:/Demo/python/enum.csv', encoding='gbk') fieldnames1 = key.columns keys =", "pandas as pd with codecs.open('E:/Demo/python/enum.csv', 'r') as csvfile: jsonfile = open('E:/Demo/python/enum.json','w', encoding='utf-8') key", "tuple(fieldnames1) reader = csv.DictReader(csvfile, keys) for row in reader: json.dump(row, jsonfile, sort_keys=True, indent=2,", "'r') as csvfile: jsonfile = open('E:/Demo/python/enum.json','w', encoding='utf-8') key = pd.read_csv('E:/Demo/python/enum.csv', encoding='gbk') fieldnames1 =", "import csv import json import codecs import pandas as pd with codecs.open('E:/Demo/python/enum.csv', 'r')", "encoding='gbk') fieldnames1 = key.columns keys = tuple(fieldnames1) reader = csv.DictReader(csvfile, keys) for row" ]
[ "send_all_need_update: msg = Msg(content=User.get_all(), type_=MsgType.UPDATE_TABLE) broadcast(msg) elif type_ == MsgType.DEREG: user = User.get_by_name(rcv_msg.from_)", "if Msg.unpack(ack_msg).type_ == MsgType.SEND_ALL_ACK: pass except timeout: user = User.get_by_name(receiver) user.status = \"no\"", "in receivers: thread = threading.Thread(target=send, args=(msg, receiver, )) thread.start() threads.append(thread) for thread in", "Msg.unpack(ack_msg).type_ == MsgType.SEND_ALL_ACK: pass except timeout: user = User.get_by_name(receiver) user.status = \"no\" send_all_need_update", "\"yes\" user.save_or_update() Msg(content=User.get_all(), to=rcv_msg.from_, type_=MsgType.UPDATE_TABLE).send() return except timeout: pass message = Message(rcv_msg.content, rcv_msg.from_,", "user.save_or_update() if existing_user and user.addr != existing_user.addr: Msg(type_=MsgType.LOGOUT, to=rcv_msg.from_, addr=existing_user.addr).send(sock) msg = Msg(content=User.get_all(),", "send(Msg(type_=MsgType.TEST, to=rcv_msg.to), rcv_msg.to) rcv_pkt = test_sock.recv(2048) if Msg.unpack(rcv_pkt).type_ == MsgType.TEST: Msg(to=rcv_msg.from_, type_=MsgType.USER_EXIST, addr=addr).send(sock)", "msg.to_message(receiver).save() return sock def broadcast(msg: Msg): receivers = msg.get_receiver_list() threads = [] for", "rcv_msg.__dict__) type_ = rcv_msg.type_ if type_ == MsgType.REG: user = User(rcv_msg.from_, addr[0], int(rcv_msg.content))", "def send(msg: Msg, receiver): global send_all_need_update sock = socket(AF_INET, SOCK_DGRAM) sock.settimeout(TIMEOUT) msg.to =", ")) thread.start() threads.append(thread) for thread in threads: thread.join() def server_handle_received_msg(sock, msg, addr): rcv_msg", "existing_user.addr: Msg(type_=MsgType.LOGOUT, to=rcv_msg.from_, addr=existing_user.addr).send(sock) msg = Msg(content=User.get_all(), from_=user.name, type_=MsgType.UPDATE_TABLE) broadcast(msg) Msg(content=User.get_all(), to=user.name, type_=MsgType.UPDATE_TABLE).send(sock)", "from ChatApp.models import Message, User from ChatApp.msg import Msg, MsgType from ChatApp.settings import", "def broadcast(msg: Msg): receivers = msg.get_receiver_list() threads = [] for receiver in receivers:", "receiver): global send_all_need_update sock = socket(AF_INET, SOCK_DGRAM) sock.settimeout(TIMEOUT) msg.to = receiver msg.send(sock) if", "threading from socket import * from ChatApp.models import Message, User from ChatApp.msg import", "Msg(type_=MsgType.STORE_ACK, addr=addr).send(sock) msg = Msg(content=user.__dict__, type_=MsgType.UPDATE_TABLE) broadcast(msg) else: Msg(type_=MsgType.STORE_ACK, addr=addr).send(sock) elif type_ ==", "type_=MsgType.UPDATE_TABLE) broadcast(msg) Msg(content=User.get_all(), to=user.name, type_=MsgType.UPDATE_TABLE).send(sock) elif type_ == MsgType.STORE: try: test_sock = send(Msg(type_=MsgType.TEST,", "addr=addr).send(sock) msg = Msg(content=user.__dict__, type_=MsgType.UPDATE_TABLE) broadcast(msg) else: Msg(type_=MsgType.STORE_ACK, addr=addr).send(sock) elif type_ == MsgType.SEND_ALL:", "[] for receiver in receivers: thread = threading.Thread(target=send, args=(msg, receiver, )) thread.start() threads.append(thread)", "!= existing_user.addr: Msg(type_=MsgType.LOGOUT, to=rcv_msg.from_, addr=existing_user.addr).send(sock) msg = Msg(content=User.get_all(), from_=user.name, type_=MsgType.UPDATE_TABLE) broadcast(msg) Msg(content=User.get_all(), to=user.name,", "type_=MsgType.UPDATE_TABLE) broadcast(msg) elif type_ == MsgType.DEREG: user = User.get_by_name(rcv_msg.from_) user.status = \"no\" user.save_or_update()", 
"rcv_msg.to, type_=\"send\", timestamp=get_timestamp()) message.save() user = User.get_by_name(rcv_msg.to) if user: if user.status == \"yes\":", "Msg(content=User.get_all(), from_=user.name, type_=MsgType.UPDATE_TABLE) broadcast(msg) Msg(content=User.get_all(), to=user.name, type_=MsgType.UPDATE_TABLE).send(sock) elif type_ == MsgType.STORE: try: test_sock", "= socket(AF_INET, SOCK_DGRAM) sock.bind((\"\", port)) while True: msg, addr = sock.recvfrom(2048) server_handle_received_msg(sock, msg,", "user.status = \"yes\" user.save_or_update() Msg(content=User.get_all(), to=rcv_msg.from_, type_=MsgType.UPDATE_TABLE).send() return except timeout: pass message =", "socket(AF_INET, SOCK_DGRAM) sock.bind((\"\", port)) while True: msg, addr = sock.recvfrom(2048) server_handle_received_msg(sock, msg, addr)", "send_all_need_update = True user.save_or_update() msg.to_message(receiver).save() return sock def broadcast(msg: Msg): receivers = msg.get_receiver_list()", "== MsgType.TEST: Msg(to=rcv_msg.from_, type_=MsgType.USER_EXIST, addr=addr).send(sock) user = User.get_by_name(rcv_msg.to) user.status = \"yes\" user.save_or_update() Msg(content=User.get_all(),", "user = User(rcv_msg.from_, addr[0], int(rcv_msg.content)) existing_user = User.get_by_name(rcv_msg.from_) need_update = (existing_user != user)", "if type_ == MsgType.REG: user = User(rcv_msg.from_, addr[0], int(rcv_msg.content)) existing_user = User.get_by_name(rcv_msg.from_) need_update", "type_=MsgType.USER_EXIST, addr=addr).send(sock) user = User.get_by_name(rcv_msg.to) user.status = \"yes\" user.save_or_update() Msg(content=User.get_all(), to=rcv_msg.from_, type_=MsgType.UPDATE_TABLE).send() return", "== MsgType.SEND_ALL: global send_all_need_update send_all_need_update = False Msg(type_=MsgType.SEND_ALL_SERVER_ACK, addr=addr).send(sock) rcv_msg.to_server = False broadcast(rcv_msg)", "Msg(content=user.__dict__, type_=MsgType.UPDATE_TABLE) broadcast(msg) else: Msg(type_=MsgType.STORE_ACK, addr=addr).send(sock) elif type_ == MsgType.SEND_ALL: global send_all_need_update send_all_need_update", "= Message.retrieve_by_name(user.name) or \"\" msg.send(sock) if need_update: user.save_or_update() if existing_user and user.addr !=", "return except timeout: pass message = Message(rcv_msg.content, rcv_msg.from_, rcv_msg.to, type_=\"send\", timestamp=get_timestamp()) message.save() user", "print(addr, rcv_msg.__dict__) type_ = rcv_msg.type_ if type_ == MsgType.REG: user = User(rcv_msg.from_, addr[0],", "receiver, )) thread.start() threads.append(thread) for thread in threads: thread.join() def server_handle_received_msg(sock, msg, addr):", "True user.save_or_update() msg.to_message(receiver).save() return sock def broadcast(msg: Msg): receivers = msg.get_receiver_list() threads =", "type_=\"send\", timestamp=get_timestamp()) message.save() user = User.get_by_name(rcv_msg.to) if user: if user.status == \"yes\": user.status", "server_main(port: int): sock = socket(AF_INET, SOCK_DGRAM) sock.bind((\"\", port)) while True: msg, addr =", "timestamp=get_timestamp()) message.save() user = User.get_by_name(rcv_msg.to) if user: if user.status == \"yes\": user.status =", "user.status = \"no\" user.save_or_update() Msg(type_=MsgType.DEREG_ACK, addr=addr).send(sock) msg = Msg(content=user.__dict__, type_=MsgType.UPDATE_TABLE) broadcast(msg) def server_main(port:", "user.save_or_update() Msg(type_=MsgType.DEREG_ACK, addr=addr).send(sock) msg = Msg(content=user.__dict__, type_=MsgType.UPDATE_TABLE) broadcast(msg) def server_main(port: int): sock =", "msg.content = 
Message.retrieve_by_name(user.name) or \"\" msg.send(sock) if need_update: user.save_or_update() if existing_user and user.addr", "sock def broadcast(msg: Msg): receivers = msg.get_receiver_list() threads = [] for receiver in", "\"\" msg.send(sock) if need_update: user.save_or_update() if existing_user and user.addr != existing_user.addr: Msg(type_=MsgType.LOGOUT, to=rcv_msg.from_,", "ChatApp.models import Message, User from ChatApp.msg import Msg, MsgType from ChatApp.settings import DEBUG,", "addr=existing_user.addr).send(sock) msg = Msg(content=User.get_all(), from_=user.name, type_=MsgType.UPDATE_TABLE) broadcast(msg) Msg(content=User.get_all(), to=user.name, type_=MsgType.UPDATE_TABLE).send(sock) elif type_ ==", "except timeout: pass message = Message(rcv_msg.content, rcv_msg.from_, rcv_msg.to, type_=\"send\", timestamp=get_timestamp()) message.save() user =", "msg = Msg(content=User.get_all(), type_=MsgType.UPDATE_TABLE) broadcast(msg) elif type_ == MsgType.DEREG: user = User.get_by_name(rcv_msg.from_) user.status", "message.save() user = User.get_by_name(rcv_msg.to) if user: if user.status == \"yes\": user.status = \"no\"", "to=rcv_msg.from_, type_=MsgType.UPDATE_TABLE).send() return except timeout: pass message = Message(rcv_msg.content, rcv_msg.from_, rcv_msg.to, type_=\"send\", timestamp=get_timestamp())", "rcv_msg.to_server = False broadcast(rcv_msg) if DEBUG: print(User.get_all_inactive_users()) for user_dict in User.get_all_inactive_users(): rcv_msg.to_message(user_dict.get(\"name\")).save() if", "test_sock = send(Msg(type_=MsgType.TEST, to=rcv_msg.to), rcv_msg.to) rcv_pkt = test_sock.recv(2048) if Msg.unpack(rcv_pkt).type_ == MsgType.TEST: Msg(to=rcv_msg.from_,", "= User.get_by_name(rcv_msg.to) if user: if user.status == \"yes\": user.status = \"no\" user.save_or_update() Msg(type_=MsgType.STORE_ACK,", "type_=MsgType.UPDATE_TABLE).send(sock) elif type_ == MsgType.STORE: try: test_sock = send(Msg(type_=MsgType.TEST, to=rcv_msg.to), rcv_msg.to) rcv_pkt =", "type_=MsgType.UPDATE_TABLE) broadcast(msg) else: Msg(type_=MsgType.STORE_ACK, addr=addr).send(sock) elif type_ == MsgType.SEND_ALL: global send_all_need_update send_all_need_update =", "== \"yes\": user.status = \"no\" user.save_or_update() Msg(type_=MsgType.STORE_ACK, addr=addr).send(sock) msg = Msg(content=user.__dict__, type_=MsgType.UPDATE_TABLE) broadcast(msg)", "MsgType.REG: user = User(rcv_msg.from_, addr[0], int(rcv_msg.content)) existing_user = User.get_by_name(rcv_msg.from_) need_update = (existing_user !=", "= socket(AF_INET, SOCK_DGRAM) sock.settimeout(TIMEOUT) msg.to = receiver msg.send(sock) if msg.type_ == MsgType.UPDATE_TABLE: pass", "msg.get_receiver_list() threads = [] for receiver in receivers: thread = threading.Thread(target=send, args=(msg, receiver,", "threads.append(thread) for thread in threads: thread.join() def server_handle_received_msg(sock, msg, addr): rcv_msg = Msg.unpack(msg)", "rcv_msg.type_ if type_ == MsgType.REG: user = User(rcv_msg.from_, addr[0], int(rcv_msg.content)) existing_user = User.get_by_name(rcv_msg.from_)", "False broadcast(rcv_msg) if DEBUG: print(User.get_all_inactive_users()) for user_dict in User.get_all_inactive_users(): rcv_msg.to_message(user_dict.get(\"name\")).save() if send_all_need_update: msg", "existing_user and user.addr != existing_user.addr: Msg(type_=MsgType.LOGOUT, to=rcv_msg.from_, addr=existing_user.addr).send(sock) msg = Msg(content=User.get_all(), from_=user.name, type_=MsgType.UPDATE_TABLE)", "import DEBUG, TIMEOUT from ChatApp.utils import get_timestamp 
def send(msg: Msg, receiver): global send_all_need_update", "to=rcv_msg.from_, addr=existing_user.addr).send(sock) msg = Msg(content=User.get_all(), from_=user.name, type_=MsgType.UPDATE_TABLE) broadcast(msg) Msg(content=User.get_all(), to=user.name, type_=MsgType.UPDATE_TABLE).send(sock) elif type_", "Msg(content=User.get_all(), to=user.name, type_=MsgType.UPDATE_TABLE).send(sock) elif type_ == MsgType.STORE: try: test_sock = send(Msg(type_=MsgType.TEST, to=rcv_msg.to), rcv_msg.to)", "Message.retrieve_by_name(user.name) or \"\" msg.send(sock) if need_update: user.save_or_update() if existing_user and user.addr != existing_user.addr:", "type_ = rcv_msg.type_ if type_ == MsgType.REG: user = User(rcv_msg.from_, addr[0], int(rcv_msg.content)) existing_user", "broadcast(msg) def server_main(port: int): sock = socket(AF_INET, SOCK_DGRAM) sock.bind((\"\", port)) while True: msg,", "DEBUG: print(addr, rcv_msg.__dict__) type_ = rcv_msg.type_ if type_ == MsgType.REG: user = User(rcv_msg.from_,", "int(rcv_msg.content)) existing_user = User.get_by_name(rcv_msg.from_) need_update = (existing_user != user) msg = Msg(to=rcv_msg.from_, addr=addr)", "type_ == MsgType.REG: user = User(rcv_msg.from_, addr[0], int(rcv_msg.content)) existing_user = User.get_by_name(rcv_msg.from_) need_update =", "sock.settimeout(TIMEOUT) msg.to = receiver msg.send(sock) if msg.type_ == MsgType.UPDATE_TABLE: pass elif msg.type_ ==", "user.status = \"no\" user.save_or_update() Msg(type_=MsgType.STORE_ACK, addr=addr).send(sock) msg = Msg(content=user.__dict__, type_=MsgType.UPDATE_TABLE) broadcast(msg) else: Msg(type_=MsgType.STORE_ACK,", "= False Msg(type_=MsgType.SEND_ALL_SERVER_ACK, addr=addr).send(sock) rcv_msg.to_server = False broadcast(rcv_msg) if DEBUG: print(User.get_all_inactive_users()) for user_dict", "user = User.get_by_name(rcv_msg.to) user.status = \"yes\" user.save_or_update() Msg(content=User.get_all(), to=rcv_msg.from_, type_=MsgType.UPDATE_TABLE).send() return except timeout:", "= msg.get_receiver_list() threads = [] for receiver in receivers: thread = threading.Thread(target=send, args=(msg,", "if existing_user == None: msg.type_ = MsgType.CREATED else: msg.type_ = MsgType.REG_ACK msg.content =", "= User.get_by_name(rcv_msg.from_) user.status = \"no\" user.save_or_update() Msg(type_=MsgType.DEREG_ACK, addr=addr).send(sock) msg = Msg(content=user.__dict__, type_=MsgType.UPDATE_TABLE) broadcast(msg)", "if send_all_need_update: msg = Msg(content=User.get_all(), type_=MsgType.UPDATE_TABLE) broadcast(msg) elif type_ == MsgType.DEREG: user =", "User.get_by_name(rcv_msg.from_) need_update = (existing_user != user) msg = Msg(to=rcv_msg.from_, addr=addr) if existing_user ==", "from ChatApp.settings import DEBUG, TIMEOUT from ChatApp.utils import get_timestamp def send(msg: Msg, receiver):", "except timeout: user = User.get_by_name(receiver) user.status = \"no\" send_all_need_update = True user.save_or_update() msg.to_message(receiver).save()", "Msg, MsgType from ChatApp.settings import DEBUG, TIMEOUT from ChatApp.utils import get_timestamp def send(msg:", "= Msg(content=User.get_all(), from_=user.name, type_=MsgType.UPDATE_TABLE) broadcast(msg) Msg(content=User.get_all(), to=user.name, type_=MsgType.UPDATE_TABLE).send(sock) elif type_ == MsgType.STORE: try:", "import threading from socket import * from ChatApp.models import Message, User from ChatApp.msg", "= send(Msg(type_=MsgType.TEST, to=rcv_msg.to), rcv_msg.to) rcv_pkt = test_sock.recv(2048) if Msg.unpack(rcv_pkt).type_ == MsgType.TEST: Msg(to=rcv_msg.from_, 
type_=MsgType.USER_EXIST,", "(existing_user != user) msg = Msg(to=rcv_msg.from_, addr=addr) if existing_user == None: msg.type_ =", "if need_update: user.save_or_update() if existing_user and user.addr != existing_user.addr: Msg(type_=MsgType.LOGOUT, to=rcv_msg.from_, addr=existing_user.addr).send(sock) msg", "<reponame>xckomorebi/ChatApp<gh_stars>0 import threading from socket import * from ChatApp.models import Message, User from", "receiver msg.send(sock) if msg.type_ == MsgType.UPDATE_TABLE: pass elif msg.type_ == MsgType.SEND_ALL: try: ack_msg", "ack_msg = sock.recv(2048) if Msg.unpack(ack_msg).type_ == MsgType.SEND_ALL_ACK: pass except timeout: user = User.get_by_name(receiver)", "in threads: thread.join() def server_handle_received_msg(sock, msg, addr): rcv_msg = Msg.unpack(msg) if DEBUG: print(addr,", "addr[0], int(rcv_msg.content)) existing_user = User.get_by_name(rcv_msg.from_) need_update = (existing_user != user) msg = Msg(to=rcv_msg.from_,", "from ChatApp.utils import get_timestamp def send(msg: Msg, receiver): global send_all_need_update sock = socket(AF_INET,", "msg = Msg(content=User.get_all(), from_=user.name, type_=MsgType.UPDATE_TABLE) broadcast(msg) Msg(content=User.get_all(), to=user.name, type_=MsgType.UPDATE_TABLE).send(sock) elif type_ == MsgType.STORE:", "from ChatApp.msg import Msg, MsgType from ChatApp.settings import DEBUG, TIMEOUT from ChatApp.utils import", "to=user.name, type_=MsgType.UPDATE_TABLE).send(sock) elif type_ == MsgType.STORE: try: test_sock = send(Msg(type_=MsgType.TEST, to=rcv_msg.to), rcv_msg.to) rcv_pkt", "type_=MsgType.UPDATE_TABLE).send() return except timeout: pass message = Message(rcv_msg.content, rcv_msg.from_, rcv_msg.to, type_=\"send\", timestamp=get_timestamp()) message.save()", "broadcast(rcv_msg) if DEBUG: print(User.get_all_inactive_users()) for user_dict in User.get_all_inactive_users(): rcv_msg.to_message(user_dict.get(\"name\")).save() if send_all_need_update: msg =", "if existing_user and user.addr != existing_user.addr: Msg(type_=MsgType.LOGOUT, to=rcv_msg.from_, addr=existing_user.addr).send(sock) msg = Msg(content=User.get_all(), from_=user.name,", "pass except timeout: user = User.get_by_name(receiver) user.status = \"no\" send_all_need_update = True user.save_or_update()", "msg.type_ = MsgType.REG_ACK msg.content = Message.retrieve_by_name(user.name) or \"\" msg.send(sock) if need_update: user.save_or_update() if", "= \"no\" user.save_or_update() Msg(type_=MsgType.DEREG_ACK, addr=addr).send(sock) msg = Msg(content=user.__dict__, type_=MsgType.UPDATE_TABLE) broadcast(msg) def server_main(port: int):", "Msg(content=user.__dict__, type_=MsgType.UPDATE_TABLE) broadcast(msg) def server_main(port: int): sock = socket(AF_INET, SOCK_DGRAM) sock.bind((\"\", port)) while", "if msg.type_ == MsgType.UPDATE_TABLE: pass elif msg.type_ == MsgType.SEND_ALL: try: ack_msg = sock.recv(2048)", "Msg): receivers = msg.get_receiver_list() threads = [] for receiver in receivers: thread =", "type_ == MsgType.SEND_ALL: global send_all_need_update send_all_need_update = False Msg(type_=MsgType.SEND_ALL_SERVER_ACK, addr=addr).send(sock) rcv_msg.to_server = False", "addr=addr).send(sock) msg = Msg(content=user.__dict__, type_=MsgType.UPDATE_TABLE) broadcast(msg) def server_main(port: int): sock = socket(AF_INET, SOCK_DGRAM)", "to=rcv_msg.to), rcv_msg.to) rcv_pkt = test_sock.recv(2048) if Msg.unpack(rcv_pkt).type_ == MsgType.TEST: Msg(to=rcv_msg.from_, type_=MsgType.USER_EXIST, addr=addr).send(sock) user", "sock.recv(2048) if Msg.unpack(ack_msg).type_ 
== MsgType.SEND_ALL_ACK: pass except timeout: user = User.get_by_name(receiver) user.status =", "= threading.Thread(target=send, args=(msg, receiver, )) thread.start() threads.append(thread) for thread in threads: thread.join() def", "def server_main(port: int): sock = socket(AF_INET, SOCK_DGRAM) sock.bind((\"\", port)) while True: msg, addr", "= Msg(to=rcv_msg.from_, addr=addr) if existing_user == None: msg.type_ = MsgType.CREATED else: msg.type_ =", "= Msg(content=user.__dict__, type_=MsgType.UPDATE_TABLE) broadcast(msg) else: Msg(type_=MsgType.STORE_ACK, addr=addr).send(sock) elif type_ == MsgType.SEND_ALL: global send_all_need_update", "= User(rcv_msg.from_, addr[0], int(rcv_msg.content)) existing_user = User.get_by_name(rcv_msg.from_) need_update = (existing_user != user) msg", "addr=addr).send(sock) elif type_ == MsgType.SEND_ALL: global send_all_need_update send_all_need_update = False Msg(type_=MsgType.SEND_ALL_SERVER_ACK, addr=addr).send(sock) rcv_msg.to_server", "User.get_by_name(receiver) user.status = \"no\" send_all_need_update = True user.save_or_update() msg.to_message(receiver).save() return sock def broadcast(msg:", "receivers = msg.get_receiver_list() threads = [] for receiver in receivers: thread = threading.Thread(target=send,", "\"no\" user.save_or_update() Msg(type_=MsgType.STORE_ACK, addr=addr).send(sock) msg = Msg(content=user.__dict__, type_=MsgType.UPDATE_TABLE) broadcast(msg) else: Msg(type_=MsgType.STORE_ACK, addr=addr).send(sock) elif", "import Message, User from ChatApp.msg import Msg, MsgType from ChatApp.settings import DEBUG, TIMEOUT", "broadcast(msg) else: Msg(type_=MsgType.STORE_ACK, addr=addr).send(sock) elif type_ == MsgType.SEND_ALL: global send_all_need_update send_all_need_update = False", "user_dict in User.get_all_inactive_users(): rcv_msg.to_message(user_dict.get(\"name\")).save() if send_all_need_update: msg = Msg(content=User.get_all(), type_=MsgType.UPDATE_TABLE) broadcast(msg) elif type_", "MsgType.DEREG: user = User.get_by_name(rcv_msg.from_) user.status = \"no\" user.save_or_update() Msg(type_=MsgType.DEREG_ACK, addr=addr).send(sock) msg = Msg(content=user.__dict__,", "user) msg = Msg(to=rcv_msg.from_, addr=addr) if existing_user == None: msg.type_ = MsgType.CREATED else:", "User.get_all_inactive_users(): rcv_msg.to_message(user_dict.get(\"name\")).save() if send_all_need_update: msg = Msg(content=User.get_all(), type_=MsgType.UPDATE_TABLE) broadcast(msg) elif type_ == MsgType.DEREG:", "ChatApp.utils import get_timestamp def send(msg: Msg, receiver): global send_all_need_update sock = socket(AF_INET, SOCK_DGRAM)", "MsgType.UPDATE_TABLE: pass elif msg.type_ == MsgType.SEND_ALL: try: ack_msg = sock.recv(2048) if Msg.unpack(ack_msg).type_ ==", "get_timestamp def send(msg: Msg, receiver): global send_all_need_update sock = socket(AF_INET, SOCK_DGRAM) sock.settimeout(TIMEOUT) msg.to", "== None: msg.type_ = MsgType.CREATED else: msg.type_ = MsgType.REG_ACK msg.content = Message.retrieve_by_name(user.name) or", "SOCK_DGRAM) sock.settimeout(TIMEOUT) msg.to = receiver msg.send(sock) if msg.type_ == MsgType.UPDATE_TABLE: pass elif msg.type_", "elif type_ == MsgType.STORE: try: test_sock = send(Msg(type_=MsgType.TEST, to=rcv_msg.to), rcv_msg.to) rcv_pkt = test_sock.recv(2048)", "msg.to = receiver msg.send(sock) if msg.type_ == MsgType.UPDATE_TABLE: pass elif msg.type_ == MsgType.SEND_ALL:", "MsgType from ChatApp.settings import DEBUG, TIMEOUT from ChatApp.utils import get_timestamp def send(msg: Msg,", "import get_timestamp def send(msg: Msg, 
receiver): global send_all_need_update sock = socket(AF_INET, SOCK_DGRAM) sock.settimeout(TIMEOUT)", "= sock.recv(2048) if Msg.unpack(ack_msg).type_ == MsgType.SEND_ALL_ACK: pass except timeout: user = User.get_by_name(receiver) user.status", "Msg.unpack(rcv_pkt).type_ == MsgType.TEST: Msg(to=rcv_msg.from_, type_=MsgType.USER_EXIST, addr=addr).send(sock) user = User.get_by_name(rcv_msg.to) user.status = \"yes\" user.save_or_update()", "if user: if user.status == \"yes\": user.status = \"no\" user.save_or_update() Msg(type_=MsgType.STORE_ACK, addr=addr).send(sock) msg", "msg = Msg(content=user.__dict__, type_=MsgType.UPDATE_TABLE) broadcast(msg) def server_main(port: int): sock = socket(AF_INET, SOCK_DGRAM) sock.bind((\"\",", "None: msg.type_ = MsgType.CREATED else: msg.type_ = MsgType.REG_ACK msg.content = Message.retrieve_by_name(user.name) or \"\"", "MsgType.REG_ACK msg.content = Message.retrieve_by_name(user.name) or \"\" msg.send(sock) if need_update: user.save_or_update() if existing_user and", "= \"yes\" user.save_or_update() Msg(content=User.get_all(), to=rcv_msg.from_, type_=MsgType.UPDATE_TABLE).send() return except timeout: pass message = Message(rcv_msg.content,", "user.status = \"no\" send_all_need_update = True user.save_or_update() msg.to_message(receiver).save() return sock def broadcast(msg: Msg):", "int): sock = socket(AF_INET, SOCK_DGRAM) sock.bind((\"\", port)) while True: msg, addr = sock.recvfrom(2048)", "timeout: pass message = Message(rcv_msg.content, rcv_msg.from_, rcv_msg.to, type_=\"send\", timestamp=get_timestamp()) message.save() user = User.get_by_name(rcv_msg.to)", "broadcast(msg: Msg): receivers = msg.get_receiver_list() threads = [] for receiver in receivers: thread", "user = User.get_by_name(rcv_msg.to) if user: if user.status == \"yes\": user.status = \"no\" user.save_or_update()", "Msg(to=rcv_msg.from_, addr=addr) if existing_user == None: msg.type_ = MsgType.CREATED else: msg.type_ = MsgType.REG_ACK", "TIMEOUT from ChatApp.utils import get_timestamp def send(msg: Msg, receiver): global send_all_need_update sock =", "receiver in receivers: thread = threading.Thread(target=send, args=(msg, receiver, )) thread.start() threads.append(thread) for thread", "= True user.save_or_update() msg.to_message(receiver).save() return sock def broadcast(msg: Msg): receivers = msg.get_receiver_list() threads", "thread.join() def server_handle_received_msg(sock, msg, addr): rcv_msg = Msg.unpack(msg) if DEBUG: print(addr, rcv_msg.__dict__) type_", "= \"no\" send_all_need_update = True user.save_or_update() msg.to_message(receiver).save() return sock def broadcast(msg: Msg): receivers", "= User.get_by_name(rcv_msg.to) user.status = \"yes\" user.save_or_update() Msg(content=User.get_all(), to=rcv_msg.from_, type_=MsgType.UPDATE_TABLE).send() return except timeout: pass", "message = Message(rcv_msg.content, rcv_msg.from_, rcv_msg.to, type_=\"send\", timestamp=get_timestamp()) message.save() user = User.get_by_name(rcv_msg.to) if user:", "import Msg, MsgType from ChatApp.settings import DEBUG, TIMEOUT from ChatApp.utils import get_timestamp def", "User(rcv_msg.from_, addr[0], int(rcv_msg.content)) existing_user = User.get_by_name(rcv_msg.from_) need_update = (existing_user != user) msg =", "\"yes\": user.status = \"no\" user.save_or_update() Msg(type_=MsgType.STORE_ACK, addr=addr).send(sock) msg = Msg(content=user.__dict__, type_=MsgType.UPDATE_TABLE) broadcast(msg) else:", "ChatApp.settings import DEBUG, TIMEOUT from ChatApp.utils import get_timestamp def send(msg: Msg, 
receiver): global", "msg = Msg(to=rcv_msg.from_, addr=addr) if existing_user == None: msg.type_ = MsgType.CREATED else: msg.type_", "send_all_need_update = False Msg(type_=MsgType.SEND_ALL_SERVER_ACK, addr=addr).send(sock) rcv_msg.to_server = False broadcast(rcv_msg) if DEBUG: print(User.get_all_inactive_users()) for", "rcv_msg = Msg.unpack(msg) if DEBUG: print(addr, rcv_msg.__dict__) type_ = rcv_msg.type_ if type_ ==", "rcv_pkt = test_sock.recv(2048) if Msg.unpack(rcv_pkt).type_ == MsgType.TEST: Msg(to=rcv_msg.from_, type_=MsgType.USER_EXIST, addr=addr).send(sock) user = User.get_by_name(rcv_msg.to)", "Message, User from ChatApp.msg import Msg, MsgType from ChatApp.settings import DEBUG, TIMEOUT from", "ChatApp.msg import Msg, MsgType from ChatApp.settings import DEBUG, TIMEOUT from ChatApp.utils import get_timestamp", "test_sock.recv(2048) if Msg.unpack(rcv_pkt).type_ == MsgType.TEST: Msg(to=rcv_msg.from_, type_=MsgType.USER_EXIST, addr=addr).send(sock) user = User.get_by_name(rcv_msg.to) user.status =", "global send_all_need_update send_all_need_update = False Msg(type_=MsgType.SEND_ALL_SERVER_ACK, addr=addr).send(sock) rcv_msg.to_server = False broadcast(rcv_msg) if DEBUG:", "from_=user.name, type_=MsgType.UPDATE_TABLE) broadcast(msg) Msg(content=User.get_all(), to=user.name, type_=MsgType.UPDATE_TABLE).send(sock) elif type_ == MsgType.STORE: try: test_sock =", "msg.send(sock) if need_update: user.save_or_update() if existing_user and user.addr != existing_user.addr: Msg(type_=MsgType.LOGOUT, to=rcv_msg.from_, addr=existing_user.addr).send(sock)", "User.get_by_name(rcv_msg.from_) user.status = \"no\" user.save_or_update() Msg(type_=MsgType.DEREG_ACK, addr=addr).send(sock) msg = Msg(content=user.__dict__, type_=MsgType.UPDATE_TABLE) broadcast(msg) def", "server_handle_received_msg(sock, msg, addr): rcv_msg = Msg.unpack(msg) if DEBUG: print(addr, rcv_msg.__dict__) type_ = rcv_msg.type_", "DEBUG, TIMEOUT from ChatApp.utils import get_timestamp def send(msg: Msg, receiver): global send_all_need_update sock", "= (existing_user != user) msg = Msg(to=rcv_msg.from_, addr=addr) if existing_user == None: msg.type_", "addr=addr) if existing_user == None: msg.type_ = MsgType.CREATED else: msg.type_ = MsgType.REG_ACK msg.content", "MsgType.TEST: Msg(to=rcv_msg.from_, type_=MsgType.USER_EXIST, addr=addr).send(sock) user = User.get_by_name(rcv_msg.to) user.status = \"yes\" user.save_or_update() Msg(content=User.get_all(), to=rcv_msg.from_,", "in User.get_all_inactive_users(): rcv_msg.to_message(user_dict.get(\"name\")).save() if send_all_need_update: msg = Msg(content=User.get_all(), type_=MsgType.UPDATE_TABLE) broadcast(msg) elif type_ ==", "!= user) msg = Msg(to=rcv_msg.from_, addr=addr) if existing_user == None: msg.type_ = MsgType.CREATED", "timeout: user = User.get_by_name(receiver) user.status = \"no\" send_all_need_update = True user.save_or_update() msg.to_message(receiver).save() return", "False Msg(type_=MsgType.SEND_ALL_SERVER_ACK, addr=addr).send(sock) rcv_msg.to_server = False broadcast(rcv_msg) if DEBUG: print(User.get_all_inactive_users()) for user_dict in", "elif type_ == MsgType.DEREG: user = User.get_by_name(rcv_msg.from_) user.status = \"no\" user.save_or_update() Msg(type_=MsgType.DEREG_ACK, addr=addr).send(sock)", "MsgType.CREATED else: msg.type_ = MsgType.REG_ACK msg.content = Message.retrieve_by_name(user.name) or \"\" msg.send(sock) if need_update:", "args=(msg, receiver, )) thread.start() threads.append(thread) for thread in threads: thread.join() def 
server_handle_received_msg(sock, msg,", "sock = socket(AF_INET, SOCK_DGRAM) sock.settimeout(TIMEOUT) msg.to = receiver msg.send(sock) if msg.type_ == MsgType.UPDATE_TABLE:", "elif msg.type_ == MsgType.SEND_ALL: try: ack_msg = sock.recv(2048) if Msg.unpack(ack_msg).type_ == MsgType.SEND_ALL_ACK: pass", "Msg, receiver): global send_all_need_update sock = socket(AF_INET, SOCK_DGRAM) sock.settimeout(TIMEOUT) msg.to = receiver msg.send(sock)", "= User.get_by_name(rcv_msg.from_) need_update = (existing_user != user) msg = Msg(to=rcv_msg.from_, addr=addr) if existing_user", "Msg(type_=MsgType.SEND_ALL_SERVER_ACK, addr=addr).send(sock) rcv_msg.to_server = False broadcast(rcv_msg) if DEBUG: print(User.get_all_inactive_users()) for user_dict in User.get_all_inactive_users():", "== MsgType.SEND_ALL: try: ack_msg = sock.recv(2048) if Msg.unpack(ack_msg).type_ == MsgType.SEND_ALL_ACK: pass except timeout:", "elif type_ == MsgType.SEND_ALL: global send_all_need_update send_all_need_update = False Msg(type_=MsgType.SEND_ALL_SERVER_ACK, addr=addr).send(sock) rcv_msg.to_server =", "= Msg(content=user.__dict__, type_=MsgType.UPDATE_TABLE) broadcast(msg) def server_main(port: int): sock = socket(AF_INET, SOCK_DGRAM) sock.bind((\"\", port))", "thread.start() threads.append(thread) for thread in threads: thread.join() def server_handle_received_msg(sock, msg, addr): rcv_msg =", "msg.type_ == MsgType.SEND_ALL: try: ack_msg = sock.recv(2048) if Msg.unpack(ack_msg).type_ == MsgType.SEND_ALL_ACK: pass except", "pass elif msg.type_ == MsgType.SEND_ALL: try: ack_msg = sock.recv(2048) if Msg.unpack(ack_msg).type_ == MsgType.SEND_ALL_ACK:", "Msg(type_=MsgType.STORE_ACK, addr=addr).send(sock) elif type_ == MsgType.SEND_ALL: global send_all_need_update send_all_need_update = False Msg(type_=MsgType.SEND_ALL_SERVER_ACK, addr=addr).send(sock)", "= receiver msg.send(sock) if msg.type_ == MsgType.UPDATE_TABLE: pass elif msg.type_ == MsgType.SEND_ALL: try:", "User.get_by_name(rcv_msg.to) user.status = \"yes\" user.save_or_update() Msg(content=User.get_all(), to=rcv_msg.from_, type_=MsgType.UPDATE_TABLE).send() return except timeout: pass message", "Msg(type_=MsgType.DEREG_ACK, addr=addr).send(sock) msg = Msg(content=user.__dict__, type_=MsgType.UPDATE_TABLE) broadcast(msg) def server_main(port: int): sock = socket(AF_INET,", "= Message(rcv_msg.content, rcv_msg.from_, rcv_msg.to, type_=\"send\", timestamp=get_timestamp()) message.save() user = User.get_by_name(rcv_msg.to) if user: if", "return sock def broadcast(msg: Msg): receivers = msg.get_receiver_list() threads = [] for receiver", "\"no\" send_all_need_update = True user.save_or_update() msg.to_message(receiver).save() return sock def broadcast(msg: Msg): receivers =", "Message(rcv_msg.content, rcv_msg.from_, rcv_msg.to, type_=\"send\", timestamp=get_timestamp()) message.save() user = User.get_by_name(rcv_msg.to) if user: if user.status", "threading.Thread(target=send, args=(msg, receiver, )) thread.start() threads.append(thread) for thread in threads: thread.join() def server_handle_received_msg(sock,", "MsgType.SEND_ALL: try: ack_msg = sock.recv(2048) if Msg.unpack(ack_msg).type_ == MsgType.SEND_ALL_ACK: pass except timeout: user", "send_all_need_update send_all_need_update = False Msg(type_=MsgType.SEND_ALL_SERVER_ACK, addr=addr).send(sock) rcv_msg.to_server = False broadcast(rcv_msg) if DEBUG: print(User.get_all_inactive_users())", "== MsgType.REG: user = User(rcv_msg.from_, addr[0], int(rcv_msg.content)) existing_user = User.get_by_name(rcv_msg.from_) need_update = 
(existing_user", "Msg(type_=MsgType.LOGOUT, to=rcv_msg.from_, addr=existing_user.addr).send(sock) msg = Msg(content=User.get_all(), from_=user.name, type_=MsgType.UPDATE_TABLE) broadcast(msg) Msg(content=User.get_all(), to=user.name, type_=MsgType.UPDATE_TABLE).send(sock) elif", "else: Msg(type_=MsgType.STORE_ACK, addr=addr).send(sock) elif type_ == MsgType.SEND_ALL: global send_all_need_update send_all_need_update = False Msg(type_=MsgType.SEND_ALL_SERVER_ACK,", "receivers: thread = threading.Thread(target=send, args=(msg, receiver, )) thread.start() threads.append(thread) for thread in threads:", "* from ChatApp.models import Message, User from ChatApp.msg import Msg, MsgType from ChatApp.settings", "DEBUG: print(User.get_all_inactive_users()) for user_dict in User.get_all_inactive_users(): rcv_msg.to_message(user_dict.get(\"name\")).save() if send_all_need_update: msg = Msg(content=User.get_all(), type_=MsgType.UPDATE_TABLE)", "socket import * from ChatApp.models import Message, User from ChatApp.msg import Msg, MsgType", "MsgType.STORE: try: test_sock = send(Msg(type_=MsgType.TEST, to=rcv_msg.to), rcv_msg.to) rcv_pkt = test_sock.recv(2048) if Msg.unpack(rcv_pkt).type_ ==", "= Msg.unpack(msg) if DEBUG: print(addr, rcv_msg.__dict__) type_ = rcv_msg.type_ if type_ == MsgType.REG:", "msg.type_ == MsgType.UPDATE_TABLE: pass elif msg.type_ == MsgType.SEND_ALL: try: ack_msg = sock.recv(2048) if", "msg = Msg(content=user.__dict__, type_=MsgType.UPDATE_TABLE) broadcast(msg) else: Msg(type_=MsgType.STORE_ACK, addr=addr).send(sock) elif type_ == MsgType.SEND_ALL: global", "if user.status == \"yes\": user.status = \"no\" user.save_or_update() Msg(type_=MsgType.STORE_ACK, addr=addr).send(sock) msg = Msg(content=user.__dict__,", "user.save_or_update() msg.to_message(receiver).save() return sock def broadcast(msg: Msg): receivers = msg.get_receiver_list() threads = []", "User.get_by_name(rcv_msg.to) if user: if user.status == \"yes\": user.status = \"no\" user.save_or_update() Msg(type_=MsgType.STORE_ACK, addr=addr).send(sock)", "= [] for receiver in receivers: thread = threading.Thread(target=send, args=(msg, receiver, )) thread.start()", "if DEBUG: print(User.get_all_inactive_users()) for user_dict in User.get_all_inactive_users(): rcv_msg.to_message(user_dict.get(\"name\")).save() if send_all_need_update: msg = Msg(content=User.get_all(),", "Msg(content=User.get_all(), type_=MsgType.UPDATE_TABLE) broadcast(msg) elif type_ == MsgType.DEREG: user = User.get_by_name(rcv_msg.from_) user.status = \"no\"", "user.status == \"yes\": user.status = \"no\" user.save_or_update() Msg(type_=MsgType.STORE_ACK, addr=addr).send(sock) msg = Msg(content=user.__dict__, type_=MsgType.UPDATE_TABLE)", "if DEBUG: print(addr, rcv_msg.__dict__) type_ = rcv_msg.type_ if type_ == MsgType.REG: user =", "existing_user = User.get_by_name(rcv_msg.from_) need_update = (existing_user != user) msg = Msg(to=rcv_msg.from_, addr=addr) if", "addr=addr).send(sock) rcv_msg.to_server = False broadcast(rcv_msg) if DEBUG: print(User.get_all_inactive_users()) for user_dict in User.get_all_inactive_users(): rcv_msg.to_message(user_dict.get(\"name\")).save()", "= \"no\" user.save_or_update() Msg(type_=MsgType.STORE_ACK, addr=addr).send(sock) msg = Msg(content=user.__dict__, type_=MsgType.UPDATE_TABLE) broadcast(msg) else: Msg(type_=MsgType.STORE_ACK, addr=addr).send(sock)", "rcv_msg.to_message(user_dict.get(\"name\")).save() if send_all_need_update: msg = Msg(content=User.get_all(), type_=MsgType.UPDATE_TABLE) broadcast(msg) elif type_ == 
MsgType.DEREG: user", "def server_handle_received_msg(sock, msg, addr): rcv_msg = Msg.unpack(msg) if DEBUG: print(addr, rcv_msg.__dict__) type_ =", "Msg(content=User.get_all(), to=rcv_msg.from_, type_=MsgType.UPDATE_TABLE).send() return except timeout: pass message = Message(rcv_msg.content, rcv_msg.from_, rcv_msg.to, type_=\"send\",", "user = User.get_by_name(rcv_msg.from_) user.status = \"no\" user.save_or_update() Msg(type_=MsgType.DEREG_ACK, addr=addr).send(sock) msg = Msg(content=user.__dict__, type_=MsgType.UPDATE_TABLE)", "== MsgType.UPDATE_TABLE: pass elif msg.type_ == MsgType.SEND_ALL: try: ack_msg = sock.recv(2048) if Msg.unpack(ack_msg).type_", "for receiver in receivers: thread = threading.Thread(target=send, args=(msg, receiver, )) thread.start() threads.append(thread) for", "addr): rcv_msg = Msg.unpack(msg) if DEBUG: print(addr, rcv_msg.__dict__) type_ = rcv_msg.type_ if type_", "msg.type_ = MsgType.CREATED else: msg.type_ = MsgType.REG_ACK msg.content = Message.retrieve_by_name(user.name) or \"\" msg.send(sock)", "type_ == MsgType.DEREG: user = User.get_by_name(rcv_msg.from_) user.status = \"no\" user.save_or_update() Msg(type_=MsgType.DEREG_ACK, addr=addr).send(sock) msg", "rcv_msg.to) rcv_pkt = test_sock.recv(2048) if Msg.unpack(rcv_pkt).type_ == MsgType.TEST: Msg(to=rcv_msg.from_, type_=MsgType.USER_EXIST, addr=addr).send(sock) user =", "= User.get_by_name(receiver) user.status = \"no\" send_all_need_update = True user.save_or_update() msg.to_message(receiver).save() return sock def", "print(User.get_all_inactive_users()) for user_dict in User.get_all_inactive_users(): rcv_msg.to_message(user_dict.get(\"name\")).save() if send_all_need_update: msg = Msg(content=User.get_all(), type_=MsgType.UPDATE_TABLE) broadcast(msg)", "pass message = Message(rcv_msg.content, rcv_msg.from_, rcv_msg.to, type_=\"send\", timestamp=get_timestamp()) message.save() user = User.get_by_name(rcv_msg.to) if", "addr=addr).send(sock) user = User.get_by_name(rcv_msg.to) user.status = \"yes\" user.save_or_update() Msg(content=User.get_all(), to=rcv_msg.from_, type_=MsgType.UPDATE_TABLE).send() return except", "\"no\" user.save_or_update() Msg(type_=MsgType.DEREG_ACK, addr=addr).send(sock) msg = Msg(content=user.__dict__, type_=MsgType.UPDATE_TABLE) broadcast(msg) def server_main(port: int): sock", "== MsgType.DEREG: user = User.get_by_name(rcv_msg.from_) user.status = \"no\" user.save_or_update() Msg(type_=MsgType.DEREG_ACK, addr=addr).send(sock) msg =", "try: test_sock = send(Msg(type_=MsgType.TEST, to=rcv_msg.to), rcv_msg.to) rcv_pkt = test_sock.recv(2048) if Msg.unpack(rcv_pkt).type_ == MsgType.TEST:", "rcv_msg.from_, rcv_msg.to, type_=\"send\", timestamp=get_timestamp()) message.save() user = User.get_by_name(rcv_msg.to) if user: if user.status ==", "= test_sock.recv(2048) if Msg.unpack(rcv_pkt).type_ == MsgType.TEST: Msg(to=rcv_msg.from_, type_=MsgType.USER_EXIST, addr=addr).send(sock) user = User.get_by_name(rcv_msg.to) user.status", "= MsgType.REG_ACK msg.content = Message.retrieve_by_name(user.name) or \"\" msg.send(sock) if need_update: user.save_or_update() if existing_user", "try: ack_msg = sock.recv(2048) if Msg.unpack(ack_msg).type_ == MsgType.SEND_ALL_ACK: pass except timeout: user =", "socket(AF_INET, SOCK_DGRAM) sock.settimeout(TIMEOUT) msg.to = receiver msg.send(sock) if msg.type_ == MsgType.UPDATE_TABLE: pass elif", "Msg.unpack(msg) if DEBUG: print(addr, rcv_msg.__dict__) type_ = rcv_msg.type_ if type_ == MsgType.REG: user", "for user_dict in User.get_all_inactive_users(): 
rcv_msg.to_message(user_dict.get(\"name\")).save() if send_all_need_update: msg = Msg(content=User.get_all(), type_=MsgType.UPDATE_TABLE) broadcast(msg) elif", "from socket import * from ChatApp.models import Message, User from ChatApp.msg import Msg,", "= Msg(content=User.get_all(), type_=MsgType.UPDATE_TABLE) broadcast(msg) elif type_ == MsgType.DEREG: user = User.get_by_name(rcv_msg.from_) user.status =", "threads: thread.join() def server_handle_received_msg(sock, msg, addr): rcv_msg = Msg.unpack(msg) if DEBUG: print(addr, rcv_msg.__dict__)", "User from ChatApp.msg import Msg, MsgType from ChatApp.settings import DEBUG, TIMEOUT from ChatApp.utils", "thread in threads: thread.join() def server_handle_received_msg(sock, msg, addr): rcv_msg = Msg.unpack(msg) if DEBUG:", "== MsgType.STORE: try: test_sock = send(Msg(type_=MsgType.TEST, to=rcv_msg.to), rcv_msg.to) rcv_pkt = test_sock.recv(2048) if Msg.unpack(rcv_pkt).type_", "broadcast(msg) Msg(content=User.get_all(), to=user.name, type_=MsgType.UPDATE_TABLE).send(sock) elif type_ == MsgType.STORE: try: test_sock = send(Msg(type_=MsgType.TEST, to=rcv_msg.to),", "send_all_need_update sock = socket(AF_INET, SOCK_DGRAM) sock.settimeout(TIMEOUT) msg.to = receiver msg.send(sock) if msg.type_ ==", "= rcv_msg.type_ if type_ == MsgType.REG: user = User(rcv_msg.from_, addr[0], int(rcv_msg.content)) existing_user =", "= False broadcast(rcv_msg) if DEBUG: print(User.get_all_inactive_users()) for user_dict in User.get_all_inactive_users(): rcv_msg.to_message(user_dict.get(\"name\")).save() if send_all_need_update:", "type_=MsgType.UPDATE_TABLE) broadcast(msg) def server_main(port: int): sock = socket(AF_INET, SOCK_DGRAM) sock.bind((\"\", port)) while True:", "MsgType.SEND_ALL_ACK: pass except timeout: user = User.get_by_name(receiver) user.status = \"no\" send_all_need_update = True", "broadcast(msg) elif type_ == MsgType.DEREG: user = User.get_by_name(rcv_msg.from_) user.status = \"no\" user.save_or_update() Msg(type_=MsgType.DEREG_ACK,", "Msg(to=rcv_msg.from_, type_=MsgType.USER_EXIST, addr=addr).send(sock) user = User.get_by_name(rcv_msg.to) user.status = \"yes\" user.save_or_update() Msg(content=User.get_all(), to=rcv_msg.from_, type_=MsgType.UPDATE_TABLE).send()", "else: msg.type_ = MsgType.REG_ACK msg.content = Message.retrieve_by_name(user.name) or \"\" msg.send(sock) if need_update: user.save_or_update()", "need_update: user.save_or_update() if existing_user and user.addr != existing_user.addr: Msg(type_=MsgType.LOGOUT, to=rcv_msg.from_, addr=existing_user.addr).send(sock) msg =", "and user.addr != existing_user.addr: Msg(type_=MsgType.LOGOUT, to=rcv_msg.from_, addr=existing_user.addr).send(sock) msg = Msg(content=User.get_all(), from_=user.name, type_=MsgType.UPDATE_TABLE) broadcast(msg)", "global send_all_need_update sock = socket(AF_INET, SOCK_DGRAM) sock.settimeout(TIMEOUT) msg.to = receiver msg.send(sock) if msg.type_", "== MsgType.SEND_ALL_ACK: pass except timeout: user = User.get_by_name(receiver) user.status = \"no\" send_all_need_update =", "import * from ChatApp.models import Message, User from ChatApp.msg import Msg, MsgType from", "msg, addr): rcv_msg = Msg.unpack(msg) if DEBUG: print(addr, rcv_msg.__dict__) type_ = rcv_msg.type_ if", "type_ == MsgType.STORE: try: test_sock = send(Msg(type_=MsgType.TEST, to=rcv_msg.to), rcv_msg.to) rcv_pkt = test_sock.recv(2048) if", "user.save_or_update() Msg(type_=MsgType.STORE_ACK, addr=addr).send(sock) msg = Msg(content=user.__dict__, type_=MsgType.UPDATE_TABLE) broadcast(msg) else: 
Msg(type_=MsgType.STORE_ACK, addr=addr).send(sock) elif type_", "user: if user.status == \"yes\": user.status = \"no\" user.save_or_update() Msg(type_=MsgType.STORE_ACK, addr=addr).send(sock) msg =", "sock = socket(AF_INET, SOCK_DGRAM) sock.bind((\"\", port)) while True: msg, addr = sock.recvfrom(2048) server_handle_received_msg(sock,", "send(msg: Msg, receiver): global send_all_need_update sock = socket(AF_INET, SOCK_DGRAM) sock.settimeout(TIMEOUT) msg.to = receiver", "for thread in threads: thread.join() def server_handle_received_msg(sock, msg, addr): rcv_msg = Msg.unpack(msg) if", "or \"\" msg.send(sock) if need_update: user.save_or_update() if existing_user and user.addr != existing_user.addr: Msg(type_=MsgType.LOGOUT,", "= MsgType.CREATED else: msg.type_ = MsgType.REG_ACK msg.content = Message.retrieve_by_name(user.name) or \"\" msg.send(sock) if", "user = User.get_by_name(receiver) user.status = \"no\" send_all_need_update = True user.save_or_update() msg.to_message(receiver).save() return sock", "need_update = (existing_user != user) msg = Msg(to=rcv_msg.from_, addr=addr) if existing_user == None:", "user.addr != existing_user.addr: Msg(type_=MsgType.LOGOUT, to=rcv_msg.from_, addr=existing_user.addr).send(sock) msg = Msg(content=User.get_all(), from_=user.name, type_=MsgType.UPDATE_TABLE) broadcast(msg) Msg(content=User.get_all(),", "if Msg.unpack(rcv_pkt).type_ == MsgType.TEST: Msg(to=rcv_msg.from_, type_=MsgType.USER_EXIST, addr=addr).send(sock) user = User.get_by_name(rcv_msg.to) user.status = \"yes\"", "thread = threading.Thread(target=send, args=(msg, receiver, )) thread.start() threads.append(thread) for thread in threads: thread.join()", "threads = [] for receiver in receivers: thread = threading.Thread(target=send, args=(msg, receiver, ))", "user.save_or_update() Msg(content=User.get_all(), to=rcv_msg.from_, type_=MsgType.UPDATE_TABLE).send() return except timeout: pass message = Message(rcv_msg.content, rcv_msg.from_, rcv_msg.to,", "MsgType.SEND_ALL: global send_all_need_update send_all_need_update = False Msg(type_=MsgType.SEND_ALL_SERVER_ACK, addr=addr).send(sock) rcv_msg.to_server = False broadcast(rcv_msg) if", "existing_user == None: msg.type_ = MsgType.CREATED else: msg.type_ = MsgType.REG_ACK msg.content = Message.retrieve_by_name(user.name)", "msg.send(sock) if msg.type_ == MsgType.UPDATE_TABLE: pass elif msg.type_ == MsgType.SEND_ALL: try: ack_msg =" ]
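# A minimal sketch of launching the server loop above; the command-line port
# handling and the default port are assumptions for illustration and are not
# taken from the original module.
if __name__ == "__main__":
    import sys
    port = int(sys.argv[1]) if len(sys.argv) > 1 else 8000
    server_main(port)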
[ "and the following disclaimer in the documentation # and/or other materials provided with", "# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #", "EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES", "AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL", "OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE", "without # modification, are permitted provided that the following conditions are met: #", "# list of conditions and the following disclaimer. # # 2. Redistributions in", "this # list of conditions and the following disclaimer. # # 2. Redistributions", "in binary form must reproduce the above copyright notice, # this list of", "All rights reserved. # # Redistribution and use in source and binary forms,", "software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY", "# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE", "INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT", "PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS", "continue if op_type == 'embedding_lookup': for big in [False, True]: for hit_rate_estimation in", "# this software without specific prior written permission. # # THIS SOFTWARE IS", "THE POSSIBILITY OF SUCH DAMAGE. import argparse from analysis.inference import infer if __name__", "[False, True]: for hit_rate_estimation in [False, True]: for fbgemm in [False, True]: infer(op_type,", "AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE #", "# # Copyright (c) 2021, The Regents of the University of California, Davis", "[args.op_type] pass_list = ['backward' if args.backward else 'forward'] for op_type in op_list: for", "p in pass_list: if (op_type == 'fully_connected' or \\ op_type == 'transpose' or", "provided that the following conditions are met: # # 1. Redistributions of source", "code must retain the above copyright notice, this # list of conditions and", "True]: for fbgemm in [False, True]: infer(op_type, p=='backward', big=big, hit_rate_estimation=hit_rate_estimation, fbgemm=fbgemm) else: infer(op_type,", "are met: # # 1. Redistributions of source code must retain the above", "and/or other materials provided with the distribution. # # 3. Neither the name", "in [False, True]: for hit_rate_estimation in [False, True]: for fbgemm in [False, True]:", "__name__ == '__main__': parser = argparse.ArgumentParser('Get performance model error for ops.') parser.add_argument('--op-type', type=str,", "[False, True]: for fbgemm in [False, True]: infer(op_type, p=='backward', big=big, hit_rate_estimation=hit_rate_estimation, fbgemm=fbgemm) else:", "'all': op_list = ['embedding_lookup', 'fully_connected', 'conv2d', 'conv1d', 'concat', 'memcpy', 'transpose', 'bn', 'tril'] pass_list", "'concat' or \\ op_type == 'memcpy') and \\ p == 'backward': # No", "# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING,", "permitted provided that the following conditions are met: # # 1. 
Redistributions of", "LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR", "# BSD 3-Clause License # # Copyright (c) 2021, The Regents of the", "for ops.') parser.add_argument('--op-type', type=str, default='all') parser.add_argument('--backward', action='store_true', default=False) args = parser.parse_args() if args.op_type", "this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED", "CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT", "and \\ p == 'backward': # No backward for these ops continue if", "with or without # modification, are permitted provided that the following conditions are", "args.op_type == 'all': op_list = ['embedding_lookup', 'fully_connected', 'conv2d', 'conv1d', 'concat', 'memcpy', 'transpose', 'bn',", "following conditions are met: # # 1. Redistributions of source code must retain", "# # 1. Redistributions of source code must retain the above copyright notice,", "disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright", "endorse or promote products derived from # this software without specific prior written", "# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF", "LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND", "# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,", "Redistributions of source code must retain the above copyright notice, this # list", "modification, are permitted provided that the following conditions are met: # # 1.", "USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY", "'concat', 'memcpy', 'transpose', 'bn', 'tril'] pass_list = ['forward', 'backward'] else: op_list = [args.op_type]", "NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A", "WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF", "rights reserved. # # Redistribution and use in source and binary forms, with", "ops.') parser.add_argument('--op-type', type=str, default='all') parser.add_argument('--backward', action='store_true', default=False) args = parser.parse_args() if args.op_type ==", "or \\ op_type == 'memcpy') and \\ p == 'backward': # No backward", "permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS", "copyright holder nor the names of its # contributors may be used to", "action='store_true', default=False) args = parser.parse_args() if args.op_type == 'all': op_list = ['embedding_lookup', 'fully_connected',", "OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE,", "following disclaimer. # # 2. Redistributions in binary form must reproduce the above", "THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF", "for op_type in op_list: for p in pass_list: if (op_type == 'fully_connected' or", "reproduce the above copyright notice, # this list of conditions and the following", "the University of California, Davis # All rights reserved. 
# # Redistribution and", "argparse.ArgumentParser('Get performance model error for ops.') parser.add_argument('--op-type', type=str, default='all') parser.add_argument('--backward', action='store_true', default=False) args", "== 'fully_connected' or \\ op_type == 'transpose' or \\ op_type == 'concat' or", "HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,", "<gh_stars>0 # BSD 3-Clause License # # Copyright (c) 2021, The Regents of", "GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)", "TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE", "if args.backward else 'forward'] for op_type in op_list: for p in pass_list: if", "['embedding_lookup', 'fully_connected', 'conv2d', 'conv1d', 'concat', 'memcpy', 'transpose', 'bn', 'tril'] pass_list = ['forward', 'backward']", "EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import argparse from analysis.inference", "IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY", "(c) 2021, The Regents of the University of California, Davis # All rights", "or without # modification, are permitted provided that the following conditions are met:", "CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY", "op_list = [args.op_type] pass_list = ['backward' if args.backward else 'forward'] for op_type in", "args.backward else 'forward'] for op_type in op_list: for p in pass_list: if (op_type", "hit_rate_estimation in [False, True]: for fbgemm in [False, True]: infer(op_type, p=='backward', big=big, hit_rate_estimation=hit_rate_estimation,", "3-Clause License # # Copyright (c) 2021, The Regents of the University of", "without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE", "op_type == 'embedding_lookup': for big in [False, True]: for hit_rate_estimation in [False, True]:", "True]: for hit_rate_estimation in [False, True]: for fbgemm in [False, True]: infer(op_type, p=='backward',", "met: # # 1. Redistributions of source code must retain the above copyright", "3. Neither the name of the copyright holder nor the names of its", "or \\ op_type == 'concat' or \\ op_type == 'memcpy') and \\ p", "copyright notice, # this list of conditions and the following disclaimer in the", "\\ op_type == 'concat' or \\ op_type == 'memcpy') and \\ p ==", "BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL #", "= ['forward', 'backward'] else: op_list = [args.op_type] pass_list = ['backward' if args.backward else", "Copyright (c) 2021, The Regents of the University of California, Davis # All", "1. Redistributions of source code must retain the above copyright notice, this #", "THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL,", "= ['backward' if args.backward else 'forward'] for op_type in op_list: for p in", "'fully_connected' or \\ op_type == 'transpose' or \\ op_type == 'concat' or \\", "distribution. # # 3. 
Neither the name of the copyright holder nor the", "in op_list: for p in pass_list: if (op_type == 'fully_connected' or \\ op_type", "INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,", "(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS", "this list of conditions and the following disclaimer in the documentation # and/or", "of conditions and the following disclaimer in the documentation # and/or other materials", "'backward'] else: op_list = [args.op_type] pass_list = ['backward' if args.backward else 'forward'] for", "type=str, default='all') parser.add_argument('--backward', action='store_true', default=False) args = parser.parse_args() if args.op_type == 'all': op_list", "list of conditions and the following disclaimer in the documentation # and/or other", "for p in pass_list: if (op_type == 'fully_connected' or \\ op_type == 'transpose'", "SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF", "name of the copyright holder nor the names of its # contributors may", "NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF", "conditions and the following disclaimer. # # 2. Redistributions in binary form must", "and use in source and binary forms, with or without # modification, are", "FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT", "Regents of the University of California, Davis # All rights reserved. # #", "# this list of conditions and the following disclaimer in the documentation #", "DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED", "\\ p == 'backward': # No backward for these ops continue if op_type", "conditions are met: # # 1. Redistributions of source code must retain the", "OF THE POSSIBILITY OF SUCH DAMAGE. import argparse from analysis.inference import infer if", "DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE #", "# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE", "of its # contributors may be used to endorse or promote products derived", "its # contributors may be used to endorse or promote products derived from", "== 'concat' or \\ op_type == 'memcpy') and \\ p == 'backward': #", "if __name__ == '__main__': parser = argparse.ArgumentParser('Get performance model error for ops.') parser.add_argument('--op-type',", "OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER", "WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING", "OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE", "OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS", "IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR", "if (op_type == 'fully_connected' or \\ op_type == 'transpose' or \\ op_type ==", "Davis # All rights reserved. 
# # Redistribution and use in source and", "contributors may be used to endorse or promote products derived from # this", "pass_list = ['forward', 'backward'] else: op_list = [args.op_type] pass_list = ['backward' if args.backward", "NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY", "Redistribution and use in source and binary forms, with or without # modification,", "source and binary forms, with or without # modification, are permitted provided that", "BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR", "== 'backward': # No backward for these ops continue if op_type == 'embedding_lookup':", "the name of the copyright holder nor the names of its # contributors", "FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE", "IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import argparse from analysis.inference import", "infer if __name__ == '__main__': parser = argparse.ArgumentParser('Get performance model error for ops.')", "parser = argparse.ArgumentParser('Get performance model error for ops.') parser.add_argument('--op-type', type=str, default='all') parser.add_argument('--backward', action='store_true',", "import argparse from analysis.inference import infer if __name__ == '__main__': parser = argparse.ArgumentParser('Get", "of California, Davis # All rights reserved. # # Redistribution and use in", "SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import argparse from", "CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, #", "POSSIBILITY OF SUCH DAMAGE. import argparse from analysis.inference import infer if __name__ ==", "above copyright notice, this # list of conditions and the following disclaimer. #", "derived from # this software without specific prior written permission. # # THIS", "HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT", "['forward', 'backward'] else: op_list = [args.op_type] pass_list = ['backward' if args.backward else 'forward']", "ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING", "# contributors may be used to endorse or promote products derived from #", "== '__main__': parser = argparse.ArgumentParser('Get performance model error for ops.') parser.add_argument('--op-type', type=str, default='all')", "HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,", "COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES,", "these ops continue if op_type == 'embedding_lookup': for big in [False, True]: for", "'forward'] for op_type in op_list: for p in pass_list: if (op_type == 'fully_connected'", "DAMAGE. import argparse from analysis.inference import infer if __name__ == '__main__': parser =", "parser.add_argument('--op-type', type=str, default='all') parser.add_argument('--backward', action='store_true', default=False) args = parser.parse_args() if args.op_type == 'all':", "OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. 
IN NO", "forms, with or without # modification, are permitted provided that the following conditions", "above copyright notice, # this list of conditions and the following disclaimer in", "(op_type == 'fully_connected' or \\ op_type == 'transpose' or \\ op_type == 'concat'", "['backward' if args.backward else 'forward'] for op_type in op_list: for p in pass_list:", "= ['embedding_lookup', 'fully_connected', 'conv2d', 'conv1d', 'concat', 'memcpy', 'transpose', 'bn', 'tril'] pass_list = ['forward',", "in source and binary forms, with or without # modification, are permitted provided", "# # 2. Redistributions in binary form must reproduce the above copyright notice,", "'memcpy') and \\ p == 'backward': # No backward for these ops continue", "'tril'] pass_list = ['forward', 'backward'] else: op_list = [args.op_type] pass_list = ['backward' if", "IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF", "the following disclaimer in the documentation # and/or other materials provided with the", "PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY,", "parser.add_argument('--backward', action='store_true', default=False) args = parser.parse_args() if args.op_type == 'all': op_list = ['embedding_lookup',", "op_type == 'transpose' or \\ op_type == 'concat' or \\ op_type == 'memcpy')", "# # Redistribution and use in source and binary forms, with or without", "default=False) args = parser.parse_args() if args.op_type == 'all': op_list = ['embedding_lookup', 'fully_connected', 'conv2d',", "for these ops continue if op_type == 'embedding_lookup': for big in [False, True]:", "other materials provided with the distribution. # # 3. Neither the name of", "the above copyright notice, this # list of conditions and the following disclaimer.", "else 'forward'] for op_type in op_list: for p in pass_list: if (op_type ==", "TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE", "provided with the distribution. # # 3. Neither the name of the copyright", "used to endorse or promote products derived from # this software without specific", "# No backward for these ops continue if op_type == 'embedding_lookup': for big", "backward for these ops continue if op_type == 'embedding_lookup': for big in [False,", "== 'memcpy') and \\ p == 'backward': # No backward for these ops", "promote products derived from # this software without specific prior written permission. #", "prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS", "pass_list = ['backward' if args.backward else 'forward'] for op_type in op_list: for p", "(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE #", "for hit_rate_estimation in [False, True]: for fbgemm in [False, True]: infer(op_type, p=='backward', big=big,", "'embedding_lookup': for big in [False, True]: for hit_rate_estimation in [False, True]: for fbgemm", "args = parser.parse_args() if args.op_type == 'all': op_list = ['embedding_lookup', 'fully_connected', 'conv2d', 'conv1d',", "'bn', 'tril'] pass_list = ['forward', 'backward'] else: op_list = [args.op_type] pass_list = ['backward'", "list of conditions and the following disclaimer. # # 2. 
Redistributions in binary", "== 'transpose' or \\ op_type == 'concat' or \\ op_type == 'memcpy') and", "op_list = ['embedding_lookup', 'fully_connected', 'conv2d', 'conv1d', 'concat', 'memcpy', 'transpose', 'bn', 'tril'] pass_list =", "use in source and binary forms, with or without # modification, are permitted", "parser.parse_args() if args.op_type == 'all': op_list = ['embedding_lookup', 'fully_connected', 'conv2d', 'conv1d', 'concat', 'memcpy',", "IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED.", "THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" #", "= [args.op_type] pass_list = ['backward' if args.backward else 'forward'] for op_type in op_list:", "OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR", "must reproduce the above copyright notice, # this list of conditions and the", "THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import argparse", "# All rights reserved. # # Redistribution and use in source and binary", "# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE #", "disclaimer in the documentation # and/or other materials provided with the distribution. #", "to endorse or promote products derived from # this software without specific prior", "OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER", "op_list: for p in pass_list: if (op_type == 'fully_connected' or \\ op_type ==", "The Regents of the University of California, Davis # All rights reserved. #", "INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO,", "\\ op_type == 'memcpy') and \\ p == 'backward': # No backward for", "# 2. Redistributions in binary form must reproduce the above copyright notice, #", "'conv1d', 'concat', 'memcpy', 'transpose', 'bn', 'tril'] pass_list = ['forward', 'backward'] else: op_list =", "University of California, Davis # All rights reserved. # # Redistribution and use", "# # 3. Neither the name of the copyright holder nor the names", "EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT,", "LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)", "No backward for these ops continue if op_type == 'embedding_lookup': for big in", "'transpose', 'bn', 'tril'] pass_list = ['forward', 'backward'] else: op_list = [args.op_type] pass_list =", "in the documentation # and/or other materials provided with the distribution. # #", "'backward': # No backward for these ops continue if op_type == 'embedding_lookup': for", "California, Davis # All rights reserved. # # Redistribution and use in source", "TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR", "analysis.inference import infer if __name__ == '__main__': parser = argparse.ArgumentParser('Get performance model error", "AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT", "products derived from # this software without specific prior written permission. 
# #", "SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS", "else: op_list = [args.op_type] pass_list = ['backward' if args.backward else 'forward'] for op_type", "the above copyright notice, # this list of conditions and the following disclaimer", "in [False, True]: for fbgemm in [False, True]: infer(op_type, p=='backward', big=big, hit_rate_estimation=hit_rate_estimation, fbgemm=fbgemm)", "from # this software without specific prior written permission. # # THIS SOFTWARE", "of conditions and the following disclaimer. # # 2. Redistributions in binary form", "== 'all': op_list = ['embedding_lookup', 'fully_connected', 'conv2d', 'conv1d', 'concat', 'memcpy', 'transpose', 'bn', 'tril']", "be used to endorse or promote products derived from # this software without", "OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF", "'conv2d', 'conv1d', 'concat', 'memcpy', 'transpose', 'bn', 'tril'] pass_list = ['forward', 'backward'] else: op_list", "IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,", "OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON", "IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY", "documentation # and/or other materials provided with the distribution. # # 3. Neither", "BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN", "FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT", "USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH", "'memcpy', 'transpose', 'bn', 'tril'] pass_list = ['forward', 'backward'] else: op_list = [args.op_type] pass_list", "EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE", "PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR", "the distribution. # # 3. Neither the name of the copyright holder nor", "IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN", "and binary forms, with or without # modification, are permitted provided that the", "conditions and the following disclaimer in the documentation # and/or other materials provided", "op_type in op_list: for p in pass_list: if (op_type == 'fully_connected' or \\", "names of its # contributors may be used to endorse or promote products", "copyright notice, this # list of conditions and the following disclaimer. # #", "# 1. Redistributions of source code must retain the above copyright notice, this", "SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND", "error for ops.') parser.add_argument('--op-type', type=str, default='all') parser.add_argument('--backward', action='store_true', default=False) args = parser.parse_args() if", "op_type == 'memcpy') and \\ p == 'backward': # No backward for these", "'transpose' or \\ op_type == 'concat' or \\ op_type == 'memcpy') and \\", "ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED", "pass_list: if (op_type == 'fully_connected' or \\ op_type == 'transpose' or \\ op_type", "WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND", "A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER", "notice, this # list of conditions and the following disclaimer. # # 2.", "must retain the above copyright notice, this # list of conditions and the", "SUCH DAMAGE. 
import argparse from analysis.inference import infer if __name__ == '__main__': parser", "CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR", "OF SUCH DAMAGE. import argparse from analysis.inference import infer if __name__ == '__main__':", "from analysis.inference import infer if __name__ == '__main__': parser = argparse.ArgumentParser('Get performance model", "OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS", "and the following disclaimer. # # 2. Redistributions in binary form must reproduce", "ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED", "the copyright holder nor the names of its # contributors may be used", "ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN", "for big in [False, True]: for hit_rate_estimation in [False, True]: for fbgemm in", "PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS;", "COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,", "that the following conditions are met: # # 1. Redistributions of source code", "with the distribution. # # 3. Neither the name of the copyright holder", "SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT,", "# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #", "if op_type == 'embedding_lookup': for big in [False, True]: for hit_rate_estimation in [False,", "the following conditions are met: # # 1. Redistributions of source code must", "DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY", "BSD 3-Clause License # # Copyright (c) 2021, The Regents of the University", "Redistributions in binary form must reproduce the above copyright notice, # this list", "ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE", "for fbgemm in [False, True]: infer(op_type, p=='backward', big=big, hit_rate_estimation=hit_rate_estimation, fbgemm=fbgemm) else: infer(op_type, p=='backward')", "ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT", "'fully_connected', 'conv2d', 'conv1d', 'concat', 'memcpy', 'transpose', 'bn', 'tril'] pass_list = ['forward', 'backward'] else:", "CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL", "# 3. Neither the name of the copyright holder nor the names of", "LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA,", "p == 'backward': # No backward for these ops continue if op_type ==", "== 'embedding_lookup': for big in [False, True]: for hit_rate_estimation in [False, True]: for", "LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT", "OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import", "of the copyright holder nor the names of its # contributors may be", "# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"", "of source code must retain the above copyright notice, this # list of", "ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT", "the documentation # and/or other materials provided with the distribution. 
# # 3.", "notice, # this list of conditions and the following disclaimer in the documentation", "# Copyright (c) 2021, The Regents of the University of California, Davis #", "THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE", "MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT", "THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR IMPLIED", "the names of its # contributors may be used to endorse or promote", "= argparse.ArgumentParser('Get performance model error for ops.') parser.add_argument('--op-type', type=str, default='all') parser.add_argument('--backward', action='store_true', default=False)", "DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES;", "op_type == 'concat' or \\ op_type == 'memcpy') and \\ p == 'backward':", "argparse from analysis.inference import infer if __name__ == '__main__': parser = argparse.ArgumentParser('Get performance", "\\ op_type == 'transpose' or \\ op_type == 'concat' or \\ op_type ==", "= parser.parse_args() if args.op_type == 'all': op_list = ['embedding_lookup', 'fully_connected', 'conv2d', 'conv1d', 'concat',", "reserved. # # Redistribution and use in source and binary forms, with or", "\"AS IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED", "# Redistribution and use in source and binary forms, with or without #", "INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS", "2021, The Regents of the University of California, Davis # All rights reserved.", "nor the names of its # contributors may be used to endorse or", "source code must retain the above copyright notice, this # list of conditions", "ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import argparse from analysis.inference import infer", "retain the above copyright notice, this # list of conditions and the following", "holder nor the names of its # contributors may be used to endorse", "are permitted provided that the following conditions are met: # # 1. Redistributions", "BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR", "BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF", "default='all') parser.add_argument('--backward', action='store_true', default=False) args = parser.parse_args() if args.op_type == 'all': op_list =", "the following disclaimer. # # 2. Redistributions in binary form must reproduce the", "if args.op_type == 'all': op_list = ['embedding_lookup', 'fully_connected', 'conv2d', 'conv1d', 'concat', 'memcpy', 'transpose',", "STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY", "binary forms, with or without # modification, are permitted provided that the following", "License # # Copyright (c) 2021, The Regents of the University of California,", "2. 
Redistributions in binary form must reproduce the above copyright notice, # this", "ops continue if op_type == 'embedding_lookup': for big in [False, True]: for hit_rate_estimation", "may be used to endorse or promote products derived from # this software", "THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE", "# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS", "binary form must reproduce the above copyright notice, # this list of conditions", "following disclaimer in the documentation # and/or other materials provided with the distribution.", "NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE,", "of the University of California, Davis # All rights reserved. # # Redistribution", "form must reproduce the above copyright notice, # this list of conditions and", "LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES", "import infer if __name__ == '__main__': parser = argparse.ArgumentParser('Get performance model error for", "in pass_list: if (op_type == 'fully_connected' or \\ op_type == 'transpose' or \\", "Neither the name of the copyright holder nor the names of its #", "PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS", "OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY", "'__main__': parser = argparse.ArgumentParser('Get performance model error for ops.') parser.add_argument('--op-type', type=str, default='all') parser.add_argument('--backward',", "WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN", "materials provided with the distribution. # # 3. Neither the name of the", "big in [False, True]: for hit_rate_estimation in [False, True]: for fbgemm in [False,", "SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED", "OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR", "performance model error for ops.') parser.add_argument('--op-type', type=str, default='all') parser.add_argument('--backward', action='store_true', default=False) args =", "OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR", "# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.", "# and/or other materials provided with the distribution. # # 3. Neither the", "# modification, are permitted provided that the following conditions are met: # #", "written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND", "AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR", "OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF", "or \\ op_type == 'transpose' or \\ op_type == 'concat' or \\ op_type", "or promote products derived from # this software without specific prior written permission.", "specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT", "model error for ops.') parser.add_argument('--op-type', type=str, default='all') parser.add_argument('--backward', action='store_true', default=False) args = parser.parse_args()" ]
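# Usage sketch for the driver above (hedged: the script's real filename is not
# given here, "benchmark_op_error.py" is only a placeholder; the flags and the
# infer() call shapes are the ones defined above):
#
#   python benchmark_op_error.py                           # sweep every op, forward and backward
#   python benchmark_op_error.py --op-type conv2d          # single op, forward pass only
#   python benchmark_op_error.py --op-type tril --backward
#
# Calling infer() directly reproduces one iteration of the sweep, e.g. an
# embedding lookup in the forward pass with the big table, hit-rate estimation
# and fbgemm all enabled:
#
#   from analysis.inference import infer
#   infer('embedding_lookup', False, big=True, hit_rate_estimation=True, fbgemm=True)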
[ "meta_training_query[:, 0] = np.random.rand(num_remain) * config_width meta_training_query[:, 1] = np.random.rand(num_remain) * config_length meta_training_query[:,", "for i in range(2, 201)] print(\"Meta Learning Starts...\") print(\"-----------------------------------\") for mt in meta_type:", "GCN with the meta parameter for key in training_cr_gcm_n.gcn_network.state_dict().keys(): training_cr_gcm_n.gcn_network.state_dict()[key].copy_(meta_params[key].data) # train the", "in range(epi + 1)] fig = plt.figure() plt.plot(x_axis, loss_list, linewidth=2.0) plt.xlim((0, epi +", "import * import matplotlib.pyplot as plt from copy import deepcopy from torch.optim import", "True: meta_training_support[:, 0] = np.random.rand(num_remain) * config_width meta_training_support[:, 1] = np.random.rand(num_remain) * config_length", "config_height meta_seed += 1 np.random.seed(meta_seed) cf, nc = Utils.check_if_a_connected_graph(meta_training_support, num_remain) if not cf:", "epi >= 1: x_axis = [i for i in range(epi + 1)] fig", ">= 1: x_axis = [i for i in range(epi + 1)] fig =", "[i for i in range(epi + 1)] fig = plt.figure() plt.plot(x_axis, loss_list, linewidth=2.0)", "= mt meta_seed = 0 loss_list = [] for epi in range(config_meta_training_epi): #", "the query set and return the gradient gradient, loss = training_cr_gcm_n.train_query_set_single(meta_training_query, num_remain) print(\"%d", "and return the gradient gradient, loss = training_cr_gcm_n.train_query_set_single(meta_training_query, num_remain) print(\"%d episode %d remain", "the number of remained UAVs meta_type = [i for i in range(2, 201)]", "parameter for key in param_name: meta_params[key].data += gradient[key].data if epi >= 1: x_axis", "support set training_cr_gcm_n.train_support_set_single(meta_training_support, num_remain) # generate the query set of the training task", "from Configurations import * import matplotlib.pyplot as plt from copy import deepcopy from", "loss_list = [] for epi in range(config_meta_training_epi): # create the training gcn training_cr_gcm_n", "training task meta_training_query = np.zeros((num_remain, 3)) while True: meta_training_query[:, 0] = np.random.rand(num_remain) *", "training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.0001) if epi > 250: training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.00001) #", "= np.zeros((num_remain, 3)) while True: meta_training_query[:, 0] = np.random.rand(num_remain) * config_width meta_training_query[:, 1]", "= np.random.rand(num_remain) * config_height meta_seed += 1 np.random.seed(meta_seed) cf, nc = Utils.check_if_a_connected_graph(meta_training_query, num_remain)", "-- loss %f\" % ( epi, num_remain, config_num_of_agents - num_remain, loss)) loss_list.append(deepcopy(loss)) #", "CR_MGC() training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.001) # decrease the learning rate as the meta", "= dict(meta_cr_gcm_n.gcn_network.named_parameters()) # param name list param_name = meta_cr_gcm_n.gcn_network.state_dict().keys() # meta training num_remain", "meta parameter for key in param_name: meta_params[key].data += gradient[key].data if epi >= 1:", "[('', ...), ('',...)] meta_params = dict(meta_cr_gcm_n.gcn_network.named_parameters()) # param name list param_name = meta_cr_gcm_n.gcn_network.state_dict().keys()", "of the training task meta_training_query = np.zeros((num_remain, 3)) while True: meta_training_query[:, 0] =", "# generate the query set of the training task 
meta_training_query = np.zeros((num_remain, 3))", "> 250: training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.00001) # generate the support set of the", "destroy %d UAVs -- loss %f\" % ( epi, num_remain, config_num_of_agents - num_remain,", "list param_name = meta_cr_gcm_n.gcn_network.state_dict().keys() # meta training num_remain = mt meta_seed = 0", "on the support set training_cr_gcm_n.train_support_set_single(meta_training_support, num_remain) # generate the query set of the", "config_length meta_training_support[:, 2] = np.random.rand(num_remain) * config_height meta_seed += 1 np.random.seed(meta_seed) cf, nc", "[] for epi in range(config_meta_training_epi): # create the training gcn training_cr_gcm_n = CR_MGC()", "set of the training task meta_training_query = np.zeros((num_remain, 3)) while True: meta_training_query[:, 0]", "meta_training_support = np.zeros((num_remain, 3)) while True: meta_training_support[:, 0] = np.random.rand(num_remain) * config_width meta_training_support[:,", "num_remain, config_num_of_agents - num_remain, loss)) loss_list.append(deepcopy(loss)) # update the meta parameter for key", "for key in training_cr_gcm_n.gcn_network.state_dict().keys(): training_cr_gcm_n.gcn_network.state_dict()[key].copy_(meta_params[key].data) # train the network on the support set", "# param name list param_name = meta_cr_gcm_n.gcn_network.state_dict().keys() # meta training num_remain = mt", "mt in meta_type: meta_cr_gcm_n = CR_MGC() # list of tuples [('', ...), ('',...)]", "import Utils # the range of the number of remained UAVs meta_type =", "deepcopy from torch.optim import Adam import Utils # the range of the number", "meta learning moves on if epi > 100: training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.0001) if", "= np.random.rand(num_remain) * config_width meta_training_support[:, 1] = np.random.rand(num_remain) * config_length meta_training_support[:, 2] =", "rate as the meta learning moves on if epi > 100: training_cr_gcm_n.optimizer =", "* config_length meta_training_support[:, 2] = np.random.rand(num_remain) * config_height meta_seed += 1 np.random.seed(meta_seed) cf,", "config_width meta_training_query[:, 1] = np.random.rand(num_remain) * config_length meta_training_query[:, 2] = np.random.rand(num_remain) * config_height", "0] = np.random.rand(num_remain) * config_width meta_training_support[:, 1] = np.random.rand(num_remain) * config_length meta_training_support[:, 2]", "task meta_training_query = np.zeros((num_remain, 3)) while True: meta_training_query[:, 0] = np.random.rand(num_remain) * config_width", "loss_list, linewidth=2.0) plt.xlim((0, epi + 1)) plt.ylim((0, 1400)) plt.savefig('Meta_Learning_Results/meta_loss_pic/meta_%d.png' % num_remain) plt.close() #", "training_cr_gcm_n.gcn_network.state_dict()[key].copy_(meta_params[key].data) # train the network on the support set training_cr_gcm_n.train_support_set_single(meta_training_support, num_remain) # generate", "True: meta_training_query[:, 0] = np.random.rand(num_remain) * config_width meta_training_query[:, 1] = np.random.rand(num_remain) * config_length", "1)) plt.ylim((0, 1400)) plt.savefig('Meta_Learning_Results/meta_loss_pic/meta_%d.png' % num_remain) plt.close() # plt.show() for key in meta_params.keys():", "the training task meta_training_support = np.zeros((num_remain, 3)) while True: meta_training_support[:, 0] = np.random.rand(num_remain)", "the range of the number of remained UAVs meta_type = [i for i", "= 0 loss_list = [] for epi in 
range(config_meta_training_epi): # create the training", "from copy import deepcopy from torch.optim import Adam import Utils # the range", "plt.close() # plt.show() for key in meta_params.keys(): meta_params[key] = meta_params[key].cpu().data.numpy() np.save('Meta_Learning_Results/meta_parameters/meta_%d.npy' % num_remain,", "of the GCN with the meta parameter for key in training_cr_gcm_n.gcn_network.state_dict().keys(): training_cr_gcm_n.gcn_network.state_dict()[key].copy_(meta_params[key].data) #", "meta_cr_gcm_n = CR_MGC() # list of tuples [('', ...), ('',...)] meta_params = dict(meta_cr_gcm_n.gcn_network.named_parameters())", "= [] for epi in range(config_meta_training_epi): # create the training gcn training_cr_gcm_n =", "copy import deepcopy from torch.optim import Adam import Utils # the range of", "generate the support set of the training task meta_training_support = np.zeros((num_remain, 3)) while", "the training task meta_training_query = np.zeros((num_remain, 3)) while True: meta_training_query[:, 0] = np.random.rand(num_remain)", "Learning Starts...\") print(\"-----------------------------------\") for mt in meta_type: meta_cr_gcm_n = CR_MGC() # list of", "of tuples [('', ...), ('',...)] meta_params = dict(meta_cr_gcm_n.gcn_network.named_parameters()) # param name list param_name", "1)] fig = plt.figure() plt.plot(x_axis, loss_list, linewidth=2.0) plt.xlim((0, epi + 1)) plt.ylim((0, 1400))", "if not cf: # print(cf) break # endow the initial values of the", "0] = np.random.rand(num_remain) * config_width meta_training_query[:, 1] = np.random.rand(num_remain) * config_length meta_training_query[:, 2]", "1] = np.random.rand(num_remain) * config_length meta_training_support[:, 2] = np.random.rand(num_remain) * config_height meta_seed +=", "remained UAVs meta_type = [i for i in range(2, 201)] print(\"Meta Learning Starts...\")", "meta_training_query[:, 1] = np.random.rand(num_remain) * config_length meta_training_query[:, 2] = np.random.rand(num_remain) * config_height meta_seed", "train on the query set and return the gradient gradient, loss = training_cr_gcm_n.train_query_set_single(meta_training_query,", "training_cr_gcm_n.train_query_set_single(meta_training_query, num_remain) print(\"%d episode %d remain UAVs -- destroy %d UAVs -- loss", "while True: meta_training_query[:, 0] = np.random.rand(num_remain) * config_width meta_training_query[:, 1] = np.random.rand(num_remain) *", "if not cf: # print(cf) break # train on the query set and", "# train the network on the support set training_cr_gcm_n.train_support_set_single(meta_training_support, num_remain) # generate the", "initial values of the GCN with the meta parameter for key in training_cr_gcm_n.gcn_network.state_dict().keys():", "the GCN with the meta parameter for key in training_cr_gcm_n.gcn_network.state_dict().keys(): training_cr_gcm_n.gcn_network.state_dict()[key].copy_(meta_params[key].data) # train", "num_remain) if not cf: # print(cf) break # train on the query set", "of remained UAVs meta_type = [i for i in range(2, 201)] print(\"Meta Learning", "100: training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.0001) if epi > 250: training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.00001)", "...), ('',...)] meta_params = dict(meta_cr_gcm_n.gcn_network.named_parameters()) # param name list param_name = meta_cr_gcm_n.gcn_network.state_dict().keys() #", "set training_cr_gcm_n.train_support_set_single(meta_training_support, num_remain) # generate the query set of 
the training task meta_training_query", "cf, nc = Utils.check_if_a_connected_graph(meta_training_support, num_remain) if not cf: # print(cf) break # endow", "+= 1 np.random.seed(meta_seed) cf, nc = Utils.check_if_a_connected_graph(meta_training_support, num_remain) if not cf: # print(cf)", "from Main_algorithm_GCN.CR_MGC import CR_MGC from Configurations import * import matplotlib.pyplot as plt from", "+ 1)) plt.ylim((0, 1400)) plt.savefig('Meta_Learning_Results/meta_loss_pic/meta_%d.png' % num_remain) plt.close() # plt.show() for key in", "* import matplotlib.pyplot as plt from copy import deepcopy from torch.optim import Adam", "np.random.rand(num_remain) * config_height meta_seed += 1 np.random.seed(meta_seed) cf, nc = Utils.check_if_a_connected_graph(meta_training_support, num_remain) if", "train the network on the support set training_cr_gcm_n.train_support_set_single(meta_training_support, num_remain) # generate the query", "the meta parameter for key in param_name: meta_params[key].data += gradient[key].data if epi >=", "for i in range(epi + 1)] fig = plt.figure() plt.plot(x_axis, loss_list, linewidth=2.0) plt.xlim((0,", "0 loss_list = [] for epi in range(config_meta_training_epi): # create the training gcn", "loss_list.append(deepcopy(loss)) # update the meta parameter for key in param_name: meta_params[key].data += gradient[key].data", "print(\"-----------------------------------\") for mt in meta_type: meta_cr_gcm_n = CR_MGC() # list of tuples [('',", "* config_width meta_training_support[:, 1] = np.random.rand(num_remain) * config_length meta_training_support[:, 2] = np.random.rand(num_remain) *", "for mt in meta_type: meta_cr_gcm_n = CR_MGC() # list of tuples [('', ...),", "UAVs meta_type = [i for i in range(2, 201)] print(\"Meta Learning Starts...\") print(\"-----------------------------------\")", "fig = plt.figure() plt.plot(x_axis, loss_list, linewidth=2.0) plt.xlim((0, epi + 1)) plt.ylim((0, 1400)) plt.savefig('Meta_Learning_Results/meta_loss_pic/meta_%d.png'", "plt.savefig('Meta_Learning_Results/meta_loss_pic/meta_%d.png' % num_remain) plt.close() # plt.show() for key in meta_params.keys(): meta_params[key] = meta_params[key].cpu().data.numpy()", "% num_remain) plt.close() # plt.show() for key in meta_params.keys(): meta_params[key] = meta_params[key].cpu().data.numpy() np.save('Meta_Learning_Results/meta_parameters/meta_%d.npy'", "moves on if epi > 100: training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.0001) if epi >", "meta_training_query = np.zeros((num_remain, 3)) while True: meta_training_query[:, 0] = np.random.rand(num_remain) * config_width meta_training_query[:,", "# plt.show() for key in meta_params.keys(): meta_params[key] = meta_params[key].cpu().data.numpy() np.save('Meta_Learning_Results/meta_parameters/meta_%d.npy' % num_remain, meta_params)", "config_length meta_training_query[:, 2] = np.random.rand(num_remain) * config_height meta_seed += 1 np.random.seed(meta_seed) cf, nc", "x_axis = [i for i in range(epi + 1)] fig = plt.figure() plt.plot(x_axis,", "# the range of the number of remained UAVs meta_type = [i for", "cf: # print(cf) break # endow the initial values of the GCN with", "Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.0001) if epi > 250: training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.00001) # generate the", "+= gradient[key].data if epi >= 1: x_axis = [i for i in range(epi", "print(cf) break # train on the query set and return the gradient gradient,", "num_remain = mt 
meta_seed = 0 loss_list = [] for epi in range(config_meta_training_epi):", "= CR_MGC() # list of tuples [('', ...), ('',...)] meta_params = dict(meta_cr_gcm_n.gcn_network.named_parameters()) #", "set of the training task meta_training_support = np.zeros((num_remain, 3)) while True: meta_training_support[:, 0]", "if epi >= 1: x_axis = [i for i in range(epi + 1)]", "training_cr_gcm_n.train_support_set_single(meta_training_support, num_remain) # generate the query set of the training task meta_training_query =", "key in param_name: meta_params[key].data += gradient[key].data if epi >= 1: x_axis = [i", "num_remain) print(\"%d episode %d remain UAVs -- destroy %d UAVs -- loss %f\"", "import CR_MGC from Configurations import * import matplotlib.pyplot as plt from copy import", "epi, num_remain, config_num_of_agents - num_remain, loss)) loss_list.append(deepcopy(loss)) # update the meta parameter for", "Configurations import * import matplotlib.pyplot as plt from copy import deepcopy from torch.optim", "np.zeros((num_remain, 3)) while True: meta_training_support[:, 0] = np.random.rand(num_remain) * config_width meta_training_support[:, 1] =", "% ( epi, num_remain, config_num_of_agents - num_remain, loss)) loss_list.append(deepcopy(loss)) # update the meta", "break # endow the initial values of the GCN with the meta parameter", "= Utils.check_if_a_connected_graph(meta_training_query, num_remain) if not cf: # print(cf) break # train on the", "> 100: training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.0001) if epi > 250: training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(),", "of the number of remained UAVs meta_type = [i for i in range(2,", "meta_seed = 0 loss_list = [] for epi in range(config_meta_training_epi): # create the", "meta_training_query[:, 2] = np.random.rand(num_remain) * config_height meta_seed += 1 np.random.seed(meta_seed) cf, nc =", "meta_training_support[:, 1] = np.random.rand(num_remain) * config_length meta_training_support[:, 2] = np.random.rand(num_remain) * config_height meta_seed", "the initial values of the GCN with the meta parameter for key in", "print(cf) break # endow the initial values of the GCN with the meta", "Utils.check_if_a_connected_graph(meta_training_support, num_remain) if not cf: # print(cf) break # endow the initial values", "mt meta_seed = 0 loss_list = [] for epi in range(config_meta_training_epi): # create", "in meta_type: meta_cr_gcm_n = CR_MGC() # list of tuples [('', ...), ('',...)] meta_params", "# train on the query set and return the gradient gradient, loss =", "1: x_axis = [i for i in range(epi + 1)] fig = plt.figure()", "plt.xlim((0, epi + 1)) plt.ylim((0, 1400)) plt.savefig('Meta_Learning_Results/meta_loss_pic/meta_%d.png' % num_remain) plt.close() # plt.show() for", "* config_height meta_seed += 1 np.random.seed(meta_seed) cf, nc = Utils.check_if_a_connected_graph(meta_training_support, num_remain) if not", "param_name = meta_cr_gcm_n.gcn_network.state_dict().keys() # meta training num_remain = mt meta_seed = 0 loss_list", "meta_cr_gcm_n.gcn_network.state_dict().keys() # meta training num_remain = mt meta_seed = 0 loss_list = []", "# create the training gcn training_cr_gcm_n = CR_MGC() training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.001) #", "* config_height meta_seed += 1 np.random.seed(meta_seed) cf, nc = Utils.check_if_a_connected_graph(meta_training_query, num_remain) if not", "nc = Utils.check_if_a_connected_graph(meta_training_query, num_remain) 
if not cf: # print(cf) break # train on", "values of the GCN with the meta parameter for key in training_cr_gcm_n.gcn_network.state_dict().keys(): training_cr_gcm_n.gcn_network.state_dict()[key].copy_(meta_params[key].data)", "loss = training_cr_gcm_n.train_query_set_single(meta_training_query, num_remain) print(\"%d episode %d remain UAVs -- destroy %d UAVs", "= [i for i in range(2, 201)] print(\"Meta Learning Starts...\") print(\"-----------------------------------\") for mt", "if epi > 100: training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.0001) if epi > 250: training_cr_gcm_n.optimizer", "3)) while True: meta_training_query[:, 0] = np.random.rand(num_remain) * config_width meta_training_query[:, 1] = np.random.rand(num_remain)", "episode %d remain UAVs -- destroy %d UAVs -- loss %f\" % (", "# generate the support set of the training task meta_training_support = np.zeros((num_remain, 3))", "network on the support set training_cr_gcm_n.train_support_set_single(meta_training_support, num_remain) # generate the query set of", "config_num_of_agents - num_remain, loss)) loss_list.append(deepcopy(loss)) # update the meta parameter for key in", "name list param_name = meta_cr_gcm_n.gcn_network.state_dict().keys() # meta training num_remain = mt meta_seed =", "not cf: # print(cf) break # endow the initial values of the GCN", "+= 1 np.random.seed(meta_seed) cf, nc = Utils.check_if_a_connected_graph(meta_training_query, num_remain) if not cf: # print(cf)", "parameter for key in training_cr_gcm_n.gcn_network.state_dict().keys(): training_cr_gcm_n.gcn_network.state_dict()[key].copy_(meta_params[key].data) # train the network on the support", "the training gcn training_cr_gcm_n = CR_MGC() training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.001) # decrease the", "= np.random.rand(num_remain) * config_length meta_training_query[:, 2] = np.random.rand(num_remain) * config_height meta_seed += 1", "import deepcopy from torch.optim import Adam import Utils # the range of the", "UAVs -- loss %f\" % ( epi, num_remain, config_num_of_agents - num_remain, loss)) loss_list.append(deepcopy(loss))", "learning moves on if epi > 100: training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.0001) if epi", "in range(config_meta_training_epi): # create the training gcn training_cr_gcm_n = CR_MGC() training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(),", "create the training gcn training_cr_gcm_n = CR_MGC() training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.001) # decrease", "dict(meta_cr_gcm_n.gcn_network.named_parameters()) # param name list param_name = meta_cr_gcm_n.gcn_network.state_dict().keys() # meta training num_remain =", "+ 1)] fig = plt.figure() plt.plot(x_axis, loss_list, linewidth=2.0) plt.xlim((0, epi + 1)) plt.ylim((0,", "num_remain) if not cf: # print(cf) break # endow the initial values of", "on the query set and return the gradient gradient, loss = training_cr_gcm_n.train_query_set_single(meta_training_query, num_remain)", "in range(2, 201)] print(\"Meta Learning Starts...\") print(\"-----------------------------------\") for mt in meta_type: meta_cr_gcm_n =", "decrease the learning rate as the meta learning moves on if epi >", "the support set of the training task meta_training_support = np.zeros((num_remain, 3)) while True:", "epi in range(config_meta_training_epi): # create the training gcn training_cr_gcm_n = CR_MGC() training_cr_gcm_n.optimizer =", 
"Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.001) # decrease the learning rate as the meta learning moves on", "if epi > 250: training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.00001) # generate the support set", "set and return the gradient gradient, loss = training_cr_gcm_n.train_query_set_single(meta_training_query, num_remain) print(\"%d episode %d", "torch.optim import Adam import Utils # the range of the number of remained", "epi + 1)) plt.ylim((0, 1400)) plt.savefig('Meta_Learning_Results/meta_loss_pic/meta_%d.png' % num_remain) plt.close() # plt.show() for key", "Main_algorithm_GCN.CR_MGC import CR_MGC from Configurations import * import matplotlib.pyplot as plt from copy", "Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.00001) # generate the support set of the training task meta_training_support =", "range(2, 201)] print(\"Meta Learning Starts...\") print(\"-----------------------------------\") for mt in meta_type: meta_cr_gcm_n = CR_MGC()", "meta_seed += 1 np.random.seed(meta_seed) cf, nc = Utils.check_if_a_connected_graph(meta_training_support, num_remain) if not cf: #", "import matplotlib.pyplot as plt from copy import deepcopy from torch.optim import Adam import", "= np.random.rand(num_remain) * config_width meta_training_query[:, 1] = np.random.rand(num_remain) * config_length meta_training_query[:, 2] =", "support set of the training task meta_training_support = np.zeros((num_remain, 3)) while True: meta_training_support[:,", "[i for i in range(2, 201)] print(\"Meta Learning Starts...\") print(\"-----------------------------------\") for mt in", "= plt.figure() plt.plot(x_axis, loss_list, linewidth=2.0) plt.xlim((0, epi + 1)) plt.ylim((0, 1400)) plt.savefig('Meta_Learning_Results/meta_loss_pic/meta_%d.png' %", "for epi in range(config_meta_training_epi): # create the training gcn training_cr_gcm_n = CR_MGC() training_cr_gcm_n.optimizer", "query set and return the gradient gradient, loss = training_cr_gcm_n.train_query_set_single(meta_training_query, num_remain) print(\"%d episode", "the learning rate as the meta learning moves on if epi > 100:", "np.random.seed(meta_seed) cf, nc = Utils.check_if_a_connected_graph(meta_training_query, num_remain) if not cf: # print(cf) break #", "config_height meta_seed += 1 np.random.seed(meta_seed) cf, nc = Utils.check_if_a_connected_graph(meta_training_query, num_remain) if not cf:", "CR_MGC from Configurations import * import matplotlib.pyplot as plt from copy import deepcopy", "1400)) plt.savefig('Meta_Learning_Results/meta_loss_pic/meta_%d.png' % num_remain) plt.close() # plt.show() for key in meta_params.keys(): meta_params[key] =", "%d remain UAVs -- destroy %d UAVs -- loss %f\" % ( epi,", "linewidth=2.0) plt.xlim((0, epi + 1)) plt.ylim((0, 1400)) plt.savefig('Meta_Learning_Results/meta_loss_pic/meta_%d.png' % num_remain) plt.close() # plt.show()", "training_cr_gcm_n = CR_MGC() training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.001) # decrease the learning rate as", "learning rate as the meta learning moves on if epi > 100: training_cr_gcm_n.optimizer", "endow the initial values of the GCN with the meta parameter for key", "num_remain, loss)) loss_list.append(deepcopy(loss)) # update the meta parameter for key in param_name: meta_params[key].data", "lr=0.001) # decrease the learning rate as the meta learning moves on if", "print(\"Meta Learning Starts...\") print(\"-----------------------------------\") for mt in meta_type: meta_cr_gcm_n = CR_MGC() # 
list", "cf, nc = Utils.check_if_a_connected_graph(meta_training_query, num_remain) if not cf: # print(cf) break # train", "update the meta parameter for key in param_name: meta_params[key].data += gradient[key].data if epi", "as the meta learning moves on if epi > 100: training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(),", "= training_cr_gcm_n.train_query_set_single(meta_training_query, num_remain) print(\"%d episode %d remain UAVs -- destroy %d UAVs --", "in param_name: meta_params[key].data += gradient[key].data if epi >= 1: x_axis = [i for", "training num_remain = mt meta_seed = 0 loss_list = [] for epi in", "= CR_MGC() training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.001) # decrease the learning rate as the", "on if epi > 100: training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.0001) if epi > 250:", "training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.00001) # generate the support set of the training task", "gradient gradient, loss = training_cr_gcm_n.train_query_set_single(meta_training_query, num_remain) print(\"%d episode %d remain UAVs -- destroy", "# print(cf) break # train on the query set and return the gradient", "Utils.check_if_a_connected_graph(meta_training_query, num_remain) if not cf: # print(cf) break # train on the query", "meta_training_support[:, 0] = np.random.rand(num_remain) * config_width meta_training_support[:, 1] = np.random.rand(num_remain) * config_length meta_training_support[:,", "meta_params[key].data += gradient[key].data if epi >= 1: x_axis = [i for i in", "of the training task meta_training_support = np.zeros((num_remain, 3)) while True: meta_training_support[:, 0] =", "= [i for i in range(epi + 1)] fig = plt.figure() plt.plot(x_axis, loss_list,", "the meta parameter for key in training_cr_gcm_n.gcn_network.state_dict().keys(): training_cr_gcm_n.gcn_network.state_dict()[key].copy_(meta_params[key].data) # train the network on", "cf: # print(cf) break # train on the query set and return the", "= Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.001) # decrease the learning rate as the meta learning moves", "np.random.rand(num_remain) * config_length meta_training_query[:, 2] = np.random.rand(num_remain) * config_height meta_seed += 1 np.random.seed(meta_seed)", "np.random.seed(meta_seed) cf, nc = Utils.check_if_a_connected_graph(meta_training_support, num_remain) if not cf: # print(cf) break #", "# meta training num_remain = mt meta_seed = 0 loss_list = [] for", "for key in param_name: meta_params[key].data += gradient[key].data if epi >= 1: x_axis =", "i in range(2, 201)] print(\"Meta Learning Starts...\") print(\"-----------------------------------\") for mt in meta_type: meta_cr_gcm_n", "# print(cf) break # endow the initial values of the GCN with the", "plt.plot(x_axis, loss_list, linewidth=2.0) plt.xlim((0, epi + 1)) plt.ylim((0, 1400)) plt.savefig('Meta_Learning_Results/meta_loss_pic/meta_%d.png' % num_remain) plt.close()", "generate the query set of the training task meta_training_query = np.zeros((num_remain, 3)) while", "config_width meta_training_support[:, 1] = np.random.rand(num_remain) * config_length meta_training_support[:, 2] = np.random.rand(num_remain) * config_height", "meta_seed += 1 np.random.seed(meta_seed) cf, nc = Utils.check_if_a_connected_graph(meta_training_query, num_remain) if not cf: #", "# list of tuples [('', ...), ('',...)] meta_params = dict(meta_cr_gcm_n.gcn_network.named_parameters()) # param 
name", "return the gradient gradient, loss = training_cr_gcm_n.train_query_set_single(meta_training_query, num_remain) print(\"%d episode %d remain UAVs", "# endow the initial values of the GCN with the meta parameter for", "%d UAVs -- loss %f\" % ( epi, num_remain, config_num_of_agents - num_remain, loss))", "2] = np.random.rand(num_remain) * config_height meta_seed += 1 np.random.seed(meta_seed) cf, nc = Utils.check_if_a_connected_graph(meta_training_support,", "meta training num_remain = mt meta_seed = 0 loss_list = [] for epi", "meta parameter for key in training_cr_gcm_n.gcn_network.state_dict().keys(): training_cr_gcm_n.gcn_network.state_dict()[key].copy_(meta_params[key].data) # train the network on the", "plt from copy import deepcopy from torch.optim import Adam import Utils # the", "query set of the training task meta_training_query = np.zeros((num_remain, 3)) while True: meta_training_query[:,", "np.random.rand(num_remain) * config_height meta_seed += 1 np.random.seed(meta_seed) cf, nc = Utils.check_if_a_connected_graph(meta_training_query, num_remain) if", "%f\" % ( epi, num_remain, config_num_of_agents - num_remain, loss)) loss_list.append(deepcopy(loss)) # update the", "from torch.optim import Adam import Utils # the range of the number of", "lr=0.00001) # generate the support set of the training task meta_training_support = np.zeros((num_remain,", "Adam import Utils # the range of the number of remained UAVs meta_type", "1 np.random.seed(meta_seed) cf, nc = Utils.check_if_a_connected_graph(meta_training_support, num_remain) if not cf: # print(cf) break", "the network on the support set training_cr_gcm_n.train_support_set_single(meta_training_support, num_remain) # generate the query set", "# update the meta parameter for key in param_name: meta_params[key].data += gradient[key].data if", "tuples [('', ...), ('',...)] meta_params = dict(meta_cr_gcm_n.gcn_network.named_parameters()) # param name list param_name =", "* config_width meta_training_query[:, 1] = np.random.rand(num_remain) * config_length meta_training_query[:, 2] = np.random.rand(num_remain) *", "training gcn training_cr_gcm_n = CR_MGC() training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.001) # decrease the learning", "the meta learning moves on if epi > 100: training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.0001)", "plt.figure() plt.plot(x_axis, loss_list, linewidth=2.0) plt.xlim((0, epi + 1)) plt.ylim((0, 1400)) plt.savefig('Meta_Learning_Results/meta_loss_pic/meta_%d.png' % num_remain)", "list of tuples [('', ...), ('',...)] meta_params = dict(meta_cr_gcm_n.gcn_network.named_parameters()) # param name list", "training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.001) # decrease the learning rate as the meta learning", "gradient, loss = training_cr_gcm_n.train_query_set_single(meta_training_query, num_remain) print(\"%d episode %d remain UAVs -- destroy %d", "= Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.0001) if epi > 250: training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.00001) # generate", "import Adam import Utils # the range of the number of remained UAVs", "2] = np.random.rand(num_remain) * config_height meta_seed += 1 np.random.seed(meta_seed) cf, nc = Utils.check_if_a_connected_graph(meta_training_query,", "the gradient gradient, loss = training_cr_gcm_n.train_query_set_single(meta_training_query, num_remain) print(\"%d episode %d remain UAVs --", "CR_MGC() # list 
of tuples [('', ...), ('',...)] meta_params = dict(meta_cr_gcm_n.gcn_network.named_parameters()) # param", "with the meta parameter for key in training_cr_gcm_n.gcn_network.state_dict().keys(): training_cr_gcm_n.gcn_network.state_dict()[key].copy_(meta_params[key].data) # train the network", "= meta_cr_gcm_n.gcn_network.state_dict().keys() # meta training num_remain = mt meta_seed = 0 loss_list =", "nc = Utils.check_if_a_connected_graph(meta_training_support, num_remain) if not cf: # print(cf) break # endow the", "not cf: # print(cf) break # train on the query set and return", "= Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.00001) # generate the support set of the training task meta_training_support", "param name list param_name = meta_cr_gcm_n.gcn_network.state_dict().keys() # meta training num_remain = mt meta_seed", "in training_cr_gcm_n.gcn_network.state_dict().keys(): training_cr_gcm_n.gcn_network.state_dict()[key].copy_(meta_params[key].data) # train the network on the support set training_cr_gcm_n.train_support_set_single(meta_training_support, num_remain)", "num_remain) plt.close() # plt.show() for key in meta_params.keys(): meta_params[key] = meta_params[key].cpu().data.numpy() np.save('Meta_Learning_Results/meta_parameters/meta_%d.npy' %", "UAVs -- destroy %d UAVs -- loss %f\" % ( epi, num_remain, config_num_of_agents", "print(\"%d episode %d remain UAVs -- destroy %d UAVs -- loss %f\" %", "epi > 250: training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.00001) # generate the support set of", "param_name: meta_params[key].data += gradient[key].data if epi >= 1: x_axis = [i for i", "range(epi + 1)] fig = plt.figure() plt.plot(x_axis, loss_list, linewidth=2.0) plt.xlim((0, epi + 1))", "250: training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.00001) # generate the support set of the training", "training task meta_training_support = np.zeros((num_remain, 3)) while True: meta_training_support[:, 0] = np.random.rand(num_remain) *", "Starts...\") print(\"-----------------------------------\") for mt in meta_type: meta_cr_gcm_n = CR_MGC() # list of tuples", "meta_type: meta_cr_gcm_n = CR_MGC() # list of tuples [('', ...), ('',...)] meta_params =", "np.random.rand(num_remain) * config_width meta_training_query[:, 1] = np.random.rand(num_remain) * config_length meta_training_query[:, 2] = np.random.rand(num_remain)", "remain UAVs -- destroy %d UAVs -- loss %f\" % ( epi, num_remain,", "break # train on the query set and return the gradient gradient, loss", "np.random.rand(num_remain) * config_length meta_training_support[:, 2] = np.random.rand(num_remain) * config_height meta_seed += 1 np.random.seed(meta_seed)", "* config_length meta_training_query[:, 2] = np.random.rand(num_remain) * config_height meta_seed += 1 np.random.seed(meta_seed) cf,", "the query set of the training task meta_training_query = np.zeros((num_remain, 3)) while True:", "range(config_meta_training_epi): # create the training gcn training_cr_gcm_n = CR_MGC() training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.001)", "loss)) loss_list.append(deepcopy(loss)) # update the meta parameter for key in param_name: meta_params[key].data +=", "gradient[key].data if epi >= 1: x_axis = [i for i in range(epi +", "= np.random.rand(num_remain) * config_height meta_seed += 1 np.random.seed(meta_seed) cf, nc = Utils.check_if_a_connected_graph(meta_training_support, num_remain)", "epi > 100: training_cr_gcm_n.optimizer = 
Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.0001) if epi > 250: training_cr_gcm_n.optimizer =", "meta_type = [i for i in range(2, 201)] print(\"Meta Learning Starts...\") print(\"-----------------------------------\") for", "1] = np.random.rand(num_remain) * config_length meta_training_query[:, 2] = np.random.rand(num_remain) * config_height meta_seed +=", "np.zeros((num_remain, 3)) while True: meta_training_query[:, 0] = np.random.rand(num_remain) * config_width meta_training_query[:, 1] =", "np.random.rand(num_remain) * config_width meta_training_support[:, 1] = np.random.rand(num_remain) * config_length meta_training_support[:, 2] = np.random.rand(num_remain)", "plt.ylim((0, 1400)) plt.savefig('Meta_Learning_Results/meta_loss_pic/meta_%d.png' % num_remain) plt.close() # plt.show() for key in meta_params.keys(): meta_params[key]", "201)] print(\"Meta Learning Starts...\") print(\"-----------------------------------\") for mt in meta_type: meta_cr_gcm_n = CR_MGC() #", "('',...)] meta_params = dict(meta_cr_gcm_n.gcn_network.named_parameters()) # param name list param_name = meta_cr_gcm_n.gcn_network.state_dict().keys() # meta", "matplotlib.pyplot as plt from copy import deepcopy from torch.optim import Adam import Utils", "training_cr_gcm_n.gcn_network.state_dict().keys(): training_cr_gcm_n.gcn_network.state_dict()[key].copy_(meta_params[key].data) # train the network on the support set training_cr_gcm_n.train_support_set_single(meta_training_support, num_remain) #", "number of remained UAVs meta_type = [i for i in range(2, 201)] print(\"Meta", "while True: meta_training_support[:, 0] = np.random.rand(num_remain) * config_width meta_training_support[:, 1] = np.random.rand(num_remain) *", "num_remain) # generate the query set of the training task meta_training_query = np.zeros((num_remain,", "3)) while True: meta_training_support[:, 0] = np.random.rand(num_remain) * config_width meta_training_support[:, 1] = np.random.rand(num_remain)", "loss %f\" % ( epi, num_remain, config_num_of_agents - num_remain, loss)) loss_list.append(deepcopy(loss)) # update", "the support set training_cr_gcm_n.train_support_set_single(meta_training_support, num_remain) # generate the query set of the training", "key in training_cr_gcm_n.gcn_network.state_dict().keys(): training_cr_gcm_n.gcn_network.state_dict()[key].copy_(meta_params[key].data) # train the network on the support set training_cr_gcm_n.train_support_set_single(meta_training_support,", "= Utils.check_if_a_connected_graph(meta_training_support, num_remain) if not cf: # print(cf) break # endow the initial", "i in range(epi + 1)] fig = plt.figure() plt.plot(x_axis, loss_list, linewidth=2.0) plt.xlim((0, epi", "= np.random.rand(num_remain) * config_length meta_training_support[:, 2] = np.random.rand(num_remain) * config_height meta_seed += 1", "as plt from copy import deepcopy from torch.optim import Adam import Utils #", "( epi, num_remain, config_num_of_agents - num_remain, loss)) loss_list.append(deepcopy(loss)) # update the meta parameter", "gcn training_cr_gcm_n = CR_MGC() training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.001) # decrease the learning rate", "# decrease the learning rate as the meta learning moves on if epi", "task meta_training_support = np.zeros((num_remain, 3)) while True: meta_training_support[:, 0] = np.random.rand(num_remain) * config_width", "meta_training_support[:, 2] = np.random.rand(num_remain) * config_height meta_seed += 1 np.random.seed(meta_seed) cf, nc =", "= 
np.zeros((num_remain, 3)) while True: meta_training_support[:, 0] = np.random.rand(num_remain) * config_width meta_training_support[:, 1]", "1 np.random.seed(meta_seed) cf, nc = Utils.check_if_a_connected_graph(meta_training_query, num_remain) if not cf: # print(cf) break", "Utils # the range of the number of remained UAVs meta_type = [i", "lr=0.0001) if epi > 250: training_cr_gcm_n.optimizer = Adam(training_cr_gcm_n.gcn_network.parameters(), lr=0.00001) # generate the support", "range of the number of remained UAVs meta_type = [i for i in", "meta_params = dict(meta_cr_gcm_n.gcn_network.named_parameters()) # param name list param_name = meta_cr_gcm_n.gcn_network.state_dict().keys() # meta training", "-- destroy %d UAVs -- loss %f\" % ( epi, num_remain, config_num_of_agents -", "- num_remain, loss)) loss_list.append(deepcopy(loss)) # update the meta parameter for key in param_name:" ]
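The outer loop above adds the query-set gradient returned by train_query_set_single directly into meta_params, and, once the episodes for a given num_remain finish, dumps those parameters to Meta_Learning_Results/meta_parameters/meta_%d.npy as a name-to-array dict. Below is a minimal sketch of loading such a file back into a fresh CR_MGC network; it assumes the .npy file stores that dict (as the save call above suggests) and that CR_MGC exposes gcn_network as in the loop above. The helper name load_meta_parameters is ours, not part of the original code.

import numpy as np
import torch


def load_meta_parameters(model, num_remain,
                         path='Meta_Learning_Results/meta_parameters/meta_%d.npy'):
    # np.save pickles the name -> ndarray dict into a 0-d object array;
    # .item() recovers the original dict
    saved = np.load(path % num_remain, allow_pickle=True).item()
    state = model.gcn_network.state_dict()
    with torch.no_grad():
        for key, value in saved.items():
            state[key].copy_(torch.from_numpy(value))
    return model


# hypothetical usage: start adaptation from the meta parameters for 150 remaining UAVs
# adapted = load_meta_parameters(CR_MGC(), num_remain=150)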
[ "= raw_dataproc['worker_boot_disk_size'] dataproc.num_worker_nodes = raw_dataproc['num_worker_nodes'] conf.gcp.dataproc = dataproc def process_args(cmd_args): if cmd_args.conf_file is", ") parser.add_argument( '-pl', '--plan', type=bool, default=False, help='plan means to create template instances/files but", "conf_args if conf.cloud_provider == 'gcp': extract_gcp_to_conf(conf) conf.attr_list(True) return conf if __name__ == \"__main__\":", "__name__ == \"__main__\": setup_logging(log_level=logging.DEBUG) datalake_args, other_args = get_datalake_parser().parse_known_args() _conf = process_args(datalake_args) logging.debug('break point')", "'azure'], help='Cloud provider will only mean something if not local:' ) parser.add_argument( '-edb',", "raw_dataproc['master_boot_disk_size'] dataproc.worker_boot_disk_size = raw_dataproc['worker_boot_disk_size'] dataproc.num_worker_nodes = raw_dataproc['num_worker_nodes'] conf.gcp.dataproc = dataproc def process_args(cmd_args): if", "'-cpr', '--cloud_provider', type=str, choices=['gcp', 'aws', 'azure'], help='Cloud provider will only mean something if", "Orchestration Reactive Framework.') parser.add_argument( '-cf', '--conf_file', type=str, help='Full path to config file with", "'-ll', '--log_level', type=LogLevel, choices=list(LogLevel), help='LogLevel: as in Python logging', default=LogLevel.INFO ) return parser", ") return parser def extract_gcp_to_conf(conf): raw = conf.raw_config_args['gcp'] gcp = Conf() gcp.gcp_project =", "gcp gcp_services = raw['services'] if 'dataproc' in gcp_services: raw_dataproc = gcp_services['dataproc'] dataproc =", "return conf if __name__ == \"__main__\": setup_logging(log_level=logging.DEBUG) datalake_args, other_args = get_datalake_parser().parse_known_args() _conf =", ") parser.add_argument( '-cpr', '--cloud_provider', type=str, choices=['gcp', 'aws', 'azure'], help='Cloud provider will only mean", "= named_tuple.plan conf.conf_file = named_tuple.conf_file conf.deploy_env = named_tuple.deploy_env conf.enable_debug = named_tuple.enable_debug conf.enable_dev =", "not local:' ) parser.add_argument( '-edb', '--enable_debug', type=bool, help='Enabling Debug ports for remote debugging:'", "something if not local:' ) parser.add_argument( '-edb', '--enable_debug', type=bool, help='Enabling Debug ports for", "default='qe', help='Deployment environment local means dev refers to git branches ...' ) parser.add_argument(", "conf.yml.tmpl => conf.yml.' ) parser.add_argument( '-e', '--env', type=str, default='qe', help='Deployment environment local means", "= False logging.debug('Configuration File Arguments:') config_items = cmd_args.__dict__.items() for k, v in config_items:", "= os.path.dirname(os.path.realpath(__file__)) cmd_args.conf_file = dir_path + '/data_conf.yaml' log_level = convert_log_level(cmd_args.log_level) logging.basicConfig( format='%(asctime)s %(levelname)s", "'--enable_debug', type=bool, help='Enabling Debug ports for remote debugging:' ) parser.add_argument( '-ll', '--log_level', type=LogLevel,", "is not None: conf_args[k] = v named_tuple = namedtuple(\"Conf1\", conf_args.keys())(*conf_args.values()) conf = Conf()", "help='LogLevel: as in Python logging', default=LogLevel.INFO ) return parser def extract_gcp_to_conf(conf): raw =", "dev refers to git branches ...' 
) parser.add_argument( '-re', '--runtime_env', type=str, default='local-docker', help='Runtime", "raw_dataproc['num_worker_nodes'] conf.gcp.dataproc = dataproc def process_args(cmd_args): if cmd_args.conf_file is None: dir_path = os.path.dirname(os.path.realpath(__file__))", "choices=['gcp', 'aws', 'azure'], help='Cloud provider will only mean something if not local:' )", "in gcp_services: raw_dataproc = gcp_services['dataproc'] dataproc = Conf() dataproc.high_availability = raw_dataproc['high_availability'] dataproc.extra_tags =", "conf if __name__ == \"__main__\": setup_logging(log_level=logging.DEBUG) datalake_args, other_args = get_datalake_parser().parse_known_args() _conf = process_args(datalake_args)", "parser.add_argument( '-pl', '--plan', type=bool, default=False, help='plan means to create template instances/files but not", "= named_tuple.template_ignore_dirs conf.template_variables = named_tuple.template_variables conf.script_variables = named_tuple.script_variables conf.git_super_repo = named_tuple.git_super_repo conf.git_branch =", "= raw_dataproc['worker_machine_type'] dataproc.master_boot_disk_size = raw_dataproc['master_boot_disk_size'] dataproc.worker_boot_disk_size = raw_dataproc['worker_boot_disk_size'] dataproc.num_worker_nodes = raw_dataproc['num_worker_nodes'] conf.gcp.dataproc =", "named_tuple.conf_file conf.deploy_env = named_tuple.deploy_env conf.enable_debug = named_tuple.enable_debug conf.enable_dev = named_tuple.enable_dev conf.deploy_strategy = named_tuple.deploy_strategy", "def process_args(cmd_args): if cmd_args.conf_file is None: dir_path = os.path.dirname(os.path.realpath(__file__)) cmd_args.conf_file = dir_path +", "named_tuple = namedtuple(\"Conf1\", conf_args.keys())(*conf_args.values()) conf = Conf() conf.plan = named_tuple.plan conf.conf_file = named_tuple.conf_file", "raw_dataproc['zone'] dataproc.internal_ip_only = raw_dataproc['internal_ip_only'] dataproc.master_machine_type = raw_dataproc['master_machine_type'] dataproc.worker_machine_type = raw_dataproc['worker_machine_type'] dataproc.master_boot_disk_size = raw_dataproc['master_boot_disk_size']", "= raw_dataproc['extra_tags'] dataproc.region = raw_dataproc['region'] dataproc.zone = raw_dataproc['zone'] dataproc.internal_ip_only = raw_dataproc['internal_ip_only'] dataproc.master_machine_type =", "raw['image_repo_zone'] gcp.is_shared_vpc = raw['is_shared_vpc'] gcp.region = raw['region'] gcp.zone = raw['zone'] gcp.image_repo_zone = raw['image_repo_zone']", "should_print: logging.debug(\"Conf items:\\n______\\n\") [logging.debug(f\"attribute: {k} value: {v}\") for k, v in items] return", "dataproc.zone = raw_dataproc['zone'] dataproc.internal_ip_only = raw_dataproc['internal_ip_only'] dataproc.master_machine_type = raw_dataproc['master_machine_type'] dataproc.worker_machine_type = raw_dataproc['worker_machine_type'] dataproc.master_boot_disk_size", "items = self.__dict__.items() if should_print: logging.debug(\"Conf items:\\n______\\n\") [logging.debug(f\"attribute: {k} value: {v}\") for k,", "import yaml from collections import namedtuple import logging from wielder.util.arguer import LogLevel, convert_log_level", "type=str, choices=['gcp', 'aws', 'azure'], help='Cloud provider will only mean something if not local:'", "named_tuple.enable_debug conf.enable_dev = named_tuple.enable_dev conf.deploy_strategy = named_tuple.deploy_strategy conf.supported_deploy_envs = named_tuple.supported_deploy_envs conf.cloud_provider = named_tuple.cloud_provider", "= dir_path + '/data_conf.yaml' log_level = 
convert_log_level(cmd_args.log_level) logging.basicConfig( format='%(asctime)s %(levelname)s :%(message)s', level=log_level, datefmt='%m/%d/%Y", "conf.yml.' ) parser.add_argument( '-e', '--env', type=str, default='qe', help='Deployment environment local means dev refers", "= raw['image_repo_zone'] gcp.service_accounts = raw['service_accounts'] gcp.network = raw['network'] gcp.subnetwork = raw['subnetwork'] conf.gcp =", "gcp.image_repo_zone = raw['image_repo_zone'] gcp.service_accounts = raw['service_accounts'] gcp.network = raw['network'] gcp.subnetwork = raw['subnetwork'] conf.gcp", "open(cmd_args.conf_file, 'r') as yaml_file: conf_args = yaml.load(yaml_file, Loader=yaml.FullLoader) if not hasattr(conf_args, 'plan'): conf_args['plan']", "= gcp gcp_services = raw['services'] if 'dataproc' in gcp_services: raw_dataproc = gcp_services['dataproc'] dataproc", "gcp.gcp_project = raw['project'] gcp.gcp_image_repo_zone = raw['image_repo_zone'] gcp.is_shared_vpc = raw['is_shared_vpc'] gcp.region = raw['region'] gcp.zone", "v in config_items: if v is not None: conf_args[k] = v named_tuple =", "extract_gcp_to_conf(conf) conf.attr_list(True) return conf if __name__ == \"__main__\": setup_logging(log_level=logging.DEBUG) datalake_args, other_args = get_datalake_parser().parse_known_args()", "local:' ) parser.add_argument( '-edb', '--enable_debug', type=bool, help='Enabling Debug ports for remote debugging:' )", "import setup_logging class Conf: def __init__(self): self.template_ignore_dirs = [] def attr_list(self, should_print=False): items", "eg local-docker, local, gcp, gcp-shared-vpc etc...' ) parser.add_argument( '-cpr', '--cloud_provider', type=str, choices=['gcp', 'aws',", "self.template_ignore_dirs = [] def attr_list(self, should_print=False): items = self.__dict__.items() if should_print: logging.debug(\"Conf items:\\n______\\n\")", "= named_tuple.conf_file conf.deploy_env = named_tuple.deploy_env conf.enable_debug = named_tuple.enable_debug conf.enable_dev = named_tuple.enable_dev conf.deploy_strategy =", "import argparse import yaml from collections import namedtuple import logging from wielder.util.arguer import", "convert_log_level(cmd_args.log_level) logging.basicConfig( format='%(asctime)s %(levelname)s :%(message)s', level=log_level, datefmt='%m/%d/%Y %I:%M:%S %p' ) with open(cmd_args.conf_file, 'r')", "conf_args = yaml.load(yaml_file, Loader=yaml.FullLoader) if not hasattr(conf_args, 'plan'): conf_args['plan'] = False logging.debug('Configuration File", "logging.debug(\"Conf items:\\n______\\n\") [logging.debug(f\"attribute: {k} value: {v}\") for k, v in items] return items", "= conf_args if conf.cloud_provider == 'gcp': extract_gcp_to_conf(conf) conf.attr_list(True) return conf if __name__ ==", ") parser.add_argument( '-e', '--env', type=str, default='qe', help='Deployment environment local means dev refers to", "branches ...' 
) parser.add_argument( '-re', '--runtime_env', type=str, default='local-docker', help='Runtime environment eg local-docker, local,", "= raw['project'] gcp.gcp_image_repo_zone = raw['image_repo_zone'] gcp.is_shared_vpc = raw['is_shared_vpc'] gcp.region = raw['region'] gcp.zone =", "type=bool, default=False, help='plan means to create template instances/files but not deploy them e.g.", "parser.add_argument( '-cpr', '--cloud_provider', type=str, choices=['gcp', 'aws', 'azure'], help='Cloud provider will only mean something", "conf.enable_dev = named_tuple.enable_dev conf.deploy_strategy = named_tuple.deploy_strategy conf.supported_deploy_envs = named_tuple.supported_deploy_envs conf.cloud_provider = named_tuple.cloud_provider conf.template_ignore_dirs", "= Conf() gcp.gcp_project = raw['project'] gcp.gcp_image_repo_zone = raw['image_repo_zone'] gcp.is_shared_vpc = raw['is_shared_vpc'] gcp.region =", "if 'dataproc' in gcp_services: raw_dataproc = gcp_services['dataproc'] dataproc = Conf() dataproc.high_availability = raw_dataproc['high_availability']", "LogLevel, convert_log_level from wielder.util.log_util import setup_logging class Conf: def __init__(self): self.template_ignore_dirs = []", "class Conf: def __init__(self): self.template_ignore_dirs = [] def attr_list(self, should_print=False): items = self.__dict__.items()", "format='%(asctime)s %(levelname)s :%(message)s', level=log_level, datefmt='%m/%d/%Y %I:%M:%S %p' ) with open(cmd_args.conf_file, 'r') as yaml_file:", "gcp_services: raw_dataproc = gcp_services['dataproc'] dataproc = Conf() dataproc.high_availability = raw_dataproc['high_availability'] dataproc.extra_tags = raw_dataproc['extra_tags']", "[] def attr_list(self, should_print=False): items = self.__dict__.items() if should_print: logging.debug(\"Conf items:\\n______\\n\") [logging.debug(f\"attribute: {k}", "= v named_tuple = namedtuple(\"Conf1\", conf_args.keys())(*conf_args.values()) conf = Conf() conf.plan = named_tuple.plan conf.conf_file", "= raw_dataproc['zone'] dataproc.internal_ip_only = raw_dataproc['internal_ip_only'] dataproc.master_machine_type = raw_dataproc['master_machine_type'] dataproc.worker_machine_type = raw_dataproc['worker_machine_type'] dataproc.master_boot_disk_size =", "will only mean something if not local:' ) parser.add_argument( '-edb', '--enable_debug', type=bool, help='Enabling", "config_items: if v is not None: conf_args[k] = v named_tuple = namedtuple(\"Conf1\", conf_args.keys())(*conf_args.values())", "conf.raw_config_args['gcp'] gcp = Conf() gcp.gcp_project = raw['project'] gcp.gcp_image_repo_zone = raw['image_repo_zone'] gcp.is_shared_vpc = raw['is_shared_vpc']", "git branches ...' 
) parser.add_argument( '-re', '--runtime_env', type=str, default='local-docker', help='Runtime environment eg local-docker,", "= raw['zone'] gcp.image_repo_zone = raw['image_repo_zone'] gcp.service_accounts = raw['service_accounts'] gcp.network = raw['network'] gcp.subnetwork =", "= raw['subnetwork'] conf.gcp = gcp gcp_services = raw['services'] if 'dataproc' in gcp_services: raw_dataproc", "raw_dataproc['high_availability'] dataproc.extra_tags = raw_dataproc['extra_tags'] dataproc.region = raw_dataproc['region'] dataproc.zone = raw_dataproc['zone'] dataproc.internal_ip_only = raw_dataproc['internal_ip_only']", "conf.deploy_strategy = named_tuple.deploy_strategy conf.supported_deploy_envs = named_tuple.supported_deploy_envs conf.cloud_provider = named_tuple.cloud_provider conf.template_ignore_dirs = named_tuple.template_ignore_dirs conf.template_variables", "raw_dataproc = gcp_services['dataproc'] dataproc = Conf() dataproc.high_availability = raw_dataproc['high_availability'] dataproc.extra_tags = raw_dataproc['extra_tags'] dataproc.region", "named_tuple.deploy_env conf.enable_debug = named_tuple.enable_debug conf.enable_dev = named_tuple.enable_dev conf.deploy_strategy = named_tuple.deploy_strategy conf.supported_deploy_envs = named_tuple.supported_deploy_envs", "if conf.cloud_provider == 'gcp': extract_gcp_to_conf(conf) conf.attr_list(True) return conf if __name__ == \"__main__\": setup_logging(log_level=logging.DEBUG)", "import LogLevel, convert_log_level from wielder.util.log_util import setup_logging class Conf: def __init__(self): self.template_ignore_dirs =", "Arguments:') config_items = cmd_args.__dict__.items() for k, v in config_items: if v is not", "in Python logging', default=LogLevel.INFO ) return parser def extract_gcp_to_conf(conf): raw = conf.raw_config_args['gcp'] gcp", "etc...' ) parser.add_argument( '-cpr', '--cloud_provider', type=str, choices=['gcp', 'aws', 'azure'], help='Cloud provider will only", "attr_list(self, should_print=False): items = self.__dict__.items() if should_print: logging.debug(\"Conf items:\\n______\\n\") [logging.debug(f\"attribute: {k} value: {v}\")", "= named_tuple.deploy_env conf.enable_debug = named_tuple.enable_debug conf.enable_dev = named_tuple.enable_dev conf.deploy_strategy = named_tuple.deploy_strategy conf.supported_deploy_envs =", "parser.add_argument( '-re', '--runtime_env', type=str, default='local-docker', help='Runtime environment eg local-docker, local, gcp, gcp-shared-vpc etc...'", "[logging.debug(f\"attribute: {k} value: {v}\") for k, v in items] return items def get_datalake_parser():", "conf.gcp = gcp gcp_services = raw['services'] if 'dataproc' in gcp_services: raw_dataproc = gcp_services['dataproc']", "+ '/data_conf.yaml' log_level = convert_log_level(cmd_args.log_level) logging.basicConfig( format='%(asctime)s %(levelname)s :%(message)s', level=log_level, datefmt='%m/%d/%Y %I:%M:%S %p'", "raw['project'] gcp.gcp_image_repo_zone = raw['image_repo_zone'] gcp.is_shared_vpc = raw['is_shared_vpc'] gcp.region = raw['region'] gcp.zone = raw['zone']", "only mean something if not local:' ) parser.add_argument( '-edb', '--enable_debug', type=bool, help='Enabling Debug", "means to create template instances/files but not deploy them e.g. 
conf.yml.tmpl => conf.yml.'", "gcp.is_shared_vpc = raw['is_shared_vpc'] gcp.region = raw['region'] gcp.zone = raw['zone'] gcp.image_repo_zone = raw['image_repo_zone'] gcp.service_accounts", "= raw_dataproc['master_machine_type'] dataproc.worker_machine_type = raw_dataproc['worker_machine_type'] dataproc.master_boot_disk_size = raw_dataproc['master_boot_disk_size'] dataproc.worker_boot_disk_size = raw_dataproc['worker_boot_disk_size'] dataproc.num_worker_nodes =", "items:\\n______\\n\") [logging.debug(f\"attribute: {k} value: {v}\") for k, v in items] return items def", "v named_tuple = namedtuple(\"Conf1\", conf_args.keys())(*conf_args.values()) conf = Conf() conf.plan = named_tuple.plan conf.conf_file =", "cmd_args.conf_file = dir_path + '/data_conf.yaml' log_level = convert_log_level(cmd_args.log_level) logging.basicConfig( format='%(asctime)s %(levelname)s :%(message)s', level=log_level,", "cmd_args.__dict__.items() for k, v in config_items: if v is not None: conf_args[k] =", "= conf.raw_config_args['gcp'] gcp = Conf() gcp.gcp_project = raw['project'] gcp.gcp_image_repo_zone = raw['image_repo_zone'] gcp.is_shared_vpc =", ") parser.add_argument( '-edb', '--enable_debug', type=bool, help='Enabling Debug ports for remote debugging:' ) parser.add_argument(", "wielder.util.arguer import LogLevel, convert_log_level from wielder.util.log_util import setup_logging class Conf: def __init__(self): self.template_ignore_dirs", ") with open(cmd_args.conf_file, 'r') as yaml_file: conf_args = yaml.load(yaml_file, Loader=yaml.FullLoader) if not hasattr(conf_args,", "= Conf() conf.plan = named_tuple.plan conf.conf_file = named_tuple.conf_file conf.deploy_env = named_tuple.deploy_env conf.enable_debug =", "import logging from wielder.util.arguer import LogLevel, convert_log_level from wielder.util.log_util import setup_logging class Conf:", "= convert_log_level(cmd_args.log_level) logging.basicConfig( format='%(asctime)s %(levelname)s :%(message)s', level=log_level, datefmt='%m/%d/%Y %I:%M:%S %p' ) with open(cmd_args.conf_file,", "= raw['service_accounts'] gcp.network = raw['network'] gcp.subnetwork = raw['subnetwork'] conf.gcp = gcp gcp_services =", "File Arguments:') config_items = cmd_args.__dict__.items() for k, v in config_items: if v is", "dataproc.worker_boot_disk_size = raw_dataproc['worker_boot_disk_size'] dataproc.num_worker_nodes = raw_dataproc['num_worker_nodes'] conf.gcp.dataproc = dataproc def process_args(cmd_args): if cmd_args.conf_file", "{v}\") for k, v in items] return items def get_datalake_parser(): parser = argparse.ArgumentParser(description=", "= dataproc def process_args(cmd_args): if cmd_args.conf_file is None: dir_path = os.path.dirname(os.path.realpath(__file__)) cmd_args.conf_file =", "config_items = cmd_args.__dict__.items() for k, v in config_items: if v is not None:", "value: {v}\") for k, v in items] return items def get_datalake_parser(): parser =", "raw['is_shared_vpc'] gcp.region = raw['region'] gcp.zone = raw['zone'] gcp.image_repo_zone = raw['image_repo_zone'] gcp.service_accounts = raw['service_accounts']", "wielder.util.log_util import setup_logging class Conf: def __init__(self): self.template_ignore_dirs = [] def attr_list(self, should_print=False):", ") parser.add_argument( '-ll', '--log_level', type=LogLevel, choices=list(LogLevel), help='LogLevel: as in Python logging', default=LogLevel.INFO )", "conf_args[k] = v named_tuple = namedtuple(\"Conf1\", conf_args.keys())(*conf_args.values()) conf = Conf() conf.plan = named_tuple.plan", "= named_tuple.git_commit 
conf.raw_config_args = conf_args if conf.cloud_provider == 'gcp': extract_gcp_to_conf(conf) conf.attr_list(True) return conf", "help='Enabling Debug ports for remote debugging:' ) parser.add_argument( '-ll', '--log_level', type=LogLevel, choices=list(LogLevel), help='LogLevel:", "Loader=yaml.FullLoader) if not hasattr(conf_args, 'plan'): conf_args['plan'] = False logging.debug('Configuration File Arguments:') config_items =", "raw_dataproc['region'] dataproc.zone = raw_dataproc['zone'] dataproc.internal_ip_only = raw_dataproc['internal_ip_only'] dataproc.master_machine_type = raw_dataproc['master_machine_type'] dataproc.worker_machine_type = raw_dataproc['worker_machine_type']", "items] return items def get_datalake_parser(): parser = argparse.ArgumentParser(description= 'Data Orchestration Reactive Framework.') parser.add_argument(", "to config file with all arguments.\\nCommandline args override those in the file.' )", "self.__dict__.items() if should_print: logging.debug(\"Conf items:\\n______\\n\") [logging.debug(f\"attribute: {k} value: {v}\") for k, v in", "= raw['services'] if 'dataproc' in gcp_services: raw_dataproc = gcp_services['dataproc'] dataproc = Conf() dataproc.high_availability", "conf = Conf() conf.plan = named_tuple.plan conf.conf_file = named_tuple.conf_file conf.deploy_env = named_tuple.deploy_env conf.enable_debug", "the file.' ) parser.add_argument( '-pl', '--plan', type=bool, default=False, help='plan means to create template", "= named_tuple.git_super_repo conf.git_branch = named_tuple.git_branch conf.git_commit = named_tuple.git_commit conf.raw_config_args = conf_args if conf.cloud_provider", "raw['region'] gcp.zone = raw['zone'] gcp.image_repo_zone = raw['image_repo_zone'] gcp.service_accounts = raw['service_accounts'] gcp.network = raw['network']", "Debug ports for remote debugging:' ) parser.add_argument( '-ll', '--log_level', type=LogLevel, choices=list(LogLevel), help='LogLevel: as", "gcp.subnetwork = raw['subnetwork'] conf.gcp = gcp gcp_services = raw['services'] if 'dataproc' in gcp_services:", "conf.template_ignore_dirs = named_tuple.template_ignore_dirs conf.template_variables = named_tuple.template_variables conf.script_variables = named_tuple.script_variables conf.git_super_repo = named_tuple.git_super_repo conf.git_branch", "= named_tuple.cloud_provider conf.template_ignore_dirs = named_tuple.template_ignore_dirs conf.template_variables = named_tuple.template_variables conf.script_variables = named_tuple.script_variables conf.git_super_repo =", "dataproc.region = raw_dataproc['region'] dataproc.zone = raw_dataproc['zone'] dataproc.internal_ip_only = raw_dataproc['internal_ip_only'] dataproc.master_machine_type = raw_dataproc['master_machine_type'] dataproc.worker_machine_type", "create template instances/files but not deploy them e.g. conf.yml.tmpl => conf.yml.' ) parser.add_argument(", "them e.g. conf.yml.tmpl => conf.yml.' ) parser.add_argument( '-e', '--env', type=str, default='qe', help='Deployment environment", "help='Runtime environment eg local-docker, local, gcp, gcp-shared-vpc etc...' ) parser.add_argument( '-cpr', '--cloud_provider', type=str,", "'gcp': extract_gcp_to_conf(conf) conf.attr_list(True) return conf if __name__ == \"__main__\": setup_logging(log_level=logging.DEBUG) datalake_args, other_args =", "def get_datalake_parser(): parser = argparse.ArgumentParser(description= 'Data Orchestration Reactive Framework.') parser.add_argument( '-cf', '--conf_file', type=str,", "arguments.\\nCommandline args override those in the file.' 
) parser.add_argument( '-pl', '--plan', type=bool, default=False,", "instances/files but not deploy them e.g. conf.yml.tmpl => conf.yml.' ) parser.add_argument( '-e', '--env',", "= raw['image_repo_zone'] gcp.is_shared_vpc = raw['is_shared_vpc'] gcp.region = raw['region'] gcp.zone = raw['zone'] gcp.image_repo_zone =", "= raw['network'] gcp.subnetwork = raw['subnetwork'] conf.gcp = gcp gcp_services = raw['services'] if 'dataproc'", "named_tuple.git_commit conf.raw_config_args = conf_args if conf.cloud_provider == 'gcp': extract_gcp_to_conf(conf) conf.attr_list(True) return conf if", "logging from wielder.util.arguer import LogLevel, convert_log_level from wielder.util.log_util import setup_logging class Conf: def", "provider will only mean something if not local:' ) parser.add_argument( '-edb', '--enable_debug', type=bool,", "Framework.') parser.add_argument( '-cf', '--conf_file', type=str, help='Full path to config file with all arguments.\\nCommandline", "logging.debug('Configuration File Arguments:') config_items = cmd_args.__dict__.items() for k, v in config_items: if v", "deploy them e.g. conf.yml.tmpl => conf.yml.' ) parser.add_argument( '-e', '--env', type=str, default='qe', help='Deployment", "= [] def attr_list(self, should_print=False): items = self.__dict__.items() if should_print: logging.debug(\"Conf items:\\n______\\n\") [logging.debug(f\"attribute:", "conf.plan = named_tuple.plan conf.conf_file = named_tuple.conf_file conf.deploy_env = named_tuple.deploy_env conf.enable_debug = named_tuple.enable_debug conf.enable_dev", "def extract_gcp_to_conf(conf): raw = conf.raw_config_args['gcp'] gcp = Conf() gcp.gcp_project = raw['project'] gcp.gcp_image_repo_zone =", "namedtuple(\"Conf1\", conf_args.keys())(*conf_args.values()) conf = Conf() conf.plan = named_tuple.plan conf.conf_file = named_tuple.conf_file conf.deploy_env =", "'-re', '--runtime_env', type=str, default='local-docker', help='Runtime environment eg local-docker, local, gcp, gcp-shared-vpc etc...' )", "help='plan means to create template instances/files but not deploy them e.g. conf.yml.tmpl =>", "%p' ) with open(cmd_args.conf_file, 'r') as yaml_file: conf_args = yaml.load(yaml_file, Loader=yaml.FullLoader) if not", "= raw_dataproc['master_boot_disk_size'] dataproc.worker_boot_disk_size = raw_dataproc['worker_boot_disk_size'] dataproc.num_worker_nodes = raw_dataproc['num_worker_nodes'] conf.gcp.dataproc = dataproc def process_args(cmd_args):", "named_tuple.deploy_strategy conf.supported_deploy_envs = named_tuple.supported_deploy_envs conf.cloud_provider = named_tuple.cloud_provider conf.template_ignore_dirs = named_tuple.template_ignore_dirs conf.template_variables = named_tuple.template_variables", "=> conf.yml.' ) parser.add_argument( '-e', '--env', type=str, default='qe', help='Deployment environment local means dev", "e.g. conf.yml.tmpl => conf.yml.' ) parser.add_argument( '-e', '--env', type=str, default='qe', help='Deployment environment local", "= raw_dataproc['num_worker_nodes'] conf.gcp.dataproc = dataproc def process_args(cmd_args): if cmd_args.conf_file is None: dir_path =", "dir_path = os.path.dirname(os.path.realpath(__file__)) cmd_args.conf_file = dir_path + '/data_conf.yaml' log_level = convert_log_level(cmd_args.log_level) logging.basicConfig( format='%(asctime)s", "in the file.' 
) parser.add_argument( '-pl', '--plan', type=bool, default=False, help='plan means to create", "ports for remote debugging:' ) parser.add_argument( '-ll', '--log_level', type=LogLevel, choices=list(LogLevel), help='LogLevel: as in", "raw['service_accounts'] gcp.network = raw['network'] gcp.subnetwork = raw['subnetwork'] conf.gcp = gcp gcp_services = raw['services']", "k, v in config_items: if v is not None: conf_args[k] = v named_tuple", "conf_args.keys())(*conf_args.values()) conf = Conf() conf.plan = named_tuple.plan conf.conf_file = named_tuple.conf_file conf.deploy_env = named_tuple.deploy_env", "with all arguments.\\nCommandline args override those in the file.' ) parser.add_argument( '-pl', '--plan',", "__init__(self): self.template_ignore_dirs = [] def attr_list(self, should_print=False): items = self.__dict__.items() if should_print: logging.debug(\"Conf", "yaml from collections import namedtuple import logging from wielder.util.arguer import LogLevel, convert_log_level from", "def __init__(self): self.template_ignore_dirs = [] def attr_list(self, should_print=False): items = self.__dict__.items() if should_print:", "conf.template_variables = named_tuple.template_variables conf.script_variables = named_tuple.script_variables conf.git_super_repo = named_tuple.git_super_repo conf.git_branch = named_tuple.git_branch conf.git_commit", "%I:%M:%S %p' ) with open(cmd_args.conf_file, 'r') as yaml_file: conf_args = yaml.load(yaml_file, Loader=yaml.FullLoader) if", "named_tuple.git_branch conf.git_commit = named_tuple.git_commit conf.raw_config_args = conf_args if conf.cloud_provider == 'gcp': extract_gcp_to_conf(conf) conf.attr_list(True)", "named_tuple.git_super_repo conf.git_branch = named_tuple.git_branch conf.git_commit = named_tuple.git_commit conf.raw_config_args = conf_args if conf.cloud_provider ==", "but not deploy them e.g. conf.yml.tmpl => conf.yml.' 
) parser.add_argument( '-e', '--env', type=str,", "return parser def extract_gcp_to_conf(conf): raw = conf.raw_config_args['gcp'] gcp = Conf() gcp.gcp_project = raw['project']", "debugging:' ) parser.add_argument( '-ll', '--log_level', type=LogLevel, choices=list(LogLevel), help='LogLevel: as in Python logging', default=LogLevel.INFO", "def attr_list(self, should_print=False): items = self.__dict__.items() if should_print: logging.debug(\"Conf items:\\n______\\n\") [logging.debug(f\"attribute: {k} value:", "for k, v in config_items: if v is not None: conf_args[k] = v", "= named_tuple.supported_deploy_envs conf.cloud_provider = named_tuple.cloud_provider conf.template_ignore_dirs = named_tuple.template_ignore_dirs conf.template_variables = named_tuple.template_variables conf.script_variables =", "raw_dataproc['internal_ip_only'] dataproc.master_machine_type = raw_dataproc['master_machine_type'] dataproc.worker_machine_type = raw_dataproc['worker_machine_type'] dataproc.master_boot_disk_size = raw_dataproc['master_boot_disk_size'] dataproc.worker_boot_disk_size = raw_dataproc['worker_boot_disk_size']", "dataproc.extra_tags = raw_dataproc['extra_tags'] dataproc.region = raw_dataproc['region'] dataproc.zone = raw_dataproc['zone'] dataproc.internal_ip_only = raw_dataproc['internal_ip_only'] dataproc.master_machine_type", "conf_args['plan'] = False logging.debug('Configuration File Arguments:') config_items = cmd_args.__dict__.items() for k, v in", "'--log_level', type=LogLevel, choices=list(LogLevel), help='LogLevel: as in Python logging', default=LogLevel.INFO ) return parser def", "with open(cmd_args.conf_file, 'r') as yaml_file: conf_args = yaml.load(yaml_file, Loader=yaml.FullLoader) if not hasattr(conf_args, 'plan'):", "dataproc.num_worker_nodes = raw_dataproc['num_worker_nodes'] conf.gcp.dataproc = dataproc def process_args(cmd_args): if cmd_args.conf_file is None: dir_path", "'plan'): conf_args['plan'] = False logging.debug('Configuration File Arguments:') config_items = cmd_args.__dict__.items() for k, v", "Conf() dataproc.high_availability = raw_dataproc['high_availability'] dataproc.extra_tags = raw_dataproc['extra_tags'] dataproc.region = raw_dataproc['region'] dataproc.zone = raw_dataproc['zone']", "choices=list(LogLevel), help='LogLevel: as in Python logging', default=LogLevel.INFO ) return parser def extract_gcp_to_conf(conf): raw", "raw = conf.raw_config_args['gcp'] gcp = Conf() gcp.gcp_project = raw['project'] gcp.gcp_image_repo_zone = raw['image_repo_zone'] gcp.is_shared_vpc", "override those in the file.' ) parser.add_argument( '-pl', '--plan', type=bool, default=False, help='plan means", "not deploy them e.g. conf.yml.tmpl => conf.yml.' 
) parser.add_argument( '-e', '--env', type=str, default='qe',", "Conf: def __init__(self): self.template_ignore_dirs = [] def attr_list(self, should_print=False): items = self.__dict__.items() if", "'Data Orchestration Reactive Framework.') parser.add_argument( '-cf', '--conf_file', type=str, help='Full path to config file", "gcp_services = raw['services'] if 'dataproc' in gcp_services: raw_dataproc = gcp_services['dataproc'] dataproc = Conf()", "v is not None: conf_args[k] = v named_tuple = namedtuple(\"Conf1\", conf_args.keys())(*conf_args.values()) conf =", "not hasattr(conf_args, 'plan'): conf_args['plan'] = False logging.debug('Configuration File Arguments:') config_items = cmd_args.__dict__.items() for", "'-edb', '--enable_debug', type=bool, help='Enabling Debug ports for remote debugging:' ) parser.add_argument( '-ll', '--log_level',", "conf.raw_config_args = conf_args if conf.cloud_provider == 'gcp': extract_gcp_to_conf(conf) conf.attr_list(True) return conf if __name__", "python __author__ = '<NAME>' import os import argparse import yaml from collections import", "None: conf_args[k] = v named_tuple = namedtuple(\"Conf1\", conf_args.keys())(*conf_args.values()) conf = Conf() conf.plan =", "conf.git_super_repo = named_tuple.git_super_repo conf.git_branch = named_tuple.git_branch conf.git_commit = named_tuple.git_commit conf.raw_config_args = conf_args if", "gcp.gcp_image_repo_zone = raw['image_repo_zone'] gcp.is_shared_vpc = raw['is_shared_vpc'] gcp.region = raw['region'] gcp.zone = raw['zone'] gcp.image_repo_zone", "= named_tuple.deploy_strategy conf.supported_deploy_envs = named_tuple.supported_deploy_envs conf.cloud_provider = named_tuple.cloud_provider conf.template_ignore_dirs = named_tuple.template_ignore_dirs conf.template_variables =", "v in items] return items def get_datalake_parser(): parser = argparse.ArgumentParser(description= 'Data Orchestration Reactive", "'-e', '--env', type=str, default='qe', help='Deployment environment local means dev refers to git branches", "= named_tuple.git_branch conf.git_commit = named_tuple.git_commit conf.raw_config_args = conf_args if conf.cloud_provider == 'gcp': extract_gcp_to_conf(conf)", "'--runtime_env', type=str, default='local-docker', help='Runtime environment eg local-docker, local, gcp, gcp-shared-vpc etc...' ) parser.add_argument(", "datefmt='%m/%d/%Y %I:%M:%S %p' ) with open(cmd_args.conf_file, 'r') as yaml_file: conf_args = yaml.load(yaml_file, Loader=yaml.FullLoader)", "file.' ) parser.add_argument( '-pl', '--plan', type=bool, default=False, help='plan means to create template instances/files", "conf.cloud_provider == 'gcp': extract_gcp_to_conf(conf) conf.attr_list(True) return conf if __name__ == \"__main__\": setup_logging(log_level=logging.DEBUG) datalake_args,", "gcp.region = raw['region'] gcp.zone = raw['zone'] gcp.image_repo_zone = raw['image_repo_zone'] gcp.service_accounts = raw['service_accounts'] gcp.network", "__author__ = '<NAME>' import os import argparse import yaml from collections import namedtuple", "help='Deployment environment local means dev refers to git branches ...' 
) parser.add_argument( '-re',", "= named_tuple.script_variables conf.git_super_repo = named_tuple.git_super_repo conf.git_branch = named_tuple.git_branch conf.git_commit = named_tuple.git_commit conf.raw_config_args =", "type=bool, help='Enabling Debug ports for remote debugging:' ) parser.add_argument( '-ll', '--log_level', type=LogLevel, choices=list(LogLevel),", "dataproc def process_args(cmd_args): if cmd_args.conf_file is None: dir_path = os.path.dirname(os.path.realpath(__file__)) cmd_args.conf_file = dir_path", "cmd_args.conf_file is None: dir_path = os.path.dirname(os.path.realpath(__file__)) cmd_args.conf_file = dir_path + '/data_conf.yaml' log_level =", "conf.cloud_provider = named_tuple.cloud_provider conf.template_ignore_dirs = named_tuple.template_ignore_dirs conf.template_variables = named_tuple.template_variables conf.script_variables = named_tuple.script_variables conf.git_super_repo", "'r') as yaml_file: conf_args = yaml.load(yaml_file, Loader=yaml.FullLoader) if not hasattr(conf_args, 'plan'): conf_args['plan'] =", "'--plan', type=bool, default=False, help='plan means to create template instances/files but not deploy them", "as in Python logging', default=LogLevel.INFO ) return parser def extract_gcp_to_conf(conf): raw = conf.raw_config_args['gcp']", "raw['services'] if 'dataproc' in gcp_services: raw_dataproc = gcp_services['dataproc'] dataproc = Conf() dataproc.high_availability =", "conf.enable_debug = named_tuple.enable_debug conf.enable_dev = named_tuple.enable_dev conf.deploy_strategy = named_tuple.deploy_strategy conf.supported_deploy_envs = named_tuple.supported_deploy_envs conf.cloud_provider", "template instances/files but not deploy them e.g. conf.yml.tmpl => conf.yml.' ) parser.add_argument( '-e',", "'-pl', '--plan', type=bool, default=False, help='plan means to create template instances/files but not deploy", "as yaml_file: conf_args = yaml.load(yaml_file, Loader=yaml.FullLoader) if not hasattr(conf_args, 'plan'): conf_args['plan'] = False", "if should_print: logging.debug(\"Conf items:\\n______\\n\") [logging.debug(f\"attribute: {k} value: {v}\") for k, v in items]", "= gcp_services['dataproc'] dataproc = Conf() dataproc.high_availability = raw_dataproc['high_availability'] dataproc.extra_tags = raw_dataproc['extra_tags'] dataproc.region =", "logging', default=LogLevel.INFO ) return parser def extract_gcp_to_conf(conf): raw = conf.raw_config_args['gcp'] gcp = Conf()", "'<NAME>' import os import argparse import yaml from collections import namedtuple import logging", "from wielder.util.log_util import setup_logging class Conf: def __init__(self): self.template_ignore_dirs = [] def attr_list(self,", "parser.add_argument( '-ll', '--log_level', type=LogLevel, choices=list(LogLevel), help='LogLevel: as in Python logging', default=LogLevel.INFO ) return", "from wielder.util.arguer import LogLevel, convert_log_level from wielder.util.log_util import setup_logging class Conf: def __init__(self):", "local means dev refers to git branches ...' 
) parser.add_argument( '-re', '--runtime_env', type=str,", "raw['zone'] gcp.image_repo_zone = raw['image_repo_zone'] gcp.service_accounts = raw['service_accounts'] gcp.network = raw['network'] gcp.subnetwork = raw['subnetwork']", "if cmd_args.conf_file is None: dir_path = os.path.dirname(os.path.realpath(__file__)) cmd_args.conf_file = dir_path + '/data_conf.yaml' log_level", "conf.conf_file = named_tuple.conf_file conf.deploy_env = named_tuple.deploy_env conf.enable_debug = named_tuple.enable_debug conf.enable_dev = named_tuple.enable_dev conf.deploy_strategy", "named_tuple.template_ignore_dirs conf.template_variables = named_tuple.template_variables conf.script_variables = named_tuple.script_variables conf.git_super_repo = named_tuple.git_super_repo conf.git_branch = named_tuple.git_branch", "gcp, gcp-shared-vpc etc...' ) parser.add_argument( '-cpr', '--cloud_provider', type=str, choices=['gcp', 'aws', 'azure'], help='Cloud provider", "gcp.network = raw['network'] gcp.subnetwork = raw['subnetwork'] conf.gcp = gcp gcp_services = raw['services'] if", "log_level = convert_log_level(cmd_args.log_level) logging.basicConfig( format='%(asctime)s %(levelname)s :%(message)s', level=log_level, datefmt='%m/%d/%Y %I:%M:%S %p' ) with", "refers to git branches ...' ) parser.add_argument( '-re', '--runtime_env', type=str, default='local-docker', help='Runtime environment", "raw_dataproc['worker_machine_type'] dataproc.master_boot_disk_size = raw_dataproc['master_boot_disk_size'] dataproc.worker_boot_disk_size = raw_dataproc['worker_boot_disk_size'] dataproc.num_worker_nodes = raw_dataproc['num_worker_nodes'] conf.gcp.dataproc = dataproc", "'/data_conf.yaml' log_level = convert_log_level(cmd_args.log_level) logging.basicConfig( format='%(asctime)s %(levelname)s :%(message)s', level=log_level, datefmt='%m/%d/%Y %I:%M:%S %p' )", "Reactive Framework.') parser.add_argument( '-cf', '--conf_file', type=str, help='Full path to config file with all", "dataproc.master_boot_disk_size = raw_dataproc['master_boot_disk_size'] dataproc.worker_boot_disk_size = raw_dataproc['worker_boot_disk_size'] dataproc.num_worker_nodes = raw_dataproc['num_worker_nodes'] conf.gcp.dataproc = dataproc def", "= cmd_args.__dict__.items() for k, v in config_items: if v is not None: conf_args[k]", "type=str, default='qe', help='Deployment environment local means dev refers to git branches ...' )", "None: dir_path = os.path.dirname(os.path.realpath(__file__)) cmd_args.conf_file = dir_path + '/data_conf.yaml' log_level = convert_log_level(cmd_args.log_level) logging.basicConfig(", "= raw_dataproc['region'] dataproc.zone = raw_dataproc['zone'] dataproc.internal_ip_only = raw_dataproc['internal_ip_only'] dataproc.master_machine_type = raw_dataproc['master_machine_type'] dataproc.worker_machine_type =", "== \"__main__\": setup_logging(log_level=logging.DEBUG) datalake_args, other_args = get_datalake_parser().parse_known_args() _conf = process_args(datalake_args) logging.debug('break point') logging.info(f\"datalake_args:\\n{datalake_args}\\n\")", "environment eg local-docker, local, gcp, gcp-shared-vpc etc...' 
) parser.add_argument( '-cpr', '--cloud_provider', type=str, choices=['gcp',", "k, v in items] return items def get_datalake_parser(): parser = argparse.ArgumentParser(description= 'Data Orchestration", "conf.git_branch = named_tuple.git_branch conf.git_commit = named_tuple.git_commit conf.raw_config_args = conf_args if conf.cloud_provider == 'gcp':", "get_datalake_parser(): parser = argparse.ArgumentParser(description= 'Data Orchestration Reactive Framework.') parser.add_argument( '-cf', '--conf_file', type=str, help='Full", "= raw_dataproc['high_availability'] dataproc.extra_tags = raw_dataproc['extra_tags'] dataproc.region = raw_dataproc['region'] dataproc.zone = raw_dataproc['zone'] dataproc.internal_ip_only =", "to create template instances/files but not deploy them e.g. conf.yml.tmpl => conf.yml.' )", "collections import namedtuple import logging from wielder.util.arguer import LogLevel, convert_log_level from wielder.util.log_util import", "dir_path + '/data_conf.yaml' log_level = convert_log_level(cmd_args.log_level) logging.basicConfig( format='%(asctime)s %(levelname)s :%(message)s', level=log_level, datefmt='%m/%d/%Y %I:%M:%S", "dataproc.master_machine_type = raw_dataproc['master_machine_type'] dataproc.worker_machine_type = raw_dataproc['worker_machine_type'] dataproc.master_boot_disk_size = raw_dataproc['master_boot_disk_size'] dataproc.worker_boot_disk_size = raw_dataproc['worker_boot_disk_size'] dataproc.num_worker_nodes", "setup_logging class Conf: def __init__(self): self.template_ignore_dirs = [] def attr_list(self, should_print=False): items =", "gcp-shared-vpc etc...' ) parser.add_argument( '-cpr', '--cloud_provider', type=str, choices=['gcp', 'aws', 'azure'], help='Cloud provider will", "default=False, help='plan means to create template instances/files but not deploy them e.g. conf.yml.tmpl", "local-docker, local, gcp, gcp-shared-vpc etc...' 
) parser.add_argument( '-cpr', '--cloud_provider', type=str, choices=['gcp', 'aws', 'azure'],", ":%(message)s', level=log_level, datefmt='%m/%d/%Y %I:%M:%S %p' ) with open(cmd_args.conf_file, 'r') as yaml_file: conf_args =", "raw['subnetwork'] conf.gcp = gcp gcp_services = raw['services'] if 'dataproc' in gcp_services: raw_dataproc =", "items def get_datalake_parser(): parser = argparse.ArgumentParser(description= 'Data Orchestration Reactive Framework.') parser.add_argument( '-cf', '--conf_file',", "help='Full path to config file with all arguments.\\nCommandline args override those in the", "mean something if not local:' ) parser.add_argument( '-edb', '--enable_debug', type=bool, help='Enabling Debug ports", "yaml.load(yaml_file, Loader=yaml.FullLoader) if not hasattr(conf_args, 'plan'): conf_args['plan'] = False logging.debug('Configuration File Arguments:') config_items", "= self.__dict__.items() if should_print: logging.debug(\"Conf items:\\n______\\n\") [logging.debug(f\"attribute: {k} value: {v}\") for k, v", "conf.supported_deploy_envs = named_tuple.supported_deploy_envs conf.cloud_provider = named_tuple.cloud_provider conf.template_ignore_dirs = named_tuple.template_ignore_dirs conf.template_variables = named_tuple.template_variables conf.script_variables", "'--cloud_provider', type=str, choices=['gcp', 'aws', 'azure'], help='Cloud provider will only mean something if not", "False logging.debug('Configuration File Arguments:') config_items = cmd_args.__dict__.items() for k, v in config_items: if", "= '<NAME>' import os import argparse import yaml from collections import namedtuple import", "'-cf', '--conf_file', type=str, help='Full path to config file with all arguments.\\nCommandline args override", "gcp.service_accounts = raw['service_accounts'] gcp.network = raw['network'] gcp.subnetwork = raw['subnetwork'] conf.gcp = gcp gcp_services", "named_tuple.cloud_provider conf.template_ignore_dirs = named_tuple.template_ignore_dirs conf.template_variables = named_tuple.template_variables conf.script_variables = named_tuple.script_variables conf.git_super_repo = named_tuple.git_super_repo", "= argparse.ArgumentParser(description= 'Data Orchestration Reactive Framework.') parser.add_argument( '-cf', '--conf_file', type=str, help='Full path to", "not None: conf_args[k] = v named_tuple = namedtuple(\"Conf1\", conf_args.keys())(*conf_args.values()) conf = Conf() conf.plan", "'dataproc' in gcp_services: raw_dataproc = gcp_services['dataproc'] dataproc = Conf() dataproc.high_availability = raw_dataproc['high_availability'] dataproc.extra_tags", "raw_dataproc['worker_boot_disk_size'] dataproc.num_worker_nodes = raw_dataproc['num_worker_nodes'] conf.gcp.dataproc = dataproc def process_args(cmd_args): if cmd_args.conf_file is None:", "process_args(cmd_args): if cmd_args.conf_file is None: dir_path = os.path.dirname(os.path.realpath(__file__)) cmd_args.conf_file = dir_path + '/data_conf.yaml'", "import os import argparse import yaml from collections import namedtuple import logging from", "dataproc.high_availability = raw_dataproc['high_availability'] dataproc.extra_tags = raw_dataproc['extra_tags'] dataproc.region = raw_dataproc['region'] dataproc.zone = raw_dataproc['zone'] dataproc.internal_ip_only", "gcp.zone = raw['zone'] gcp.image_repo_zone = raw['image_repo_zone'] gcp.service_accounts = raw['service_accounts'] gcp.network = raw['network'] gcp.subnetwork", "if v is not None: conf_args[k] = v named_tuple = namedtuple(\"Conf1\", conf_args.keys())(*conf_args.values()) conf", "dataproc = 
Conf() dataproc.high_availability = raw_dataproc['high_availability'] dataproc.extra_tags = raw_dataproc['extra_tags'] dataproc.region = raw_dataproc['region'] dataproc.zone", "{k} value: {v}\") for k, v in items] return items def get_datalake_parser(): parser", "config file with all arguments.\\nCommandline args override those in the file.' ) parser.add_argument(", "= yaml.load(yaml_file, Loader=yaml.FullLoader) if not hasattr(conf_args, 'plan'): conf_args['plan'] = False logging.debug('Configuration File Arguments:')", "convert_log_level from wielder.util.log_util import setup_logging class Conf: def __init__(self): self.template_ignore_dirs = [] def", "in items] return items def get_datalake_parser(): parser = argparse.ArgumentParser(description= 'Data Orchestration Reactive Framework.')", "for k, v in items] return items def get_datalake_parser(): parser = argparse.ArgumentParser(description= 'Data", "= named_tuple.enable_dev conf.deploy_strategy = named_tuple.deploy_strategy conf.supported_deploy_envs = named_tuple.supported_deploy_envs conf.cloud_provider = named_tuple.cloud_provider conf.template_ignore_dirs =", "conf.git_commit = named_tuple.git_commit conf.raw_config_args = conf_args if conf.cloud_provider == 'gcp': extract_gcp_to_conf(conf) conf.attr_list(True) return", "from collections import namedtuple import logging from wielder.util.arguer import LogLevel, convert_log_level from wielder.util.log_util", "type=LogLevel, choices=list(LogLevel), help='LogLevel: as in Python logging', default=LogLevel.INFO ) return parser def extract_gcp_to_conf(conf):", "should_print=False): items = self.__dict__.items() if should_print: logging.debug(\"Conf items:\\n______\\n\") [logging.debug(f\"attribute: {k} value: {v}\") for", "= named_tuple.template_variables conf.script_variables = named_tuple.script_variables conf.git_super_repo = named_tuple.git_super_repo conf.git_branch = named_tuple.git_branch conf.git_commit =", "logging.basicConfig( format='%(asctime)s %(levelname)s :%(message)s', level=log_level, datefmt='%m/%d/%Y %I:%M:%S %p' ) with open(cmd_args.conf_file, 'r') as", "named_tuple.template_variables conf.script_variables = named_tuple.script_variables conf.git_super_repo = named_tuple.git_super_repo conf.git_branch = named_tuple.git_branch conf.git_commit = named_tuple.git_commit", "to git branches ...' ) parser.add_argument( '-re', '--runtime_env', type=str, default='local-docker', help='Runtime environment eg", "#!/usr/bin/env python __author__ = '<NAME>' import os import argparse import yaml from collections", "Python logging', default=LogLevel.INFO ) return parser def extract_gcp_to_conf(conf): raw = conf.raw_config_args['gcp'] gcp =", "= raw['region'] gcp.zone = raw['zone'] gcp.image_repo_zone = raw['image_repo_zone'] gcp.service_accounts = raw['service_accounts'] gcp.network =", "'--conf_file', type=str, help='Full path to config file with all arguments.\\nCommandline args override those", "local, gcp, gcp-shared-vpc etc...' 
) parser.add_argument( '-cpr', '--cloud_provider', type=str, choices=['gcp', 'aws', 'azure'], help='Cloud", "type=str, help='Full path to config file with all arguments.\\nCommandline args override those in", "conf.attr_list(True) return conf if __name__ == \"__main__\": setup_logging(log_level=logging.DEBUG) datalake_args, other_args = get_datalake_parser().parse_known_args() _conf", "= raw_dataproc['internal_ip_only'] dataproc.master_machine_type = raw_dataproc['master_machine_type'] dataproc.worker_machine_type = raw_dataproc['worker_machine_type'] dataproc.master_boot_disk_size = raw_dataproc['master_boot_disk_size'] dataproc.worker_boot_disk_size =", "return items def get_datalake_parser(): parser = argparse.ArgumentParser(description= 'Data Orchestration Reactive Framework.') parser.add_argument( '-cf',", "named_tuple.supported_deploy_envs conf.cloud_provider = named_tuple.cloud_provider conf.template_ignore_dirs = named_tuple.template_ignore_dirs conf.template_variables = named_tuple.template_variables conf.script_variables = named_tuple.script_variables", "those in the file.' ) parser.add_argument( '-pl', '--plan', type=bool, default=False, help='plan means to", "if not hasattr(conf_args, 'plan'): conf_args['plan'] = False logging.debug('Configuration File Arguments:') config_items = cmd_args.__dict__.items()", "environment local means dev refers to git branches ...' ) parser.add_argument( '-re', '--runtime_env',", ") parser.add_argument( '-re', '--runtime_env', type=str, default='local-docker', help='Runtime environment eg local-docker, local, gcp, gcp-shared-vpc", "parser.add_argument( '-edb', '--enable_debug', type=bool, help='Enabling Debug ports for remote debugging:' ) parser.add_argument( '-ll',", "default='local-docker', help='Runtime environment eg local-docker, local, gcp, gcp-shared-vpc etc...' ) parser.add_argument( '-cpr', '--cloud_provider',", "argparse.ArgumentParser(description= 'Data Orchestration Reactive Framework.') parser.add_argument( '-cf', '--conf_file', type=str, help='Full path to config", "gcp = Conf() gcp.gcp_project = raw['project'] gcp.gcp_image_repo_zone = raw['image_repo_zone'] gcp.is_shared_vpc = raw['is_shared_vpc'] gcp.region", "named_tuple.script_variables conf.git_super_repo = named_tuple.git_super_repo conf.git_branch = named_tuple.git_branch conf.git_commit = named_tuple.git_commit conf.raw_config_args = conf_args", "yaml_file: conf_args = yaml.load(yaml_file, Loader=yaml.FullLoader) if not hasattr(conf_args, 'plan'): conf_args['plan'] = False logging.debug('Configuration", "all arguments.\\nCommandline args override those in the file.' 
) parser.add_argument( '-pl', '--plan', type=bool,", "if __name__ == \"__main__\": setup_logging(log_level=logging.DEBUG) datalake_args, other_args = get_datalake_parser().parse_known_args() _conf = process_args(datalake_args) logging.debug('break", "dataproc.worker_machine_type = raw_dataproc['worker_machine_type'] dataproc.master_boot_disk_size = raw_dataproc['master_boot_disk_size'] dataproc.worker_boot_disk_size = raw_dataproc['worker_boot_disk_size'] dataproc.num_worker_nodes = raw_dataproc['num_worker_nodes'] conf.gcp.dataproc", "%(levelname)s :%(message)s', level=log_level, datefmt='%m/%d/%Y %I:%M:%S %p' ) with open(cmd_args.conf_file, 'r') as yaml_file: conf_args", "namedtuple import logging from wielder.util.arguer import LogLevel, convert_log_level from wielder.util.log_util import setup_logging class", "Conf() conf.plan = named_tuple.plan conf.conf_file = named_tuple.conf_file conf.deploy_env = named_tuple.deploy_env conf.enable_debug = named_tuple.enable_debug", "conf.deploy_env = named_tuple.deploy_env conf.enable_debug = named_tuple.enable_debug conf.enable_dev = named_tuple.enable_dev conf.deploy_strategy = named_tuple.deploy_strategy conf.supported_deploy_envs", "in config_items: if v is not None: conf_args[k] = v named_tuple = namedtuple(\"Conf1\",", "= namedtuple(\"Conf1\", conf_args.keys())(*conf_args.values()) conf = Conf() conf.plan = named_tuple.plan conf.conf_file = named_tuple.conf_file conf.deploy_env", "file with all arguments.\\nCommandline args override those in the file.' ) parser.add_argument( '-pl',", "default=LogLevel.INFO ) return parser def extract_gcp_to_conf(conf): raw = conf.raw_config_args['gcp'] gcp = Conf() gcp.gcp_project", "named_tuple.enable_dev conf.deploy_strategy = named_tuple.deploy_strategy conf.supported_deploy_envs = named_tuple.supported_deploy_envs conf.cloud_provider = named_tuple.cloud_provider conf.template_ignore_dirs = named_tuple.template_ignore_dirs", "means dev refers to git branches ...' 
) parser.add_argument( '-re', '--runtime_env', type=str, default='local-docker',", "\"__main__\": setup_logging(log_level=logging.DEBUG) datalake_args, other_args = get_datalake_parser().parse_known_args() _conf = process_args(datalake_args) logging.debug('break point') logging.info(f\"datalake_args:\\n{datalake_args}\\n\") logging.info(f\"other_args:\\n{other_args}\")", "Conf() gcp.gcp_project = raw['project'] gcp.gcp_image_repo_zone = raw['image_repo_zone'] gcp.is_shared_vpc = raw['is_shared_vpc'] gcp.region = raw['region']", "'--env', type=str, default='qe', help='Deployment environment local means dev refers to git branches ...'", "for remote debugging:' ) parser.add_argument( '-ll', '--log_level', type=LogLevel, choices=list(LogLevel), help='LogLevel: as in Python", "is None: dir_path = os.path.dirname(os.path.realpath(__file__)) cmd_args.conf_file = dir_path + '/data_conf.yaml' log_level = convert_log_level(cmd_args.log_level)", "conf.gcp.dataproc = dataproc def process_args(cmd_args): if cmd_args.conf_file is None: dir_path = os.path.dirname(os.path.realpath(__file__)) cmd_args.conf_file", "dataproc.internal_ip_only = raw_dataproc['internal_ip_only'] dataproc.master_machine_type = raw_dataproc['master_machine_type'] dataproc.worker_machine_type = raw_dataproc['worker_machine_type'] dataproc.master_boot_disk_size = raw_dataproc['master_boot_disk_size'] dataproc.worker_boot_disk_size", "help='Cloud provider will only mean something if not local:' ) parser.add_argument( '-edb', '--enable_debug',", "hasattr(conf_args, 'plan'): conf_args['plan'] = False logging.debug('Configuration File Arguments:') config_items = cmd_args.__dict__.items() for k,", "import namedtuple import logging from wielder.util.arguer import LogLevel, convert_log_level from wielder.util.log_util import setup_logging", "if not local:' ) parser.add_argument( '-edb', '--enable_debug', type=bool, help='Enabling Debug ports for remote", "conf.script_variables = named_tuple.script_variables conf.git_super_repo = named_tuple.git_super_repo conf.git_branch = named_tuple.git_branch conf.git_commit = named_tuple.git_commit conf.raw_config_args", "'aws', 'azure'], help='Cloud provider will only mean something if not local:' ) parser.add_argument(", "type=str, default='local-docker', help='Runtime environment eg local-docker, local, gcp, gcp-shared-vpc etc...' ) parser.add_argument( '-cpr',", "path to config file with all arguments.\\nCommandline args override those in the file.'", "os.path.dirname(os.path.realpath(__file__)) cmd_args.conf_file = dir_path + '/data_conf.yaml' log_level = convert_log_level(cmd_args.log_level) logging.basicConfig( format='%(asctime)s %(levelname)s :%(message)s',", "extract_gcp_to_conf(conf): raw = conf.raw_config_args['gcp'] gcp = Conf() gcp.gcp_project = raw['project'] gcp.gcp_image_repo_zone = raw['image_repo_zone']", "= raw['is_shared_vpc'] gcp.region = raw['region'] gcp.zone = raw['zone'] gcp.image_repo_zone = raw['image_repo_zone'] gcp.service_accounts =", "os import argparse import yaml from collections import namedtuple import logging from wielder.util.arguer", "parser = argparse.ArgumentParser(description= 'Data Orchestration Reactive Framework.') parser.add_argument( '-cf', '--conf_file', type=str, help='Full path", "...' 
) parser.add_argument( '-re', '--runtime_env', type=str, default='local-docker', help='Runtime environment eg local-docker, local, gcp,", "gcp_services['dataproc'] dataproc = Conf() dataproc.high_availability = raw_dataproc['high_availability'] dataproc.extra_tags = raw_dataproc['extra_tags'] dataproc.region = raw_dataproc['region']", "argparse import yaml from collections import namedtuple import logging from wielder.util.arguer import LogLevel,", "raw_dataproc['master_machine_type'] dataproc.worker_machine_type = raw_dataproc['worker_machine_type'] dataproc.master_boot_disk_size = raw_dataproc['master_boot_disk_size'] dataproc.worker_boot_disk_size = raw_dataproc['worker_boot_disk_size'] dataproc.num_worker_nodes = raw_dataproc['num_worker_nodes']", "level=log_level, datefmt='%m/%d/%Y %I:%M:%S %p' ) with open(cmd_args.conf_file, 'r') as yaml_file: conf_args = yaml.load(yaml_file,", "== 'gcp': extract_gcp_to_conf(conf) conf.attr_list(True) return conf if __name__ == \"__main__\": setup_logging(log_level=logging.DEBUG) datalake_args, other_args", "parser def extract_gcp_to_conf(conf): raw = conf.raw_config_args['gcp'] gcp = Conf() gcp.gcp_project = raw['project'] gcp.gcp_image_repo_zone", "parser.add_argument( '-e', '--env', type=str, default='qe', help='Deployment environment local means dev refers to git", "raw['image_repo_zone'] gcp.service_accounts = raw['service_accounts'] gcp.network = raw['network'] gcp.subnetwork = raw['subnetwork'] conf.gcp = gcp", "= named_tuple.enable_debug conf.enable_dev = named_tuple.enable_dev conf.deploy_strategy = named_tuple.deploy_strategy conf.supported_deploy_envs = named_tuple.supported_deploy_envs conf.cloud_provider =", "args override those in the file.' ) parser.add_argument( '-pl', '--plan', type=bool, default=False, help='plan", "raw['network'] gcp.subnetwork = raw['subnetwork'] conf.gcp = gcp gcp_services = raw['services'] if 'dataproc' in", "named_tuple.plan conf.conf_file = named_tuple.conf_file conf.deploy_env = named_tuple.deploy_env conf.enable_debug = named_tuple.enable_debug conf.enable_dev = named_tuple.enable_dev", "parser.add_argument( '-cf', '--conf_file', type=str, help='Full path to config file with all arguments.\\nCommandline args", "raw_dataproc['extra_tags'] dataproc.region = raw_dataproc['region'] dataproc.zone = raw_dataproc['zone'] dataproc.internal_ip_only = raw_dataproc['internal_ip_only'] dataproc.master_machine_type = raw_dataproc['master_machine_type']", "remote debugging:' ) parser.add_argument( '-ll', '--log_level', type=LogLevel, choices=list(LogLevel), help='LogLevel: as in Python logging',", "= Conf() dataproc.high_availability = raw_dataproc['high_availability'] dataproc.extra_tags = raw_dataproc['extra_tags'] dataproc.region = raw_dataproc['region'] dataproc.zone =" ]
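The precedence rule in process_args above (YAML file values first, then any command-line flag that was actually supplied) can be shown in isolation. The sketch below is illustrative only: merge_cli_over_yaml, its two flags, and the YAML keys are hypothetical stand-ins for this demo, not part of the wielder API.

# Standalone sketch (hypothetical names) of the same precedence rule used by
# process_args: values come from a YAML file first, and any command-line
# argument that was actually passed (i.e. is not None) overrides them.
import argparse
import os
import tempfile

import yaml


def merge_cli_over_yaml(argv, yaml_path):
    parser = argparse.ArgumentParser(description='Precedence demo.')
    parser.add_argument('-re', '--runtime_env', type=str, default=None)
    parser.add_argument('-cpr', '--cloud_provider', type=str, default=None)
    cmd_args = parser.parse_args(argv)

    with open(yaml_path, 'r') as yaml_file:
        conf_args = yaml.load(yaml_file, Loader=yaml.FullLoader)

    # Only explicitly supplied CLI values win; unset flags stay None and are skipped.
    for k, v in cmd_args.__dict__.items():
        if v is not None:
            conf_args[k] = v
    return conf_args


if __name__ == '__main__':
    with tempfile.NamedTemporaryFile('w', suffix='.yaml', delete=False) as f:
        yaml.dump({'runtime_env': 'local-docker', 'cloud_provider': 'gcp'}, f)
    merged = merge_cli_over_yaml(['-re', 'gcp-shared-vpc'], f.name)
    os.remove(f.name)
    assert merged == {'runtime_env': 'gcp-shared-vpc', 'cloud_provider': 'gcp'}

Note that skipping None values is what lets file defaults survive when a flag is omitted; a flag with a non-None default (such as --runtime_env in the original parser) always overrides the file value.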
[ "arguments in a # differing order cache and return differing values. assert (", "# ....................{ IMPORTS }.................... #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # WARNING: To raise human-readable test errors, avoid", "to memoize a callable accepting one or more # variadic positional parameters fails", "# these parameters to exercise this decorator's conditional caching of # exceptions. if", "variadic positional parameters fails with the expected exception. with raises(_BeartypeUtilCallableCachedException): @callable_cached def see_me_broken(*args):", "of # exceptions. if len(lies) == 6: raise ValueError(lies) # Else, return a", "lies=lies) is still_i_rise(bitter=bitter, twisted=twisted, lies=lies)) # Assert that memoizing a call expected to", "imports. from beartype._util.cache.utilcachecall import callable_cached from beartype.roar._roarexc import _BeartypeUtilCallableCachedException # Assert that attempting", "an exception whose value depends on # these parameters to exercise this decorator's", "returns the same value. assert ( still_i_rise(bitter, twisted=twisted, lies=lies) is still_i_rise(bitter, twisted=twisted, lies=lies))", "raises(ValueError) as exception_next_info: still_i_rise(bitter, twisted, dust) assert exception_first_info is exception_next_info # Assert that", "call reraises the same exception. with raises(ValueError) as exception_next_info: still_i_rise(bitter, twisted, dust) assert", "and returns the same value. assert ( still_i_rise(bitter, twisted, lies) is still_i_rise(bitter, twisted,", "'in', 'history',) twisted = ('With', 'your', 'bitter,', 'twisted,', 'lies.',) lies = ('You', 'may',", "bitter=bitter)) # Assert that passing one or more unhashable parameters to this callable", "'like', 'suns',), ('With the certainty of tides',), ) == ( 'Just', 'like', 'moons',", "exception_next_info: still_i_rise(bitter, twisted, dust) assert exception_first_info is exception_next_info # Assert that memoizing two", "values. assert ( still_i_rise(bitter=bitter, twisted=twisted, lies=lies) is not still_i_rise(twisted=twisted, lies=lies, bitter=bitter)) # Assert", "a callable accepting one or more # variadic keyword parameters fails with the", "value. assert ( still_i_rise(bitter, twisted, lies) is still_i_rise(bitter, twisted, lies)) # Assert that", "dust) # Assert that repeating that call reraises the same exception. with raises(ValueError)", "Assert that repeating that call reraises the same exception. with raises(ValueError) as exception_next_info:", "arguments in the # same order cache and return the same value. assert", "import callable_cached from beartype.roar._roarexc import _BeartypeUtilCallableCachedException # Assert that attempting to memoize a", "keyword parameters fails with the expected exception. with raises(_BeartypeUtilCallableCachedException): @callable_cached def my_soulful_cries(**kwargs): return", "that memoizing two calls passed the same keyword arguments in the # same", "import raises # ....................{ TESTS }.................... # Prevent pytest from capturing and displaying", "for further details. ''' **Beartype callable caching utility unit tests.** This submodule unit", "and return the same value. 
assert ( still_i_rise(bitter=bitter, twisted=twisted, lies=lies) is still_i_rise(bitter=bitter, twisted=twisted,", "from capturing and displaying all expected non-fatal # beartype-specific warnings emitted by the", "on these parameters to exercise this # decorator's conditional caching of return values.", "twisted = ('With', 'your', 'bitter,', 'twisted,', 'lies.',) lies = ('You', 'may', 'trod,', 'me,',", "the certainty of tides', ) def test_callable_cached_fail() -> None: ''' Test unsuccessful usage", "package-specific submodules at module scope. #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! from beartype.roar._roarwarn import _BeartypeUtilCallableCachedKwargsWarning from beartype_test.util.mark.pytmark import", "caching of # exceptions. if len(lies) == 6: raise ValueError(lies) # Else, return", "'like', 'suns', 'With the certainty of tides', ) def test_callable_cached_fail() -> None: '''", "accepting one or more # variadic keyword parameters fails with the expected exception.", "''' # ....................{ IMPORTS }.................... #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # WARNING: To raise human-readable test errors,", "certainty of tides',), ) == ( 'Just', 'like', 'moons', 'and', 'like', 'suns', 'With", "and returns the same value. assert ( still_i_rise(bitter, twisted=twisted, lies=lies) is still_i_rise(bitter, twisted=twisted,", "value depends on # these parameters to exercise this decorator's conditional caching of", "tides', ) def test_callable_cached_fail() -> None: ''' Test unsuccessful usage of the :func:`beartype._util.cache.utilcachecall.callable_cached`", "if len(lies) == 6: raise ValueError(lies) # Else, return a value depending on", "that attempting to memoize a callable accepting one or more # variadic positional", "_BeartypeUtilCallableCachedKwargsWarning from beartype_test.util.mark.pytmark import ignore_warnings from pytest import raises # ....................{ TESTS }....................", "== ( 'Just', 'like', 'moons', 'and', 'like', 'suns', 'With the certainty of tides',", "''' **Beartype callable caching utility unit tests.** This submodule unit tests the public", "variadic keyword parameters fails with the expected exception. with raises(_BeartypeUtilCallableCachedException): @callable_cached def my_soulful_cries(**kwargs):", "to exercise this # decorator's conditional caching of return values. return bitter +", "lies = ('You', 'may', 'trod,', 'me,', 'in', 'the', 'very', 'dirt',) dust = ('But',", "positional arguments # caches and returns the same value. assert ( still_i_rise(bitter, twisted,", "<gh_stars>1000+ #!/usr/bin/env python3 # --------------------( LICENSE )-------------------- # Copyright (c) 2014-2021 Beartype authors.", "parameters to exercise this # decorator's conditional caching of return values. return bitter", "If an arbitrary condition, raise an exception whose value depends on # these", "arguments in the same order caches and returns the same value. assert (", "return a value depending on these parameters to exercise this # decorator's conditional", ":func:`beartype._util.cache.utilcachecall.callable_cached` decorator. ''' # Defer heavyweight imports. from beartype._util.cache.utilcachecall import callable_cached from beartype.roar._roarexc", "test errors, avoid importing from # package-specific submodules at module scope. #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
from", "import ignore_warnings from pytest import raises # ....................{ TESTS }.................... # Prevent pytest", "ValueError(lies) # Else, return a value depending on these parameters to exercise this", "caches and returns the same value. assert ( still_i_rise(bitter, twisted=twisted, lies=lies) is still_i_rise(bitter,", "test_callable_cached_fail() -> None: ''' Test unsuccessful usage of the :func:`beartype._util.cache.utilcachecall.callable_cached` decorator. ''' #", "'may', 'trod,', 'me,', 'in', 'the', 'very', 'dirt',) dust = ('But', 'still,', 'like', 'dust,',", "positional and keyword # arguments in the same order caches and returns the", "from beartype.roar._roarwarn import _BeartypeUtilCallableCachedKwargsWarning from beartype_test.util.mark.pytmark import ignore_warnings from pytest import raises #", "'your', 'bitter,', 'twisted,', 'lies.',) lies = ('You', 'may', 'trod,', 'me,', 'in', 'the', 'very',", "raise an exception does so. with raises(ValueError) as exception_first_info: still_i_rise(bitter, twisted, dust) #", "# Assert that memoizing two calls passed the same keyword arguments in the", "or more # variadic positional parameters fails with the expected exception. with raises(_BeartypeUtilCallableCachedException):", "return args # Assert that attempting to memoize a callable accepting one or", "# Callable memoized by this decorator. @callable_cached def still_i_rise(bitter, twisted, lies): # If", "args # Assert that attempting to memoize a callable accepting one or more", "this callable # succeeds with the expected return value. assert still_i_rise( ('Just', 'like',", ":mod:`beartype._util.cache.utilcachecall` submodule. ''' # ....................{ IMPORTS }.................... #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # WARNING: To raise human-readable", "of the :func:`beartype._util.cache.utilcachecall.callable_cached` decorator. ''' # Defer heavyweight imports. from beartype._util.cache.utilcachecall import callable_cached", "as parameters below. bitter = ('You', 'may', 'write', 'me', 'down', 'in', 'history',) twisted", "this decorator. @callable_cached def still_i_rise(bitter, twisted, lies): # If an arbitrary condition, raise", "passed the same positional arguments # caches and returns the same value. assert", "beartype_test.util.mark.pytmark import ignore_warnings from pytest import raises # ....................{ TESTS }.................... # Prevent", "'moons',), ('and', 'like', 'suns',), ('With the certainty of tides',), ) == ( 'Just',", "twisted, dust) assert exception_first_info is exception_next_info # Assert that memoizing two calls passed", "To raise human-readable test errors, avoid importing from # package-specific submodules at module", "# beartype-specific warnings emitted by the @callable_cached decorator. @ignore_warnings(_BeartypeUtilCallableCachedKwargsWarning) def test_callable_cached_pass() -> None:", "same keyword arguments in the # same order cache and return the same", "tests.** This submodule unit tests the public API of the private :mod:`beartype._util.cache.utilcachecall` submodule.", "twisted=twisted, lies=lies)) # Assert that memoizing a call expected to raise an exception", "return the same value. assert ( still_i_rise(bitter=bitter, twisted=twisted, lies=lies) is still_i_rise(bitter=bitter, twisted=twisted, lies=lies))", "submodule. ''' # ....................{ IMPORTS }.................... #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
# WARNING: To raise human-readable test", "in the # same order cache and return the same value. assert (", "on # these parameters to exercise this decorator's conditional caching of # exceptions.", "the same positional and keyword # arguments in the same order caches and", "that memoizing two calls passed the same positional arguments # caches and returns", "# --------------------( LICENSE )-------------------- # Copyright (c) 2014-2021 Beartype authors. # See \"LICENSE\"", "lies=lies)) # Assert that memoizing two calls passed the same keyword arguments in", "a call expected to raise an exception does so. with raises(ValueError) as exception_first_info:", ") def test_callable_cached_fail() -> None: ''' Test unsuccessful usage of the :func:`beartype._util.cache.utilcachecall.callable_cached` decorator.", "return differing values. assert ( still_i_rise(bitter=bitter, twisted=twisted, lies=lies) is not still_i_rise(twisted=twisted, lies=lies, bitter=bitter))", "ignore_warnings from pytest import raises # ....................{ TESTS }.................... # Prevent pytest from", "Assert that passing one or more unhashable parameters to this callable # succeeds", "return bitter + twisted + lies # Objects to be passed as parameters", "private :mod:`beartype._util.cache.utilcachecall` submodule. ''' # ....................{ IMPORTS }.................... #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # WARNING: To raise", "# ....................{ TESTS }.................... # Prevent pytest from capturing and displaying all expected", "import _BeartypeUtilCallableCachedKwargsWarning from beartype_test.util.mark.pytmark import ignore_warnings from pytest import raises # ....................{ TESTS", "twisted, lies): # If an arbitrary condition, raise an exception whose value depends", "# same order cache and return the same value. assert ( still_i_rise(bitter=bitter, twisted=twisted,", "submodules at module scope. #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! from beartype.roar._roarwarn import _BeartypeUtilCallableCachedKwargsWarning from beartype_test.util.mark.pytmark import ignore_warnings", "--------------------( LICENSE )-------------------- # Copyright (c) 2014-2021 Beartype authors. # See \"LICENSE\" for", "still_i_rise(bitter=bitter, twisted=twisted, lies=lies) is not still_i_rise(twisted=twisted, lies=lies, bitter=bitter)) # Assert that passing one", "\"LICENSE\" for further details. ''' **Beartype callable caching utility unit tests.** This submodule", "''' Test unsuccessful usage of the :func:`beartype._util.cache.utilcachecall.callable_cached` decorator. ''' # Defer heavyweight imports.", "passed as parameters below. bitter = ('You', 'may', 'write', 'me', 'down', 'in', 'history',)", "is not still_i_rise(twisted=twisted, lies=lies, bitter=bitter)) # Assert that passing one or more unhashable", "''' # Defer heavyweight imports. from beartype._util.cache.utilcachecall import callable_cached from beartype.roar._roarexc import _BeartypeUtilCallableCachedException", "twisted=twisted, lies=lies) is still_i_rise(bitter, twisted=twisted, lies=lies)) # Assert that memoizing two calls passed", "capturing and displaying all expected non-fatal # beartype-specific warnings emitted by the @callable_cached", "def test_callable_cached_pass() -> None: ''' Test successful usage of the :func:`beartype._util.cache.utilcachecall.callable_cached` decorator. 
'''", "is still_i_rise(bitter=bitter, twisted=twisted, lies=lies)) # Assert that memoizing a call expected to raise", "# Defer heavyweight imports. from beartype._util.cache.utilcachecall import callable_cached # Callable memoized by this", "that memoizing a call expected to raise an exception does so. with raises(ValueError)", "caching utility unit tests.** This submodule unit tests the public API of the", "with raises(ValueError) as exception_first_info: still_i_rise(bitter, twisted, dust) # Assert that repeating that call", "a callable accepting one or more # variadic positional parameters fails with the", "# variadic keyword parameters fails with the expected exception. with raises(_BeartypeUtilCallableCachedException): @callable_cached def", "callable accepting one or more # variadic keyword parameters fails with the expected", "the same value. assert ( still_i_rise(bitter, twisted, lies) is still_i_rise(bitter, twisted, lies)) #", "the same keyword arguments in a # differing order cache and return differing", "tests the public API of the private :mod:`beartype._util.cache.utilcachecall` submodule. ''' # ....................{ IMPORTS", "( still_i_rise(bitter, twisted=twisted, lies=lies) is still_i_rise(bitter, twisted=twisted, lies=lies)) # Assert that memoizing two", "cache and return the same value. assert ( still_i_rise(bitter=bitter, twisted=twisted, lies=lies) is still_i_rise(bitter=bitter,", "== 6: raise ValueError(lies) # Else, return a value depending on these parameters", "'rise',) # Assert that memoizing two calls passed the same positional arguments #", "memoizing two calls passed the same positional and keyword # arguments in the", "still_i_rise(bitter, twisted=twisted, lies=lies)) # Assert that memoizing two calls passed the same keyword", "all expected non-fatal # beartype-specific warnings emitted by the @callable_cached decorator. @ignore_warnings(_BeartypeUtilCallableCachedKwargsWarning) def", "( still_i_rise(bitter=bitter, twisted=twisted, lies=lies) is still_i_rise(bitter=bitter, twisted=twisted, lies=lies)) # Assert that memoizing a", "still_i_rise(bitter, twisted=twisted, lies=lies) is still_i_rise(bitter, twisted=twisted, lies=lies)) # Assert that memoizing two calls", "of tides',), ) == ( 'Just', 'like', 'moons', 'and', 'like', 'suns', 'With the", "Else, return a value depending on these parameters to exercise this # decorator's", "2014-2021 Beartype authors. # See \"LICENSE\" for further details. ''' **Beartype callable caching", "'suns',), ('With the certainty of tides',), ) == ( 'Just', 'like', 'moons', 'and',", "Defer heavyweight imports. from beartype._util.cache.utilcachecall import callable_cached from beartype.roar._roarexc import _BeartypeUtilCallableCachedException # Assert", "from beartype.roar._roarexc import _BeartypeUtilCallableCachedException # Assert that attempting to memoize a callable accepting", "# differing order cache and return differing values. assert ( still_i_rise(bitter=bitter, twisted=twisted, lies=lies)", "of return values. return bitter + twisted + lies # Objects to be", "'twisted,', 'lies.',) lies = ('You', 'may', 'trod,', 'me,', 'in', 'the', 'very', 'dirt',) dust", "as exception_next_info: still_i_rise(bitter, twisted, dust) assert exception_first_info is exception_next_info # Assert that memoizing", "emitted by the @callable_cached decorator. 
@ignore_warnings(_BeartypeUtilCallableCachedKwargsWarning) def test_callable_cached_pass() -> None: ''' Test successful", "assert exception_first_info is exception_next_info # Assert that memoizing two calls passed the same", "module scope. #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! from beartype.roar._roarwarn import _BeartypeUtilCallableCachedKwargsWarning from beartype_test.util.mark.pytmark import ignore_warnings from pytest", "same positional arguments # caches and returns the same value. assert ( still_i_rise(bitter,", "6: raise ValueError(lies) # Else, return a value depending on these parameters to", "arbitrary condition, raise an exception whose value depends on # these parameters to", "....................{ TESTS }.................... # Prevent pytest from capturing and displaying all expected non-fatal", "# Else, return a value depending on these parameters to exercise this #", "differing order cache and return differing values. assert ( still_i_rise(bitter=bitter, twisted=twisted, lies=lies) is", "decorator. @ignore_warnings(_BeartypeUtilCallableCachedKwargsWarning) def test_callable_cached_pass() -> None: ''' Test successful usage of the :func:`beartype._util.cache.utilcachecall.callable_cached`", "human-readable test errors, avoid importing from # package-specific submodules at module scope. #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!", "an arbitrary condition, raise an exception whose value depends on # these parameters", "lies)) # Assert that memoizing two calls passed the same positional and keyword", "#!/usr/bin/env python3 # --------------------( LICENSE )-------------------- # Copyright (c) 2014-2021 Beartype authors. #", "memoizing two calls passed the same positional arguments # caches and returns the", "+ twisted + lies # Objects to be passed as parameters below. bitter", "# Objects to be passed as parameters below. bitter = ('You', 'may', 'write',", "still_i_rise(bitter=bitter, twisted=twisted, lies=lies) is still_i_rise(bitter=bitter, twisted=twisted, lies=lies)) # Assert that memoizing a call", "conditional caching of return values. return bitter + twisted + lies # Objects", "that attempting to memoize a callable accepting one or more # variadic keyword", "# Assert that attempting to memoize a callable accepting one or more #", "expected return value. assert still_i_rise( ('Just', 'like', 'moons',), ('and', 'like', 'suns',), ('With the", "TESTS }.................... # Prevent pytest from capturing and displaying all expected non-fatal #", "keyword # arguments in the same order caches and returns the same value.", "'Just', 'like', 'moons', 'and', 'like', 'suns', 'With the certainty of tides', ) def", "arguments # caches and returns the same value. assert ( still_i_rise(bitter, twisted, lies)", "'dust,', \"I'll\", 'rise',) # Assert that memoizing two calls passed the same positional", "a value depending on these parameters to exercise this # decorator's conditional caching", "caching of return values. return bitter + twisted + lies # Objects to", "\"I'll\", 'rise',) # Assert that memoizing two calls passed the same positional arguments", "succeeds with the expected return value. assert still_i_rise( ('Just', 'like', 'moons',), ('and', 'like',", "unit tests the public API of the private :mod:`beartype._util.cache.utilcachecall` submodule. ''' # ....................{", "parameters below. 
bitter = ('You', 'may', 'write', 'me', 'down', 'in', 'history',) twisted =", "'trod,', 'me,', 'in', 'the', 'very', 'dirt',) dust = ('But', 'still,', 'like', 'dust,', \"I'll\",", "imports. from beartype._util.cache.utilcachecall import callable_cached # Callable memoized by this decorator. @callable_cached def", "in the same order caches and returns the same value. assert ( still_i_rise(bitter,", "whose value depends on # these parameters to exercise this decorator's conditional caching", "that repeating that call reraises the same exception. with raises(ValueError) as exception_next_info: still_i_rise(bitter,", "depending on these parameters to exercise this # decorator's conditional caching of return", "python3 # --------------------( LICENSE )-------------------- # Copyright (c) 2014-2021 Beartype authors. # See", "('You', 'may', 'trod,', 'me,', 'in', 'the', 'very', 'dirt',) dust = ('But', 'still,', 'like',", "decorator's conditional caching of # exceptions. if len(lies) == 6: raise ValueError(lies) #", "'very', 'dirt',) dust = ('But', 'still,', 'like', 'dust,', \"I'll\", 'rise',) # Assert that", "that memoizing two calls passed the same keyword arguments in a # differing", "not still_i_rise(twisted=twisted, lies=lies, bitter=bitter)) # Assert that passing one or more unhashable parameters", "'moons', 'and', 'like', 'suns', 'With the certainty of tides', ) def test_callable_cached_fail() ->", "beartype.roar._roarwarn import _BeartypeUtilCallableCachedKwargsWarning from beartype_test.util.mark.pytmark import ignore_warnings from pytest import raises # ....................{", "to this callable # succeeds with the expected return value. assert still_i_rise( ('Just',", ":func:`beartype._util.cache.utilcachecall.callable_cached` decorator. ''' # Defer heavyweight imports. from beartype._util.cache.utilcachecall import callable_cached # Callable", "the public API of the private :mod:`beartype._util.cache.utilcachecall` submodule. ''' # ....................{ IMPORTS }....................", "at module scope. #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! from beartype.roar._roarwarn import _BeartypeUtilCallableCachedKwargsWarning from beartype_test.util.mark.pytmark import ignore_warnings from", "def test_callable_cached_fail() -> None: ''' Test unsuccessful usage of the :func:`beartype._util.cache.utilcachecall.callable_cached` decorator. '''", "value depending on these parameters to exercise this # decorator's conditional caching of", "'history',) twisted = ('With', 'your', 'bitter,', 'twisted,', 'lies.',) lies = ('You', 'may', 'trod,',", "'may', 'write', 'me', 'down', 'in', 'history',) twisted = ('With', 'your', 'bitter,', 'twisted,', 'lies.',)", "twisted, dust) # Assert that repeating that call reraises the same exception. with", "Callable memoized by this decorator. @callable_cached def still_i_rise(bitter, twisted, lies): # If an", "still_i_rise(bitter=bitter, twisted=twisted, lies=lies)) # Assert that memoizing a call expected to raise an", "exception_first_info: still_i_rise(bitter, twisted, dust) # Assert that repeating that call reraises the same", "Assert that memoizing two calls passed the same positional and keyword # arguments", "unit tests.** This submodule unit tests the public API of the private :mod:`beartype._util.cache.utilcachecall`", "and displaying all expected non-fatal # beartype-specific warnings emitted by the @callable_cached decorator.", "Copyright (c) 2014-2021 Beartype authors. # See \"LICENSE\" for further details. 
''' **Beartype", "raises # ....................{ TESTS }.................... # Prevent pytest from capturing and displaying all", "passed the same keyword arguments in the # same order cache and return", ")-------------------- # Copyright (c) 2014-2021 Beartype authors. # See \"LICENSE\" for further details.", "('With the certainty of tides',), ) == ( 'Just', 'like', 'moons', 'and', 'like',", "memoize a callable accepting one or more # variadic keyword parameters fails with", "exception whose value depends on # these parameters to exercise this decorator's conditional", "is still_i_rise(bitter, twisted, lies)) # Assert that memoizing two calls passed the same", "to be passed as parameters below. bitter = ('You', 'may', 'write', 'me', 'down',", "exception_next_info # Assert that memoizing two calls passed the same keyword arguments in", "or more # variadic keyword parameters fails with the expected exception. with raises(_BeartypeUtilCallableCachedException):", "= ('With', 'your', 'bitter,', 'twisted,', 'lies.',) lies = ('You', 'may', 'trod,', 'me,', 'in',", "( 'Just', 'like', 'moons', 'and', 'like', 'suns', 'With the certainty of tides', )", "IMPORTS }.................... #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # WARNING: To raise human-readable test errors, avoid importing from", "dust) assert exception_first_info is exception_next_info # Assert that memoizing two calls passed the", "displaying all expected non-fatal # beartype-specific warnings emitted by the @callable_cached decorator. @ignore_warnings(_BeartypeUtilCallableCachedKwargsWarning)", "the @callable_cached decorator. @ignore_warnings(_BeartypeUtilCallableCachedKwargsWarning) def test_callable_cached_pass() -> None: ''' Test successful usage of", "twisted, lies) is still_i_rise(bitter, twisted, lies)) # Assert that memoizing two calls passed", "= ('You', 'may', 'write', 'me', 'down', 'in', 'history',) twisted = ('With', 'your', 'bitter,',", "warnings emitted by the @callable_cached decorator. @ignore_warnings(_BeartypeUtilCallableCachedKwargsWarning) def test_callable_cached_pass() -> None: ''' Test", "# exceptions. if len(lies) == 6: raise ValueError(lies) # Else, return a value", "assert still_i_rise( ('Just', 'like', 'moons',), ('and', 'like', 'suns',), ('With the certainty of tides',),", "memoize a callable accepting one or more # variadic positional parameters fails with", "twisted + lies # Objects to be passed as parameters below. bitter =", "so. with raises(ValueError) as exception_first_info: still_i_rise(bitter, twisted, dust) # Assert that repeating that", "raise ValueError(lies) # Else, return a value depending on these parameters to exercise", "Assert that memoizing two calls passed the same keyword arguments in the #", "two calls passed the same keyword arguments in a # differing order cache", "('With', 'your', 'bitter,', 'twisted,', 'lies.',) lies = ('You', 'may', 'trod,', 'me,', 'in', 'the',", "depends on # these parameters to exercise this decorator's conditional caching of #", "parameters to exercise this decorator's conditional caching of # exceptions. if len(lies) ==", "see_me_broken(*args): return args # Assert that attempting to memoize a callable accepting one", "positional parameters fails with the expected exception. 
with raises(_BeartypeUtilCallableCachedException): @callable_cached def see_me_broken(*args): return", "Assert that memoizing two calls passed the same keyword arguments in a #", "same keyword arguments in a # differing order cache and return differing values.", "callable accepting one or more # variadic positional parameters fails with the expected", "return values. return bitter + twisted + lies # Objects to be passed", "twisted=twisted, lies=lies) is not still_i_rise(twisted=twisted, lies=lies, bitter=bitter)) # Assert that passing one or", "with raises(_BeartypeUtilCallableCachedException): @callable_cached def see_me_broken(*args): return args # Assert that attempting to memoize", "attempting to memoize a callable accepting one or more # variadic keyword parameters", "this # decorator's conditional caching of return values. return bitter + twisted +", "'bitter,', 'twisted,', 'lies.',) lies = ('You', 'may', 'trod,', 'me,', 'in', 'the', 'very', 'dirt',)", "value. assert ( still_i_rise(bitter=bitter, twisted=twisted, lies=lies) is still_i_rise(bitter=bitter, twisted=twisted, lies=lies)) # Assert that", "beartype.roar._roarexc import _BeartypeUtilCallableCachedException # Assert that attempting to memoize a callable accepting one", "# Defer heavyweight imports. from beartype._util.cache.utilcachecall import callable_cached from beartype.roar._roarexc import _BeartypeUtilCallableCachedException #", "# WARNING: To raise human-readable test errors, avoid importing from # package-specific submodules", "( still_i_rise(bitter=bitter, twisted=twisted, lies=lies) is not still_i_rise(twisted=twisted, lies=lies, bitter=bitter)) # Assert that passing", "('But', 'still,', 'like', 'dust,', \"I'll\", 'rise',) # Assert that memoizing two calls passed", "returns the same value. assert ( still_i_rise(bitter, twisted, lies) is still_i_rise(bitter, twisted, lies))", "'lies.',) lies = ('You', 'may', 'trod,', 'me,', 'in', 'the', 'very', 'dirt',) dust =", "# Assert that memoizing two calls passed the same keyword arguments in a", "pytest from capturing and displaying all expected non-fatal # beartype-specific warnings emitted by", "Prevent pytest from capturing and displaying all expected non-fatal # beartype-specific warnings emitted", "twisted=twisted, lies=lies)) # Assert that memoizing two calls passed the same keyword arguments", "call expected to raise an exception does so. with raises(ValueError) as exception_first_info: still_i_rise(bitter,", "reraises the same exception. with raises(ValueError) as exception_next_info: still_i_rise(bitter, twisted, dust) assert exception_first_info", "exercise this decorator's conditional caching of # exceptions. if len(lies) == 6: raise", "same positional and keyword # arguments in the same order caches and returns", ") == ( 'Just', 'like', 'moons', 'and', 'like', 'suns', 'With the certainty of", "submodule unit tests the public API of the private :mod:`beartype._util.cache.utilcachecall` submodule. ''' #", "order caches and returns the same value. assert ( still_i_rise(bitter, twisted=twisted, lies=lies) is", "cache and return differing values. assert ( still_i_rise(bitter=bitter, twisted=twisted, lies=lies) is not still_i_rise(twisted=twisted,", "decorator. ''' # Defer heavyweight imports. from beartype._util.cache.utilcachecall import callable_cached from beartype.roar._roarexc import", "keyword arguments in a # differing order cache and return differing values. assert", "these parameters to exercise this decorator's conditional caching of # exceptions. 
if len(lies)", "....................{ IMPORTS }.................... #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # WARNING: To raise human-readable test errors, avoid importing", "avoid importing from # package-specific submodules at module scope. #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! from beartype.roar._roarwarn import", "''' Test successful usage of the :func:`beartype._util.cache.utilcachecall.callable_cached` decorator. ''' # Defer heavyweight imports.", "still_i_rise(bitter, twisted, lies): # If an arbitrary condition, raise an exception whose value", "return value. assert still_i_rise( ('Just', 'like', 'moons',), ('and', 'like', 'suns',), ('With the certainty", "@callable_cached decorator. @ignore_warnings(_BeartypeUtilCallableCachedKwargsWarning) def test_callable_cached_pass() -> None: ''' Test successful usage of the", "''' # Defer heavyweight imports. from beartype._util.cache.utilcachecall import callable_cached # Callable memoized by", "lies # Objects to be passed as parameters below. bitter = ('You', 'may',", "# See \"LICENSE\" for further details. ''' **Beartype callable caching utility unit tests.**", "is still_i_rise(bitter, twisted=twisted, lies=lies)) # Assert that memoizing two calls passed the same", "'down', 'in', 'history',) twisted = ('With', 'your', 'bitter,', 'twisted,', 'lies.',) lies = ('You',", "still_i_rise( ('Just', 'like', 'moons',), ('and', 'like', 'suns',), ('With the certainty of tides',), )", "'dirt',) dust = ('But', 'still,', 'like', 'dust,', \"I'll\", 'rise',) # Assert that memoizing", "Beartype authors. # See \"LICENSE\" for further details. ''' **Beartype callable caching utility", "import callable_cached # Callable memoized by this decorator. @callable_cached def still_i_rise(bitter, twisted, lies):", "= ('But', 'still,', 'like', 'dust,', \"I'll\", 'rise',) # Assert that memoizing two calls", "the expected return value. assert still_i_rise( ('Just', 'like', 'moons',), ('and', 'like', 'suns',), ('With", "Defer heavyweight imports. from beartype._util.cache.utilcachecall import callable_cached # Callable memoized by this decorator.", "from beartype_test.util.mark.pytmark import ignore_warnings from pytest import raises # ....................{ TESTS }.................... #", "that passing one or more unhashable parameters to this callable # succeeds with", "same value. assert ( still_i_rise(bitter=bitter, twisted=twisted, lies=lies) is still_i_rise(bitter=bitter, twisted=twisted, lies=lies)) # Assert", "heavyweight imports. from beartype._util.cache.utilcachecall import callable_cached from beartype.roar._roarexc import _BeartypeUtilCallableCachedException # Assert that", "fails with the expected exception. with raises(_BeartypeUtilCallableCachedException): @callable_cached def see_me_broken(*args): return args #", "in a # differing order cache and return differing values. assert ( still_i_rise(bitter=bitter,", "errors, avoid importing from # package-specific submodules at module scope. #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! from beartype.roar._roarwarn", "utility unit tests.** This submodule unit tests the public API of the private", "-> None: ''' Test successful usage of the :func:`beartype._util.cache.utilcachecall.callable_cached` decorator. 
''' # Defer", "raises(_BeartypeUtilCallableCachedException): @callable_cached def see_me_broken(*args): return args # Assert that attempting to memoize a", "with raises(ValueError) as exception_next_info: still_i_rise(bitter, twisted, dust) assert exception_first_info is exception_next_info # Assert", "same order cache and return the same value. assert ( still_i_rise(bitter=bitter, twisted=twisted, lies=lies)", "exception. with raises(_BeartypeUtilCallableCachedException): @callable_cached def see_me_broken(*args): return args # Assert that attempting to", "callable_cached # Callable memoized by this decorator. @callable_cached def still_i_rise(bitter, twisted, lies): #", "exceptions. if len(lies) == 6: raise ValueError(lies) # Else, return a value depending", "conditional caching of # exceptions. if len(lies) == 6: raise ValueError(lies) # Else,", "keyword arguments in the # same order cache and return the same value.", "order cache and return the same value. assert ( still_i_rise(bitter=bitter, twisted=twisted, lies=lies) is", "twisted=twisted, lies=lies) is still_i_rise(bitter=bitter, twisted=twisted, lies=lies)) # Assert that memoizing a call expected", "'write', 'me', 'down', 'in', 'history',) twisted = ('With', 'your', 'bitter,', 'twisted,', 'lies.',) lies", "assert ( still_i_rise(bitter, twisted, lies) is still_i_rise(bitter, twisted, lies)) # Assert that memoizing", "to exercise this decorator's conditional caching of # exceptions. if len(lies) == 6:", "beartype._util.cache.utilcachecall import callable_cached # Callable memoized by this decorator. @callable_cached def still_i_rise(bitter, twisted,", "two calls passed the same positional arguments # caches and returns the same", "accepting one or more # variadic positional parameters fails with the expected exception.", "the same value. assert ( still_i_rise(bitter, twisted=twisted, lies=lies) is still_i_rise(bitter, twisted=twisted, lies=lies)) #", "'like', 'moons', 'and', 'like', 'suns', 'With the certainty of tides', ) def test_callable_cached_fail()", "'like', 'dust,', \"I'll\", 'rise',) # Assert that memoizing two calls passed the same", "callable # succeeds with the expected return value. assert still_i_rise( ('Just', 'like', 'moons',),", "-> None: ''' Test unsuccessful usage of the :func:`beartype._util.cache.utilcachecall.callable_cached` decorator. ''' # Defer", "this decorator's conditional caching of # exceptions. if len(lies) == 6: raise ValueError(lies)", "}.................... # Prevent pytest from capturing and displaying all expected non-fatal # beartype-specific", "# Assert that memoizing two calls passed the same positional arguments # caches", "('You', 'may', 'write', 'me', 'down', 'in', 'history',) twisted = ('With', 'your', 'bitter,', 'twisted,',", "Objects to be passed as parameters below. bitter = ('You', 'may', 'write', 'me',", "raise an exception whose value depends on # these parameters to exercise this", "decorator's conditional caching of return values. return bitter + twisted + lies #", "authors. # See \"LICENSE\" for further details. ''' **Beartype callable caching utility unit", "See \"LICENSE\" for further details. ''' **Beartype callable caching utility unit tests.** This", "len(lies) == 6: raise ValueError(lies) # Else, return a value depending on these", "expected non-fatal # beartype-specific warnings emitted by the @callable_cached decorator. 
@ignore_warnings(_BeartypeUtilCallableCachedKwargsWarning) def test_callable_cached_pass()", "of tides', ) def test_callable_cached_fail() -> None: ''' Test unsuccessful usage of the", "'still,', 'like', 'dust,', \"I'll\", 'rise',) # Assert that memoizing two calls passed the", "by the @callable_cached decorator. @ignore_warnings(_BeartypeUtilCallableCachedKwargsWarning) def test_callable_cached_pass() -> None: ''' Test successful usage", "calls passed the same positional arguments # caches and returns the same value.", "one or more # variadic positional parameters fails with the expected exception. with", "Assert that attempting to memoize a callable accepting one or more # variadic", "certainty of tides', ) def test_callable_cached_fail() -> None: ''' Test unsuccessful usage of", "these parameters to exercise this # decorator's conditional caching of return values. return", "decorator. @callable_cached def still_i_rise(bitter, twisted, lies): # If an arbitrary condition, raise an", "Assert that memoizing a call expected to raise an exception does so. with", "('Just', 'like', 'moons',), ('and', 'like', 'suns',), ('With the certainty of tides',), ) ==", "below. bitter = ('You', 'may', 'write', 'me', 'down', 'in', 'history',) twisted = ('With',", "of the private :mod:`beartype._util.cache.utilcachecall` submodule. ''' # ....................{ IMPORTS }.................... #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # WARNING:", "@ignore_warnings(_BeartypeUtilCallableCachedKwargsWarning) def test_callable_cached_pass() -> None: ''' Test successful usage of the :func:`beartype._util.cache.utilcachecall.callable_cached` decorator.", "decorator. ''' # Defer heavyweight imports. from beartype._util.cache.utilcachecall import callable_cached # Callable memoized", "that memoizing two calls passed the same positional and keyword # arguments in", "#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # WARNING: To raise human-readable test errors, avoid importing from # package-specific", "unhashable parameters to this callable # succeeds with the expected return value. assert", "lies=lies) is still_i_rise(bitter, twisted=twisted, lies=lies)) # Assert that memoizing two calls passed the", "None: ''' Test unsuccessful usage of the :func:`beartype._util.cache.utilcachecall.callable_cached` decorator. ''' # Defer heavyweight", "public API of the private :mod:`beartype._util.cache.utilcachecall` submodule. ''' # ....................{ IMPORTS }.................... #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!", "the :func:`beartype._util.cache.utilcachecall.callable_cached` decorator. ''' # Defer heavyweight imports. from beartype._util.cache.utilcachecall import callable_cached from", "more unhashable parameters to this callable # succeeds with the expected return value.", "# succeeds with the expected return value. assert still_i_rise( ('Just', 'like', 'moons',), ('and',", "# Assert that memoizing a call expected to raise an exception does so.", "parameters fails with the expected exception. 
with raises(_BeartypeUtilCallableCachedException): @callable_cached def my_soulful_cries(**kwargs): return kwargs", "twisted, lies)) # Assert that memoizing two calls passed the same positional and", "bitter + twisted + lies # Objects to be passed as parameters below.", "from beartype._util.cache.utilcachecall import callable_cached from beartype.roar._roarexc import _BeartypeUtilCallableCachedException # Assert that attempting to", "parameters to this callable # succeeds with the expected return value. assert still_i_rise(", "lies) is still_i_rise(bitter, twisted, lies)) # Assert that memoizing two calls passed the", "caches and returns the same value. assert ( still_i_rise(bitter, twisted, lies) is still_i_rise(bitter,", "the same keyword arguments in the # same order cache and return the", "pytest import raises # ....................{ TESTS }.................... # Prevent pytest from capturing and", "# Assert that passing one or more unhashable parameters to this callable #", "'With the certainty of tides', ) def test_callable_cached_fail() -> None: ''' Test unsuccessful", "with the expected exception. with raises(_BeartypeUtilCallableCachedException): @callable_cached def see_me_broken(*args): return args # Assert", "memoizing two calls passed the same keyword arguments in a # differing order", "condition, raise an exception whose value depends on # these parameters to exercise", "beartype._util.cache.utilcachecall import callable_cached from beartype.roar._roarexc import _BeartypeUtilCallableCachedException # Assert that attempting to memoize", "callable caching utility unit tests.** This submodule unit tests the public API of", "the same order caches and returns the same value. assert ( still_i_rise(bitter, twisted=twisted,", "def see_me_broken(*args): return args # Assert that attempting to memoize a callable accepting", "**Beartype callable caching utility unit tests.** This submodule unit tests the public API", "same value. assert ( still_i_rise(bitter, twisted=twisted, lies=lies) is still_i_rise(bitter, twisted=twisted, lies=lies)) # Assert", "# variadic positional parameters fails with the expected exception. with raises(_BeartypeUtilCallableCachedException): @callable_cached def", "raise human-readable test errors, avoid importing from # package-specific submodules at module scope.", "callable_cached from beartype.roar._roarexc import _BeartypeUtilCallableCachedException # Assert that attempting to memoize a callable", "non-fatal # beartype-specific warnings emitted by the @callable_cached decorator. @ignore_warnings(_BeartypeUtilCallableCachedKwargsWarning) def test_callable_cached_pass() ->", "lies=lies, bitter=bitter)) # Assert that passing one or more unhashable parameters to this", "one or more unhashable parameters to this callable # succeeds with the expected", "one or more # variadic keyword parameters fails with the expected exception. with", "exception does so. with raises(ValueError) as exception_first_info: still_i_rise(bitter, twisted, dust) # Assert that", "an exception does so. with raises(ValueError) as exception_first_info: still_i_rise(bitter, twisted, dust) # Assert", "'me', 'down', 'in', 'history',) twisted = ('With', 'your', 'bitter,', 'twisted,', 'lies.',) lies =", "values. return bitter + twisted + lies # Objects to be passed as", "same order caches and returns the same value. 
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.

'''
**Beartype callable caching utility unit tests.**

This submodule unit tests the public API of the private
:mod:`beartype._util.cache.utilcachecall` submodule.
'''

# ....................{ IMPORTS }....................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: To raise human-readable test errors, avoid importing from
# package-specific submodules at module scope.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
from beartype.roar._roarwarn import _BeartypeUtilCallableCachedKwargsWarning
from beartype_test.util.mark.pytmark import ignore_warnings
from pytest import raises

# ....................{ TESTS }....................
# Prevent pytest from capturing and displaying all expected non-fatal
# beartype-specific warnings emitted by the @callable_cached decorator.
@ignore_warnings(_BeartypeUtilCallableCachedKwargsWarning)
def test_callable_cached_pass() -> None:
    '''
    Test successful usage of the
    :func:`beartype._util.cache.utilcachecall.callable_cached` decorator.
    '''

    # Defer heavyweight imports.
    from beartype._util.cache.utilcachecall import callable_cached

    # Callable memoized by this decorator.
    @callable_cached
    def still_i_rise(bitter, twisted, lies):
        # If an arbitrary condition, raise an exception whose value depends on
        # these parameters; otherwise return a value depending on them, to
        # exercise this decorator's conditional caching of return values.
        ...
        return bitter + twisted + lies

    # Objects to be passed as parameters below.
    bitter = ('You', 'may', 'write', 'me', 'down', 'in', 'history',)
    twisted = ('With', 'your', ...)
    lies = ('You', 'may', 'trod,', 'me,', 'in', 'the', 'very', 'dirt',)
    dust = ('But', 'still,', 'like', 'dust,', "I'll", 'rise',)

    # Assert that memoizing two calls passed the same positional arguments
    # caches and returns the same value.
    assert (
        still_i_rise(bitter, twisted, lies) is
        still_i_rise(bitter, twisted, lies))

    # Assert that memoizing two calls passed the same positional and keyword
    # arguments in the same order caches and returns the same value.
    assert (
        still_i_rise(bitter, twisted=twisted, lies=lies) is
        still_i_rise(bitter, twisted=twisted, lies=lies))

    # Assert that memoizing two calls passed the same keyword arguments in the
    # same order cache and return the same value.
    assert (
        still_i_rise(bitter=bitter, twisted=twisted, lies=lies) is
        still_i_rise(bitter=bitter, twisted=twisted, lies=lies))

    # Assert that memoizing a call expected to raise an exception does so.
    with raises(ValueError) as exception_first_info:
        still_i_rise(bitter, twisted, dust)

    # Assert that repeating that call reraises the same exception.
    with raises(ValueError) as exception_next_info:
        still_i_rise(bitter, twisted, dust)
    assert exception_first_info is exception_next_info

    # Assert that memoizing two calls passed the same keyword arguments in a
    # differing order cache and return differing values.
    assert (
        still_i_rise(bitter=bitter, twisted=twisted, lies=lies) is not
        still_i_rise(twisted=twisted, lies=lies, bitter=bitter))

    # Assert that passing one or more unhashable parameters to this callable
    # succeeds with the expected return value.
    assert still_i_rise(
        ('Just', 'like', 'moons',),
        ('and', 'like', 'suns',),
        ('With the certainty of tides',),
    ) == (
        'Just', 'like', 'moons',
        'and', 'like', 'suns',
        'With the certainty of tides',
    )


def test_callable_cached_fail() -> None:
    '''
    Test unsuccessful usage of the
    :func:`beartype._util.cache.utilcachecall.callable_cached` decorator.
    '''

    # Defer heavyweight imports.
    from beartype._util.cache.utilcachecall import callable_cached
    from beartype.roar._roarexc import _BeartypeUtilCallableCachedException

    # Assert that attempting to memoize a callable accepting one or more
    # variadic positional parameters fails with the expected exception.
    with raises(_BeartypeUtilCallableCachedException):
        @callable_cached
        def see_me_broken(*args):
            return args

    # Assert that attempting to memoize a callable accepting one or more
    # variadic keyword parameters fails with the expected exception.
    with raises(_BeartypeUtilCallableCachedException):
        @callable_cached
        def see_me_broken(**kwargs):
            return kwargs
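The assertions above pin down the contract of an argument-keyed memoizer: identical calls must return the identical cached object, a call that raised once must re-raise its cached exception, and keyword order participates in the cache key. A minimal sketch of that behaviour is shown below; it is an illustration of what the tests exercise, not beartype's actual callable_cached implementation, and the helper name memoize_with_exceptions is invented here.

import functools

def memoize_with_exceptions(func):
    results = {}   # argument key -> cached return value
    errors = {}    # argument key -> cached exception instance

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Keyword order is part of the key, so differently ordered keyword
        # calls produce distinct cache entries, as the tests above expect.
        key = (args, tuple(kwargs.items()))
        if key in errors:
            raise errors[key]
        if key in results:
            return results[key]
        try:
            value = func(*args, **kwargs)
        except Exception as exc:
            errors[key] = exc
            raise
        results[key] = value
        return value

    return wrapper

With this sketch, two calls passed the same arguments return the same tuple object (so is-identity holds), and repeating a call that raised re-raises the cached exception instance.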
[ "particle.beached = 4 else: particle.beached = 1 else: particle.beached = 0 # Beaching", "please unbeach beached = Variable('beached', dtype=np.int32, initial=0.) unbeachCount = Variable('unbeachCount', dtype=np.int32, initial=0.) class", "0 sea, 1 beached, 2 after non-beach dyn, 3 after beach dyn, 4", "fieldset, time): lon, lat = (particle.lon, particle.lat) if lon > 65. or lon", "# Freeze particles that get out of bounds def freezeOutOfBoundsWedge(particle, fieldset, time): lon,", "+ v4) / 6. * particle.dt def deleteParticle(particle, fieldset, time): print(f\"Particle {particle.id} deleted:", "v3) = fieldset.UV[time + .5 * particle.dt, particle.depth, lat2, lon2] lon3, lat3 =", "2 def BoundedAdvectionRK4(particle, fieldset, time): if particle.inBounds == 1: (u1, v1) = fieldset.UV[time,", "time): print(f\"Particle {particle.id} deleted: ({particle.lon}, {particle.lat} at {particle.time})\") particle.delete() # Beaching dynamics from", "(u3, v3) = fieldset.UV[time + .5 * particle.dt, particle.depth, lat2, lon2] lon3, lat3", "2*v3 + v4) / 6. * particle.dt particle.beached = 2 def UnbeachAdvectionRK4(particle, fieldset,", "v2) = fieldset.UV[time + .5 * particle.dt, particle.depth, lat1, lon1] lon2, lat2 =", "== 3: (u, v) = fieldset.UV[time, particle.depth, particle.lat, particle.lon] if fabs(u) < 1e-14", "lat = (particle.lon, particle.lat) if lat < 65.: particle.inBounds = 0 def freezeOutOfBoundsArctic70(particle,", "please unbeach beached = Variable('beached', dtype=np.int32, initial=0.) unbeachCount = Variable('unbeachCount', dtype=np.int32, initial=0.) #", "def UnbeachBoundedAdvectionRK4(particle, fieldset, time): if particle.inBounds == 1: if particle.beached == 0: (u1,", "= (particle.lon + u1*.5*particle.dt, particle.lat + v1*.5*particle.dt) (u2, v2) = fieldset.UV[time + .5", "lat3, lon3] particle.lon += (u1 + 2*u2 + 2*u3 + u4) / 6.", "lon1] lon2, lat2 = (particle.lon + u2*.5*particle.dt, particle.lat + v2*.5*particle.dt) (u3, v3) =", "particles that get out of bounds def freezeOutOfBoundsArctic(particle, fieldset, time): lon, lat =", "* particle.dt particle.beached = 2 def UnbeachAdvectionRK4(particle, fieldset, time): if particle.beached == 0:", "unbeachableBoundedParticle(JITParticle): # Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts # beached : 0 sea, 1 beached,", "particle.depth, particle.lat, particle.lon] lon1, lat1 = (particle.lon + u1*.5*particle.dt, particle.lat + v1*.5*particle.dt) (u2,", "advecting particles in Parcels\"\"\" from parcels import (JITParticle, Variable) import numpy as np", "freezeOutOfBoundsArctic70(particle, fieldset, time): lon, lat = (particle.lon, particle.lat) if lat < 70.: particle.inBounds", "fieldset.UV[time + .5 * particle.dt, particle.depth, lat1, lon1] lon2, lat2 = (particle.lon +", "particle.depth, lat1, lon1] lon2, lat2 = (particle.lon + u2*.5*particle.dt, particle.lat + v2*.5*particle.dt) (u3,", "65.: particle.inBounds = 0 def freezeOutOfBoundsArctic70(particle, fieldset, time): lon, lat = (particle.lon, particle.lat)", "if particle.lon < -180.: particle.lon = particle.lon + 360. def northPolePushBack(particle, fieldset, time):", "== 4: (ub, vb) = fieldset.UVunbeach[time, particle.depth, particle.lat, particle.lon] particle.lon += ub *", "it is not beached. def UnbeachBoundedAdvectionRK4(particle, fieldset, time): if particle.inBounds == 1: if", "beached = Variable('beached', dtype=np.int32, initial=0.) unbeachCount = Variable('unbeachCount', dtype=np.int32, initial=0.) 
class boundedParticle(JITParticle): #", "+ .5 * particle.dt, particle.depth, lat2, lon2] lon3, lat3 = (particle.lon + u3*particle.dt,", "Variable('inBounds', dtype=np.int32, initial=1.) class unbeachableParticle(JITParticle): # Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts # beached :", "unbeachCount = Variable('unbeachCount', dtype=np.int32, initial=0.) class boundedParticle(JITParticle): # inBounds : 1 yes, 0", "2*v3 + v4) / 6. * particle.dt particle.beached = 2 def BoundedAdvectionRK4(particle, fieldset,", "particle.lon < -180.: particle.lon = particle.lon + 360. def northPolePushBack(particle, fieldset, time): if", "particle.lon += ub * particle.dt particle.lat += vb * particle.dt particle.beached = 0", "+ u3*particle.dt, particle.lat + v3*particle.dt) (u4, v4) = fieldset.UV[time + particle.dt, particle.depth, lat3,", "after beach dyn, 4 please unbeach beached = Variable('beached', dtype=np.int32, initial=0.) unbeachCount =", "v3*particle.dt) (u4, v4) = fieldset.UV[time + particle.dt, particle.depth, lat3, lon3] particle.lon += (u1", "+ .5 * particle.dt, particle.depth, lat1, lon1] lon2, lat2 = (particle.lon + u2*.5*particle.dt,", "class unbeachableBoundedParticle(JITParticle): # Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts # beached : 0 sea, 1", "1e-14: if particle.beached == 2: particle.beached = 4 else: particle.beached = 1 else:", "from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts def unBeaching(particle, fieldset, time): if particle.beached == 4: (ub, vb) =", "(ub, vb) = fieldset.UVunbeach[time, particle.depth, particle.lat, particle.lon] particle.lon += ub * particle.dt particle.lat", "fabs(v) < 1e-14: if particle.beached == 2: particle.beached = 4 else: particle.beached =", "particle.inBounds = 0 # Freeze particles that get out of bounds def freezeOutOfBoundsArctic(particle,", "# Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts # beached : 0 sea, 1 beached, 2", "that get out of bounds def freezeOutOfBoundsWedge(particle, fieldset, time): lon, lat = (particle.lon,", "particle.delete() # Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts def beachTesting(particle, fieldset, time): if particle.beached ==", "non-beach dyn, 3 after beach dyn, 4 please unbeach beached = Variable('beached', dtype=np.int32,", "2: particle.beached = 4 else: particle.beached = 1 else: particle.beached = 0 #", "not beached. def UnbeachBoundedAdvectionRK4(particle, fieldset, time): if particle.inBounds == 1: if particle.beached ==", "fieldset, time): if particle.lat > 89.915: particle.lat = 89.915 # Freeze particles that", "- 360. if particle.lon < -180.: particle.lon = particle.lon + 360. def northPolePushBack(particle,", "(v1 + 2*v2 + 2*v3 + v4) / 6. * particle.dt def deleteParticle(particle,", "beached : 0 sea, 1 beached, 2 after non-beach dyn, 3 after beach", "if fabs(u) < 1e-14 and fabs(v) < 1e-14: if particle.beached == 2: particle.beached", "* particle.dt particle.lat += (v1 + 2*v2 + 2*v3 + v4) / 6.", "lat = (particle.lon, particle.lat) if lon > 65. or lon < -45. or", "3: (u, v) = fieldset.UV[time, particle.depth, particle.lat, particle.lon] if fabs(u) < 1e-14 and", "particle.beached = 1 else: particle.beached = 0 # Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts def", "particle.lat) if lat < 70.: particle.inBounds = 0 # Advection kernel. 
Checks first", "fieldset, time): if particle.beached == 2 or particle.beached == 3: (u, v) =", "2 def UnbeachAdvectionRK4(particle, fieldset, time): if particle.beached == 0: (u1, v1) = fieldset.UV[time,", "whether a particle is within bounds and whether it is not beached. def", "def deleteParticle(particle, fieldset, time): print(f\"Particle {particle.id} deleted: ({particle.lon}, {particle.lat} at {particle.time})\") particle.delete() #", "if lat < 70.: particle.inBounds = 0 # Advection kernel. Checks first whether", "89.915: particle.lat = 89.915 # Freeze particles that get out of bounds def", "Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts # beached : 0 sea, 1 beached, 2 after", "from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts # beached : 0 sea, 1 beached, 2 after non-beach dyn,", "-45. or lat > 85. or lat < 60.: particle.inBounds = 0 #", "2*v2 + 2*v3 + v4) / 6. * particle.dt particle.beached = 2 def", "if particle.inBounds == 1: (u1, v1) = fieldset.UV[time, particle.depth, particle.lat, particle.lon] lon1, lat1", "unbeachableParticle(JITParticle): # Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts # beached : 0 sea, 1 beached,", "dtype=np.int32, initial=1.) class unbeachableParticle(JITParticle): # Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts # beached : 0", "initial=1.) # Kernels for circular boundary def wrapLon(particle, fieldset, time): if particle.lon >", "= fieldset.UV[time, particle.depth, particle.lat, particle.lon] if fabs(u) < 1e-14 and fabs(v) < 1e-14:", "/ 6. * particle.dt def deleteParticle(particle, fieldset, time): print(f\"Particle {particle.id} deleted: ({particle.lon}, {particle.lat}", "= Variable('beached', dtype=np.int32, initial=0.) unbeachCount = Variable('unbeachCount', dtype=np.int32, initial=0.) # inBounds : 1", "== 1: (u1, v1) = fieldset.UV[time, particle.depth, particle.lat, particle.lon] lon1, lat1 = (particle.lon", "= particle.lon - 360. if particle.lon < -180.: particle.lon = particle.lon + 360.", "1: (u1, v1) = fieldset.UV[time, particle.depth, particle.lat, particle.lon] lon1, lat1 = (particle.lon +", "particle.dt def deleteParticle(particle, fieldset, time): print(f\"Particle {particle.id} deleted: ({particle.lon}, {particle.lat} at {particle.time})\") particle.delete()", "dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts # beached : 0 sea, 1 beached, 2 after non-beach", ": 1 yes, 0 no inBounds = Variable('inBounds', dtype=np.int32, initial=1.) class unbeachableParticle(JITParticle): #", "out of bounds def freezeOutOfBoundsWedge(particle, fieldset, time): lon, lat = (particle.lon, particle.lat) if", "Variable('beached', dtype=np.int32, initial=0.) unbeachCount = Variable('unbeachCount', dtype=np.int32, initial=0.) class boundedParticle(JITParticle): # inBounds :", "lat = (particle.lon, particle.lat) if lat < 60.: particle.inBounds = 0 def freezeOutOfBoundsArctic65(particle,", "particle.beached == 2 or particle.beached == 3: (u, v) = fieldset.UV[time, particle.depth, particle.lat,", "UnbeachBoundedAdvectionRK4(particle, fieldset, time): if particle.inBounds == 1: if particle.beached == 0: (u1, v1)", "particle.depth, particle.lat, particle.lon] particle.lon += ub * particle.dt particle.lat += vb * particle.dt", "+= (v1 + 2*v2 + 2*v3 + v4) / 6. * particle.dt def", "fabs(u) < 1e-14 and fabs(v) < 1e-14: if particle.beached == 2: particle.beached =", "or lat > 85. 
or lat < 60.: particle.inBounds = 0 # Freeze", "lon, lat = (particle.lon, particle.lat) if lat < 70.: particle.inBounds = 0 #", "particle.lat, particle.lon] lon1, lat1 = (particle.lon + u1*.5*particle.dt, particle.lat + v1*.5*particle.dt) (u2, v2)", "time): lon, lat = (particle.lon, particle.lat) if lat < 65.: particle.inBounds = 0", "= (particle.lon, particle.lat) if lon > 65. or lon < -45. or lat", "lon3, lat3 = (particle.lon + u3*particle.dt, particle.lat + v3*particle.dt) (u4, v4) = fieldset.UV[time", "freezeOutOfBoundsArctic(particle, fieldset, time): lon, lat = (particle.lon, particle.lat) if lat < 60.: particle.inBounds", "< -180.: particle.lon = particle.lon + 360. def northPolePushBack(particle, fieldset, time): if particle.lat", "u4) / 6. * particle.dt particle.lat += (v1 + 2*v2 + 2*v3 +", "= Variable('unbeachCount', dtype=np.int32, initial=0.) class boundedParticle(JITParticle): # inBounds : 1 yes, 0 no", "boundedParticle(JITParticle): # inBounds : 1 yes, 0 no inBounds = Variable('inBounds', dtype=np.int32, initial=1.)", "if particle.beached == 2 or particle.beached == 3: (u, v) = fieldset.UV[time, particle.depth,", "3 after beach dyn, 4 please unbeach beached = Variable('beached', dtype=np.int32, initial=0.) unbeachCount", "= 2 def UnbeachAdvectionRK4(particle, fieldset, time): if particle.beached == 0: (u1, v1) =", "deleted: ({particle.lon}, {particle.lat} at {particle.time})\") particle.delete() # Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts def beachTesting(particle,", "particle.lat, particle.lon] particle.lon += ub * particle.dt particle.lat += vb * particle.dt particle.beached", "particle.lat + v1*.5*particle.dt) (u2, v2) = fieldset.UV[time + .5 * particle.dt, particle.depth, lat1,", "def freezeOutOfBoundsArctic70(particle, fieldset, time): lon, lat = (particle.lon, particle.lat) if lat < 70.:", "+= (u1 + 2*u2 + 2*u3 + u4) / 6. * particle.dt particle.lat", "time): if particle.inBounds == 1: (u1, v1) = fieldset.UV[time, particle.depth, particle.lat, particle.lon] lon1,", "lat < 70.: particle.inBounds = 0 # Advection kernel. Checks first whether a", "is not beached. def UnbeachBoundedAdvectionRK4(particle, fieldset, time): if particle.inBounds == 1: if particle.beached", "u2*.5*particle.dt, particle.lat + v2*.5*particle.dt) (u3, v3) = fieldset.UV[time + .5 * particle.dt, particle.depth,", "bounds def freezeOutOfBoundsWedge(particle, fieldset, time): lon, lat = (particle.lon, particle.lat) if lon >", "+ 360. def northPolePushBack(particle, fieldset, time): if particle.lat > 89.915: particle.lat = 89.915", "at {particle.time})\") particle.delete() # Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts def beachTesting(particle, fieldset, time): if", "beach dyn, 4 please unbeach beached = Variable('beached', dtype=np.int32, initial=0.) unbeachCount = Variable('unbeachCount',", "> 180.: particle.lon = particle.lon - 360. if particle.lon < -180.: particle.lon =", "v1) = fieldset.UV[time, particle.depth, particle.lat, particle.lon] lon1, lat1 = (particle.lon + u1*.5*particle.dt, particle.lat", "2*v2 + 2*v3 + v4) / 6. 
* particle.dt def deleteParticle(particle, fieldset, time):", "+ u1*.5*particle.dt, particle.lat + v1*.5*particle.dt) (u2, v2) = fieldset.UV[time + .5 * particle.dt,", "Parcels\"\"\" from parcels import (JITParticle, Variable) import numpy as np class unbeachableBoundedParticle(JITParticle): #", "+ u2*.5*particle.dt, particle.lat + v2*.5*particle.dt) (u3, v3) = fieldset.UV[time + .5 * particle.dt,", "Checks first whether a particle is within bounds and whether it is not", "1 yes, 0 no inBounds = Variable('inBounds', dtype=np.int32, initial=1.) # Kernels for circular", "first whether a particle is within bounds and whether it is not beached.", "UnbeachAdvectionRK4(particle, fieldset, time): if particle.beached == 0: (u1, v1) = fieldset.UV[time, particle.depth, particle.lat,", "if lat < 60.: particle.inBounds = 0 def freezeOutOfBoundsArctic65(particle, fieldset, time): lon, lat", "time): lon, lat = (particle.lon, particle.lat) if lon > 65. or lon <", "= (particle.lon + u2*.5*particle.dt, particle.lat + v2*.5*particle.dt) (u3, v3) = fieldset.UV[time + .5", "+ v3*particle.dt) (u4, v4) = fieldset.UV[time + particle.dt, particle.depth, lat3, lon3] particle.lon +=", "time): if particle.beached == 2 or particle.beached == 3: (u, v) = fieldset.UV[time,", "dtype=np.int32, initial=0.) class boundedParticle(JITParticle): # inBounds : 1 yes, 0 no inBounds =", "particle.depth, lat3, lon3] particle.lon += (u1 + 2*u2 + 2*u3 + u4) /", "< 70.: particle.inBounds = 0 # Advection kernel. Checks first whether a particle", "(JITParticle, Variable) import numpy as np class unbeachableBoundedParticle(JITParticle): # Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts", "class unbeachableParticle(JITParticle): # Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts # beached : 0 sea, 1", "6. * particle.dt particle.beached = 2 def UnbeachAdvectionRK4(particle, fieldset, time): if particle.beached ==", "Advection kernel. Checks first whether a particle is within bounds and whether it", "def beachTesting(particle, fieldset, time): if particle.beached == 2 or particle.beached == 3: (u,", "= 4 else: particle.beached = 1 else: particle.beached = 0 # Beaching dynamics", "> 85. or lat < 60.: particle.inBounds = 0 # Freeze particles that", "whether it is not beached. def UnbeachBoundedAdvectionRK4(particle, fieldset, time): if particle.inBounds == 1:", "1e-14 and fabs(v) < 1e-14: if particle.beached == 2: particle.beached = 4 else:", "https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts # beached : 0 sea, 1 beached, 2 after non-beach dyn, 3", "fieldset.UV[time + particle.dt, particle.depth, lat3, lon3] particle.lon += (u1 + 2*u2 + 2*u3", "is within bounds and whether it is not beached. def UnbeachBoundedAdvectionRK4(particle, fieldset, time):", "dtype=np.int32, initial=1.) # Kernels for circular boundary def wrapLon(particle, fieldset, time): if particle.lon", "if particle.lon > 180.: particle.lon = particle.lon - 360. 
if particle.lon < -180.:", "time): if particle.lat > 89.915: particle.lat = 89.915 # Freeze particles that get", "# Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts def unBeaching(particle, fieldset, time): if particle.beached == 4:", "time): if particle.beached == 0: (u1, v1) = fieldset.UV[time, particle.depth, particle.lat, particle.lon] lon1,", "if particle.beached == 2: particle.beached = 4 else: particle.beached = 1 else: particle.beached", "after non-beach dyn, 3 after beach dyn, 4 please unbeach beached = Variable('beached',", "particle.lon] particle.lon += ub * particle.dt particle.lat += vb * particle.dt particle.beached =", "1: if particle.beached == 0: (u1, v1) = fieldset.UV[time, particle.depth, particle.lat, particle.lon] lon1,", "get out of bounds def freezeOutOfBoundsWedge(particle, fieldset, time): lon, lat = (particle.lon, particle.lat)", "# inBounds : 1 yes, 0 no inBounds = Variable('inBounds', dtype=np.int32, initial=1.) #", "0 no inBounds = Variable('inBounds', dtype=np.int32, initial=1.) # Kernels for circular boundary def", "0: (u1, v1) = fieldset.UV[time, particle.depth, particle.lat, particle.lon] lon1, lat1 = (particle.lon +", "= 0 # Advection kernel. Checks first whether a particle is within bounds", "if particle.lat > 89.915: particle.lat = 89.915 # Freeze particles that get out", "dtype=np.int32, initial=0.) # inBounds : 1 yes, 0 no inBounds = Variable('inBounds', dtype=np.int32,", "particle.lon = particle.lon + 360. def northPolePushBack(particle, fieldset, time): if particle.lat > 89.915:", "lat > 85. or lat < 60.: particle.inBounds = 0 # Freeze particles", "particles that get out of bounds def freezeOutOfBoundsWedge(particle, fieldset, time): lon, lat =", "= fieldset.UVunbeach[time, particle.depth, particle.lat, particle.lon] particle.lon += ub * particle.dt particle.lat += vb", "= fieldset.UV[time + .5 * particle.dt, particle.depth, lat1, lon1] lon2, lat2 = (particle.lon", "# Kernels for circular boundary def wrapLon(particle, fieldset, time): if particle.lon > 180.:", "= particle.lon + 360. def northPolePushBack(particle, fieldset, time): if particle.lat > 89.915: particle.lat", "unbeach beached = Variable('beached', dtype=np.int32, initial=0.) unbeachCount = Variable('unbeachCount', dtype=np.int32, initial=0.) 
# inBounds", "def BoundedAdvectionRK4(particle, fieldset, time): if particle.inBounds == 1: (u1, v1) = fieldset.UV[time, particle.depth,", "< 65.: particle.inBounds = 0 def freezeOutOfBoundsArctic70(particle, fieldset, time): lon, lat = (particle.lon,", ": 0 sea, 1 beached, 2 after non-beach dyn, 3 after beach dyn,", "= (particle.lon, particle.lat) if lat < 65.: particle.inBounds = 0 def freezeOutOfBoundsArctic70(particle, fieldset,", "(particle.lon, particle.lat) if lat < 65.: particle.inBounds = 0 def freezeOutOfBoundsArctic70(particle, fieldset, time):", "def freezeOutOfBoundsWedge(particle, fieldset, time): lon, lat = (particle.lon, particle.lat) if lon > 65.", "boundary def wrapLon(particle, fieldset, time): if particle.lon > 180.: particle.lon = particle.lon -", "(particle.lon + u2*.5*particle.dt, particle.lat + v2*.5*particle.dt) (u3, v3) = fieldset.UV[time + .5 *", "= 2 def BoundedAdvectionRK4(particle, fieldset, time): if particle.inBounds == 1: (u1, v1) =", "lat2, lon2] lon3, lat3 = (particle.lon + u3*particle.dt, particle.lat + v3*particle.dt) (u4, v4)", "dyn, 3 after beach dyn, 4 please unbeach beached = Variable('beached', dtype=np.int32, initial=0.)", "+ v1*.5*particle.dt) (u2, v2) = fieldset.UV[time + .5 * particle.dt, particle.depth, lat1, lon1]", "for advecting particles in Parcels\"\"\" from parcels import (JITParticle, Variable) import numpy as", "Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts def unBeaching(particle, fieldset, time): if particle.beached == 4: (ub,", "particle.beached == 3: (u, v) = fieldset.UV[time, particle.depth, particle.lat, particle.lon] if fabs(u) <", "lon, lat = (particle.lon, particle.lat) if lat < 65.: particle.inBounds = 0 def", "== 2 or particle.beached == 3: (u, v) = fieldset.UV[time, particle.depth, particle.lat, particle.lon]", "* particle.dt particle.lat += vb * particle.dt particle.beached = 0 particle.unbeachCount += 1", "particle.beached = 0 # Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts def unBeaching(particle, fieldset, time): if", "sea, 1 beached, 2 after non-beach dyn, 3 after beach dyn, 4 please", "+= (v1 + 2*v2 + 2*v3 + v4) / 6. * particle.dt particle.beached", "Variable('inBounds', dtype=np.int32, initial=1.) # Kernels for circular boundary def wrapLon(particle, fieldset, time): if", "v1*.5*particle.dt) (u2, v2) = fieldset.UV[time + .5 * particle.dt, particle.depth, lat1, lon1] lon2,", "lat < 60.: particle.inBounds = 0 def freezeOutOfBoundsArctic65(particle, fieldset, time): lon, lat =", "particle.beached = 2 def UnbeachAdvectionRK4(particle, fieldset, time): if particle.beached == 0: (u1, v1)", "def wrapLon(particle, fieldset, time): if particle.lon > 180.: particle.lon = particle.lon - 360.", "lat2 = (particle.lon + u2*.5*particle.dt, particle.lat + v2*.5*particle.dt) (u3, v3) = fieldset.UV[time +", "time): lon, lat = (particle.lon, particle.lat) if lat < 70.: particle.inBounds = 0", "BoundedAdvectionRK4(particle, fieldset, time): if particle.inBounds == 1: (u1, v1) = fieldset.UV[time, particle.depth, particle.lat,", "if particle.beached == 4: (ub, vb) = fieldset.UVunbeach[time, particle.depth, particle.lat, particle.lon] particle.lon +=", "# inBounds : 1 yes, 0 no inBounds = Variable('inBounds', dtype=np.int32, initial=1.) class", "from parcels import (JITParticle, Variable) import numpy as np class unbeachableBoundedParticle(JITParticle): # Beaching", "yes, 0 no inBounds = Variable('inBounds', dtype=np.int32, initial=1.) 
# Kernels for circular boundary", "6. * particle.dt particle.lat += (v1 + 2*v2 + 2*v3 + v4) /", "= (particle.lon, particle.lat) if lat < 70.: particle.inBounds = 0 # Advection kernel.", "as np class unbeachableBoundedParticle(JITParticle): # Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts # beached : 0", "inBounds : 1 yes, 0 no inBounds = Variable('inBounds', dtype=np.int32, initial=1.) class unbeachableParticle(JITParticle):", "Variable('unbeachCount', dtype=np.int32, initial=0.) class boundedParticle(JITParticle): # inBounds : 1 yes, 0 no inBounds", "if lat < 65.: particle.inBounds = 0 def freezeOutOfBoundsArctic70(particle, fieldset, time): lon, lat", "2*u3 + u4) / 6. * particle.dt particle.lat += (v1 + 2*v2 +", "particle.depth, particle.lat, particle.lon] if fabs(u) < 1e-14 and fabs(v) < 1e-14: if particle.beached", "Variable) import numpy as np class unbeachableBoundedParticle(JITParticle): # Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts #", "particle.lon + 360. def northPolePushBack(particle, fieldset, time): if particle.lat > 89.915: particle.lat =", "= fieldset.UV[time + .5 * particle.dt, particle.depth, lat2, lon2] lon3, lat3 = (particle.lon", "fieldset, time): if particle.lon > 180.: particle.lon = particle.lon - 360. if particle.lon", "2*v3 + v4) / 6. * particle.dt def deleteParticle(particle, fieldset, time): print(f\"Particle {particle.id}", "if lon > 65. or lon < -45. or lat > 85. or", "no inBounds = Variable('inBounds', dtype=np.int32, initial=1.) # Kernels for circular boundary def wrapLon(particle,", ": 1 yes, 0 no inBounds = Variable('inBounds', dtype=np.int32, initial=1.) # Kernels for", "bounds and whether it is not beached. def UnbeachBoundedAdvectionRK4(particle, fieldset, time): if particle.inBounds", "* particle.dt, particle.depth, lat1, lon1] lon2, lat2 = (particle.lon + u2*.5*particle.dt, particle.lat +", "(u1 + 2*u2 + 2*u3 + u4) / 6. * particle.dt particle.lat +=", "v4) / 6. * particle.dt def deleteParticle(particle, fieldset, time): print(f\"Particle {particle.id} deleted: ({particle.lon},", "+ v2*.5*particle.dt) (u3, v3) = fieldset.UV[time + .5 * particle.dt, particle.depth, lat2, lon2]", "particle.dt particle.beached = 2 def UnbeachAdvectionRK4(particle, fieldset, time): if particle.beached == 0: (u1,", "0 # Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts def unBeaching(particle, fieldset, time): if particle.beached ==", "lat3 = (particle.lon + u3*particle.dt, particle.lat + v3*particle.dt) (u4, v4) = fieldset.UV[time +", "for circular boundary def wrapLon(particle, fieldset, time): if particle.lon > 180.: particle.lon =", "1 beached, 2 after non-beach dyn, 3 after beach dyn, 4 please unbeach", "particle.inBounds = 0 def freezeOutOfBoundsArctic70(particle, fieldset, time): lon, lat = (particle.lon, particle.lat) if", "particle.beached == 0: (u1, v1) = fieldset.UV[time, particle.depth, particle.lat, particle.lon] lon1, lat1 =", "(particle.lon, particle.lat) if lat < 70.: particle.inBounds = 0 # Advection kernel. Checks", "if particle.inBounds == 1: if particle.beached == 0: (u1, v1) = fieldset.UV[time, particle.depth,", "< 1e-14 and fabs(v) < 1e-14: if particle.beached == 2: particle.beached = 4", "< -45. or lat > 85. 
or lat < 60.: particle.inBounds = 0", "from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts def beachTesting(particle, fieldset, time): if particle.beached == 2 or particle.beached ==", "particle.inBounds == 1: (u1, v1) = fieldset.UV[time, particle.depth, particle.lat, particle.lon] lon1, lat1 =", "unbeach beached = Variable('beached', dtype=np.int32, initial=0.) unbeachCount = Variable('unbeachCount', dtype=np.int32, initial=0.) class boundedParticle(JITParticle):", "freezeOutOfBoundsArctic65(particle, fieldset, time): lon, lat = (particle.lon, particle.lat) if lat < 65.: particle.inBounds", "particle.inBounds = 0 # Advection kernel. Checks first whether a particle is within", "< 60.: particle.inBounds = 0 # Freeze particles that get out of bounds", "65. or lon < -45. or lat > 85. or lat < 60.:", "fieldset.UV[time + .5 * particle.dt, particle.depth, lat2, lon2] lon3, lat3 = (particle.lon +", "(u4, v4) = fieldset.UV[time + particle.dt, particle.depth, lat3, lon3] particle.lon += (u1 +", "fieldset, time): lon, lat = (particle.lon, particle.lat) if lat < 60.: particle.inBounds =", "particle.lat = 89.915 # Freeze particles that get out of bounds def freezeOutOfBoundsWedge(particle,", ".5 * particle.dt, particle.depth, lat1, lon1] lon2, lat2 = (particle.lon + u2*.5*particle.dt, particle.lat", "* particle.dt, particle.depth, lat2, lon2] lon3, lat3 = (particle.lon + u3*particle.dt, particle.lat +", "def northPolePushBack(particle, fieldset, time): if particle.lat > 89.915: particle.lat = 89.915 # Freeze", "fieldset, time): print(f\"Particle {particle.id} deleted: ({particle.lon}, {particle.lat} at {particle.time})\") particle.delete() # Beaching dynamics", "60.: particle.inBounds = 0 def freezeOutOfBoundsArctic65(particle, fieldset, time): lon, lat = (particle.lon, particle.lat)", "if particle.beached == 0: (u1, v1) = fieldset.UV[time, particle.depth, particle.lat, particle.lon] lon1, lat1", "particle.lon > 180.: particle.lon = particle.lon - 360. if particle.lon < -180.: particle.lon", "of bounds def freezeOutOfBoundsArctic(particle, fieldset, time): lon, lat = (particle.lon, particle.lat) if lat", "= 0 # Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts def unBeaching(particle, fieldset, time): if particle.beached", "dyn, 4 please unbeach beached = Variable('beached', dtype=np.int32, initial=0.) unbeachCount = Variable('unbeachCount', dtype=np.int32,", "2*u2 + 2*u3 + u4) / 6. * particle.dt particle.lat += (v1 +", "dtype=np.int32, initial=0.) unbeachCount = Variable('unbeachCount', dtype=np.int32, initial=0.) class boundedParticle(JITParticle): # inBounds : 1", "within bounds and whether it is not beached. def UnbeachBoundedAdvectionRK4(particle, fieldset, time): if", "0 # Advection kernel. 
Checks first whether a particle is within bounds and", "{particle.id} deleted: ({particle.lon}, {particle.lat} at {particle.time})\") particle.delete() # Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts def", "(u, v) = fieldset.UV[time, particle.depth, particle.lat, particle.lon] if fabs(u) < 1e-14 and fabs(v)", "lon2, lat2 = (particle.lon + u2*.5*particle.dt, particle.lat + v2*.5*particle.dt) (u3, v3) = fieldset.UV[time", "particle.dt, particle.depth, lat2, lon2] lon3, lat3 = (particle.lon + u3*particle.dt, particle.lat + v3*particle.dt)", "beached, 2 after non-beach dyn, 3 after beach dyn, 4 please unbeach beached", "particle.inBounds = 0 def freezeOutOfBoundsArctic65(particle, fieldset, time): lon, lat = (particle.lon, particle.lat) if", "initial=0.) unbeachCount = Variable('unbeachCount', dtype=np.int32, initial=0.) class boundedParticle(JITParticle): # inBounds : 1 yes,", "+ 2*v2 + 2*v3 + v4) / 6. * particle.dt def deleteParticle(particle, fieldset,", "Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts def beachTesting(particle, fieldset, time): if particle.beached == 2 or", "# beached : 0 sea, 1 beached, 2 after non-beach dyn, 3 after", "freezeOutOfBoundsWedge(particle, fieldset, time): lon, lat = (particle.lon, particle.lat) if lon > 65. or", "Variable('beached', dtype=np.int32, initial=0.) unbeachCount = Variable('unbeachCount', dtype=np.int32, initial=0.) # inBounds : 1 yes,", "def freezeOutOfBoundsArctic(particle, fieldset, time): lon, lat = (particle.lon, particle.lat) if lat < 60.:", "particle.lat) if lat < 65.: particle.inBounds = 0 def freezeOutOfBoundsArctic70(particle, fieldset, time): lon,", "-180.: particle.lon = particle.lon + 360. def northPolePushBack(particle, fieldset, time): if particle.lat >", "particle.lat + v2*.5*particle.dt) (u3, v3) = fieldset.UV[time + .5 * particle.dt, particle.depth, lat2,", "1 else: particle.beached = 0 # Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts def unBeaching(particle, fieldset,", "fieldset.UV[time, particle.depth, particle.lat, particle.lon] if fabs(u) < 1e-14 and fabs(v) < 1e-14: if", "= 0 # Freeze particles that get out of bounds def freezeOutOfBoundsArctic(particle, fieldset,", "0 def freezeOutOfBoundsArctic65(particle, fieldset, time): lon, lat = (particle.lon, particle.lat) if lat <", "u1*.5*particle.dt, particle.lat + v1*.5*particle.dt) (u2, v2) = fieldset.UV[time + .5 * particle.dt, particle.depth,", "or lon < -45. or lat > 85. or lat < 60.: particle.inBounds", "0 no inBounds = Variable('inBounds', dtype=np.int32, initial=1.) class unbeachableParticle(JITParticle): # Beaching dynamics from", "else: particle.beached = 0 # Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts def unBeaching(particle, fieldset, time):", "v4) = fieldset.UV[time + particle.dt, particle.depth, lat3, lon3] particle.lon += (u1 + 2*u2", "out of bounds def freezeOutOfBoundsArctic(particle, fieldset, time): lon, lat = (particle.lon, particle.lat) if", "# Advection kernel. Checks first whether a particle is within bounds and whether", "Variable('unbeachCount', dtype=np.int32, initial=0.) 
# inBounds : 1 yes, 0 no inBounds = Variable('inBounds',", "{particle.time})\") particle.delete() # Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts def beachTesting(particle, fieldset, time): if particle.beached", "import numpy as np class unbeachableBoundedParticle(JITParticle): # Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts # beached", "class boundedParticle(JITParticle): # inBounds : 1 yes, 0 no inBounds = Variable('inBounds', dtype=np.int32,", "* particle.dt def deleteParticle(particle, fieldset, time): print(f\"Particle {particle.id} deleted: ({particle.lon}, {particle.lat} at {particle.time})\")", "Freeze particles that get out of bounds def freezeOutOfBoundsArctic(particle, fieldset, time): lon, lat", "parcels import (JITParticle, Variable) import numpy as np class unbeachableBoundedParticle(JITParticle): # Beaching dynamics", "lat < 60.: particle.inBounds = 0 # Freeze particles that get out of", "== 0: (u1, v1) = fieldset.UV[time, particle.depth, particle.lat, particle.lon] lon1, lat1 = (particle.lon", "lon3] particle.lon += (u1 + 2*u2 + 2*u3 + u4) / 6. *", "yes, 0 no inBounds = Variable('inBounds', dtype=np.int32, initial=1.) class unbeachableParticle(JITParticle): # Beaching dynamics", "a particle is within bounds and whether it is not beached. def UnbeachBoundedAdvectionRK4(particle,", "else: particle.beached = 1 else: particle.beached = 0 # Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts", "= 0 def freezeOutOfBoundsArctic65(particle, fieldset, time): lon, lat = (particle.lon, particle.lat) if lat", "inBounds = Variable('inBounds', dtype=np.int32, initial=1.) # Kernels for circular boundary def wrapLon(particle, fieldset,", "get out of bounds def freezeOutOfBoundsArctic(particle, fieldset, time): lon, lat = (particle.lon, particle.lat)", "time): if particle.lon > 180.: particle.lon = particle.lon - 360. if particle.lon <", "+ u4) / 6. * particle.dt particle.lat += (v1 + 2*v2 + 2*v3", "initial=1.) class unbeachableParticle(JITParticle): # Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts # beached : 0 sea,", "circular boundary def wrapLon(particle, fieldset, time): if particle.lon > 180.: particle.lon = particle.lon", "(particle.lon, particle.lat) if lon > 65. or lon < -45. or lat >", "particle.lat, particle.lon] if fabs(u) < 1e-14 and fabs(v) < 1e-14: if particle.beached ==", "particle.dt particle.lat += (v1 + 2*v2 + 2*v3 + v4) / 6. *", "2 or particle.beached == 3: (u, v) = fieldset.UV[time, particle.depth, particle.lat, particle.lon] if", "< 60.: particle.inBounds = 0 def freezeOutOfBoundsArctic65(particle, fieldset, time): lon, lat = (particle.lon,", "4 please unbeach beached = Variable('beached', dtype=np.int32, initial=0.) unbeachCount = Variable('unbeachCount', dtype=np.int32, initial=0.)", "and fabs(v) < 1e-14: if particle.beached == 2: particle.beached = 4 else: particle.beached", "lon < -45. or lat > 85. or lat < 60.: particle.inBounds =", "# Freeze particles that get out of bounds def freezeOutOfBoundsArctic(particle, fieldset, time): lon,", "+ 2*v3 + v4) / 6. * particle.dt def deleteParticle(particle, fieldset, time): print(f\"Particle", "Freeze particles that get out of bounds def freezeOutOfBoundsWedge(particle, fieldset, time): lon, lat", "particle is within bounds and whether it is not beached. 
def UnbeachBoundedAdvectionRK4(particle, fieldset,", "def unBeaching(particle, fieldset, time): if particle.beached == 4: (ub, vb) = fieldset.UVunbeach[time, particle.depth,", "= Variable('inBounds', dtype=np.int32, initial=1.) # Kernels for circular boundary def wrapLon(particle, fieldset, time):", "time): if particle.beached == 4: (ub, vb) = fieldset.UVunbeach[time, particle.depth, particle.lat, particle.lon] particle.lon", "= 0 def freezeOutOfBoundsArctic70(particle, fieldset, time): lon, lat = (particle.lon, particle.lat) if lat", "dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts def beachTesting(particle, fieldset, time): if particle.beached == 2 or particle.beached", "fieldset, time): lon, lat = (particle.lon, particle.lat) if lat < 70.: particle.inBounds =", "({particle.lon}, {particle.lat} at {particle.time})\") particle.delete() # Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts def beachTesting(particle, fieldset,", "deleteParticle(particle, fieldset, time): print(f\"Particle {particle.id} deleted: ({particle.lon}, {particle.lat} at {particle.time})\") particle.delete() # Beaching", "180.: particle.lon = particle.lon - 360. if particle.lon < -180.: particle.lon = particle.lon", "1 yes, 0 no inBounds = Variable('inBounds', dtype=np.int32, initial=1.) class unbeachableParticle(JITParticle): # Beaching", "numpy as np class unbeachableBoundedParticle(JITParticle): # Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts # beached :", "particle.lat) if lat < 60.: particle.inBounds = 0 def freezeOutOfBoundsArctic65(particle, fieldset, time): lon,", "/ 6. * particle.dt particle.lat += (v1 + 2*v2 + 2*v3 + v4)", "= 1 else: particle.beached = 0 # Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts def unBeaching(particle,", "fieldset, time): if particle.inBounds == 1: (u1, v1) = fieldset.UV[time, particle.depth, particle.lat, particle.lon]", "4: (ub, vb) = fieldset.UVunbeach[time, particle.depth, particle.lat, particle.lon] particle.lon += ub * particle.dt", "+ 2*v3 + v4) / 6. * particle.dt particle.beached = 2 def BoundedAdvectionRK4(particle,", "+= ub * particle.dt particle.lat += vb * particle.dt particle.beached = 0 particle.unbeachCount", "= Variable('beached', dtype=np.int32, initial=0.) unbeachCount = Variable('unbeachCount', dtype=np.int32, initial=0.) class boundedParticle(JITParticle): # inBounds", "dtype=np.int32, initial=0.) unbeachCount = Variable('unbeachCount', dtype=np.int32, initial=0.) # inBounds : 1 yes, 0", "particle.lon] lon1, lat1 = (particle.lon + u1*.5*particle.dt, particle.lat + v1*.5*particle.dt) (u2, v2) =", "{particle.lat} at {particle.time})\") particle.delete() # Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts def beachTesting(particle, fieldset, time):", "particle.lon - 360. if particle.lon < -180.: particle.lon = particle.lon + 360. def", "60.: particle.inBounds = 0 # Freeze particles that get out of bounds def", "particle.lat + v3*particle.dt) (u4, v4) = fieldset.UV[time + particle.dt, particle.depth, lat3, lon3] particle.lon", "= Variable('unbeachCount', dtype=np.int32, initial=0.) # inBounds : 1 yes, 0 no inBounds =", "beachTesting(particle, fieldset, time): if particle.beached == 2 or particle.beached == 3: (u, v)", "lat = (particle.lon, particle.lat) if lat < 70.: particle.inBounds = 0 # Advection", "v4) / 6. 
* particle.dt particle.beached = 2 def UnbeachAdvectionRK4(particle, fieldset, time): if", "dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts def unBeaching(particle, fieldset, time): if particle.beached == 4: (ub, vb)", "lon, lat = (particle.lon, particle.lat) if lat < 60.: particle.inBounds = 0 def", "* particle.dt particle.beached = 2 def BoundedAdvectionRK4(particle, fieldset, time): if particle.inBounds == 1:", "/ 6. * particle.dt particle.beached = 2 def BoundedAdvectionRK4(particle, fieldset, time): if particle.inBounds", "+ v4) / 6. * particle.dt particle.beached = 2 def UnbeachAdvectionRK4(particle, fieldset, time):", "inBounds : 1 yes, 0 no inBounds = Variable('inBounds', dtype=np.int32, initial=1.) # Kernels", "vb) = fieldset.UVunbeach[time, particle.depth, particle.lat, particle.lon] particle.lon += ub * particle.dt particle.lat +=", "+ 2*u3 + u4) / 6. * particle.dt particle.lat += (v1 + 2*v2", "Kernels for circular boundary def wrapLon(particle, fieldset, time): if particle.lon > 180.: particle.lon", "ub * particle.dt particle.lat += vb * particle.dt particle.beached = 0 particle.unbeachCount +=", "+ v4) / 6. * particle.dt particle.beached = 2 def BoundedAdvectionRK4(particle, fieldset, time):", "lon2] lon3, lat3 = (particle.lon + u3*particle.dt, particle.lat + v3*particle.dt) (u4, v4) =", "2 after non-beach dyn, 3 after beach dyn, 4 please unbeach beached =", "fieldset, time): if particle.beached == 4: (ub, vb) = fieldset.UVunbeach[time, particle.depth, particle.lat, particle.lon]", "particle.beached = 2 def BoundedAdvectionRK4(particle, fieldset, time): if particle.inBounds == 1: (u1, v1)", "0 def freezeOutOfBoundsArctic70(particle, fieldset, time): lon, lat = (particle.lon, particle.lat) if lat <", "initial=0.) class boundedParticle(JITParticle): # inBounds : 1 yes, 0 no inBounds = Variable('inBounds',", "\"\"\"Kernels for advecting particles in Parcels\"\"\" from parcels import (JITParticle, Variable) import numpy", "> 89.915: particle.lat = 89.915 # Freeze particles that get out of bounds", "inBounds = Variable('inBounds', dtype=np.int32, initial=1.) class unbeachableParticle(JITParticle): # Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts #", "of bounds def freezeOutOfBoundsWedge(particle, fieldset, time): lon, lat = (particle.lon, particle.lat) if lon", "lat1 = (particle.lon + u1*.5*particle.dt, particle.lat + v1*.5*particle.dt) (u2, v2) = fieldset.UV[time +", "fieldset.UVunbeach[time, particle.depth, particle.lat, particle.lon] particle.lon += ub * particle.dt particle.lat += vb *", "(particle.lon, particle.lat) if lat < 60.: particle.inBounds = 0 def freezeOutOfBoundsArctic65(particle, fieldset, time):", "particle.dt, particle.depth, lat3, lon3] particle.lon += (u1 + 2*u2 + 2*u3 + u4)", "(v1 + 2*v2 + 2*v3 + v4) / 6. * particle.dt particle.beached =", ".5 * particle.dt, particle.depth, lat2, lon2] lon3, lat3 = (particle.lon + u3*particle.dt, particle.lat", "+ 2*v3 + v4) / 6. * particle.dt particle.beached = 2 def UnbeachAdvectionRK4(particle,", "= Variable('inBounds', dtype=np.int32, initial=1.) 
class unbeachableParticle(JITParticle): # Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts # beached", "unBeaching(particle, fieldset, time): if particle.beached == 4: (ub, vb) = fieldset.UVunbeach[time, particle.depth, particle.lat,", "lat < 65.: particle.inBounds = 0 def freezeOutOfBoundsArctic70(particle, fieldset, time): lon, lat =", "particle.inBounds == 1: if particle.beached == 0: (u1, v1) = fieldset.UV[time, particle.depth, particle.lat,", "no inBounds = Variable('inBounds', dtype=np.int32, initial=1.) class unbeachableParticle(JITParticle): # Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts", "in Parcels\"\"\" from parcels import (JITParticle, Variable) import numpy as np class unbeachableBoundedParticle(JITParticle):", "particle.lon = particle.lon - 360. if particle.lon < -180.: particle.lon = particle.lon +", "particle.dt, particle.depth, lat1, lon1] lon2, lat2 = (particle.lon + u2*.5*particle.dt, particle.lat + v2*.5*particle.dt)", "or particle.beached == 3: (u, v) = fieldset.UV[time, particle.depth, particle.lat, particle.lon] if fabs(u)", "6. * particle.dt particle.beached = 2 def BoundedAdvectionRK4(particle, fieldset, time): if particle.inBounds ==", "import (JITParticle, Variable) import numpy as np class unbeachableBoundedParticle(JITParticle): # Beaching dynamics from", "0 # Freeze particles that get out of bounds def freezeOutOfBoundsArctic(particle, fieldset, time):", "+ 2*u2 + 2*u3 + u4) / 6. * particle.dt particle.lat += (v1", "# Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts def beachTesting(particle, fieldset, time): if particle.beached == 2", "4 else: particle.beached = 1 else: particle.beached = 0 # Beaching dynamics from", "particle.lon] if fabs(u) < 1e-14 and fabs(v) < 1e-14: if particle.beached == 2:", "v2*.5*particle.dt) (u3, v3) = fieldset.UV[time + .5 * particle.dt, particle.depth, lat2, lon2] lon3,", "lon, lat = (particle.lon, particle.lat) if lon > 65. or lon < -45.", "lon1, lat1 = (particle.lon + u1*.5*particle.dt, particle.lat + v1*.5*particle.dt) (u2, v2) = fieldset.UV[time", "time): lon, lat = (particle.lon, particle.lat) if lat < 60.: particle.inBounds = 0", "beached = Variable('beached', dtype=np.int32, initial=0.) unbeachCount = Variable('unbeachCount', dtype=np.int32, initial=0.) # inBounds :", "6. * particle.dt def deleteParticle(particle, fieldset, time): print(f\"Particle {particle.id} deleted: ({particle.lon}, {particle.lat} at", "= fieldset.UV[time + particle.dt, particle.depth, lat3, lon3] particle.lon += (u1 + 2*u2 +", "and whether it is not beached. def UnbeachBoundedAdvectionRK4(particle, fieldset, time): if particle.inBounds ==", "(u2, v2) = fieldset.UV[time + .5 * particle.dt, particle.depth, lat1, lon1] lon2, lat2", "= (particle.lon + u3*particle.dt, particle.lat + v3*particle.dt) (u4, v4) = fieldset.UV[time + particle.dt,", "np class unbeachableBoundedParticle(JITParticle): # Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts # beached : 0 sea,", "v) = fieldset.UV[time, particle.depth, particle.lat, particle.lon] if fabs(u) < 1e-14 and fabs(v) <", "360. if particle.lon < -180.: particle.lon = particle.lon + 360. 
def northPolePushBack(particle, fieldset,", "(particle.lon + u3*particle.dt, particle.lat + v3*particle.dt) (u4, v4) = fieldset.UV[time + particle.dt, particle.depth,", "particle.depth, lat2, lon2] lon3, lat3 = (particle.lon + u3*particle.dt, particle.lat + v3*particle.dt) (u4,", "/ 6. * particle.dt particle.beached = 2 def UnbeachAdvectionRK4(particle, fieldset, time): if particle.beached", "kernel. Checks first whether a particle is within bounds and whether it is", "== 2: particle.beached = 4 else: particle.beached = 1 else: particle.beached = 0", "fieldset, time): if particle.beached == 0: (u1, v1) = fieldset.UV[time, particle.depth, particle.lat, particle.lon]", "wrapLon(particle, fieldset, time): if particle.lon > 180.: particle.lon = particle.lon - 360. if", "> 65. or lon < -45. or lat > 85. or lat <", "beached. def UnbeachBoundedAdvectionRK4(particle, fieldset, time): if particle.inBounds == 1: if particle.beached == 0:", "particle.lon += (u1 + 2*u2 + 2*u3 + u4) / 6. * particle.dt", "particle.lat) if lon > 65. or lon < -45. or lat > 85.", "(u1, v1) = fieldset.UV[time, particle.depth, particle.lat, particle.lon] lon1, lat1 = (particle.lon + u1*.5*particle.dt,", "+ 2*v2 + 2*v3 + v4) / 6. * particle.dt particle.beached = 2", "particle.lat > 89.915: particle.lat = 89.915 # Freeze particles that get out of", "bounds def freezeOutOfBoundsArctic(particle, fieldset, time): lon, lat = (particle.lon, particle.lat) if lat <", "fieldset.UV[time, particle.depth, particle.lat, particle.lon] lon1, lat1 = (particle.lon + u1*.5*particle.dt, particle.lat + v1*.5*particle.dt)", "initial=0.) # inBounds : 1 yes, 0 no inBounds = Variable('inBounds', dtype=np.int32, initial=1.)", "def freezeOutOfBoundsArctic65(particle, fieldset, time): lon, lat = (particle.lon, particle.lat) if lat < 65.:", "== 1: if particle.beached == 0: (u1, v1) = fieldset.UV[time, particle.depth, particle.lat, particle.lon]", "particle.beached == 2: particle.beached = 4 else: particle.beached = 1 else: particle.beached =", "particle.beached == 4: (ub, vb) = fieldset.UVunbeach[time, particle.depth, particle.lat, particle.lon] particle.lon += ub", "https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts def beachTesting(particle, fieldset, time): if particle.beached == 2 or particle.beached == 3:", "unbeachCount = Variable('unbeachCount', dtype=np.int32, initial=0.) # inBounds : 1 yes, 0 no inBounds", "fieldset, time): if particle.inBounds == 1: if particle.beached == 0: (u1, v1) =", "89.915 # Freeze particles that get out of bounds def freezeOutOfBoundsWedge(particle, fieldset, time):", "time): if particle.inBounds == 1: if particle.beached == 0: (u1, v1) = fieldset.UV[time,", "lat1, lon1] lon2, lat2 = (particle.lon + u2*.5*particle.dt, particle.lat + v2*.5*particle.dt) (u3, v3)", "fieldset, time): lon, lat = (particle.lon, particle.lat) if lat < 65.: particle.inBounds =", "(particle.lon + u1*.5*particle.dt, particle.lat + v1*.5*particle.dt) (u2, v2) = fieldset.UV[time + .5 *", "def UnbeachAdvectionRK4(particle, fieldset, time): if particle.beached == 0: (u1, v1) = fieldset.UV[time, particle.depth,", "< 1e-14: if particle.beached == 2: particle.beached = 4 else: particle.beached = 1", "70.: particle.inBounds = 0 # Advection kernel. Checks first whether a particle is", "= 89.915 # Freeze particles that get out of bounds def freezeOutOfBoundsWedge(particle, fieldset,", "= (particle.lon, particle.lat) if lat < 60.: particle.inBounds = 0 def freezeOutOfBoundsArctic65(particle, fieldset,", "v4) / 6. 
* particle.dt particle.beached = 2 def BoundedAdvectionRK4(particle, fieldset, time): if", "85. or lat < 60.: particle.inBounds = 0 # Freeze particles that get", "u3*particle.dt, particle.lat + v3*particle.dt) (u4, v4) = fieldset.UV[time + particle.dt, particle.depth, lat3, lon3]", "initial=0.) unbeachCount = Variable('unbeachCount', dtype=np.int32, initial=0.) # inBounds : 1 yes, 0 no", "lon > 65. or lon < -45. or lat > 85. or lat", "+ particle.dt, particle.depth, lat3, lon3] particle.lon += (u1 + 2*u2 + 2*u3 +", "particle.dt particle.beached = 2 def BoundedAdvectionRK4(particle, fieldset, time): if particle.inBounds == 1: (u1,", "that get out of bounds def freezeOutOfBoundsArctic(particle, fieldset, time): lon, lat = (particle.lon,", "particles in Parcels\"\"\" from parcels import (JITParticle, Variable) import numpy as np class", "https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts def unBeaching(particle, fieldset, time): if particle.beached == 4: (ub, vb) = fieldset.UVunbeach[time,", "360. def northPolePushBack(particle, fieldset, time): if particle.lat > 89.915: particle.lat = 89.915 #", "particle.lat += (v1 + 2*v2 + 2*v3 + v4) / 6. * particle.dt", "print(f\"Particle {particle.id} deleted: ({particle.lon}, {particle.lat} at {particle.time})\") particle.delete() # Beaching dynamics from https://github.com/OceanParcels/Parcelsv2.0PaperNorthSeaScripts", "northPolePushBack(particle, fieldset, time): if particle.lat > 89.915: particle.lat = 89.915 # Freeze particles", "or lat < 60.: particle.inBounds = 0 # Freeze particles that get out", "= fieldset.UV[time, particle.depth, particle.lat, particle.lon] lon1, lat1 = (particle.lon + u1*.5*particle.dt, particle.lat +" ]
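All three advection kernels above apply the same classical fourth-order Runge-Kutta update: the velocity field is sampled at the start point, at two midpoints, and at the full-step point, and the position is advanced by the weighted average (k1 + 2*k2 + 2*k3 + k4) / 6. The same update can be written outside Parcels' kernel machinery against any velocity function. The sketch below assumes a hypothetical velocity(t, lat, lon) callable returning (u, v) in degrees per second, mirroring how the kernels add u*dt directly to longitude; it is not part of the Parcels API.

def rk4_step(velocity, t, lon, lat, dt):
    """One RK4 advection step for a velocity(t, lat, lon) -> (u, v) field."""
    u1, v1 = velocity(t, lat, lon)
    lon1, lat1 = lon + u1 * 0.5 * dt, lat + v1 * 0.5 * dt
    u2, v2 = velocity(t + 0.5 * dt, lat1, lon1)
    lon2, lat2 = lon + u2 * 0.5 * dt, lat + v2 * 0.5 * dt
    u3, v3 = velocity(t + 0.5 * dt, lat2, lon2)
    lon3, lat3 = lon + u3 * dt, lat + v3 * dt
    u4, v4 = velocity(t + dt, lat3, lon3)
    new_lon = lon + (u1 + 2 * u2 + 2 * u3 + u4) / 6.0 * dt
    new_lat = lat + (v1 + 2 * v2 + 2 * v3 + v4) / 6.0 * dt
    return new_lon, new_lat

# Purely illustrative example with a solid-body rotation field.
if __name__ == "__main__":
    omega = 1e-5  # angular rate, 1/s
    rotation = lambda t, lat, lon: (-omega * lat, omega * lon)
    print(rk4_step(rotation, t=0.0, lon=1.0, lat=0.0, dt=600.0))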
[ "not in config[\"github\"]: raise click.BadParameter(GhiaPatterns.CONFIG_VALIDATION_ERR) if \"patterns\" not in config: raise click.BadParameter(GhiaPatterns.CONFIG_VALIDATION_ERR) if", "is None: # Signature check is skipped only if the secret is missing", "config = prepare_app() session = None ghia_patterns = GhiaPatterns(config) ghia_patterns.set_strategy('append') req = GhiaRequests(token,", "as bytes digestmod='sha1', msg=request.get_data() ) if computed_hash.hexdigest() != hash_value: raise RuntimeError(\"The request signature", "@app.route('/', methods=['POST', 'GET']) def index(): if request.method == 'POST': return process_webhook() return render_template('index.html',", "= github_signed.split('=', maxsplit=2) except ValueError: raise ValueError(\"Signature header has incorrect format.\") if hash_name", "issue action is ignored.\" issue = Issue(data[\"issue\"]) req.slug = data[\"repository\"][\"full_name\"] updated_issue = ghia_patterns.apply_to(issue)", "prepare_app() session = None ghia_patterns = GhiaPatterns(config) ghia_patterns.set_strategy('append') req = GhiaRequests(token, session=session) user", "if data is None: return \"Webhook request missing JSON data.\", BAD_REQUEST if data[\"issue\"][\"state\"]", "path in conf_paths: with open(path, 'r') as file: config_content += file.read() + \"\\n\"", "the ghia-config and in the webhook config return True elif github_signed is None", "not in config or \"token\" not in config[\"github\"]: raise click.BadParameter(GhiaPatterns.CONFIG_VALIDATION_ERR) if \"patterns\" not", "request missing JSON data.\", BAD_REQUEST if data[\"issue\"][\"state\"] == \"closed\": return \"Closed issue is", "config[\"github\"][\"token\"] return token, secret, config def prepare_app_test(conf): session = conf[\"session\"] config = conf[\"config\"]", "if conf and \"test\" in conf and conf[\"test\"]: token, secret, config, session =", "\"\\n\" config = configparser.ConfigParser() config.optionxform = str # maintain case sensitivity in keys", "\"\" for path in conf_paths: with open(path, 'r') as file: config_content += file.read()", "the environment.\") conf_paths = env_conf.split(\":\") config_content = \"\" for path in conf_paths: with", "import GhiaPatterns from .ghia_requests import GhiaRequests from .ghia_issue import Issue BAD_REQUEST = 400", "\"patterns\" not in config: raise click.BadParameter(GhiaPatterns.CONFIG_VALIDATION_ERR) if \"secret\" not in config[\"github\"]: secret =", ".ghia_issue import Issue BAD_REQUEST = 400 ALLOWED_ACTIONS = [\"opened\", \"edited\", \"transferred\", \"reopened\", \"assigned\",", "return str(e), BAD_REQUEST if event_type == \"issues\": return process_issues() elif event_type == \"ping\":", "raise ValueError(\"Signature verification failed.\") try: hash_name, hash_value = github_signed.split('=', maxsplit=2) except ValueError: raise", "if \"secret\" not in config[\"github\"]: secret = None else: secret = config[\"github\"][\"secret\"] token", "or ghia-config has secret but webhook doesn't send signed request raise ValueError(\"Signature verification", "elif github_signed is None or secret is None: # GitHub request has signature", "verification failed.\") try: hash_name, hash_value = github_signed.split('=', maxsplit=2) except ValueError: raise ValueError(\"Signature header", "else: token, secret, config = prepare_app() session = None ghia_patterns = GhiaPatterns(config) ghia_patterns.set_strategy('append')", "[\"opened\", \"edited\", \"transferred\", \"reopened\", \"assigned\", \"unassigned\", \"labeled\", 
\"unlabeled\"] def prepare_app(): env_conf = os.getenv('GHIA_CONFIG')", "event_type = request.headers.get('X-Github-Event') try: github_verify_request() except RuntimeError as e: return str(e), BAD_REQUEST if", "= prepare_app_test(conf) else: token, secret, config = prepare_app() session = None ghia_patterns =", "return \"Webhook request missing JSON data.\", BAD_REQUEST if data[\"issue\"][\"state\"] == \"closed\": return \"Closed", "def prepare_app_test(conf): session = conf[\"session\"] config = conf[\"config\"] token = conf[\"TOKEN\"] secret =", "None: # Signature check is skipped only if the secret is missing in", "import GhiaRequests from .ghia_issue import Issue BAD_REQUEST = 400 ALLOWED_ACTIONS = [\"opened\", \"edited\",", "type ignored.\" @app.route('/', methods=['POST', 'GET']) def index(): if request.method == 'POST': return process_webhook()", "\"Issue update done.\" def process_webhook(): event_type = request.headers.get('X-Github-Event') try: github_verify_request() except RuntimeError as", "config_content += file.read() + \"\\n\" config = configparser.ConfigParser() config.optionxform = str # maintain", "hash_name != 'sha1': raise ValueError(\"GitHub signatures are expected to use SHA1.\") computed_hash =", "the secret as bytes digestmod='sha1', msg=request.get_data() ) if computed_hash.hexdigest() != hash_value: raise RuntimeError(\"The", "issue is ignored.\" action = data[\"action\"] if action not in ALLOWED_ACTIONS: return \"This", "from flask import render_template from .ghia_patterns import GhiaPatterns from .ghia_requests import GhiaRequests from", "failed.\") try: hash_name, hash_value = github_signed.split('=', maxsplit=2) except ValueError: raise ValueError(\"Signature header has", "'r') as file: config_content += file.read() + \"\\n\" config = configparser.ConfigParser() config.optionxform =", "github_signed is None or secret is None: # GitHub request has signature but", "= prepare_app() session = None ghia_patterns = GhiaPatterns(config) ghia_patterns.set_strategy('append') req = GhiaRequests(token, session=session)", "secret, config, session = prepare_app_test(conf) else: token, secret, config = prepare_app() session =", "session = prepare_app_test(conf) else: token, secret, config = prepare_app() session = None ghia_patterns", "GhiaRequests from .ghia_issue import Issue BAD_REQUEST = 400 ALLOWED_ACTIONS = [\"opened\", \"edited\", \"transferred\",", "secret # or ghia-config has secret but webhook doesn't send signed request raise", "in config: raise click.BadParameter(GhiaPatterns.CONFIG_VALIDATION_ERR) if \"secret\" not in config[\"github\"]: secret = None else:", "raise ValueError(\"Signature header has incorrect format.\") if hash_name != 'sha1': raise ValueError(\"GitHub signatures", "= conf[\"SECRET\"] return token, secret, config, session def create_app(conf): app = Flask(__name__) if", "token, secret, config = prepare_app() session = None ghia_patterns = GhiaPatterns(config) ghia_patterns.set_strategy('append') req", "as e: return str(e), BAD_REQUEST if event_type == \"issues\": return process_issues() elif event_type", "try: github_verify_request() except RuntimeError as e: return str(e), BAD_REQUEST if event_type == \"issues\":", "process_issues() elif event_type == \"ping\": return \"Ping OK\" else: return \"Event type ignored.\"", "get the secret as bytes digestmod='sha1', msg=request.get_data() ) if computed_hash.hexdigest() != hash_value: raise", "in ALLOWED_ACTIONS: return \"This issue action is ignored.\" issue = Issue(data[\"issue\"]) req.slug =", 
"== \"closed\": return \"Closed issue is ignored.\" action = data[\"action\"] if action not", "def github_verify_request(): github_signed = request.headers.get('X-Hub-Signature') if github_signed is None and secret is None:", "if data[\"issue\"][\"state\"] == \"closed\": return \"Closed issue is ignored.\" action = data[\"action\"] if", "\"unassigned\", \"labeled\", \"unlabeled\"] def prepare_app(): env_conf = os.getenv('GHIA_CONFIG') if env_conf is None: raise", "except ValueError: raise ValueError(\"Signature header has incorrect format.\") if hash_name != 'sha1': raise", "case sensitivity in keys config.read_string(config_content) if \"github\" not in config or \"token\" not", "= None ghia_patterns = GhiaPatterns(config) ghia_patterns.set_strategy('append') req = GhiaRequests(token, session=session) user = req.get_user()", "GitHub request has signature but ghia-config is missing the secret # or ghia-config", "= request.headers.get('X-Github-Event') try: github_verify_request() except RuntimeError as e: return str(e), BAD_REQUEST if event_type", "the webhook config return True elif github_signed is None or secret is None:", "conf[\"SECRET\"] return token, secret, config, session def create_app(conf): app = Flask(__name__) if conf", "= Issue(data[\"issue\"]) req.slug = data[\"repository\"][\"full_name\"] updated_issue = ghia_patterns.apply_to(issue) if updated_issue: req.update_issue(updated_issue) return \"Issue", "\"reopened\", \"assigned\", \"unassigned\", \"labeled\", \"unlabeled\"] def prepare_app(): env_conf = os.getenv('GHIA_CONFIG') if env_conf is", "import render_template from .ghia_patterns import GhiaPatterns from .ghia_requests import GhiaRequests from .ghia_issue import", "config: raise click.BadParameter(GhiaPatterns.CONFIG_VALIDATION_ERR) if \"secret\" not in config[\"github\"]: secret = None else: secret", "in config[\"github\"]: raise click.BadParameter(GhiaPatterns.CONFIG_VALIDATION_ERR) if \"patterns\" not in config: raise click.BadParameter(GhiaPatterns.CONFIG_VALIDATION_ERR) if \"secret\"", "ghia_patterns = GhiaPatterns(config) ghia_patterns.set_strategy('append') req = GhiaRequests(token, session=session) user = req.get_user() def github_verify_request():", "== \"issues\": return process_issues() elif event_type == \"ping\": return \"Ping OK\" else: return", "as file: config_content += file.read() + \"\\n\" config = configparser.ConfigParser() config.optionxform = str", "conf[\"test\"]: token, secret, config, session = prepare_app_test(conf) else: token, secret, config = prepare_app()", "return \"Ping OK\" else: return \"Event type ignored.\" @app.route('/', methods=['POST', 'GET']) def index():", "prepare_app_test(conf) else: token, secret, config = prepare_app() session = None ghia_patterns = GhiaPatterns(config)", "is None and secret is None: # Signature check is skipped only if", "# GitHub request has signature but ghia-config is missing the secret # or", "data = request.get_json(silent=True) if data is None: return \"Webhook request missing JSON data.\",", "create_app(conf): app = Flask(__name__) if conf and \"test\" in conf and conf[\"test\"]: token,", "ignored.\" issue = Issue(data[\"issue\"]) req.slug = data[\"repository\"][\"full_name\"] updated_issue = ghia_patterns.apply_to(issue) if updated_issue: req.update_issue(updated_issue)", "sensitivity in keys config.read_string(config_content) if \"github\" not in config or \"token\" not in", "elif event_type == \"ping\": return \"Ping OK\" else: return \"Event type ignored.\" @app.route('/',", "import Issue 
BAD_REQUEST = 400 ALLOWED_ACTIONS = [\"opened\", \"edited\", \"transferred\", \"reopened\", \"assigned\", \"unassigned\",", "ignored.\" @app.route('/', methods=['POST', 'GET']) def index(): if request.method == 'POST': return process_webhook() return", "= request.headers.get('X-Hub-Signature') if github_signed is None and secret is None: # Signature check", "Issue(data[\"issue\"]) req.slug = data[\"repository\"][\"full_name\"] updated_issue = ghia_patterns.apply_to(issue) if updated_issue: req.update_issue(updated_issue) return \"Issue update", "req = GhiaRequests(token, session=session) user = req.get_user() def github_verify_request(): github_signed = request.headers.get('X-Hub-Signature') if", "str(e), BAD_REQUEST if event_type == \"issues\": return process_issues() elif event_type == \"ping\": return", "computed_hash.hexdigest() != hash_value: raise RuntimeError(\"The request signature is wrong.\") def process_issues(): data =", "= 400 ALLOWED_ACTIONS = [\"opened\", \"edited\", \"transferred\", \"reopened\", \"assigned\", \"unassigned\", \"labeled\", \"unlabeled\"] def", "request.get_json(silent=True) if data is None: return \"Webhook request missing JSON data.\", BAD_REQUEST if", "in conf and conf[\"test\"]: token, secret, config, session = prepare_app_test(conf) else: token, secret,", "has secret but webhook doesn't send signed request raise ValueError(\"Signature verification failed.\") try:", "config[\"github\"]: secret = None else: secret = config[\"github\"][\"secret\"] token = config[\"github\"][\"token\"] return token,", "is None: return \"Webhook request missing JSON data.\", BAD_REQUEST if data[\"issue\"][\"state\"] == \"closed\":", "e: return str(e), BAD_REQUEST if event_type == \"issues\": return process_issues() elif event_type ==", "click import configparser import hmac import os from flask import Flask from flask", "conf and conf[\"test\"]: token, secret, config, session = prepare_app_test(conf) else: token, secret, config", "hash_value = github_signed.split('=', maxsplit=2) except ValueError: raise ValueError(\"Signature header has incorrect format.\") if", "= None else: secret = config[\"github\"][\"secret\"] token = config[\"github\"][\"token\"] return token, secret, config", "prepare_app(): env_conf = os.getenv('GHIA_CONFIG') if env_conf is None: raise click.BadParameter(\"GHIA_CONFIG is missing from", "Flask(__name__) if conf and \"test\" in conf and conf[\"test\"]: token, secret, config, session", "conf_paths: with open(path, 'r') as file: config_content += file.read() + \"\\n\" config =", "import Flask from flask import request from flask import render_template from .ghia_patterns import", "config, session def create_app(conf): app = Flask(__name__) if conf and \"test\" in conf", "req.slug = data[\"repository\"][\"full_name\"] updated_issue = ghia_patterns.apply_to(issue) if updated_issue: req.update_issue(updated_issue) return \"Issue update done.\"", "session def create_app(conf): app = Flask(__name__) if conf and \"test\" in conf and", "github_verify_request() except RuntimeError as e: return str(e), BAD_REQUEST if event_type == \"issues\": return", "ValueError(\"Signature header has incorrect format.\") if hash_name != 'sha1': raise ValueError(\"GitHub signatures are", "\"test\" in conf and conf[\"test\"]: token, secret, config, session = prepare_app_test(conf) else: token,", "# maintain case sensitivity in keys config.read_string(config_content) if \"github\" not in config or", "Issue BAD_REQUEST = 400 ALLOWED_ACTIONS = [\"opened\", \"edited\", 
\"transferred\", \"reopened\", \"assigned\", \"unassigned\", \"labeled\",", "but ghia-config is missing the secret # or ghia-config has secret but webhook", "and \"test\" in conf and conf[\"test\"]: token, secret, config, session = prepare_app_test(conf) else:", "import configparser import hmac import os from flask import Flask from flask import", "ValueError: raise ValueError(\"Signature header has incorrect format.\") if hash_name != 'sha1': raise ValueError(\"GitHub", "# or ghia-config has secret but webhook doesn't send signed request raise ValueError(\"Signature", "\"github\" not in config or \"token\" not in config[\"github\"]: raise click.BadParameter(GhiaPatterns.CONFIG_VALIDATION_ERR) if \"patterns\"", "check is skipped only if the secret is missing in the ghia-config and", "= req.get_user() def github_verify_request(): github_signed = request.headers.get('X-Hub-Signature') if github_signed is None and secret", "\"secret\" not in config[\"github\"]: secret = None else: secret = config[\"github\"][\"secret\"] token =", "and conf[\"test\"]: token, secret, config, session = prepare_app_test(conf) else: token, secret, config =", "click.BadParameter(GhiaPatterns.CONFIG_VALIDATION_ERR) if \"patterns\" not in config: raise click.BadParameter(GhiaPatterns.CONFIG_VALIDATION_ERR) if \"secret\" not in config[\"github\"]:", "conf[\"config\"] token = conf[\"TOKEN\"] secret = conf[\"SECRET\"] return token, secret, config, session def", "updated_issue: req.update_issue(updated_issue) return \"Issue update done.\" def process_webhook(): event_type = request.headers.get('X-Github-Event') try: github_verify_request()", "= \"\" for path in conf_paths: with open(path, 'r') as file: config_content +=", "user = req.get_user() def github_verify_request(): github_signed = request.headers.get('X-Hub-Signature') if github_signed is None and", "conf_paths = env_conf.split(\":\") config_content = \"\" for path in conf_paths: with open(path, 'r')", "request.headers.get('X-Hub-Signature') if github_signed is None and secret is None: # Signature check is", "return \"This issue action is ignored.\" issue = Issue(data[\"issue\"]) req.slug = data[\"repository\"][\"full_name\"] updated_issue", "github_signed.split('=', maxsplit=2) except ValueError: raise ValueError(\"Signature header has incorrect format.\") if hash_name !=", "is missing in the ghia-config and in the webhook config return True elif", "= configparser.ConfigParser() config.optionxform = str # maintain case sensitivity in keys config.read_string(config_content) if", "= conf[\"session\"] config = conf[\"config\"] token = conf[\"TOKEN\"] secret = conf[\"SECRET\"] return token,", "is missing the secret # or ghia-config has secret but webhook doesn't send", "ALLOWED_ACTIONS: return \"This issue action is ignored.\" issue = Issue(data[\"issue\"]) req.slug = data[\"repository\"][\"full_name\"]", "msg=request.get_data() ) if computed_hash.hexdigest() != hash_value: raise RuntimeError(\"The request signature is wrong.\") def", "file.read() + \"\\n\" config = configparser.ConfigParser() config.optionxform = str # maintain case sensitivity", "= data[\"repository\"][\"full_name\"] updated_issue = ghia_patterns.apply_to(issue) if updated_issue: req.update_issue(updated_issue) return \"Issue update done.\" def", "None and secret is None: # Signature check is skipped only if the", "secret but webhook doesn't send signed request raise ValueError(\"Signature verification failed.\") try: hash_name,", "keys config.read_string(config_content) if \"github\" not in 
config or \"token\" not in config[\"github\"]: raise", "is missing from the environment.\") conf_paths = env_conf.split(\":\") config_content = \"\" for path", "JSON data.\", BAD_REQUEST if data[\"issue\"][\"state\"] == \"closed\": return \"Closed issue is ignored.\" action", "missing in the ghia-config and in the webhook config return True elif github_signed", "format.\") if hash_name != 'sha1': raise ValueError(\"GitHub signatures are expected to use SHA1.\")", "github_verify_request(): github_signed = request.headers.get('X-Hub-Signature') if github_signed is None and secret is None: #", "\"closed\": return \"Closed issue is ignored.\" action = data[\"action\"] if action not in", "send signed request raise ValueError(\"Signature verification failed.\") try: hash_name, hash_value = github_signed.split('=', maxsplit=2)", "== \"ping\": return \"Ping OK\" else: return \"Event type ignored.\" @app.route('/', methods=['POST', 'GET'])", "\"Event type ignored.\" @app.route('/', methods=['POST', 'GET']) def index(): if request.method == 'POST': return", "os.getenv('GHIA_CONFIG') if env_conf is None: raise click.BadParameter(\"GHIA_CONFIG is missing from the environment.\") conf_paths", "\"Closed issue is ignored.\" action = data[\"action\"] if action not in ALLOWED_ACTIONS: return", "ghia_patterns.apply_to(issue) if updated_issue: req.update_issue(updated_issue) return \"Issue update done.\" def process_webhook(): event_type = request.headers.get('X-Github-Event')", "doesn't send signed request raise ValueError(\"Signature verification failed.\") try: hash_name, hash_value = github_signed.split('=',", "raise RuntimeError(\"The request signature is wrong.\") def process_issues(): data = request.get_json(silent=True) if data", "hmac.new( bytearray(secret, \"utf-8\"), # get the secret as bytes digestmod='sha1', msg=request.get_data() ) if", "secret is None: # Signature check is skipped only if the secret is", "= os.getenv('GHIA_CONFIG') if env_conf is None: raise click.BadParameter(\"GHIA_CONFIG is missing from the environment.\")", "GhiaPatterns(config) ghia_patterns.set_strategy('append') req = GhiaRequests(token, session=session) user = req.get_user() def github_verify_request(): github_signed =", "flask import Flask from flask import request from flask import render_template from .ghia_patterns", "in keys config.read_string(config_content) if \"github\" not in config or \"token\" not in config[\"github\"]:", "= Flask(__name__) if conf and \"test\" in conf and conf[\"test\"]: token, secret, config,", "= data[\"action\"] if action not in ALLOWED_ACTIONS: return \"This issue action is ignored.\"", "if env_conf is None: raise click.BadParameter(\"GHIA_CONFIG is missing from the environment.\") conf_paths =", "data[\"issue\"][\"state\"] == \"closed\": return \"Closed issue is ignored.\" action = data[\"action\"] if action", "action not in ALLOWED_ACTIONS: return \"This issue action is ignored.\" issue = Issue(data[\"issue\"])", ") if computed_hash.hexdigest() != hash_value: raise RuntimeError(\"The request signature is wrong.\") def process_issues():", "data[\"repository\"][\"full_name\"] updated_issue = ghia_patterns.apply_to(issue) if updated_issue: req.update_issue(updated_issue) return \"Issue update done.\" def process_webhook():", "+= file.read() + \"\\n\" config = configparser.ConfigParser() config.optionxform = str # maintain case", "is None: # GitHub request has signature but ghia-config is missing the secret", "request from flask import render_template from .ghia_patterns import 
GhiaPatterns from .ghia_requests import GhiaRequests", "secret as bytes digestmod='sha1', msg=request.get_data() ) if computed_hash.hexdigest() != hash_value: raise RuntimeError(\"The request", "token = conf[\"TOKEN\"] secret = conf[\"SECRET\"] return token, secret, config, session def create_app(conf):", "action is ignored.\" issue = Issue(data[\"issue\"]) req.slug = data[\"repository\"][\"full_name\"] updated_issue = ghia_patterns.apply_to(issue) if", "the secret is missing in the ghia-config and in the webhook config return", "data[\"action\"] if action not in ALLOWED_ACTIONS: return \"This issue action is ignored.\" issue", "secret is missing in the ghia-config and in the webhook config return True", "data.\", BAD_REQUEST if data[\"issue\"][\"state\"] == \"closed\": return \"Closed issue is ignored.\" action =", "= request.get_json(silent=True) if data is None: return \"Webhook request missing JSON data.\", BAD_REQUEST", "GhiaRequests(token, session=session) user = req.get_user() def github_verify_request(): github_signed = request.headers.get('X-Hub-Signature') if github_signed is", "str # maintain case sensitivity in keys config.read_string(config_content) if \"github\" not in config", "return process_issues() elif event_type == \"ping\": return \"Ping OK\" else: return \"Event type", "from flask import request from flask import render_template from .ghia_patterns import GhiaPatterns from", "raise click.BadParameter(\"GHIA_CONFIG is missing from the environment.\") conf_paths = env_conf.split(\":\") config_content = \"\"", "= env_conf.split(\":\") config_content = \"\" for path in conf_paths: with open(path, 'r') as", "not in config[\"github\"]: secret = None else: secret = config[\"github\"][\"secret\"] token = config[\"github\"][\"token\"]", "None ghia_patterns = GhiaPatterns(config) ghia_patterns.set_strategy('append') req = GhiaRequests(token, session=session) user = req.get_user() def", "import os from flask import Flask from flask import request from flask import", "request raise ValueError(\"Signature verification failed.\") try: hash_name, hash_value = github_signed.split('=', maxsplit=2) except ValueError:", "done.\" def process_webhook(): event_type = request.headers.get('X-Github-Event') try: github_verify_request() except RuntimeError as e: return", "github_signed = request.headers.get('X-Hub-Signature') if github_signed is None and secret is None: # Signature", "None: return \"Webhook request missing JSON data.\", BAD_REQUEST if data[\"issue\"][\"state\"] == \"closed\": return", "def prepare_app(): env_conf = os.getenv('GHIA_CONFIG') if env_conf is None: raise click.BadParameter(\"GHIA_CONFIG is missing", "!= hash_value: raise RuntimeError(\"The request signature is wrong.\") def process_issues(): data = request.get_json(silent=True)", "hash_name, hash_value = github_signed.split('=', maxsplit=2) except ValueError: raise ValueError(\"Signature header has incorrect format.\")", "environment.\") conf_paths = env_conf.split(\":\") config_content = \"\" for path in conf_paths: with open(path,", "prepare_app_test(conf): session = conf[\"session\"] config = conf[\"config\"] token = conf[\"TOKEN\"] secret = conf[\"SECRET\"]", "= GhiaPatterns(config) ghia_patterns.set_strategy('append') req = GhiaRequests(token, session=session) user = req.get_user() def github_verify_request(): github_signed", "Flask from flask import request from flask import render_template from .ghia_patterns import GhiaPatterns", "config, session = prepare_app_test(conf) else: token, secret, config = 
prepare_app() session = None", "OK\" else: return \"Event type ignored.\" @app.route('/', methods=['POST', 'GET']) def index(): if request.method", "request signature is wrong.\") def process_issues(): data = request.get_json(silent=True) if data is None:", "config return True elif github_signed is None or secret is None: # GitHub", "return token, secret, config, session def create_app(conf): app = Flask(__name__) if conf and", "event_type == \"issues\": return process_issues() elif event_type == \"ping\": return \"Ping OK\" else:", "secret = config[\"github\"][\"secret\"] token = config[\"github\"][\"token\"] return token, secret, config def prepare_app_test(conf): session", "is ignored.\" issue = Issue(data[\"issue\"]) req.slug = data[\"repository\"][\"full_name\"] updated_issue = ghia_patterns.apply_to(issue) if updated_issue:", "secret is None: # GitHub request has signature but ghia-config is missing the", "data is None: return \"Webhook request missing JSON data.\", BAD_REQUEST if data[\"issue\"][\"state\"] ==", "if updated_issue: req.update_issue(updated_issue) return \"Issue update done.\" def process_webhook(): event_type = request.headers.get('X-Github-Event') try:", "has incorrect format.\") if hash_name != 'sha1': raise ValueError(\"GitHub signatures are expected to", "ghia-config and in the webhook config return True elif github_signed is None or", "try: hash_name, hash_value = github_signed.split('=', maxsplit=2) except ValueError: raise ValueError(\"Signature header has incorrect", "'sha1': raise ValueError(\"GitHub signatures are expected to use SHA1.\") computed_hash = hmac.new( bytearray(secret,", "from .ghia_patterns import GhiaPatterns from .ghia_requests import GhiaRequests from .ghia_issue import Issue BAD_REQUEST", "missing JSON data.\", BAD_REQUEST if data[\"issue\"][\"state\"] == \"closed\": return \"Closed issue is ignored.\"", ".ghia_patterns import GhiaPatterns from .ghia_requests import GhiaRequests from .ghia_issue import Issue BAD_REQUEST =", "the secret # or ghia-config has secret but webhook doesn't send signed request", "config = conf[\"config\"] token = conf[\"TOKEN\"] secret = conf[\"SECRET\"] return token, secret, config,", "ghia-config is missing the secret # or ghia-config has secret but webhook doesn't", "process_issues(): data = request.get_json(silent=True) if data is None: return \"Webhook request missing JSON", "conf and \"test\" in conf and conf[\"test\"]: token, secret, config, session = prepare_app_test(conf)", "def index(): if request.method == 'POST': return process_webhook() return render_template('index.html', user=user, patterns=ghia_patterns) return", "None or secret is None: # GitHub request has signature but ghia-config is", "\"transferred\", \"reopened\", \"assigned\", \"unassigned\", \"labeled\", \"unlabeled\"] def prepare_app(): env_conf = os.getenv('GHIA_CONFIG') if env_conf", "\"token\" not in config[\"github\"]: raise click.BadParameter(GhiaPatterns.CONFIG_VALIDATION_ERR) if \"patterns\" not in config: raise click.BadParameter(GhiaPatterns.CONFIG_VALIDATION_ERR)", "or \"token\" not in config[\"github\"]: raise click.BadParameter(GhiaPatterns.CONFIG_VALIDATION_ERR) if \"patterns\" not in config: raise", "if github_signed is None and secret is None: # Signature check is skipped", "is skipped only if the secret is missing in the ghia-config and in", "BAD_REQUEST if data[\"issue\"][\"state\"] == \"closed\": return \"Closed issue is ignored.\" action = data[\"action\"]", "configparser import hmac import os from flask import Flask from 
flask import request", "bytearray(secret, \"utf-8\"), # get the secret as bytes digestmod='sha1', msg=request.get_data() ) if computed_hash.hexdigest()", "issue = Issue(data[\"issue\"]) req.slug = data[\"repository\"][\"full_name\"] updated_issue = ghia_patterns.apply_to(issue) if updated_issue: req.update_issue(updated_issue) return", "config[\"github\"]: raise click.BadParameter(GhiaPatterns.CONFIG_VALIDATION_ERR) if \"patterns\" not in config: raise click.BadParameter(GhiaPatterns.CONFIG_VALIDATION_ERR) if \"secret\" not", "+ \"\\n\" config = configparser.ConfigParser() config.optionxform = str # maintain case sensitivity in", "updated_issue = ghia_patterns.apply_to(issue) if updated_issue: req.update_issue(updated_issue) return \"Issue update done.\" def process_webhook(): event_type", "\"utf-8\"), # get the secret as bytes digestmod='sha1', msg=request.get_data() ) if computed_hash.hexdigest() !=", "open(path, 'r') as file: config_content += file.read() + \"\\n\" config = configparser.ConfigParser() config.optionxform", "session = conf[\"session\"] config = conf[\"config\"] token = conf[\"TOKEN\"] secret = conf[\"SECRET\"] return", "None: # GitHub request has signature but ghia-config is missing the secret #", "is wrong.\") def process_issues(): data = request.get_json(silent=True) if data is None: return \"Webhook", "!= 'sha1': raise ValueError(\"GitHub signatures are expected to use SHA1.\") computed_hash = hmac.new(", "ghia-config has secret but webhook doesn't send signed request raise ValueError(\"Signature verification failed.\")", "and in the webhook config return True elif github_signed is None or secret", "if action not in ALLOWED_ACTIONS: return \"This issue action is ignored.\" issue =", "None else: secret = config[\"github\"][\"secret\"] token = config[\"github\"][\"token\"] return token, secret, config def", "request has signature but ghia-config is missing the secret # or ghia-config has", "action = data[\"action\"] if action not in ALLOWED_ACTIONS: return \"This issue action is", "ALLOWED_ACTIONS = [\"opened\", \"edited\", \"transferred\", \"reopened\", \"assigned\", \"unassigned\", \"labeled\", \"unlabeled\"] def prepare_app(): env_conf", "ignored.\" action = data[\"action\"] if action not in ALLOWED_ACTIONS: return \"This issue action", "return \"Closed issue is ignored.\" action = data[\"action\"] if action not in ALLOWED_ACTIONS:", "'GET']) def index(): if request.method == 'POST': return process_webhook() return render_template('index.html', user=user, patterns=ghia_patterns)", "import hmac import os from flask import Flask from flask import request from", "config = configparser.ConfigParser() config.optionxform = str # maintain case sensitivity in keys config.read_string(config_content)", "secret, config, session def create_app(conf): app = Flask(__name__) if conf and \"test\" in", "None: raise click.BadParameter(\"GHIA_CONFIG is missing from the environment.\") conf_paths = env_conf.split(\":\") config_content =", "conf[\"TOKEN\"] secret = conf[\"SECRET\"] return token, secret, config, session def create_app(conf): app =", "raise ValueError(\"GitHub signatures are expected to use SHA1.\") computed_hash = hmac.new( bytearray(secret, \"utf-8\"),", "def create_app(conf): app = Flask(__name__) if conf and \"test\" in conf and conf[\"test\"]:", "hmac import os from flask import Flask from flask import request from flask", "= hmac.new( bytearray(secret, \"utf-8\"), # get the secret as bytes digestmod='sha1', msg=request.get_data() )", "env_conf = 
os.getenv('GHIA_CONFIG') if env_conf is None: raise click.BadParameter(\"GHIA_CONFIG is missing from the", "is None: raise click.BadParameter(\"GHIA_CONFIG is missing from the environment.\") conf_paths = env_conf.split(\":\") config_content", "SHA1.\") computed_hash = hmac.new( bytearray(secret, \"utf-8\"), # get the secret as bytes digestmod='sha1',", "not in config: raise click.BadParameter(GhiaPatterns.CONFIG_VALIDATION_ERR) if \"secret\" not in config[\"github\"]: secret = None", "BAD_REQUEST if event_type == \"issues\": return process_issues() elif event_type == \"ping\": return \"Ping", "# Signature check is skipped only if the secret is missing in the", "github_signed is None and secret is None: # Signature check is skipped only", "in conf_paths: with open(path, 'r') as file: config_content += file.read() + \"\\n\" config", "or secret is None: # GitHub request has signature but ghia-config is missing", "maxsplit=2) except ValueError: raise ValueError(\"Signature header has incorrect format.\") if hash_name != 'sha1':", "os from flask import Flask from flask import request from flask import render_template", "maintain case sensitivity in keys config.read_string(config_content) if \"github\" not in config or \"token\"", "return \"Issue update done.\" def process_webhook(): event_type = request.headers.get('X-Github-Event') try: github_verify_request() except RuntimeError", "token, secret, config, session = prepare_app_test(conf) else: token, secret, config = prepare_app() session", "config.optionxform = str # maintain case sensitivity in keys config.read_string(config_content) if \"github\" not", "bytes digestmod='sha1', msg=request.get_data() ) if computed_hash.hexdigest() != hash_value: raise RuntimeError(\"The request signature is", "config_content = \"\" for path in conf_paths: with open(path, 'r') as file: config_content", "def process_issues(): data = request.get_json(silent=True) if data is None: return \"Webhook request missing", "if hash_name != 'sha1': raise ValueError(\"GitHub signatures are expected to use SHA1.\") computed_hash", "if computed_hash.hexdigest() != hash_value: raise RuntimeError(\"The request signature is wrong.\") def process_issues(): data", "True elif github_signed is None or secret is None: # GitHub request has", "digestmod='sha1', msg=request.get_data() ) if computed_hash.hexdigest() != hash_value: raise RuntimeError(\"The request signature is wrong.\")", "else: return \"Event type ignored.\" @app.route('/', methods=['POST', 'GET']) def index(): if request.method ==", "secret, config def prepare_app_test(conf): session = conf[\"session\"] config = conf[\"config\"] token = conf[\"TOKEN\"]", "raise click.BadParameter(GhiaPatterns.CONFIG_VALIDATION_ERR) if \"patterns\" not in config: raise click.BadParameter(GhiaPatterns.CONFIG_VALIDATION_ERR) if \"secret\" not in", "import request from flask import render_template from .ghia_patterns import GhiaPatterns from .ghia_requests import", "ValueError(\"Signature verification failed.\") try: hash_name, hash_value = github_signed.split('=', maxsplit=2) except ValueError: raise ValueError(\"Signature", "return True elif github_signed is None or secret is None: # GitHub request", "render_template from .ghia_patterns import GhiaPatterns from .ghia_requests import GhiaRequests from .ghia_issue import Issue", "methods=['POST', 'GET']) def index(): if request.method == 'POST': return process_webhook() return render_template('index.html', user=user,", "\"ping\": return \"Ping OK\" else: return \"Event type ignored.\" 
@app.route('/', methods=['POST', 'GET']) def", "= GhiaRequests(token, session=session) user = req.get_user() def github_verify_request(): github_signed = request.headers.get('X-Hub-Signature') if github_signed", "= [\"opened\", \"edited\", \"transferred\", \"reopened\", \"assigned\", \"unassigned\", \"labeled\", \"unlabeled\"] def prepare_app(): env_conf =", "from flask import Flask from flask import request from flask import render_template from", "webhook doesn't send signed request raise ValueError(\"Signature verification failed.\") try: hash_name, hash_value =", "signature but ghia-config is missing the secret # or ghia-config has secret but", "\"labeled\", \"unlabeled\"] def prepare_app(): env_conf = os.getenv('GHIA_CONFIG') if env_conf is None: raise click.BadParameter(\"GHIA_CONFIG", "secret = conf[\"SECRET\"] return token, secret, config, session def create_app(conf): app = Flask(__name__)", "\"Ping OK\" else: return \"Event type ignored.\" @app.route('/', methods=['POST', 'GET']) def index(): if", "token, secret, config def prepare_app_test(conf): session = conf[\"session\"] config = conf[\"config\"] token =", "in the webhook config return True elif github_signed is None or secret is", "return \"Event type ignored.\" @app.route('/', methods=['POST', 'GET']) def index(): if request.method == 'POST':", "\"unlabeled\"] def prepare_app(): env_conf = os.getenv('GHIA_CONFIG') if env_conf is None: raise click.BadParameter(\"GHIA_CONFIG is", "wrong.\") def process_issues(): data = request.get_json(silent=True) if data is None: return \"Webhook request", "token = config[\"github\"][\"token\"] return token, secret, config def prepare_app_test(conf): session = conf[\"session\"] config", "incorrect format.\") if hash_name != 'sha1': raise ValueError(\"GitHub signatures are expected to use", "app = Flask(__name__) if conf and \"test\" in conf and conf[\"test\"]: token, secret,", "only if the secret is missing in the ghia-config and in the webhook", "def process_webhook(): event_type = request.headers.get('X-Github-Event') try: github_verify_request() except RuntimeError as e: return str(e),", "click.BadParameter(GhiaPatterns.CONFIG_VALIDATION_ERR) if \"secret\" not in config[\"github\"]: secret = None else: secret = config[\"github\"][\"secret\"]", "index(): if request.method == 'POST': return process_webhook() return render_template('index.html', user=user, patterns=ghia_patterns) return app", "is ignored.\" action = data[\"action\"] if action not in ALLOWED_ACTIONS: return \"This issue", "if \"patterns\" not in config: raise click.BadParameter(GhiaPatterns.CONFIG_VALIDATION_ERR) if \"secret\" not in config[\"github\"]: secret", "token, secret, config, session def create_app(conf): app = Flask(__name__) if conf and \"test\"", "from .ghia_requests import GhiaRequests from .ghia_issue import Issue BAD_REQUEST = 400 ALLOWED_ACTIONS =", "for path in conf_paths: with open(path, 'r') as file: config_content += file.read() +", "\"This issue action is ignored.\" issue = Issue(data[\"issue\"]) req.slug = data[\"repository\"][\"full_name\"] updated_issue =", "missing the secret # or ghia-config has secret but webhook doesn't send signed", "use SHA1.\") computed_hash = hmac.new( bytearray(secret, \"utf-8\"), # get the secret as bytes", "= config[\"github\"][\"secret\"] token = config[\"github\"][\"token\"] return token, secret, config def prepare_app_test(conf): session =", "env_conf is None: raise click.BadParameter(\"GHIA_CONFIG is missing from the environment.\") conf_paths = env_conf.split(\":\")", 
"signed request raise ValueError(\"Signature verification failed.\") try: hash_name, hash_value = github_signed.split('=', maxsplit=2) except", "with open(path, 'r') as file: config_content += file.read() + \"\\n\" config = configparser.ConfigParser()", "expected to use SHA1.\") computed_hash = hmac.new( bytearray(secret, \"utf-8\"), # get the secret", "config[\"github\"][\"secret\"] token = config[\"github\"][\"token\"] return token, secret, config def prepare_app_test(conf): session = conf[\"session\"]", "session=session) user = req.get_user() def github_verify_request(): github_signed = request.headers.get('X-Hub-Signature') if github_signed is None", "config.read_string(config_content) if \"github\" not in config or \"token\" not in config[\"github\"]: raise click.BadParameter(GhiaPatterns.CONFIG_VALIDATION_ERR)", "ValueError(\"GitHub signatures are expected to use SHA1.\") computed_hash = hmac.new( bytearray(secret, \"utf-8\"), #", "signatures are expected to use SHA1.\") computed_hash = hmac.new( bytearray(secret, \"utf-8\"), # get", "click.BadParameter(\"GHIA_CONFIG is missing from the environment.\") conf_paths = env_conf.split(\":\") config_content = \"\" for", "to use SHA1.\") computed_hash = hmac.new( bytearray(secret, \"utf-8\"), # get the secret as", "= conf[\"config\"] token = conf[\"TOKEN\"] secret = conf[\"SECRET\"] return token, secret, config, session", "missing from the environment.\") conf_paths = env_conf.split(\":\") config_content = \"\" for path in", "req.update_issue(updated_issue) return \"Issue update done.\" def process_webhook(): event_type = request.headers.get('X-Github-Event') try: github_verify_request() except", "update done.\" def process_webhook(): event_type = request.headers.get('X-Github-Event') try: github_verify_request() except RuntimeError as e:", "config def prepare_app_test(conf): session = conf[\"session\"] config = conf[\"config\"] token = conf[\"TOKEN\"] secret", "but webhook doesn't send signed request raise ValueError(\"Signature verification failed.\") try: hash_name, hash_value", "secret = None else: secret = config[\"github\"][\"secret\"] token = config[\"github\"][\"token\"] return token, secret,", "conf[\"session\"] config = conf[\"config\"] token = conf[\"TOKEN\"] secret = conf[\"SECRET\"] return token, secret,", "header has incorrect format.\") if hash_name != 'sha1': raise ValueError(\"GitHub signatures are expected", "env_conf.split(\":\") config_content = \"\" for path in conf_paths: with open(path, 'r') as file:", "Signature check is skipped only if the secret is missing in the ghia-config", "in config or \"token\" not in config[\"github\"]: raise click.BadParameter(GhiaPatterns.CONFIG_VALIDATION_ERR) if \"patterns\" not in", "RuntimeError(\"The request signature is wrong.\") def process_issues(): data = request.get_json(silent=True) if data is", "= str # maintain case sensitivity in keys config.read_string(config_content) if \"github\" not in", "hash_value: raise RuntimeError(\"The request signature is wrong.\") def process_issues(): data = request.get_json(silent=True) if", "req.get_user() def github_verify_request(): github_signed = request.headers.get('X-Hub-Signature') if github_signed is None and secret is", "signature is wrong.\") def process_issues(): data = request.get_json(silent=True) if data is None: return", "return token, secret, config def prepare_app_test(conf): session = conf[\"session\"] config = conf[\"config\"] token", "except RuntimeError as e: return str(e), BAD_REQUEST if event_type == \"issues\": return 
process_issues()", "from the environment.\") conf_paths = env_conf.split(\":\") config_content = \"\" for path in conf_paths:", "secret, config = prepare_app() session = None ghia_patterns = GhiaPatterns(config) ghia_patterns.set_strategy('append') req =", "computed_hash = hmac.new( bytearray(secret, \"utf-8\"), # get the secret as bytes digestmod='sha1', msg=request.get_data()", "= config[\"github\"][\"token\"] return token, secret, config def prepare_app_test(conf): session = conf[\"session\"] config =", "in config[\"github\"]: secret = None else: secret = config[\"github\"][\"secret\"] token = config[\"github\"][\"token\"] return", "raise click.BadParameter(GhiaPatterns.CONFIG_VALIDATION_ERR) if \"secret\" not in config[\"github\"]: secret = None else: secret =", "import click import configparser import hmac import os from flask import Flask from", "\"edited\", \"transferred\", \"reopened\", \"assigned\", \"unassigned\", \"labeled\", \"unlabeled\"] def prepare_app(): env_conf = os.getenv('GHIA_CONFIG') if", "RuntimeError as e: return str(e), BAD_REQUEST if event_type == \"issues\": return process_issues() elif", "session = None ghia_patterns = GhiaPatterns(config) ghia_patterns.set_strategy('append') req = GhiaRequests(token, session=session) user =", "webhook config return True elif github_signed is None or secret is None: #", "configparser.ConfigParser() config.optionxform = str # maintain case sensitivity in keys config.read_string(config_content) if \"github\"", "400 ALLOWED_ACTIONS = [\"opened\", \"edited\", \"transferred\", \"reopened\", \"assigned\", \"unassigned\", \"labeled\", \"unlabeled\"] def prepare_app():", "if event_type == \"issues\": return process_issues() elif event_type == \"ping\": return \"Ping OK\"", "file: config_content += file.read() + \"\\n\" config = configparser.ConfigParser() config.optionxform = str #", "event_type == \"ping\": return \"Ping OK\" else: return \"Event type ignored.\" @app.route('/', methods=['POST',", "in the ghia-config and in the webhook config return True elif github_signed is", "BAD_REQUEST = 400 ALLOWED_ACTIONS = [\"opened\", \"edited\", \"transferred\", \"reopened\", \"assigned\", \"unassigned\", \"labeled\", \"unlabeled\"]", "skipped only if the secret is missing in the ghia-config and in the", "= conf[\"TOKEN\"] secret = conf[\"SECRET\"] return token, secret, config, session def create_app(conf): app", "ghia_patterns.set_strategy('append') req = GhiaRequests(token, session=session) user = req.get_user() def github_verify_request(): github_signed = request.headers.get('X-Hub-Signature')", "are expected to use SHA1.\") computed_hash = hmac.new( bytearray(secret, \"utf-8\"), # get the", "\"issues\": return process_issues() elif event_type == \"ping\": return \"Ping OK\" else: return \"Event", "# get the secret as bytes digestmod='sha1', msg=request.get_data() ) if computed_hash.hexdigest() != hash_value:", "= ghia_patterns.apply_to(issue) if updated_issue: req.update_issue(updated_issue) return \"Issue update done.\" def process_webhook(): event_type =", "else: secret = config[\"github\"][\"secret\"] token = config[\"github\"][\"token\"] return token, secret, config def prepare_app_test(conf):", "not in ALLOWED_ACTIONS: return \"This issue action is ignored.\" issue = Issue(data[\"issue\"]) req.slug", "if \"github\" not in config or \"token\" not in config[\"github\"]: raise click.BadParameter(GhiaPatterns.CONFIG_VALIDATION_ERR) if", "and secret is None: # Signature check is skipped only if the secret", "config or \"token\" not in 
config[\"github\"]: raise click.BadParameter(GhiaPatterns.CONFIG_VALIDATION_ERR) if \"patterns\" not in config:", "flask import render_template from .ghia_patterns import GhiaPatterns from .ghia_requests import GhiaRequests from .ghia_issue", "request.headers.get('X-Github-Event') try: github_verify_request() except RuntimeError as e: return str(e), BAD_REQUEST if event_type ==", "has signature but ghia-config is missing the secret # or ghia-config has secret", "\"Webhook request missing JSON data.\", BAD_REQUEST if data[\"issue\"][\"state\"] == \"closed\": return \"Closed issue", "if the secret is missing in the ghia-config and in the webhook config", "\"assigned\", \"unassigned\", \"labeled\", \"unlabeled\"] def prepare_app(): env_conf = os.getenv('GHIA_CONFIG') if env_conf is None:", ".ghia_requests import GhiaRequests from .ghia_issue import Issue BAD_REQUEST = 400 ALLOWED_ACTIONS = [\"opened\",", "flask import request from flask import render_template from .ghia_patterns import GhiaPatterns from .ghia_requests", "GhiaPatterns from .ghia_requests import GhiaRequests from .ghia_issue import Issue BAD_REQUEST = 400 ALLOWED_ACTIONS", "from .ghia_issue import Issue BAD_REQUEST = 400 ALLOWED_ACTIONS = [\"opened\", \"edited\", \"transferred\", \"reopened\",", "is None or secret is None: # GitHub request has signature but ghia-config", "process_webhook(): event_type = request.headers.get('X-Github-Event') try: github_verify_request() except RuntimeError as e: return str(e), BAD_REQUEST" ]