index
int64
repo_name
string
branch_name
string
path
string
content
string
import_graph
string
73,863
OSOceanAcoustics/echopype
refs/heads/main
/echopype/utils/log.py
import logging
import sys
from typing import List, Optional

LOG_FORMAT = "{asctime}:{name}:{levelname}: {message}"
LOG_FORMATTER = logging.Formatter(LOG_FORMAT, style="{")
STDOUT_NAME = "stdout_stream_handler"
STDERR_NAME = "stderr_stream_handler"
LOGFILE_HANDLE_NAME = "logfile_file_handler"


class _ExcludeWarningsFilter(logging.Filter):
    def filter(self, record):  # noqa
        """Only lets through log messages with log level below WARNING."""
        # Attached to the stdout handler so that WARNING and above are
        # emitted only by the stderr handler (avoids duplicate output).
        # NOTE: docstring previously said "below ERROR", which contradicted
        # the actual threshold used here.
        return record.levelno < logging.WARNING


def verbose(logfile: Optional[str] = None, override: bool = False) -> None:
    """Set the verbosity for echopype print outs.
    If called it will output logs to terminal by default.

    Parameters
    ----------
    logfile : str, optional
        Optional string path to the desired log file.
    override: bool
        Boolean flag to override verbosity,
        which turns off verbosity if the value is `False`.
        Default is `False`.

    Returns
    -------
    None
    """
    if not isinstance(override, bool):
        raise ValueError("override argument must be a boolean!")

    package_name = __name__.split(".")[0]  # Get the package name
    loggers = _get_all_loggers()

    # `not override` replaces the previous roundabout
    # `True if override is False else False`.
    _set_verbose(not override)

    for logger in loggers:
        if package_name in logger.name:
            handlers = [h.name for h in logger.handlers]
            if logfile is None:
                if LOGFILE_HANDLE_NAME in handlers:
                    # Remove log file handler if it exists
                    handler = next(
                        filter(lambda h: h.name == LOGFILE_HANDLE_NAME, logger.handlers)
                    )
                    logger.removeHandler(handler)
            elif LOGFILE_HANDLE_NAME not in handlers:
                # Only add the logfile handler if it doesn't exist
                _set_logfile(logger, logfile)

            if isinstance(logfile, str):
                # Prevents multiple handler from propagating messages
                # this way there are no duplicate line in logfile
                logger.propagate = False
            else:
                logger.propagate = True


def _get_all_loggers() -> List[logging.Logger]:
    """Get all loggers, starting with the root logger."""
    loggers = [logging.getLogger()]  # get the root logger
    return loggers + [logging.getLogger(name) for name in logging.root.manager.loggerDict]


def _init_logger(name) -> logging.Logger:
    """Initialize logger with the default stdout stream handler

    Parameters
    ----------
    name : str
        Logger name

    Returns
    -------
    logging.Logger
    """
    # Logging setup
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)

    # Setup stream handler: INFO and below go to stdout only,
    # thanks to _ExcludeWarningsFilter.
    STREAM_HANDLER = logging.StreamHandler(sys.stdout)
    STREAM_HANDLER.setLevel(logging.INFO)
    STREAM_HANDLER.set_name(STDOUT_NAME)
    STREAM_HANDLER.setFormatter(LOG_FORMATTER)
    STREAM_HANDLER.addFilter(_ExcludeWarningsFilter())
    logger.addHandler(STREAM_HANDLER)

    # Setup err stream handler: WARNING and above go to stderr.
    ERR_STREAM_HANDLER = logging.StreamHandler(sys.stderr)
    ERR_STREAM_HANDLER.setLevel(logging.WARNING)
    ERR_STREAM_HANDLER.set_name(STDERR_NAME)
    ERR_STREAM_HANDLER.setFormatter(LOG_FORMATTER)
    logger.addHandler(ERR_STREAM_HANDLER)

    return logger


def _set_verbose(verbose: bool) -> None:
    """Globally enable (verbose=True) or suppress (verbose=False) sub-ERROR logging."""
    if not verbose:
        logging.disable(logging.WARNING)
    else:
        logging.disable(logging.NOTSET)


def _set_logfile(logger: logging.Logger, logfile: Optional[str] = None) -> None:
    """Adds log file handler to logger.

    Raises
    ------
    ValueError
        If no logfile path is provided.

    Notes
    -----
    Return annotation fixed to ``None``: the function mutates ``logger``
    in place and never returned anything.
    """
    if not logfile:
        raise ValueError("Please provide logfile path")
    file_handler = logging.FileHandler(logfile)
    file_handler.set_name(LOGFILE_HANDLE_NAME)
    file_handler.setFormatter(LOG_FORMATTER)
    logger.addHandler(file_handler)
{"/echopype/convert/set_groups_ad2cp.py": ["/echopype/__init__.py", "/echopype/utils/coding.py", "/echopype/convert/parse_ad2cp.py", "/echopype/convert/set_groups_base.py"], "/echopype/tests/utils/test_source_filenames.py": ["/echopype/utils/prov.py"], "/echopype/echodata/convention/__init__.py": ["/echopype/echodata/convention/conv.py"], "/echopype/consolidate/split_beam_angle.py": ["/echopype/calibrate/ek80_complex.py"], "/echopype/calibrate/ecs.py": ["/echopype/utils/log.py"], "/echopype/tests/calibrate/test_ecs_integration.py": ["/echopype/__init__.py", "/echopype/calibrate/ecs.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/cal_params.py"], "/echopype/calibrate/api.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/calibrate/calibrate_azfp.py", "/echopype/calibrate/calibrate_ek.py"], "/echopype/convert/parse_ad2cp.py": ["/echopype/convert/parse_base.py"], "/echopype/echodata/combine.py": ["/echopype/utils/io.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/echodata/echodata.py"], "/echopype/tests/consolidate/test_consolidate_integration.py": ["/echopype/__init__.py"], "/echopype/echodata/api.py": ["/echopype/echodata/echodata.py", "/echopype/core.py"], "/echopype/tests/utils/test_utils_io.py": ["/echopype/utils/io.py"], "/echopype/echodata/sensor_ep_version_mapping/ep_version_mapper.py": ["/echopype/echodata/sensor_ep_version_mapping/v05x_to_v06x.py"], "/echopype/tests/mask/test_mask.py": ["/echopype/__init__.py", "/echopype/mask/__init__.py", "/echopype/mask/api.py"], "/echopype/tests/conftest.py": ["/echopype/testing.py"], "/echopype/tests/echodata/utils.py": ["/echopype/convert/set_groups_base.py", "/echopype/echodata/echodata.py"], "/echopype/tests/commongrid/test_mvbs.py": ["/echopype/__init__.py", "/echopype/commongrid/mvbs.py"], "/echopype/tests/calibrate/test_cal_params_integration.py": ["/echopype/__init__.py"], 
"/echopype/tests/test_core.py": ["/echopype/core.py"], "/echopype/calibrate/calibrate_azfp.py": ["/echopype/echodata/__init__.py", "/echopype/calibrate/cal_params.py", "/echopype/calibrate/calibrate_ek.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/range.py"], "/echopype/echodata/widgets/utils.py": ["/echopype/echodata/convention/utils.py"], "/echopype/echodata/simrad.py": ["/echopype/echodata/echodata.py"], "/echopype/tests/convert/test_convert_ek60.py": ["/echopype/__init__.py"], "/echopype/convert/parsed_to_zarr_ek60.py": ["/echopype/convert/parsed_to_zarr.py"], "/echopype/mask/api.py": ["/echopype/utils/io.py", "/echopype/utils/prov.py"], "/echopype/qc/__init__.py": ["/echopype/qc/api.py"], "/echopype/tests/utils/test_processinglevels_integration.py": ["/echopype/__init__.py"], "/echopype/tests/metrics/test_metrics_summary_statistics.py": ["/echopype/metrics/summary_statistics.py"], "/echopype/__init__.py": ["/echopype/convert/api.py", "/echopype/echodata/api.py", "/echopype/echodata/combine.py", "/echopype/utils/io.py", "/echopype/utils/log.py"], "/.ci_helpers/check-version.py": ["/echopype/__init__.py"], "/echopype/calibrate/ek80_complex.py": ["/echopype/convert/set_groups_ek80.py"], "/echopype/consolidate/__init__.py": ["/echopype/consolidate/api.py"], "/echopype/commongrid/api.py": ["/echopype/utils/prov.py", "/echopype/commongrid/mvbs.py"], "/echopype/tests/echodata/test_echodata_combine.py": ["/echopype/__init__.py", "/echopype/utils/coding.py", "/echopype/echodata/__init__.py", "/echopype/echodata/combine.py"], "/echopype/tests/utils/test_utils_log.py": ["/echopype/__init__.py"], "/echopype/mask/__init__.py": ["/echopype/mask/api.py"], "/echopype/tests/convert/test_convert_ad2cp.py": ["/echopype/__init__.py", "/echopype/testing.py"], "/echopype/convert/parsed_to_zarr_ek80.py": ["/echopype/convert/parsed_to_zarr_ek60.py"], "/echopype/tests/visualize/test_plot.py": ["/echopype/__init__.py", "/echopype/visualize/__init__.py", 
"/echopype/testing.py", "/echopype/calibrate/calibrate_ek.py", "/echopype/echodata/__init__.py", "/echopype/visualize/api.py"], "/echopype/calibrate/env_params.py": ["/echopype/echodata/__init__.py", "/echopype/calibrate/cal_params.py"], "/echopype/convert/utils/ek_raw_parsers.py": ["/echopype/utils/log.py", "/echopype/convert/utils/ek_date_conversion.py"], "/echopype/tests/convert/test_convert_azfp.py": ["/echopype/__init__.py"], "/echopype/tests/calibrate/test_calibrate.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params_old.py"], "/echopype/calibrate/range.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/calibrate/env_params.py"], "/echopype/tests/calibrate/test_env_params.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params.py"], "/echopype/convert/set_groups_base.py": ["/echopype/echodata/convention/__init__.py", "/echopype/utils/coding.py", "/echopype/utils/prov.py"], "/echopype/clean/api.py": ["/echopype/utils/prov.py", "/echopype/clean/noise_est.py"], "/echopype/calibrate/calibrate_ek.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/log.py", "/echopype/calibrate/cal_params.py", "/echopype/calibrate/calibrate_base.py", "/echopype/calibrate/ecs.py", "/echopype/calibrate/ek80_complex.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/range.py"], "/echopype/calibrate/calibrate_base.py": ["/echopype/echodata/__init__.py", "/echopype/utils/log.py", "/echopype/calibrate/ecs.py"], "/echopype/echodata/echodata.py": ["/echopype/utils/coding.py", "/echopype/utils/io.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/echodata/convention/__init__.py", "/echopype/echodata/widgets/utils.py", "/echopype/echodata/widgets/widgets.py", "/echopype/core.py", "/echopype/convert/api.py"], "/echopype/visualize/api.py": ["/echopype/visualize/plot.py", "/echopype/echodata/__init__.py", "/echopype/calibrate/calibrate_ek.py", 
"/echopype/calibrate/calibrate_azfp.py", "/echopype/utils/log.py"], "/echopype/echodata/sensor_ep_version_mapping/v05x_to_v06x.py": ["/echopype/core.py", "/echopype/utils/log.py", "/echopype/echodata/convention/__init__.py"], "/echopype/clean/__init__.py": ["/echopype/clean/api.py"], "/echopype/visualize/plot.py": ["/echopype/visualize/cm.py", "/echopype/utils/log.py"], "/echopype/commongrid/__init__.py": ["/echopype/commongrid/api.py"], "/echopype/convert/parsed_to_zarr.py": ["/echopype/utils/io.py"], "/echopype/calibrate/__init__.py": ["/echopype/calibrate/api.py"], "/echopype/echodata/convention/utils.py": ["/echopype/echodata/convention/__init__.py"], "/echopype/tests/echodata/test_echodata.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params_old.py", "/echopype/echodata/__init__.py", "/echopype/calibrate/calibrate_ek.py"], "/echopype/qc/api.py": ["/echopype/echodata/__init__.py", "/echopype/utils/log.py"], "/echopype/visualize/__init__.py": ["/echopype/visualize/api.py"], "/echopype/tests/qc/test_qc.py": ["/echopype/qc/__init__.py", "/echopype/qc/api.py"], "/echopype/core.py": ["/echopype/convert/parse_ad2cp.py", "/echopype/convert/parse_azfp.py", "/echopype/convert/parse_ek60.py", "/echopype/convert/parse_ek80.py", "/echopype/convert/parsed_to_zarr_ek60.py", "/echopype/convert/parsed_to_zarr_ek80.py", "/echopype/convert/set_groups_ad2cp.py", "/echopype/convert/set_groups_azfp.py", "/echopype/convert/set_groups_ek60.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/convert/parse_ek60.py": ["/echopype/convert/parse_base.py"], "/echopype/tests/calibrate/test_cal_params.py": ["/echopype/calibrate/cal_params.py"], "/echopype/tests/calibrate/test_ecs.py": ["/echopype/calibrate/ecs.py"], "/echopype/echodata/__init__.py": ["/echopype/echodata/echodata.py"], "/echopype/tests/convert/test_convert_ek80.py": ["/echopype/__init__.py", "/echopype/testing.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/convert/set_groups_ek80.py": 
["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/convert/set_groups_base.py"], "/echopype/consolidate/api.py": ["/echopype/calibrate/ek80_complex.py", "/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/io.py", "/echopype/utils/prov.py", "/echopype/consolidate/split_beam_angle.py"], "/echopype/convert/__init__.py": ["/echopype/convert/parse_ad2cp.py", "/echopype/convert/parse_azfp.py", "/echopype/convert/parse_base.py", "/echopype/convert/parse_ek60.py", "/echopype/convert/parse_ek80.py", "/echopype/convert/set_groups_ad2cp.py", "/echopype/convert/set_groups_azfp.py", "/echopype/convert/set_groups_ek60.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/utils/prov.py": ["/echopype/utils/log.py"], "/echopype/convert/parse_ek80.py": ["/echopype/convert/parse_base.py"], "/echopype/tests/echodata/test_echodata_simrad.py": ["/echopype/echodata/simrad.py"], "/echopype/tests/utils/test_coding.py": ["/echopype/utils/coding.py"], "/echopype/echodata/convention/conv.py": ["/echopype/echodata/__init__.py"], "/echopype/metrics/__init__.py": ["/echopype/metrics/summary_statistics.py"], "/echopype/tests/calibrate/test_calibrate_ek80.py": ["/echopype/__init__.py"], "/echopype/tests/commongrid/test_nasc.py": ["/echopype/__init__.py", "/echopype/calibrate/__init__.py", "/echopype/commongrid/__init__.py", "/echopype/commongrid/nasc.py", "/echopype/consolidate/__init__.py"], "/echopype/convert/api.py": ["/echopype/core.py", "/echopype/convert/parsed_to_zarr.py", "/echopype/echodata/echodata.py", "/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/utils/prov.py"], "/echopype/convert/set_groups_azfp.py": ["/echopype/utils/coding.py", "/echopype/convert/set_groups_base.py"], "/echopype/tests/clean/test_noise.py": ["/echopype/__init__.py"], "/echopype/convert/parse_base.py": ["/echopype/utils/log.py", "/echopype/convert/utils/ek_raw_io.py"], "/echopype/tests/convert/test_parsed_to_zarr.py": ["/echopype/__init__.py", 
"/echopype/echodata/echodata.py"], "/echopype/echodata/widgets/widgets.py": ["/echopype/echodata/widgets/utils.py"], "/echopype/tests/echodata/test_echodata_structure.py": ["/echopype/echodata/echodata.py", "/echopype/echodata/api.py"], "/echopype/tests/calibrate/test_range_integration.py": ["/echopype/__init__.py"], "/echopype/tests/calibrate/test_env_params_integration.py": ["/echopype/__init__.py"], "/echopype/tests/convert/test_convert_source_target_locs.py": ["/echopype/__init__.py", "/echopype/utils/coding.py"], "/echopype/tests/utils/test_utils_uwa.py": ["/echopype/utils/uwa.py"], "/echopype/utils/io.py": ["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/core.py"], "/echopype/tests/calibrate/test_ek80_complex.py": ["/echopype/calibrate/ek80_complex.py"], "/echopype/convert/set_groups_ek60.py": ["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/convert/set_groups_base.py"], "/echopype/convert/utils/ek_raw_io.py": ["/echopype/utils/log.py"], "/echopype/convert/parse_azfp.py": ["/echopype/utils/log.py", "/echopype/convert/parse_base.py"]}
73,864
OSOceanAcoustics/echopype
refs/heads/main
/echopype/tests/echodata/test_echodata_structure.py
from typing import Any, Dict, Optional

from datatree import open_datatree
import pytest

from echopype.echodata.echodata import EchoData, XARRAY_ENGINE_MAP
from echopype.echodata.api import open_converted


@pytest.fixture
def azfp_path(test_path):
    return test_path['AZFP']


@pytest.fixture
def ek60_path(test_path):
    return test_path['EK60']


@pytest.fixture
def ek80_path(test_path):
    return test_path['EK80']


def _tree_from_file(converted_raw_path: str,
                    ed_storage_options: Optional[Dict[str, Any]] = None,
                    open_kwargs: Optional[Dict[str, Any]] = None):
    """
    Checks that converted_raw_path exists, sanitizes the path,
    obtains the path's suffix, and lastly opens the file
    as a datatree.

    Parameters
    ----------
    converted_raw_path : str
        path to converted data file
    ed_storage_options : dict, optional
        options for cloud storage used by EchoData
    open_kwargs : dict, optional
        optional keyword arguments to be passed
        into xr.open_dataset

    Returns
    -------
    A Datatree object representing the converted data file.
    """
    # Fix: the defaults were mutable dicts (`= {}`), which are shared across
    # calls and can be silently mutated; use None sentinels instead.
    if ed_storage_options is None:
        ed_storage_options = {}
    if open_kwargs is None:
        open_kwargs = {}

    # the purpose of this class is so I can use
    # functions in EchoData as if they were static
    # TODO: There is a better way to do this if
    #  we change functions in EchoData to static methods
    class temp_class(object):
        storage_options = ed_storage_options

    EchoData._check_path(temp_class, converted_raw_path)
    converted_raw_path = EchoData._sanitize_path(temp_class, converted_raw_path)
    suffix = EchoData._check_suffix(temp_class, converted_raw_path)

    tree = open_datatree(
        converted_raw_path,
        engine=XARRAY_ENGINE_MAP[suffix],
        **open_kwargs,
    )

    return tree


def _check_and_drop_var(ed, tree, grp_path, var):
    """
    This function performs minimal checks of a variable contained
    both in an EchoData object and a Datatree. It ensures that
    the dimensions, attributes, and data types are the same.
    Once the checks have passed, it then drops these variables
    from both the EchoData object and the Datatree.

    Parameters
    ----------
    ed : EchoData
        EchoData object that contains the variable to check and drop.
    tree : Datatree
        Datatree object that contains the variable to check and drop.
    grp_path : str
        The path to the group that the variable is in.
    var : str
        The variable to be checked and dropped.

    Notes
    -----
    The Datatree object is created from an EchoData object
    written to a netcdf file.
    """
    ed_var = ed[grp_path][var]
    tree_var = tree[grp_path].ds[var]

    # make sure that the dimensions and attributes
    # are the same for the variable
    assert ed_var.dims == tree_var.dims
    assert ed_var.attrs == tree_var.attrs

    # make sure that the data types are correct too
    assert isinstance(ed_var.values, type(tree_var.values))

    # drop variables so we can check that datasets are identical
    ed[grp_path] = ed[grp_path].drop(var)
    tree[grp_path].ds = tree[grp_path].ds.drop(var)


def _check_and_drop_attr(ed, tree, grp_path, attr, typ):
    """
    This function performs minimal checks of an attribute contained
    both in an EchoData object and a Datatree group. This function
    only works for a group's attribute, it cannot work on variable
    attributes. It ensures that the attribute exists and that it has
    the expected data type. Once the checks have passed, it then
    drops the attribute from both the EchoData object and the Datatree.

    Parameters
    ----------
    ed : EchoData
        EchoData object that contains the attribute to check and drop.
    tree : Datatree
        Datatree object that contains the attribute to check and drop.
    grp_path : str
        The path to the group that the attribute is in.
    attr : str
        The attribute to be checked and dropped.
    typ : type
        The expected data type of the attribute.

    Notes
    -----
    The Datatree object is created from an EchoData object
    written to a netcdf file.
    """
    # make sure that the attribute exists
    assert attr in ed[grp_path].attrs.keys()
    assert attr in tree[grp_path].ds.attrs.keys()

    # make sure that the value of the attribute is the right type
    assert isinstance(ed[grp_path].attrs[attr], typ)
    assert isinstance(tree[grp_path].ds.attrs[attr], typ)

    # drop the attribute so we can directly compare datasets
    del ed[grp_path].attrs[attr]
    del tree[grp_path].ds.attrs[attr]


def compare_ed_against_tree(ed, tree):
    """
    This function compares the Datasets
    of ed against tree and makes sure they
    are identical.

    Parameters
    ----------
    ed : EchoData
        EchoData object
    tree : Datatree
        Datatree object

    Notes
    -----
    The Datatree object is created from an EchoData object
    written to a netcdf file.
    """
    for grp_path in ed.group_paths:
        if grp_path == "Top-level":
            assert tree.ds.identical(ed[grp_path])
        else:
            assert tree[grp_path].ds.identical(ed[grp_path])


def _get_conversion_file_lists(azfp_path, ek60_path, ek80_path):
    """Return the (v06x, v05x) lists of converted test-file paths, index-aligned."""
    converted_raw_paths_v06x = [
        ek60_path / "ek60-Summer2017-D20170615-T190214-ep-v06x.nc",
        ek60_path / "ek60-combined-ep-v06x.nc",
        ek80_path / "ek80-Summer2018--D20180905-T033113-ep-v06x.nc",
        ek80_path / "ek80-2018115-D20181213-T094600-ep-v06x.nc",
        ek80_path / "ek80-2019118-group2survey-D20191214-T081342-ep-v06x.nc",
        ek80_path / "ek80-Green2-Survey2-FM-short-slow-D20191004-T211557-ep-v06x.nc",
        azfp_path / "azfp-17082117_01A_17041823_XML-ep-v06x.nc",
    ]

    converted_raw_paths_v05x = [
        ek60_path / "ek60-Summer2017-D20170615-T190214-ep-v05x.nc",
        ek60_path / "ek60-combined-ep-v05x.nc",
        ek80_path / "ek80-Summer2018--D20180905-T033113-ep-v05x.nc",
        ek80_path / "ek80-2018115-D20181213-T094600-ep-v05x.nc",
        ek80_path / "ek80-2019118-group2survey-D20191214-T081342-ep-v05x.nc",
        ek80_path / "ek80-Green2-Survey2-FM-short-slow-D20191004-T211557-ep-v05x.nc",
        azfp_path / "azfp-17082117_01A_17041823_XML-ep-v05x.nc",
    ]

    return converted_raw_paths_v06x, converted_raw_paths_v05x


def test_v05x_v06x_conversion_structure(azfp_path, ek60_path, ek80_path):
    """
    Tests that version 0.5.x echopype files
    have been correctly converted to the
    0.6.x structure.
    """
    pytest.xfail("PR #881 has caused these tests to fail for EK80 sonar models. While we "
                 "revise this test structure, these tests will be skipped. Please see issue "
                 "https://github.com/OSOceanAcoustics/echopype/issues/884 for more information.")

    converted_raw_paths_v06x, converted_raw_paths_v05x = \
        _get_conversion_file_lists(azfp_path, ek60_path, ek80_path)

    for path_v05x, path_v06x in zip(converted_raw_paths_v05x, converted_raw_paths_v06x):
        ed_v05x = open_converted(path_v05x)
        tree_v06x = _tree_from_file(converted_raw_path=path_v06x)

        # dictionary of attributes to drop (from the group only) where
        # the group path is the key and the value is a list of tuples
        # of the form (attr, type of attr expected)
        attrs_to_drop = {
            "Provenance": [("conversion_software_version", str),
                           ("conversion_time", str)]
        }

        # check and drop attributes that cannot be directly compared
        # because their values are not the same
        for key, val in attrs_to_drop.items():
            for var in val:
                _check_and_drop_attr(ed_v05x, tree_v06x, key, var[0], var[1])

        _check_and_drop_var(ed_v05x, tree_v06x, "Provenance", "source_filenames")

        # The following if block is for the case where we have a combined file
        # TODO: look into this after v0.6.0 release
        if "echodata_filename" in ed_v05x["Provenance"]:
            prov_comb_names = ["echodata_filename", "top_attrs", "environment_attrs",
                               "platform_attrs", "nmea_attrs", "provenance_attrs",
                               "sonar_attrs", "beam_attrs", "vendor_attrs",
                               "top_attr_key", "environment_attr_key", "platform_attr_key",
                               "nmea_attr_key", "provenance_attr_key", "sonar_attr_key",
                               "beam_attr_key", "vendor_attr_key"]

            for name in prov_comb_names:
                _check_and_drop_var(ed_v05x, tree_v06x, "Provenance", name)

            ed_v05x["Provenance"] = ed_v05x["Provenance"].drop("src_filenames")

        # ignore direct comparison of the variables Sonar.sonar_serial_number,
        # Platform.drop_keel_offset_is_manual, and Platform.water_level_draft_is_manual
        # for EK80, this data is not present in v0.5.x
        if ed_v05x["Top-level"].attrs["keywords"] == "EK80":
            # dictionary of variables to drop where the group path is the
            # key and the variables are the value
            vars_to_drop = {"Sonar": ["sonar_serial_number"],
                            "Platform": ["drop_keel_offset_is_manual",
                                         "water_level_draft_is_manual"],
                            "Environment": ["sound_velocity_profile",
                                            "sound_velocity_profile_depth",
                                            "sound_velocity_source",
                                            "transducer_name",
                                            "transducer_sound_speed"]
                            }

            # check and drop variables that cannot be directly compared
            # because their values are not the same
            for key, val in vars_to_drop.items():
                for var in val:
                    _check_and_drop_var(ed_v05x, tree_v06x, key, var)

            # sort the beam groups for EK80 according to channel (necessary for comparison)
            ed_v05x['Sonar/Beam_group1'] = ed_v05x['Sonar/Beam_group1'].sortby("channel")
            if 'Sonar/Beam_group2' in ed_v05x.group_paths:
                ed_v05x['Sonar/Beam_group2'] = ed_v05x['Sonar/Beam_group2'].sortby("channel")

            # sort the Platform group by channel for EK80 (necessary for comparison)
            tree_v06x['Platform'].ds = tree_v06x['Platform'].ds.sortby('channel')
            ed_v05x['Platform'] = ed_v05x['Platform'].sortby('channel')

        # remove all attributes from Vendor_specific (data is missing sometimes)
        tree_v06x["Vendor_specific"].ds.attrs = {"blank": 'None'}
        ed_v05x["Vendor_specific"].attrs = {"blank": 'None'}

        compare_ed_against_tree(ed_v05x, tree_v06x)


def test_echodata_structure(azfp_path, ek60_path, ek80_path):
    """
    Makes sure that all raw files opened
    create the expected EchoData structure.
    """
    # TODO: create this test once dev is in its final form.

    # check and remove conversion time from attributes
    # _check_and_drop_attr(ed_v05x, tree_v06x, "Provenance", "conversion_time", str)
    # compare_ed_against_tree(ed_v05x, tree_v06x)

    pytest.xfail("Full testing of the EchoData Structure has not been implemented yet.")
{"/echopype/convert/set_groups_ad2cp.py": ["/echopype/__init__.py", "/echopype/utils/coding.py", "/echopype/convert/parse_ad2cp.py", "/echopype/convert/set_groups_base.py"], "/echopype/tests/utils/test_source_filenames.py": ["/echopype/utils/prov.py"], "/echopype/echodata/convention/__init__.py": ["/echopype/echodata/convention/conv.py"], "/echopype/consolidate/split_beam_angle.py": ["/echopype/calibrate/ek80_complex.py"], "/echopype/calibrate/ecs.py": ["/echopype/utils/log.py"], "/echopype/tests/calibrate/test_ecs_integration.py": ["/echopype/__init__.py", "/echopype/calibrate/ecs.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/cal_params.py"], "/echopype/calibrate/api.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/calibrate/calibrate_azfp.py", "/echopype/calibrate/calibrate_ek.py"], "/echopype/convert/parse_ad2cp.py": ["/echopype/convert/parse_base.py"], "/echopype/echodata/combine.py": ["/echopype/utils/io.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/echodata/echodata.py"], "/echopype/tests/consolidate/test_consolidate_integration.py": ["/echopype/__init__.py"], "/echopype/echodata/api.py": ["/echopype/echodata/echodata.py", "/echopype/core.py"], "/echopype/tests/utils/test_utils_io.py": ["/echopype/utils/io.py"], "/echopype/echodata/sensor_ep_version_mapping/ep_version_mapper.py": ["/echopype/echodata/sensor_ep_version_mapping/v05x_to_v06x.py"], "/echopype/tests/mask/test_mask.py": ["/echopype/__init__.py", "/echopype/mask/__init__.py", "/echopype/mask/api.py"], "/echopype/tests/conftest.py": ["/echopype/testing.py"], "/echopype/tests/echodata/utils.py": ["/echopype/convert/set_groups_base.py", "/echopype/echodata/echodata.py"], "/echopype/tests/commongrid/test_mvbs.py": ["/echopype/__init__.py", "/echopype/commongrid/mvbs.py"], "/echopype/tests/calibrate/test_cal_params_integration.py": ["/echopype/__init__.py"], 
"/echopype/tests/test_core.py": ["/echopype/core.py"], "/echopype/calibrate/calibrate_azfp.py": ["/echopype/echodata/__init__.py", "/echopype/calibrate/cal_params.py", "/echopype/calibrate/calibrate_ek.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/range.py"], "/echopype/echodata/widgets/utils.py": ["/echopype/echodata/convention/utils.py"], "/echopype/echodata/simrad.py": ["/echopype/echodata/echodata.py"], "/echopype/tests/convert/test_convert_ek60.py": ["/echopype/__init__.py"], "/echopype/convert/parsed_to_zarr_ek60.py": ["/echopype/convert/parsed_to_zarr.py"], "/echopype/mask/api.py": ["/echopype/utils/io.py", "/echopype/utils/prov.py"], "/echopype/qc/__init__.py": ["/echopype/qc/api.py"], "/echopype/tests/utils/test_processinglevels_integration.py": ["/echopype/__init__.py"], "/echopype/tests/metrics/test_metrics_summary_statistics.py": ["/echopype/metrics/summary_statistics.py"], "/echopype/__init__.py": ["/echopype/convert/api.py", "/echopype/echodata/api.py", "/echopype/echodata/combine.py", "/echopype/utils/io.py", "/echopype/utils/log.py"], "/.ci_helpers/check-version.py": ["/echopype/__init__.py"], "/echopype/calibrate/ek80_complex.py": ["/echopype/convert/set_groups_ek80.py"], "/echopype/consolidate/__init__.py": ["/echopype/consolidate/api.py"], "/echopype/commongrid/api.py": ["/echopype/utils/prov.py", "/echopype/commongrid/mvbs.py"], "/echopype/tests/echodata/test_echodata_combine.py": ["/echopype/__init__.py", "/echopype/utils/coding.py", "/echopype/echodata/__init__.py", "/echopype/echodata/combine.py"], "/echopype/tests/utils/test_utils_log.py": ["/echopype/__init__.py"], "/echopype/mask/__init__.py": ["/echopype/mask/api.py"], "/echopype/tests/convert/test_convert_ad2cp.py": ["/echopype/__init__.py", "/echopype/testing.py"], "/echopype/convert/parsed_to_zarr_ek80.py": ["/echopype/convert/parsed_to_zarr_ek60.py"], "/echopype/tests/visualize/test_plot.py": ["/echopype/__init__.py", "/echopype/visualize/__init__.py", 
"/echopype/testing.py", "/echopype/calibrate/calibrate_ek.py", "/echopype/echodata/__init__.py", "/echopype/visualize/api.py"], "/echopype/calibrate/env_params.py": ["/echopype/echodata/__init__.py", "/echopype/calibrate/cal_params.py"], "/echopype/convert/utils/ek_raw_parsers.py": ["/echopype/utils/log.py", "/echopype/convert/utils/ek_date_conversion.py"], "/echopype/tests/convert/test_convert_azfp.py": ["/echopype/__init__.py"], "/echopype/tests/calibrate/test_calibrate.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params_old.py"], "/echopype/calibrate/range.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/calibrate/env_params.py"], "/echopype/tests/calibrate/test_env_params.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params.py"], "/echopype/convert/set_groups_base.py": ["/echopype/echodata/convention/__init__.py", "/echopype/utils/coding.py", "/echopype/utils/prov.py"], "/echopype/clean/api.py": ["/echopype/utils/prov.py", "/echopype/clean/noise_est.py"], "/echopype/calibrate/calibrate_ek.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/log.py", "/echopype/calibrate/cal_params.py", "/echopype/calibrate/calibrate_base.py", "/echopype/calibrate/ecs.py", "/echopype/calibrate/ek80_complex.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/range.py"], "/echopype/calibrate/calibrate_base.py": ["/echopype/echodata/__init__.py", "/echopype/utils/log.py", "/echopype/calibrate/ecs.py"], "/echopype/echodata/echodata.py": ["/echopype/utils/coding.py", "/echopype/utils/io.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/echodata/convention/__init__.py", "/echopype/echodata/widgets/utils.py", "/echopype/echodata/widgets/widgets.py", "/echopype/core.py", "/echopype/convert/api.py"], "/echopype/visualize/api.py": ["/echopype/visualize/plot.py", "/echopype/echodata/__init__.py", "/echopype/calibrate/calibrate_ek.py", 
"/echopype/calibrate/calibrate_azfp.py", "/echopype/utils/log.py"], "/echopype/echodata/sensor_ep_version_mapping/v05x_to_v06x.py": ["/echopype/core.py", "/echopype/utils/log.py", "/echopype/echodata/convention/__init__.py"], "/echopype/clean/__init__.py": ["/echopype/clean/api.py"], "/echopype/visualize/plot.py": ["/echopype/visualize/cm.py", "/echopype/utils/log.py"], "/echopype/commongrid/__init__.py": ["/echopype/commongrid/api.py"], "/echopype/convert/parsed_to_zarr.py": ["/echopype/utils/io.py"], "/echopype/calibrate/__init__.py": ["/echopype/calibrate/api.py"], "/echopype/echodata/convention/utils.py": ["/echopype/echodata/convention/__init__.py"], "/echopype/tests/echodata/test_echodata.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params_old.py", "/echopype/echodata/__init__.py", "/echopype/calibrate/calibrate_ek.py"], "/echopype/qc/api.py": ["/echopype/echodata/__init__.py", "/echopype/utils/log.py"], "/echopype/visualize/__init__.py": ["/echopype/visualize/api.py"], "/echopype/tests/qc/test_qc.py": ["/echopype/qc/__init__.py", "/echopype/qc/api.py"], "/echopype/core.py": ["/echopype/convert/parse_ad2cp.py", "/echopype/convert/parse_azfp.py", "/echopype/convert/parse_ek60.py", "/echopype/convert/parse_ek80.py", "/echopype/convert/parsed_to_zarr_ek60.py", "/echopype/convert/parsed_to_zarr_ek80.py", "/echopype/convert/set_groups_ad2cp.py", "/echopype/convert/set_groups_azfp.py", "/echopype/convert/set_groups_ek60.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/convert/parse_ek60.py": ["/echopype/convert/parse_base.py"], "/echopype/tests/calibrate/test_cal_params.py": ["/echopype/calibrate/cal_params.py"], "/echopype/tests/calibrate/test_ecs.py": ["/echopype/calibrate/ecs.py"], "/echopype/echodata/__init__.py": ["/echopype/echodata/echodata.py"], "/echopype/tests/convert/test_convert_ek80.py": ["/echopype/__init__.py", "/echopype/testing.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/convert/set_groups_ek80.py": 
["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/convert/set_groups_base.py"], "/echopype/consolidate/api.py": ["/echopype/calibrate/ek80_complex.py", "/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/io.py", "/echopype/utils/prov.py", "/echopype/consolidate/split_beam_angle.py"], "/echopype/convert/__init__.py": ["/echopype/convert/parse_ad2cp.py", "/echopype/convert/parse_azfp.py", "/echopype/convert/parse_base.py", "/echopype/convert/parse_ek60.py", "/echopype/convert/parse_ek80.py", "/echopype/convert/set_groups_ad2cp.py", "/echopype/convert/set_groups_azfp.py", "/echopype/convert/set_groups_ek60.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/utils/prov.py": ["/echopype/utils/log.py"], "/echopype/convert/parse_ek80.py": ["/echopype/convert/parse_base.py"], "/echopype/tests/echodata/test_echodata_simrad.py": ["/echopype/echodata/simrad.py"], "/echopype/tests/utils/test_coding.py": ["/echopype/utils/coding.py"], "/echopype/echodata/convention/conv.py": ["/echopype/echodata/__init__.py"], "/echopype/metrics/__init__.py": ["/echopype/metrics/summary_statistics.py"], "/echopype/tests/calibrate/test_calibrate_ek80.py": ["/echopype/__init__.py"], "/echopype/tests/commongrid/test_nasc.py": ["/echopype/__init__.py", "/echopype/calibrate/__init__.py", "/echopype/commongrid/__init__.py", "/echopype/commongrid/nasc.py", "/echopype/consolidate/__init__.py"], "/echopype/convert/api.py": ["/echopype/core.py", "/echopype/convert/parsed_to_zarr.py", "/echopype/echodata/echodata.py", "/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/utils/prov.py"], "/echopype/convert/set_groups_azfp.py": ["/echopype/utils/coding.py", "/echopype/convert/set_groups_base.py"], "/echopype/tests/clean/test_noise.py": ["/echopype/__init__.py"], "/echopype/convert/parse_base.py": ["/echopype/utils/log.py", "/echopype/convert/utils/ek_raw_io.py"], "/echopype/tests/convert/test_parsed_to_zarr.py": ["/echopype/__init__.py", 
"/echopype/echodata/echodata.py"], "/echopype/echodata/widgets/widgets.py": ["/echopype/echodata/widgets/utils.py"], "/echopype/tests/echodata/test_echodata_structure.py": ["/echopype/echodata/echodata.py", "/echopype/echodata/api.py"], "/echopype/tests/calibrate/test_range_integration.py": ["/echopype/__init__.py"], "/echopype/tests/calibrate/test_env_params_integration.py": ["/echopype/__init__.py"], "/echopype/tests/convert/test_convert_source_target_locs.py": ["/echopype/__init__.py", "/echopype/utils/coding.py"], "/echopype/tests/utils/test_utils_uwa.py": ["/echopype/utils/uwa.py"], "/echopype/utils/io.py": ["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/core.py"], "/echopype/tests/calibrate/test_ek80_complex.py": ["/echopype/calibrate/ek80_complex.py"], "/echopype/convert/set_groups_ek60.py": ["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/convert/set_groups_base.py"], "/echopype/convert/utils/ek_raw_io.py": ["/echopype/utils/log.py"], "/echopype/convert/parse_azfp.py": ["/echopype/utils/log.py", "/echopype/convert/parse_base.py"]}
73,865
OSOceanAcoustics/echopype
refs/heads/main
/echopype/tests/calibrate/test_range_integration.py
import pytest import echopype as ep @pytest.mark.parametrize( ( "test_path_key", "sonar_model", "raw_file", "xml_file", "env_params", "cal_params", "waveform_mode", "encode_mode" ), [ # AZFP ("AZFP", "AZFP", "17082117.01A", "17041823.XML", {"salinity": 30, "pressure": 10}, {}, None, None), # EK60 ("EK60", "EK60", "DY1801_EK60-D20180211-T164025.raw", None, None, None, None, None), # EK80 BB complex ("EK80_CAL", "EK80", "2018115-D20181213-T094600.raw", None, None, None, "BB", "complex"), # EK80 CW complex ("EK80_CAL", "EK80", "2018115-D20181213-T094600.raw", None, None, None, "CW", "complex"), # EK80 CW power ("EK80", "EK80", "Summer2018--D20180905-T033113.raw", None, None, None, "CW", "power"), # TODO: EK80 reduced sampling rate ], ids=[ "azfp", "ek60", "ek80_bb_complex", "ek80_cw_complex", "ek80_cw_power", ] ) def test_range_dimensions( test_path, test_path_key, sonar_model, raw_file, xml_file, env_params, cal_params, waveform_mode, encode_mode, ): if xml_file is not None: ed = ep.open_raw( raw_file=test_path[test_path_key] / raw_file, sonar_model=sonar_model, xml_path=test_path[test_path_key] / xml_file, ) else: ed = ep.open_raw(raw_file=test_path[test_path_key] / raw_file, sonar_model=sonar_model) ds_Sv = ep.calibrate.compute_Sv( echodata=ed, env_params=env_params, cal_params=cal_params, waveform_mode=waveform_mode, encode_mode=encode_mode ) assert ds_Sv["echo_range"].dims == ("channel", "ping_time", "range_sample")
{"/echopype/convert/set_groups_ad2cp.py": ["/echopype/__init__.py", "/echopype/utils/coding.py", "/echopype/convert/parse_ad2cp.py", "/echopype/convert/set_groups_base.py"], "/echopype/tests/utils/test_source_filenames.py": ["/echopype/utils/prov.py"], "/echopype/echodata/convention/__init__.py": ["/echopype/echodata/convention/conv.py"], "/echopype/consolidate/split_beam_angle.py": ["/echopype/calibrate/ek80_complex.py"], "/echopype/calibrate/ecs.py": ["/echopype/utils/log.py"], "/echopype/tests/calibrate/test_ecs_integration.py": ["/echopype/__init__.py", "/echopype/calibrate/ecs.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/cal_params.py"], "/echopype/calibrate/api.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/calibrate/calibrate_azfp.py", "/echopype/calibrate/calibrate_ek.py"], "/echopype/convert/parse_ad2cp.py": ["/echopype/convert/parse_base.py"], "/echopype/echodata/combine.py": ["/echopype/utils/io.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/echodata/echodata.py"], "/echopype/tests/consolidate/test_consolidate_integration.py": ["/echopype/__init__.py"], "/echopype/echodata/api.py": ["/echopype/echodata/echodata.py", "/echopype/core.py"], "/echopype/tests/utils/test_utils_io.py": ["/echopype/utils/io.py"], "/echopype/echodata/sensor_ep_version_mapping/ep_version_mapper.py": ["/echopype/echodata/sensor_ep_version_mapping/v05x_to_v06x.py"], "/echopype/tests/mask/test_mask.py": ["/echopype/__init__.py", "/echopype/mask/__init__.py", "/echopype/mask/api.py"], "/echopype/tests/conftest.py": ["/echopype/testing.py"], "/echopype/tests/echodata/utils.py": ["/echopype/convert/set_groups_base.py", "/echopype/echodata/echodata.py"], "/echopype/tests/commongrid/test_mvbs.py": ["/echopype/__init__.py", "/echopype/commongrid/mvbs.py"], "/echopype/tests/calibrate/test_cal_params_integration.py": ["/echopype/__init__.py"], 
"/echopype/tests/test_core.py": ["/echopype/core.py"], "/echopype/calibrate/calibrate_azfp.py": ["/echopype/echodata/__init__.py", "/echopype/calibrate/cal_params.py", "/echopype/calibrate/calibrate_ek.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/range.py"], "/echopype/echodata/widgets/utils.py": ["/echopype/echodata/convention/utils.py"], "/echopype/echodata/simrad.py": ["/echopype/echodata/echodata.py"], "/echopype/tests/convert/test_convert_ek60.py": ["/echopype/__init__.py"], "/echopype/convert/parsed_to_zarr_ek60.py": ["/echopype/convert/parsed_to_zarr.py"], "/echopype/mask/api.py": ["/echopype/utils/io.py", "/echopype/utils/prov.py"], "/echopype/qc/__init__.py": ["/echopype/qc/api.py"], "/echopype/tests/utils/test_processinglevels_integration.py": ["/echopype/__init__.py"], "/echopype/tests/metrics/test_metrics_summary_statistics.py": ["/echopype/metrics/summary_statistics.py"], "/echopype/__init__.py": ["/echopype/convert/api.py", "/echopype/echodata/api.py", "/echopype/echodata/combine.py", "/echopype/utils/io.py", "/echopype/utils/log.py"], "/.ci_helpers/check-version.py": ["/echopype/__init__.py"], "/echopype/calibrate/ek80_complex.py": ["/echopype/convert/set_groups_ek80.py"], "/echopype/consolidate/__init__.py": ["/echopype/consolidate/api.py"], "/echopype/commongrid/api.py": ["/echopype/utils/prov.py", "/echopype/commongrid/mvbs.py"], "/echopype/tests/echodata/test_echodata_combine.py": ["/echopype/__init__.py", "/echopype/utils/coding.py", "/echopype/echodata/__init__.py", "/echopype/echodata/combine.py"], "/echopype/tests/utils/test_utils_log.py": ["/echopype/__init__.py"], "/echopype/mask/__init__.py": ["/echopype/mask/api.py"], "/echopype/tests/convert/test_convert_ad2cp.py": ["/echopype/__init__.py", "/echopype/testing.py"], "/echopype/convert/parsed_to_zarr_ek80.py": ["/echopype/convert/parsed_to_zarr_ek60.py"], "/echopype/tests/visualize/test_plot.py": ["/echopype/__init__.py", "/echopype/visualize/__init__.py", 
"/echopype/testing.py", "/echopype/calibrate/calibrate_ek.py", "/echopype/echodata/__init__.py", "/echopype/visualize/api.py"], "/echopype/calibrate/env_params.py": ["/echopype/echodata/__init__.py", "/echopype/calibrate/cal_params.py"], "/echopype/convert/utils/ek_raw_parsers.py": ["/echopype/utils/log.py", "/echopype/convert/utils/ek_date_conversion.py"], "/echopype/tests/convert/test_convert_azfp.py": ["/echopype/__init__.py"], "/echopype/tests/calibrate/test_calibrate.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params_old.py"], "/echopype/calibrate/range.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/calibrate/env_params.py"], "/echopype/tests/calibrate/test_env_params.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params.py"], "/echopype/convert/set_groups_base.py": ["/echopype/echodata/convention/__init__.py", "/echopype/utils/coding.py", "/echopype/utils/prov.py"], "/echopype/clean/api.py": ["/echopype/utils/prov.py", "/echopype/clean/noise_est.py"], "/echopype/calibrate/calibrate_ek.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/log.py", "/echopype/calibrate/cal_params.py", "/echopype/calibrate/calibrate_base.py", "/echopype/calibrate/ecs.py", "/echopype/calibrate/ek80_complex.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/range.py"], "/echopype/calibrate/calibrate_base.py": ["/echopype/echodata/__init__.py", "/echopype/utils/log.py", "/echopype/calibrate/ecs.py"], "/echopype/echodata/echodata.py": ["/echopype/utils/coding.py", "/echopype/utils/io.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/echodata/convention/__init__.py", "/echopype/echodata/widgets/utils.py", "/echopype/echodata/widgets/widgets.py", "/echopype/core.py", "/echopype/convert/api.py"], "/echopype/visualize/api.py": ["/echopype/visualize/plot.py", "/echopype/echodata/__init__.py", "/echopype/calibrate/calibrate_ek.py", 
"/echopype/calibrate/calibrate_azfp.py", "/echopype/utils/log.py"], "/echopype/echodata/sensor_ep_version_mapping/v05x_to_v06x.py": ["/echopype/core.py", "/echopype/utils/log.py", "/echopype/echodata/convention/__init__.py"], "/echopype/clean/__init__.py": ["/echopype/clean/api.py"], "/echopype/visualize/plot.py": ["/echopype/visualize/cm.py", "/echopype/utils/log.py"], "/echopype/commongrid/__init__.py": ["/echopype/commongrid/api.py"], "/echopype/convert/parsed_to_zarr.py": ["/echopype/utils/io.py"], "/echopype/calibrate/__init__.py": ["/echopype/calibrate/api.py"], "/echopype/echodata/convention/utils.py": ["/echopype/echodata/convention/__init__.py"], "/echopype/tests/echodata/test_echodata.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params_old.py", "/echopype/echodata/__init__.py", "/echopype/calibrate/calibrate_ek.py"], "/echopype/qc/api.py": ["/echopype/echodata/__init__.py", "/echopype/utils/log.py"], "/echopype/visualize/__init__.py": ["/echopype/visualize/api.py"], "/echopype/tests/qc/test_qc.py": ["/echopype/qc/__init__.py", "/echopype/qc/api.py"], "/echopype/core.py": ["/echopype/convert/parse_ad2cp.py", "/echopype/convert/parse_azfp.py", "/echopype/convert/parse_ek60.py", "/echopype/convert/parse_ek80.py", "/echopype/convert/parsed_to_zarr_ek60.py", "/echopype/convert/parsed_to_zarr_ek80.py", "/echopype/convert/set_groups_ad2cp.py", "/echopype/convert/set_groups_azfp.py", "/echopype/convert/set_groups_ek60.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/convert/parse_ek60.py": ["/echopype/convert/parse_base.py"], "/echopype/tests/calibrate/test_cal_params.py": ["/echopype/calibrate/cal_params.py"], "/echopype/tests/calibrate/test_ecs.py": ["/echopype/calibrate/ecs.py"], "/echopype/echodata/__init__.py": ["/echopype/echodata/echodata.py"], "/echopype/tests/convert/test_convert_ek80.py": ["/echopype/__init__.py", "/echopype/testing.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/convert/set_groups_ek80.py": 
["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/convert/set_groups_base.py"], "/echopype/consolidate/api.py": ["/echopype/calibrate/ek80_complex.py", "/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/io.py", "/echopype/utils/prov.py", "/echopype/consolidate/split_beam_angle.py"], "/echopype/convert/__init__.py": ["/echopype/convert/parse_ad2cp.py", "/echopype/convert/parse_azfp.py", "/echopype/convert/parse_base.py", "/echopype/convert/parse_ek60.py", "/echopype/convert/parse_ek80.py", "/echopype/convert/set_groups_ad2cp.py", "/echopype/convert/set_groups_azfp.py", "/echopype/convert/set_groups_ek60.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/utils/prov.py": ["/echopype/utils/log.py"], "/echopype/convert/parse_ek80.py": ["/echopype/convert/parse_base.py"], "/echopype/tests/echodata/test_echodata_simrad.py": ["/echopype/echodata/simrad.py"], "/echopype/tests/utils/test_coding.py": ["/echopype/utils/coding.py"], "/echopype/echodata/convention/conv.py": ["/echopype/echodata/__init__.py"], "/echopype/metrics/__init__.py": ["/echopype/metrics/summary_statistics.py"], "/echopype/tests/calibrate/test_calibrate_ek80.py": ["/echopype/__init__.py"], "/echopype/tests/commongrid/test_nasc.py": ["/echopype/__init__.py", "/echopype/calibrate/__init__.py", "/echopype/commongrid/__init__.py", "/echopype/commongrid/nasc.py", "/echopype/consolidate/__init__.py"], "/echopype/convert/api.py": ["/echopype/core.py", "/echopype/convert/parsed_to_zarr.py", "/echopype/echodata/echodata.py", "/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/utils/prov.py"], "/echopype/convert/set_groups_azfp.py": ["/echopype/utils/coding.py", "/echopype/convert/set_groups_base.py"], "/echopype/tests/clean/test_noise.py": ["/echopype/__init__.py"], "/echopype/convert/parse_base.py": ["/echopype/utils/log.py", "/echopype/convert/utils/ek_raw_io.py"], "/echopype/tests/convert/test_parsed_to_zarr.py": ["/echopype/__init__.py", 
"/echopype/echodata/echodata.py"], "/echopype/echodata/widgets/widgets.py": ["/echopype/echodata/widgets/utils.py"], "/echopype/tests/echodata/test_echodata_structure.py": ["/echopype/echodata/echodata.py", "/echopype/echodata/api.py"], "/echopype/tests/calibrate/test_range_integration.py": ["/echopype/__init__.py"], "/echopype/tests/calibrate/test_env_params_integration.py": ["/echopype/__init__.py"], "/echopype/tests/convert/test_convert_source_target_locs.py": ["/echopype/__init__.py", "/echopype/utils/coding.py"], "/echopype/tests/utils/test_utils_uwa.py": ["/echopype/utils/uwa.py"], "/echopype/utils/io.py": ["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/core.py"], "/echopype/tests/calibrate/test_ek80_complex.py": ["/echopype/calibrate/ek80_complex.py"], "/echopype/convert/set_groups_ek60.py": ["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/convert/set_groups_base.py"], "/echopype/convert/utils/ek_raw_io.py": ["/echopype/utils/log.py"], "/echopype/convert/parse_azfp.py": ["/echopype/utils/log.py", "/echopype/convert/parse_base.py"]}
73,866
OSOceanAcoustics/echopype
refs/heads/main
/echopype/tests/calibrate/test_env_params_integration.py
import pytest import numpy as np import xarray as xr import echopype as ep @pytest.fixture def azfp_path(test_path): return test_path['AZFP'] @pytest.fixture def ek60_path(test_path): return test_path['EK60'] @pytest.fixture def ek80_cal_path(test_path): return test_path['EK80_CAL'] def test_env_params_intake_AZFP(azfp_path): """ Test env param intake for AZFP calibration. """ azfp_01a_path = str(azfp_path.joinpath('17082117.01A')) azfp_xml_path = str(azfp_path.joinpath('17041823.XML')) ed = ep.open_raw(azfp_01a_path, sonar_model='AZFP', xml_path=azfp_xml_path) # Assemble external env param env_ext = {"salinity": 30, "pressure": 100} # Manually go through env params intake env_params_manual = ep.calibrate.env_params.get_env_params_AZFP(echodata=ed, user_dict=env_ext) for p in env_params_manual.keys(): env_params_manual[p] = ep.calibrate.env_params.harmonize_env_param_time( env_params_manual[p], ping_time=ed["Sonar/Beam_group1"]["ping_time"] ) env_params_manual["sound_speed"].name = "sound_speed" env_params_manual["sound_absorption"].name = "sound_absorption" # Check against the final env params in the calibration output ds_Sv = ep.calibrate.compute_Sv(ed, env_params=env_ext) assert ds_Sv["formula_sound_speed"] == "AZFP" assert ds_Sv["formula_absorption"] == "AZFP" assert ds_Sv["sound_speed"].identical(env_params_manual["sound_speed"]) assert ds_Sv["sound_absorption"].identical(env_params_manual["sound_absorption"]) def test_env_params_intake_EK60_with_input(ek60_path): """ Test env param intake for EK60 calibration. 
""" ed = ep.open_raw(ek60_path / "ncei-wcsd" / "Summer2017-D20170620-T011027.raw", sonar_model="EK60") # Assemble external env param env_ext = {"temperature": 10, "salinity": 30, "pressure": 100, "pH": 8.1} # Manually go through env params intake env_params_manual = ep.calibrate.env_params.get_env_params_EK( sonar_type="EK60", beam=ed["Sonar/Beam_group1"], env=ed["Environment"], user_dict=env_ext ) for p in env_params_manual.keys(): env_params_manual[p] = ep.calibrate.env_params.harmonize_env_param_time( env_params_manual[p], ping_time=ed["Sonar/Beam_group1"]["ping_time"] ) # Check against the final env params in the calibration output ds_Sv = ep.calibrate.compute_Sv(ed, env_params=env_ext) assert ds_Sv["formula_sound_speed"] == "Mackenzie" assert ds_Sv["formula_absorption"] == "FG" assert ds_Sv["sound_speed"].values == env_params_manual["sound_speed"] assert np.all(ds_Sv["sound_absorption"].values == env_params_manual["sound_absorption"].values) def test_env_params_intake_EK60_no_input(ek60_path): """ Test default env param extraction for EK60 calibration. """ ed = ep.open_raw(ek60_path / "ncei-wcsd" / "Summer2017-D20170620-T011027.raw", sonar_model="EK60") ds_Sv = ep.calibrate.compute_Sv(ed) assert np.all(ds_Sv["sound_speed"].values == ed["Environment"]["sound_speed_indicative"].values) assert np.all(ds_Sv["sound_absorption"].values == ed["Environment"]["absorption_indicative"].values) def test_env_params_intake_EK80_no_input(ek80_cal_path): """ Test default env param extraction for EK80 calibration. 
""" ed = ep.open_raw(ek80_cal_path / "2018115-D20181213-T094600.raw", sonar_model="EK80") ds_Sv = ep.calibrate.compute_Sv(ed, waveform_mode="BB", encode_mode="complex") # Use sound speed stored in Environment group assert ds_Sv["sound_speed"].values == ed["Environment"]["sound_speed_indicative"].values # Manually compute absorption cal_obj = ep.calibrate.calibrate_ek.CalibrateEK80( echodata=ed, waveform_mode="BB", encode_mode="complex", cal_params=None, env_params=None ) absorption_ref = ep.utils.uwa.calc_absorption( frequency=cal_obj.freq_center, temperature=ed["Environment"]["temperature"], salinity=ed["Environment"]["salinity"], pressure=ed["Environment"]["depth"], pH=ed["Environment"]["acidity"], sound_speed=ed["Environment"]["sound_speed_indicative"], formula_source="FG", ) absorption_ref = ep.calibrate.env_params.harmonize_env_param_time( absorption_ref, ping_time=ed["Sonar/Beam_group1"]["ping_time"] ) assert np.all(cal_obj.env_params["sound_absorption"].values == absorption_ref.values) assert np.all(ds_Sv["sound_absorption"].values == absorption_ref.values) # TODO: Consolidate this and the one with EK60 with input def test_env_params_intake_EK80_with_input_scalar(ek80_cal_path): """ Test default env param extraction for EK80 calibration. 
""" ed = ep.open_raw(ek80_cal_path / "2018115-D20181213-T094600.raw", sonar_model="EK80") # Assemble external env param env_ext = {"temperature": 10, "salinity": 30, "pressure": 100, "pH": 8.1} ds_Sv = ep.calibrate.compute_Sv(ed, waveform_mode="CW", encode_mode="complex", env_params=env_ext) # Manually compute absorption cal_obj = ep.calibrate.calibrate_ek.CalibrateEK80( echodata=ed, waveform_mode="CW", encode_mode="complex", cal_params=None, env_params=env_ext ) sound_speed_ref = ep.utils.uwa.calc_sound_speed( temperature=env_ext["temperature"], salinity=env_ext["salinity"], pressure=env_ext["pressure"], formula_source="Mackenzie", ) sound_speed_ref = ep.calibrate.env_params.harmonize_env_param_time( sound_speed_ref, ping_time=ed["Sonar/Beam_group1"]["ping_time"] ) absorption_ref = ep.utils.uwa.calc_absorption( frequency=cal_obj.freq_center, temperature=env_ext["temperature"], salinity=env_ext["salinity"], pressure=env_ext["pressure"], pH=env_ext["pH"], sound_speed=sound_speed_ref, formula_source="FG", ) absorption_ref = ep.calibrate.env_params.harmonize_env_param_time( absorption_ref, ping_time=ed["Sonar/Beam_group1"]["ping_time"] ) assert np.all(cal_obj.env_params["sound_speed"] == sound_speed_ref) assert np.all(ds_Sv["sound_speed"] == sound_speed_ref) assert np.all(cal_obj.env_params["sound_absorption"].values == absorption_ref.values) assert np.all(ds_Sv["sound_absorption"].values == absorption_ref.values) def test_env_params_intake_EK80_with_input_da(ek80_cal_path): """ Test default env param extraction for EK80 calibration. 
""" ed = ep.open_raw(ek80_cal_path / "2018115-D20181213-T094600.raw", sonar_model="EK80") # Assemble external env param env_ext = { "temperature": 10, "salinity": 30, "sound_speed": 1498, "sound_absorption": xr.DataArray( np.arange(1, 5) * 0.01, coords={"channel": ['WBT 714581-15 ES18', 'WBT 714583-15 ES120-7C', 'WBT 714597-15 ES333-7C', 'WBT 714605-15 ES200-7C']}) } ds_Sv = ep.calibrate.compute_Sv(ed, waveform_mode="CW", encode_mode="complex", env_params=env_ext) assert env_ext["sound_speed"] == ds_Sv["sound_speed"].values assert np.all(env_ext["sound_absorption"].values == ds_Sv["sound_absorption"].values) for p_name in ["temperature", "salinity", "pressure", "pH"]: assert p_name not in ds_Sv
{"/echopype/convert/set_groups_ad2cp.py": ["/echopype/__init__.py", "/echopype/utils/coding.py", "/echopype/convert/parse_ad2cp.py", "/echopype/convert/set_groups_base.py"], "/echopype/tests/utils/test_source_filenames.py": ["/echopype/utils/prov.py"], "/echopype/echodata/convention/__init__.py": ["/echopype/echodata/convention/conv.py"], "/echopype/consolidate/split_beam_angle.py": ["/echopype/calibrate/ek80_complex.py"], "/echopype/calibrate/ecs.py": ["/echopype/utils/log.py"], "/echopype/tests/calibrate/test_ecs_integration.py": ["/echopype/__init__.py", "/echopype/calibrate/ecs.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/cal_params.py"], "/echopype/calibrate/api.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/calibrate/calibrate_azfp.py", "/echopype/calibrate/calibrate_ek.py"], "/echopype/convert/parse_ad2cp.py": ["/echopype/convert/parse_base.py"], "/echopype/echodata/combine.py": ["/echopype/utils/io.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/echodata/echodata.py"], "/echopype/tests/consolidate/test_consolidate_integration.py": ["/echopype/__init__.py"], "/echopype/echodata/api.py": ["/echopype/echodata/echodata.py", "/echopype/core.py"], "/echopype/tests/utils/test_utils_io.py": ["/echopype/utils/io.py"], "/echopype/echodata/sensor_ep_version_mapping/ep_version_mapper.py": ["/echopype/echodata/sensor_ep_version_mapping/v05x_to_v06x.py"], "/echopype/tests/mask/test_mask.py": ["/echopype/__init__.py", "/echopype/mask/__init__.py", "/echopype/mask/api.py"], "/echopype/tests/conftest.py": ["/echopype/testing.py"], "/echopype/tests/echodata/utils.py": ["/echopype/convert/set_groups_base.py", "/echopype/echodata/echodata.py"], "/echopype/tests/commongrid/test_mvbs.py": ["/echopype/__init__.py", "/echopype/commongrid/mvbs.py"], "/echopype/tests/calibrate/test_cal_params_integration.py": ["/echopype/__init__.py"], 
"/echopype/tests/test_core.py": ["/echopype/core.py"], "/echopype/calibrate/calibrate_azfp.py": ["/echopype/echodata/__init__.py", "/echopype/calibrate/cal_params.py", "/echopype/calibrate/calibrate_ek.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/range.py"], "/echopype/echodata/widgets/utils.py": ["/echopype/echodata/convention/utils.py"], "/echopype/echodata/simrad.py": ["/echopype/echodata/echodata.py"], "/echopype/tests/convert/test_convert_ek60.py": ["/echopype/__init__.py"], "/echopype/convert/parsed_to_zarr_ek60.py": ["/echopype/convert/parsed_to_zarr.py"], "/echopype/mask/api.py": ["/echopype/utils/io.py", "/echopype/utils/prov.py"], "/echopype/qc/__init__.py": ["/echopype/qc/api.py"], "/echopype/tests/utils/test_processinglevels_integration.py": ["/echopype/__init__.py"], "/echopype/tests/metrics/test_metrics_summary_statistics.py": ["/echopype/metrics/summary_statistics.py"], "/echopype/__init__.py": ["/echopype/convert/api.py", "/echopype/echodata/api.py", "/echopype/echodata/combine.py", "/echopype/utils/io.py", "/echopype/utils/log.py"], "/.ci_helpers/check-version.py": ["/echopype/__init__.py"], "/echopype/calibrate/ek80_complex.py": ["/echopype/convert/set_groups_ek80.py"], "/echopype/consolidate/__init__.py": ["/echopype/consolidate/api.py"], "/echopype/commongrid/api.py": ["/echopype/utils/prov.py", "/echopype/commongrid/mvbs.py"], "/echopype/tests/echodata/test_echodata_combine.py": ["/echopype/__init__.py", "/echopype/utils/coding.py", "/echopype/echodata/__init__.py", "/echopype/echodata/combine.py"], "/echopype/tests/utils/test_utils_log.py": ["/echopype/__init__.py"], "/echopype/mask/__init__.py": ["/echopype/mask/api.py"], "/echopype/tests/convert/test_convert_ad2cp.py": ["/echopype/__init__.py", "/echopype/testing.py"], "/echopype/convert/parsed_to_zarr_ek80.py": ["/echopype/convert/parsed_to_zarr_ek60.py"], "/echopype/tests/visualize/test_plot.py": ["/echopype/__init__.py", "/echopype/visualize/__init__.py", 
"/echopype/testing.py", "/echopype/calibrate/calibrate_ek.py", "/echopype/echodata/__init__.py", "/echopype/visualize/api.py"], "/echopype/calibrate/env_params.py": ["/echopype/echodata/__init__.py", "/echopype/calibrate/cal_params.py"], "/echopype/convert/utils/ek_raw_parsers.py": ["/echopype/utils/log.py", "/echopype/convert/utils/ek_date_conversion.py"], "/echopype/tests/convert/test_convert_azfp.py": ["/echopype/__init__.py"], "/echopype/tests/calibrate/test_calibrate.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params_old.py"], "/echopype/calibrate/range.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/calibrate/env_params.py"], "/echopype/tests/calibrate/test_env_params.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params.py"], "/echopype/convert/set_groups_base.py": ["/echopype/echodata/convention/__init__.py", "/echopype/utils/coding.py", "/echopype/utils/prov.py"], "/echopype/clean/api.py": ["/echopype/utils/prov.py", "/echopype/clean/noise_est.py"], "/echopype/calibrate/calibrate_ek.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/log.py", "/echopype/calibrate/cal_params.py", "/echopype/calibrate/calibrate_base.py", "/echopype/calibrate/ecs.py", "/echopype/calibrate/ek80_complex.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/range.py"], "/echopype/calibrate/calibrate_base.py": ["/echopype/echodata/__init__.py", "/echopype/utils/log.py", "/echopype/calibrate/ecs.py"], "/echopype/echodata/echodata.py": ["/echopype/utils/coding.py", "/echopype/utils/io.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/echodata/convention/__init__.py", "/echopype/echodata/widgets/utils.py", "/echopype/echodata/widgets/widgets.py", "/echopype/core.py", "/echopype/convert/api.py"], "/echopype/visualize/api.py": ["/echopype/visualize/plot.py", "/echopype/echodata/__init__.py", "/echopype/calibrate/calibrate_ek.py", 
"/echopype/calibrate/calibrate_azfp.py", "/echopype/utils/log.py"], "/echopype/echodata/sensor_ep_version_mapping/v05x_to_v06x.py": ["/echopype/core.py", "/echopype/utils/log.py", "/echopype/echodata/convention/__init__.py"], "/echopype/clean/__init__.py": ["/echopype/clean/api.py"], "/echopype/visualize/plot.py": ["/echopype/visualize/cm.py", "/echopype/utils/log.py"], "/echopype/commongrid/__init__.py": ["/echopype/commongrid/api.py"], "/echopype/convert/parsed_to_zarr.py": ["/echopype/utils/io.py"], "/echopype/calibrate/__init__.py": ["/echopype/calibrate/api.py"], "/echopype/echodata/convention/utils.py": ["/echopype/echodata/convention/__init__.py"], "/echopype/tests/echodata/test_echodata.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params_old.py", "/echopype/echodata/__init__.py", "/echopype/calibrate/calibrate_ek.py"], "/echopype/qc/api.py": ["/echopype/echodata/__init__.py", "/echopype/utils/log.py"], "/echopype/visualize/__init__.py": ["/echopype/visualize/api.py"], "/echopype/tests/qc/test_qc.py": ["/echopype/qc/__init__.py", "/echopype/qc/api.py"], "/echopype/core.py": ["/echopype/convert/parse_ad2cp.py", "/echopype/convert/parse_azfp.py", "/echopype/convert/parse_ek60.py", "/echopype/convert/parse_ek80.py", "/echopype/convert/parsed_to_zarr_ek60.py", "/echopype/convert/parsed_to_zarr_ek80.py", "/echopype/convert/set_groups_ad2cp.py", "/echopype/convert/set_groups_azfp.py", "/echopype/convert/set_groups_ek60.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/convert/parse_ek60.py": ["/echopype/convert/parse_base.py"], "/echopype/tests/calibrate/test_cal_params.py": ["/echopype/calibrate/cal_params.py"], "/echopype/tests/calibrate/test_ecs.py": ["/echopype/calibrate/ecs.py"], "/echopype/echodata/__init__.py": ["/echopype/echodata/echodata.py"], "/echopype/tests/convert/test_convert_ek80.py": ["/echopype/__init__.py", "/echopype/testing.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/convert/set_groups_ek80.py": 
["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/convert/set_groups_base.py"], "/echopype/consolidate/api.py": ["/echopype/calibrate/ek80_complex.py", "/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/io.py", "/echopype/utils/prov.py", "/echopype/consolidate/split_beam_angle.py"], "/echopype/convert/__init__.py": ["/echopype/convert/parse_ad2cp.py", "/echopype/convert/parse_azfp.py", "/echopype/convert/parse_base.py", "/echopype/convert/parse_ek60.py", "/echopype/convert/parse_ek80.py", "/echopype/convert/set_groups_ad2cp.py", "/echopype/convert/set_groups_azfp.py", "/echopype/convert/set_groups_ek60.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/utils/prov.py": ["/echopype/utils/log.py"], "/echopype/convert/parse_ek80.py": ["/echopype/convert/parse_base.py"], "/echopype/tests/echodata/test_echodata_simrad.py": ["/echopype/echodata/simrad.py"], "/echopype/tests/utils/test_coding.py": ["/echopype/utils/coding.py"], "/echopype/echodata/convention/conv.py": ["/echopype/echodata/__init__.py"], "/echopype/metrics/__init__.py": ["/echopype/metrics/summary_statistics.py"], "/echopype/tests/calibrate/test_calibrate_ek80.py": ["/echopype/__init__.py"], "/echopype/tests/commongrid/test_nasc.py": ["/echopype/__init__.py", "/echopype/calibrate/__init__.py", "/echopype/commongrid/__init__.py", "/echopype/commongrid/nasc.py", "/echopype/consolidate/__init__.py"], "/echopype/convert/api.py": ["/echopype/core.py", "/echopype/convert/parsed_to_zarr.py", "/echopype/echodata/echodata.py", "/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/utils/prov.py"], "/echopype/convert/set_groups_azfp.py": ["/echopype/utils/coding.py", "/echopype/convert/set_groups_base.py"], "/echopype/tests/clean/test_noise.py": ["/echopype/__init__.py"], "/echopype/convert/parse_base.py": ["/echopype/utils/log.py", "/echopype/convert/utils/ek_raw_io.py"], "/echopype/tests/convert/test_parsed_to_zarr.py": ["/echopype/__init__.py", 
"/echopype/echodata/echodata.py"], "/echopype/echodata/widgets/widgets.py": ["/echopype/echodata/widgets/utils.py"], "/echopype/tests/echodata/test_echodata_structure.py": ["/echopype/echodata/echodata.py", "/echopype/echodata/api.py"], "/echopype/tests/calibrate/test_range_integration.py": ["/echopype/__init__.py"], "/echopype/tests/calibrate/test_env_params_integration.py": ["/echopype/__init__.py"], "/echopype/tests/convert/test_convert_source_target_locs.py": ["/echopype/__init__.py", "/echopype/utils/coding.py"], "/echopype/tests/utils/test_utils_uwa.py": ["/echopype/utils/uwa.py"], "/echopype/utils/io.py": ["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/core.py"], "/echopype/tests/calibrate/test_ek80_complex.py": ["/echopype/calibrate/ek80_complex.py"], "/echopype/convert/set_groups_ek60.py": ["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/convert/set_groups_base.py"], "/echopype/convert/utils/ek_raw_io.py": ["/echopype/utils/log.py"], "/echopype/convert/parse_azfp.py": ["/echopype/utils/log.py", "/echopype/convert/parse_base.py"]}
73,867
OSOceanAcoustics/echopype
refs/heads/main
/echopype/tests/convert/test_convert_source_target_locs.py
"""test_convert_source_target_locs.py This module contain all the various tests for echopype conversion from a raw data to standard compliant zarr or netcdf file(s). **Note that in order to run this test, minio server is required for s3 output tests.** """ import os import fsspec import xarray as xr import pytest from datatree import open_datatree from tempfile import TemporaryDirectory from echopype import open_raw from echopype.utils.coding import DEFAULT_ENCODINGS def _check_file_group(data_file, engine, groups): tree = open_datatree(data_file, engine=engine) for group in groups: ds = tree[f"/{group}"].ds assert isinstance(ds, xr.Dataset) is True def _check_output_files(engine, output_files, storage_options): groups = [ "Provenance", "Environment", "Sonar/Beam_group1", "Sonar", "Vendor_specific", "Platform", ] if isinstance(output_files, list): fs = fsspec.get_mapper(output_files[0], **storage_options).fs for f in output_files: if engine == "zarr": _check_file_group(fs.get_mapper(f), engine, groups) fs.delete(f, recursive=True) else: _check_file_group(f, engine, groups) fs.delete(f) else: fs = fsspec.get_mapper(output_files, **storage_options).fs if engine == "zarr": _check_file_group(fs.get_mapper(output_files), engine, groups) fs.delete(output_files, recursive=True) else: _check_file_group(output_files, engine, groups) fs.delete(output_files) def _create_path_str(test_folder, paths): return str(test_folder.joinpath(*paths).absolute()) @pytest.fixture( params=[ None, "/", "/tmp.zarr", "/tmp.nc", "s3://ooi-raw-data/dump/", "s3://ooi-raw-data/dump/tmp.zarr", "s3://ooi-raw-data/dump/tmp.nc", ], ids=[ "None", "folder_string", "zarr_file_string", "netcdf_file_string", "s3_folder_string", "s3_zarr_file_string", "s3_netcdf_file_string", ], ) def output_save_path(request): return request.param @pytest.fixture(params=["zarr", "netcdf4"]) def export_engine(request): return request.param @pytest.fixture( params=[ [("ncei-wcsd", "Summer2017-D20170615-T190214.raw"), 
"EK60"], [ "s3://data/ek60/ncei-wcsd/Summer2017-D20170615-T190214.raw", "EK60", ], [ [ "http://localhost:8080/data/ek60/ncei-wcsd/Summer2017-D20170615-T190214.raw", "http://localhost:8080/data/ek60/ncei-wcsd/Summer2017-D20170615-T190843.raw", ], "EK60", ], [("D20151202-T020259.raw",), "ES70"], ["s3://data/es70/D20151202-T020259.raw", "ES70"], [ [ "http://localhost:8080/data/es70/D20151202-T020259.raw", ], "ES70", ], [("WBT-D20210620-T012250.raw",), "ES80"], [("WBT-but-internally-marked-as-EK80-D20210710-T204029.raw",), "ES80"], ["s3://data/es80/WBT-D20210620-T012250.raw", "ES80"], [ [ "http://localhost:8080/data/es80/WBT-D20210620-T012250.raw", ], "ES80", ], [("ea640_test.raw",), "EA640"], ["s3://data/ea640/ea640_test.raw", "EA640"], [ [ "http://localhost:8080/data/ea640/ea640_test.raw", ], "EA640", ], [("echopype-test-D20211005-T001135.raw",), "EK80"], [ "http://localhost:8080/data/ek80_new/echopype-test-D20211005-T001135.raw", "EK80", ], ["s3://data/ek80_new/echopype-test-D20211005-T001135.raw", "EK80"], ], ids=[ "ek60_file_path_string", "ek60_s3_file_string", "ek60_multiple_http_file_string", "es70_file_path_string", "es70_s3_file_string", "es70_multiple_http_file_string", "es80_file_path_string_WBT", "es80_file_path_string_WBT_EK80", "es80_s3_file_string", "es80_multiple_http_file_string", "ea640_file_path_string", "ea640_s3_file_string", "ea640_multiple_http_file_string", "ek80_file_path_string", "ek80_http_file_string", "ek80_s3_file_string", ], ) def ek_input_params(request, test_path): path, model = request.param key = model if model == "EK80": key = f"{model}_NEW" if isinstance(path, tuple): path = _create_path_str(test_path[key], path) return [path, model] @pytest.fixture( params=[ ("ooi", "17032923.01A"), "http://localhost:8080/data/azfp/ooi/17032923.01A", ], ids=["file_path_string", "http_file_string"], ) def azfp_input_paths(request, test_path): if isinstance(request.param, tuple): return _create_path_str(test_path["AZFP"], request.param) return 
request.param @pytest.fixture( params=[ ("ooi", "17032922.XML"), "http://localhost:8080/data/azfp/ooi/17032922.XML", ], ids=["xml_file_path_string", "xml_http_file_string"], ) def azfp_xml_paths(request, test_path): if isinstance(request.param, tuple): return _create_path_str(test_path["AZFP"], request.param) return request.param @pytest.mark.parametrize( "sonar_model, raw_file, xml_path", [ ("azfp", ("ooi", "17032923.01A"), ("ooi", "17032922.XML")), ( "ek60", ("DY1801_EK60-D20180211-T164025.raw",), None, ), ( "es70", ("D20151202-T020259.raw",), None, ), ( "es80", ("WBT-D20210620-T012250.raw",), None, ), ( "ea640", ("ea640_test.raw",), None, ), ( "ek80", ("echopype-test-D20211004-T235757.raw",), None, ), ( "ad2cp", ("raw", "076", "rawtest.076.00000.ad2cp"), None, ), ], ids=["azfp", "ek60", "es70", "es80", "ea640", "ek80", "ad2cp"], ) def test_convert_time_encodings(sonar_model, raw_file, xml_path, test_path): path_model = sonar_model.upper() if path_model == "EK80": path_model = path_model + "_NEW" raw_file = str(test_path[path_model].joinpath(*raw_file).absolute()) if xml_path is not None: xml_path = str(test_path[path_model].joinpath(*xml_path).absolute()) ed = open_raw( sonar_model=sonar_model, raw_file=raw_file, xml_path=xml_path ) ed.to_netcdf(overwrite=True) for group, details in ed.group_map.items(): group_path = details['ep_group'] if group_path is None: group_path = 'Top-level' group_ds = ed[group_path] if isinstance(group_ds, xr.Dataset): for var, encoding in DEFAULT_ENCODINGS.items(): if var in group_ds: da = group_ds[var] assert da.encoding == encoding # Combine encoding and attributes since this # is what is shown when using decode_cf=False # without dtype attribute total_attrs = dict(**da.attrs, **da.encoding) total_attrs.pop('dtype') # Read converted file back in file_da = xr.open_dataset( ed.converted_raw_path, group=details['ep_group'], decode_cf=False, )[var] assert file_da.dtype == encoding['dtype'] # Read converted file back in decoded_da = 
xr.open_dataset( ed.converted_raw_path, group=details['ep_group'], )[var] assert da.equals(decoded_da) is True os.unlink(ed.converted_raw_path) def test_convert_ek( ek_input_params, export_engine, output_save_path, minio_bucket, ): common_storage_options = minio_bucket output_storage_options = {} input_paths, sonar_model = ek_input_params ipath = input_paths if isinstance(input_paths, list): ipath = input_paths[0] input_storage_options = ( common_storage_options if ipath.startswith("s3://") else {} ) if output_save_path and output_save_path.startswith("s3://"): output_storage_options = common_storage_options # Only using one file echodata = open_raw( raw_file=ipath, sonar_model=sonar_model, storage_options=input_storage_options, ) if ( export_engine == "netcdf4" and output_save_path is not None and output_save_path.startswith("s3://") ): return if export_engine == "netcdf4": to_file = getattr(echodata, "to_netcdf") elif export_engine == "zarr": to_file = getattr(echodata, "to_zarr") else: return try: if output_save_path is not None and (output_save_path.startswith('/') or output_save_path.startswith('\\')): with TemporaryDirectory() as tmpdir: output_save_path = tmpdir + output_save_path to_file( save_path=output_save_path, overwrite=True, output_storage_options=output_storage_options, ) _check_output_files( export_engine, echodata.converted_raw_path, output_storage_options, ) else: to_file( save_path=output_save_path, overwrite=True, output_storage_options=output_storage_options, ) _check_output_files( export_engine, echodata.converted_raw_path, output_storage_options, ) except Exception as e: if export_engine == 'netcdf4' and ( output_save_path is not None and output_save_path.startswith("s3://") ): assert isinstance(e, ValueError) is True assert str(e) == 'Only local netcdf4 is supported.' 
def test_convert_azfp( azfp_input_paths, azfp_xml_paths, export_engine, output_save_path, minio_bucket, model="AZFP", ): common_storage_options = minio_bucket output_storage_options = {} input_storage_options = ( common_storage_options if azfp_input_paths.startswith("s3://") else {} ) if output_save_path and output_save_path.startswith("s3://"): output_storage_options = common_storage_options echodata = open_raw( raw_file=azfp_input_paths, xml_path=azfp_xml_paths, sonar_model=model, storage_options=input_storage_options, ) assert echodata.xml_path == azfp_xml_paths if ( export_engine == "netcdf4" and output_save_path is not None and output_save_path.startswith("s3://") ): return if export_engine == "netcdf4": to_file = getattr(echodata, "to_netcdf") elif export_engine == "zarr": to_file = getattr(echodata, "to_zarr") else: return try: to_file( save_path=output_save_path, overwrite=True, output_storage_options=output_storage_options, ) _check_output_files( export_engine, echodata.converted_raw_path, output_storage_options ) except Exception as e: if export_engine == 'netcdf4' and ( output_save_path is not None and output_save_path.startswith("s3://") ): assert isinstance(e, ValueError) is True assert str(e) == 'Only local netcdf4 is supported.'
{"/echopype/convert/set_groups_ad2cp.py": ["/echopype/__init__.py", "/echopype/utils/coding.py", "/echopype/convert/parse_ad2cp.py", "/echopype/convert/set_groups_base.py"], "/echopype/tests/utils/test_source_filenames.py": ["/echopype/utils/prov.py"], "/echopype/echodata/convention/__init__.py": ["/echopype/echodata/convention/conv.py"], "/echopype/consolidate/split_beam_angle.py": ["/echopype/calibrate/ek80_complex.py"], "/echopype/calibrate/ecs.py": ["/echopype/utils/log.py"], "/echopype/tests/calibrate/test_ecs_integration.py": ["/echopype/__init__.py", "/echopype/calibrate/ecs.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/cal_params.py"], "/echopype/calibrate/api.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/calibrate/calibrate_azfp.py", "/echopype/calibrate/calibrate_ek.py"], "/echopype/convert/parse_ad2cp.py": ["/echopype/convert/parse_base.py"], "/echopype/echodata/combine.py": ["/echopype/utils/io.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/echodata/echodata.py"], "/echopype/tests/consolidate/test_consolidate_integration.py": ["/echopype/__init__.py"], "/echopype/echodata/api.py": ["/echopype/echodata/echodata.py", "/echopype/core.py"], "/echopype/tests/utils/test_utils_io.py": ["/echopype/utils/io.py"], "/echopype/echodata/sensor_ep_version_mapping/ep_version_mapper.py": ["/echopype/echodata/sensor_ep_version_mapping/v05x_to_v06x.py"], "/echopype/tests/mask/test_mask.py": ["/echopype/__init__.py", "/echopype/mask/__init__.py", "/echopype/mask/api.py"], "/echopype/tests/conftest.py": ["/echopype/testing.py"], "/echopype/tests/echodata/utils.py": ["/echopype/convert/set_groups_base.py", "/echopype/echodata/echodata.py"], "/echopype/tests/commongrid/test_mvbs.py": ["/echopype/__init__.py", "/echopype/commongrid/mvbs.py"], "/echopype/tests/calibrate/test_cal_params_integration.py": ["/echopype/__init__.py"], 
"/echopype/tests/test_core.py": ["/echopype/core.py"], "/echopype/calibrate/calibrate_azfp.py": ["/echopype/echodata/__init__.py", "/echopype/calibrate/cal_params.py", "/echopype/calibrate/calibrate_ek.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/range.py"], "/echopype/echodata/widgets/utils.py": ["/echopype/echodata/convention/utils.py"], "/echopype/echodata/simrad.py": ["/echopype/echodata/echodata.py"], "/echopype/tests/convert/test_convert_ek60.py": ["/echopype/__init__.py"], "/echopype/convert/parsed_to_zarr_ek60.py": ["/echopype/convert/parsed_to_zarr.py"], "/echopype/mask/api.py": ["/echopype/utils/io.py", "/echopype/utils/prov.py"], "/echopype/qc/__init__.py": ["/echopype/qc/api.py"], "/echopype/tests/utils/test_processinglevels_integration.py": ["/echopype/__init__.py"], "/echopype/tests/metrics/test_metrics_summary_statistics.py": ["/echopype/metrics/summary_statistics.py"], "/echopype/__init__.py": ["/echopype/convert/api.py", "/echopype/echodata/api.py", "/echopype/echodata/combine.py", "/echopype/utils/io.py", "/echopype/utils/log.py"], "/.ci_helpers/check-version.py": ["/echopype/__init__.py"], "/echopype/calibrate/ek80_complex.py": ["/echopype/convert/set_groups_ek80.py"], "/echopype/consolidate/__init__.py": ["/echopype/consolidate/api.py"], "/echopype/commongrid/api.py": ["/echopype/utils/prov.py", "/echopype/commongrid/mvbs.py"], "/echopype/tests/echodata/test_echodata_combine.py": ["/echopype/__init__.py", "/echopype/utils/coding.py", "/echopype/echodata/__init__.py", "/echopype/echodata/combine.py"], "/echopype/tests/utils/test_utils_log.py": ["/echopype/__init__.py"], "/echopype/mask/__init__.py": ["/echopype/mask/api.py"], "/echopype/tests/convert/test_convert_ad2cp.py": ["/echopype/__init__.py", "/echopype/testing.py"], "/echopype/convert/parsed_to_zarr_ek80.py": ["/echopype/convert/parsed_to_zarr_ek60.py"], "/echopype/tests/visualize/test_plot.py": ["/echopype/__init__.py", "/echopype/visualize/__init__.py", 
"/echopype/testing.py", "/echopype/calibrate/calibrate_ek.py", "/echopype/echodata/__init__.py", "/echopype/visualize/api.py"], "/echopype/calibrate/env_params.py": ["/echopype/echodata/__init__.py", "/echopype/calibrate/cal_params.py"], "/echopype/convert/utils/ek_raw_parsers.py": ["/echopype/utils/log.py", "/echopype/convert/utils/ek_date_conversion.py"], "/echopype/tests/convert/test_convert_azfp.py": ["/echopype/__init__.py"], "/echopype/tests/calibrate/test_calibrate.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params_old.py"], "/echopype/calibrate/range.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/calibrate/env_params.py"], "/echopype/tests/calibrate/test_env_params.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params.py"], "/echopype/convert/set_groups_base.py": ["/echopype/echodata/convention/__init__.py", "/echopype/utils/coding.py", "/echopype/utils/prov.py"], "/echopype/clean/api.py": ["/echopype/utils/prov.py", "/echopype/clean/noise_est.py"], "/echopype/calibrate/calibrate_ek.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/log.py", "/echopype/calibrate/cal_params.py", "/echopype/calibrate/calibrate_base.py", "/echopype/calibrate/ecs.py", "/echopype/calibrate/ek80_complex.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/range.py"], "/echopype/calibrate/calibrate_base.py": ["/echopype/echodata/__init__.py", "/echopype/utils/log.py", "/echopype/calibrate/ecs.py"], "/echopype/echodata/echodata.py": ["/echopype/utils/coding.py", "/echopype/utils/io.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/echodata/convention/__init__.py", "/echopype/echodata/widgets/utils.py", "/echopype/echodata/widgets/widgets.py", "/echopype/core.py", "/echopype/convert/api.py"], "/echopype/visualize/api.py": ["/echopype/visualize/plot.py", "/echopype/echodata/__init__.py", "/echopype/calibrate/calibrate_ek.py", 
"/echopype/calibrate/calibrate_azfp.py", "/echopype/utils/log.py"], "/echopype/echodata/sensor_ep_version_mapping/v05x_to_v06x.py": ["/echopype/core.py", "/echopype/utils/log.py", "/echopype/echodata/convention/__init__.py"], "/echopype/clean/__init__.py": ["/echopype/clean/api.py"], "/echopype/visualize/plot.py": ["/echopype/visualize/cm.py", "/echopype/utils/log.py"], "/echopype/commongrid/__init__.py": ["/echopype/commongrid/api.py"], "/echopype/convert/parsed_to_zarr.py": ["/echopype/utils/io.py"], "/echopype/calibrate/__init__.py": ["/echopype/calibrate/api.py"], "/echopype/echodata/convention/utils.py": ["/echopype/echodata/convention/__init__.py"], "/echopype/tests/echodata/test_echodata.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params_old.py", "/echopype/echodata/__init__.py", "/echopype/calibrate/calibrate_ek.py"], "/echopype/qc/api.py": ["/echopype/echodata/__init__.py", "/echopype/utils/log.py"], "/echopype/visualize/__init__.py": ["/echopype/visualize/api.py"], "/echopype/tests/qc/test_qc.py": ["/echopype/qc/__init__.py", "/echopype/qc/api.py"], "/echopype/core.py": ["/echopype/convert/parse_ad2cp.py", "/echopype/convert/parse_azfp.py", "/echopype/convert/parse_ek60.py", "/echopype/convert/parse_ek80.py", "/echopype/convert/parsed_to_zarr_ek60.py", "/echopype/convert/parsed_to_zarr_ek80.py", "/echopype/convert/set_groups_ad2cp.py", "/echopype/convert/set_groups_azfp.py", "/echopype/convert/set_groups_ek60.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/convert/parse_ek60.py": ["/echopype/convert/parse_base.py"], "/echopype/tests/calibrate/test_cal_params.py": ["/echopype/calibrate/cal_params.py"], "/echopype/tests/calibrate/test_ecs.py": ["/echopype/calibrate/ecs.py"], "/echopype/echodata/__init__.py": ["/echopype/echodata/echodata.py"], "/echopype/tests/convert/test_convert_ek80.py": ["/echopype/__init__.py", "/echopype/testing.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/convert/set_groups_ek80.py": 
["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/convert/set_groups_base.py"], "/echopype/consolidate/api.py": ["/echopype/calibrate/ek80_complex.py", "/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/io.py", "/echopype/utils/prov.py", "/echopype/consolidate/split_beam_angle.py"], "/echopype/convert/__init__.py": ["/echopype/convert/parse_ad2cp.py", "/echopype/convert/parse_azfp.py", "/echopype/convert/parse_base.py", "/echopype/convert/parse_ek60.py", "/echopype/convert/parse_ek80.py", "/echopype/convert/set_groups_ad2cp.py", "/echopype/convert/set_groups_azfp.py", "/echopype/convert/set_groups_ek60.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/utils/prov.py": ["/echopype/utils/log.py"], "/echopype/convert/parse_ek80.py": ["/echopype/convert/parse_base.py"], "/echopype/tests/echodata/test_echodata_simrad.py": ["/echopype/echodata/simrad.py"], "/echopype/tests/utils/test_coding.py": ["/echopype/utils/coding.py"], "/echopype/echodata/convention/conv.py": ["/echopype/echodata/__init__.py"], "/echopype/metrics/__init__.py": ["/echopype/metrics/summary_statistics.py"], "/echopype/tests/calibrate/test_calibrate_ek80.py": ["/echopype/__init__.py"], "/echopype/tests/commongrid/test_nasc.py": ["/echopype/__init__.py", "/echopype/calibrate/__init__.py", "/echopype/commongrid/__init__.py", "/echopype/commongrid/nasc.py", "/echopype/consolidate/__init__.py"], "/echopype/convert/api.py": ["/echopype/core.py", "/echopype/convert/parsed_to_zarr.py", "/echopype/echodata/echodata.py", "/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/utils/prov.py"], "/echopype/convert/set_groups_azfp.py": ["/echopype/utils/coding.py", "/echopype/convert/set_groups_base.py"], "/echopype/tests/clean/test_noise.py": ["/echopype/__init__.py"], "/echopype/convert/parse_base.py": ["/echopype/utils/log.py", "/echopype/convert/utils/ek_raw_io.py"], "/echopype/tests/convert/test_parsed_to_zarr.py": ["/echopype/__init__.py", 
"/echopype/echodata/echodata.py"], "/echopype/echodata/widgets/widgets.py": ["/echopype/echodata/widgets/utils.py"], "/echopype/tests/echodata/test_echodata_structure.py": ["/echopype/echodata/echodata.py", "/echopype/echodata/api.py"], "/echopype/tests/calibrate/test_range_integration.py": ["/echopype/__init__.py"], "/echopype/tests/calibrate/test_env_params_integration.py": ["/echopype/__init__.py"], "/echopype/tests/convert/test_convert_source_target_locs.py": ["/echopype/__init__.py", "/echopype/utils/coding.py"], "/echopype/tests/utils/test_utils_uwa.py": ["/echopype/utils/uwa.py"], "/echopype/utils/io.py": ["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/core.py"], "/echopype/tests/calibrate/test_ek80_complex.py": ["/echopype/calibrate/ek80_complex.py"], "/echopype/convert/set_groups_ek60.py": ["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/convert/set_groups_base.py"], "/echopype/convert/utils/ek_raw_io.py": ["/echopype/utils/log.py"], "/echopype/convert/parse_azfp.py": ["/echopype/utils/log.py", "/echopype/convert/parse_base.py"]}
73,868
OSOceanAcoustics/echopype
refs/heads/main
/echopype/convert/utils/ek_date_conversion.py
""" Code originally developed for pyEcholab (https://github.com/CI-CMG/pyEcholab) by Rick Towler <rick.towler@noaa.gov> at NOAA AFSC. Contains functions to convert date information. TODO: merge necessary function into ek60.py or group everything into a class TODO: fix docstring """ import datetime from pytz import utc as pytz_utc # NT epoch is Jan 1st 1601 UTC_NT_EPOCH = datetime.datetime(1601, 1, 1, 0, 0, 0, tzinfo=pytz_utc) # Unix epoch is Jan 1st 1970 UTC_UNIX_EPOCH = datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=pytz_utc) EPOCH_DELTA_SECONDS = (UTC_UNIX_EPOCH - UTC_NT_EPOCH).total_seconds() __all__ = ["nt_to_unix", "unix_to_nt"] def nt_to_unix(nt_timestamp_tuple, return_datetime=True): """ :param nt_timestamp_tuple: Tuple of two longs representing the NT date :type nt_timestamp_tuple: (long, long) :param return_datetime: Return a datetime object instead of float :type return_datetime: bool Returns a datetime.datetime object w/ UTC timezone calculated from the nt time tuple lowDateTime, highDateTime = nt_timestamp_tuple The timestamp is a 64bit count of 100ns intervals since the NT epoch broken into two 32bit longs, least significant first: >>> dt = nt_to_unix((19496896L, 30196149L)) >>> match_dt = datetime.datetime(2011, 12, 23, 20, 54, 3, 964000, pytz_utc) >>> assert abs(dt - match_dt) <= dt.resolution """ lowDateTime, highDateTime = nt_timestamp_tuple sec_past_nt_epoch = ((highDateTime << 32) + lowDateTime) * 1.0e-7 if return_datetime: return UTC_NT_EPOCH + datetime.timedelta(seconds=sec_past_nt_epoch) else: sec_past_unix_epoch = sec_past_nt_epoch - EPOCH_DELTA_SECONDS return sec_past_unix_epoch def unix_to_nt(unix_timestamp): """ Given a date, return the 2-element tuple used for timekeeping with SIMRAD echosounders #Simple conversion >>> dt = datetime.datetime(2011, 12, 23, 20, 54, 3, 964000, pytz_utc) >>> assert (19496896L, 30196149L) == unix_to_nt(dt) #Converting back and forth between the two standards: >>> orig_dt = datetime.datetime.now(tz=pytz_utc) >>> 
nt_tuple = unix_to_nt(orig_dt) #converting back may not yield the exact original date, #but will be within the datetime's precision >>> back_to_dt = nt_to_unix(nt_tuple) >>> d_mu_seconds = abs(orig_dt - back_to_dt).microseconds >>> mu_sec_resolution = orig_dt.resolution.microseconds >>> assert d_mu_seconds <= mu_sec_resolution """ if isinstance(unix_timestamp, datetime.datetime): if unix_timestamp.tzinfo is None: unix_datetime = pytz_utc.localize(unix_timestamp) elif unix_timestamp.tzinfo == pytz_utc: unix_datetime = unix_timestamp else: unix_datetime = pytz_utc.normalize(unix_timestamp.astimezone(pytz_utc)) else: unix_datetime = unix_to_datetime(unix_timestamp) sec_past_nt_epoch = (unix_datetime - UTC_NT_EPOCH).total_seconds() onehundred_ns_intervals = int(sec_past_nt_epoch * 1e7) lowDateTime = onehundred_ns_intervals & 0xFFFFFFFF highDateTime = onehundred_ns_intervals >> 32 return lowDateTime, highDateTime def unix_to_datetime(unix_timestamp): """ :param unix_timestamp: Number of seconds since unix epoch (1/1/1970) :type unix_timestamp: float :param tz: timezone to use for conversion (default None = UTC) :type tz: None or tzinfo object (see datetime docs) :returns: datetime object :raises: ValueError if unix_timestamp is not of type float or datetime Returns a datetime object from a unix timestamp. 
Simple wrapper for :func:`datetime.datetime.fromtimestamp` >>> from pytz import utc >>> from datetime import datetime >>> epoch = unix_to_datetime(0.0, tz=utc) >>> assert epoch == datetime(1970, 1, 1, tzinfo=utc) """ if isinstance(unix_timestamp, datetime.datetime): if unix_timestamp.tzinfo is None: unix_datetime = pytz_utc.localize(unix_timestamp) elif unix_timestamp.tzinfo == pytz_utc: unix_datetime = unix_timestamp else: unix_datetime = pytz_utc.normalize(unix_timestamp.astimezone(pytz_utc)) elif isinstance(unix_timestamp, float): unix_datetime = pytz_utc.localize(datetime.datetime.fromtimestamp(unix_timestamp)) else: errstr = "Looking for a timestamp of type datetime.datetime or # of sec past unix epoch.\n" errstr += "Supplied timestamp '%s' of type %s." % ( str(unix_timestamp), type(unix_timestamp), ) raise ValueError(errstr) return unix_datetime def datetime_to_unix(datetime_obj): """ :param datetime_obj: datetime object to convert :type datetime_obj: :class:`datetime.datetime` :param tz: Timezone to use for converted time -- if None, uses timezone information contained within datetime_obj :type tz: :class:datetime.tzinfo >>> from pytz import utc >>> from datetime import datetime >>> epoch = datetime(1970, 1, 1, tzinfo=utc) >>> assert datetime_to_unix(epoch) == 0 """ timestamp = (datetime_obj - UTC_UNIX_EPOCH).total_seconds() return timestamp
{"/echopype/convert/set_groups_ad2cp.py": ["/echopype/__init__.py", "/echopype/utils/coding.py", "/echopype/convert/parse_ad2cp.py", "/echopype/convert/set_groups_base.py"], "/echopype/tests/utils/test_source_filenames.py": ["/echopype/utils/prov.py"], "/echopype/echodata/convention/__init__.py": ["/echopype/echodata/convention/conv.py"], "/echopype/consolidate/split_beam_angle.py": ["/echopype/calibrate/ek80_complex.py"], "/echopype/calibrate/ecs.py": ["/echopype/utils/log.py"], "/echopype/tests/calibrate/test_ecs_integration.py": ["/echopype/__init__.py", "/echopype/calibrate/ecs.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/cal_params.py"], "/echopype/calibrate/api.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/calibrate/calibrate_azfp.py", "/echopype/calibrate/calibrate_ek.py"], "/echopype/convert/parse_ad2cp.py": ["/echopype/convert/parse_base.py"], "/echopype/echodata/combine.py": ["/echopype/utils/io.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/echodata/echodata.py"], "/echopype/tests/consolidate/test_consolidate_integration.py": ["/echopype/__init__.py"], "/echopype/echodata/api.py": ["/echopype/echodata/echodata.py", "/echopype/core.py"], "/echopype/tests/utils/test_utils_io.py": ["/echopype/utils/io.py"], "/echopype/echodata/sensor_ep_version_mapping/ep_version_mapper.py": ["/echopype/echodata/sensor_ep_version_mapping/v05x_to_v06x.py"], "/echopype/tests/mask/test_mask.py": ["/echopype/__init__.py", "/echopype/mask/__init__.py", "/echopype/mask/api.py"], "/echopype/tests/conftest.py": ["/echopype/testing.py"], "/echopype/tests/echodata/utils.py": ["/echopype/convert/set_groups_base.py", "/echopype/echodata/echodata.py"], "/echopype/tests/commongrid/test_mvbs.py": ["/echopype/__init__.py", "/echopype/commongrid/mvbs.py"], "/echopype/tests/calibrate/test_cal_params_integration.py": ["/echopype/__init__.py"], 
"/echopype/tests/test_core.py": ["/echopype/core.py"], "/echopype/calibrate/calibrate_azfp.py": ["/echopype/echodata/__init__.py", "/echopype/calibrate/cal_params.py", "/echopype/calibrate/calibrate_ek.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/range.py"], "/echopype/echodata/widgets/utils.py": ["/echopype/echodata/convention/utils.py"], "/echopype/echodata/simrad.py": ["/echopype/echodata/echodata.py"], "/echopype/tests/convert/test_convert_ek60.py": ["/echopype/__init__.py"], "/echopype/convert/parsed_to_zarr_ek60.py": ["/echopype/convert/parsed_to_zarr.py"], "/echopype/mask/api.py": ["/echopype/utils/io.py", "/echopype/utils/prov.py"], "/echopype/qc/__init__.py": ["/echopype/qc/api.py"], "/echopype/tests/utils/test_processinglevels_integration.py": ["/echopype/__init__.py"], "/echopype/tests/metrics/test_metrics_summary_statistics.py": ["/echopype/metrics/summary_statistics.py"], "/echopype/__init__.py": ["/echopype/convert/api.py", "/echopype/echodata/api.py", "/echopype/echodata/combine.py", "/echopype/utils/io.py", "/echopype/utils/log.py"], "/.ci_helpers/check-version.py": ["/echopype/__init__.py"], "/echopype/calibrate/ek80_complex.py": ["/echopype/convert/set_groups_ek80.py"], "/echopype/consolidate/__init__.py": ["/echopype/consolidate/api.py"], "/echopype/commongrid/api.py": ["/echopype/utils/prov.py", "/echopype/commongrid/mvbs.py"], "/echopype/tests/echodata/test_echodata_combine.py": ["/echopype/__init__.py", "/echopype/utils/coding.py", "/echopype/echodata/__init__.py", "/echopype/echodata/combine.py"], "/echopype/tests/utils/test_utils_log.py": ["/echopype/__init__.py"], "/echopype/mask/__init__.py": ["/echopype/mask/api.py"], "/echopype/tests/convert/test_convert_ad2cp.py": ["/echopype/__init__.py", "/echopype/testing.py"], "/echopype/convert/parsed_to_zarr_ek80.py": ["/echopype/convert/parsed_to_zarr_ek60.py"], "/echopype/tests/visualize/test_plot.py": ["/echopype/__init__.py", "/echopype/visualize/__init__.py", 
"/echopype/testing.py", "/echopype/calibrate/calibrate_ek.py", "/echopype/echodata/__init__.py", "/echopype/visualize/api.py"], "/echopype/calibrate/env_params.py": ["/echopype/echodata/__init__.py", "/echopype/calibrate/cal_params.py"], "/echopype/convert/utils/ek_raw_parsers.py": ["/echopype/utils/log.py", "/echopype/convert/utils/ek_date_conversion.py"], "/echopype/tests/convert/test_convert_azfp.py": ["/echopype/__init__.py"], "/echopype/tests/calibrate/test_calibrate.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params_old.py"], "/echopype/calibrate/range.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/calibrate/env_params.py"], "/echopype/tests/calibrate/test_env_params.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params.py"], "/echopype/convert/set_groups_base.py": ["/echopype/echodata/convention/__init__.py", "/echopype/utils/coding.py", "/echopype/utils/prov.py"], "/echopype/clean/api.py": ["/echopype/utils/prov.py", "/echopype/clean/noise_est.py"], "/echopype/calibrate/calibrate_ek.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/log.py", "/echopype/calibrate/cal_params.py", "/echopype/calibrate/calibrate_base.py", "/echopype/calibrate/ecs.py", "/echopype/calibrate/ek80_complex.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/range.py"], "/echopype/calibrate/calibrate_base.py": ["/echopype/echodata/__init__.py", "/echopype/utils/log.py", "/echopype/calibrate/ecs.py"], "/echopype/echodata/echodata.py": ["/echopype/utils/coding.py", "/echopype/utils/io.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/echodata/convention/__init__.py", "/echopype/echodata/widgets/utils.py", "/echopype/echodata/widgets/widgets.py", "/echopype/core.py", "/echopype/convert/api.py"], "/echopype/visualize/api.py": ["/echopype/visualize/plot.py", "/echopype/echodata/__init__.py", "/echopype/calibrate/calibrate_ek.py", 
"/echopype/calibrate/calibrate_azfp.py", "/echopype/utils/log.py"], "/echopype/echodata/sensor_ep_version_mapping/v05x_to_v06x.py": ["/echopype/core.py", "/echopype/utils/log.py", "/echopype/echodata/convention/__init__.py"], "/echopype/clean/__init__.py": ["/echopype/clean/api.py"], "/echopype/visualize/plot.py": ["/echopype/visualize/cm.py", "/echopype/utils/log.py"], "/echopype/commongrid/__init__.py": ["/echopype/commongrid/api.py"], "/echopype/convert/parsed_to_zarr.py": ["/echopype/utils/io.py"], "/echopype/calibrate/__init__.py": ["/echopype/calibrate/api.py"], "/echopype/echodata/convention/utils.py": ["/echopype/echodata/convention/__init__.py"], "/echopype/tests/echodata/test_echodata.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params_old.py", "/echopype/echodata/__init__.py", "/echopype/calibrate/calibrate_ek.py"], "/echopype/qc/api.py": ["/echopype/echodata/__init__.py", "/echopype/utils/log.py"], "/echopype/visualize/__init__.py": ["/echopype/visualize/api.py"], "/echopype/tests/qc/test_qc.py": ["/echopype/qc/__init__.py", "/echopype/qc/api.py"], "/echopype/core.py": ["/echopype/convert/parse_ad2cp.py", "/echopype/convert/parse_azfp.py", "/echopype/convert/parse_ek60.py", "/echopype/convert/parse_ek80.py", "/echopype/convert/parsed_to_zarr_ek60.py", "/echopype/convert/parsed_to_zarr_ek80.py", "/echopype/convert/set_groups_ad2cp.py", "/echopype/convert/set_groups_azfp.py", "/echopype/convert/set_groups_ek60.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/convert/parse_ek60.py": ["/echopype/convert/parse_base.py"], "/echopype/tests/calibrate/test_cal_params.py": ["/echopype/calibrate/cal_params.py"], "/echopype/tests/calibrate/test_ecs.py": ["/echopype/calibrate/ecs.py"], "/echopype/echodata/__init__.py": ["/echopype/echodata/echodata.py"], "/echopype/tests/convert/test_convert_ek80.py": ["/echopype/__init__.py", "/echopype/testing.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/convert/set_groups_ek80.py": 
["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/convert/set_groups_base.py"], "/echopype/consolidate/api.py": ["/echopype/calibrate/ek80_complex.py", "/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/io.py", "/echopype/utils/prov.py", "/echopype/consolidate/split_beam_angle.py"], "/echopype/convert/__init__.py": ["/echopype/convert/parse_ad2cp.py", "/echopype/convert/parse_azfp.py", "/echopype/convert/parse_base.py", "/echopype/convert/parse_ek60.py", "/echopype/convert/parse_ek80.py", "/echopype/convert/set_groups_ad2cp.py", "/echopype/convert/set_groups_azfp.py", "/echopype/convert/set_groups_ek60.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/utils/prov.py": ["/echopype/utils/log.py"], "/echopype/convert/parse_ek80.py": ["/echopype/convert/parse_base.py"], "/echopype/tests/echodata/test_echodata_simrad.py": ["/echopype/echodata/simrad.py"], "/echopype/tests/utils/test_coding.py": ["/echopype/utils/coding.py"], "/echopype/echodata/convention/conv.py": ["/echopype/echodata/__init__.py"], "/echopype/metrics/__init__.py": ["/echopype/metrics/summary_statistics.py"], "/echopype/tests/calibrate/test_calibrate_ek80.py": ["/echopype/__init__.py"], "/echopype/tests/commongrid/test_nasc.py": ["/echopype/__init__.py", "/echopype/calibrate/__init__.py", "/echopype/commongrid/__init__.py", "/echopype/commongrid/nasc.py", "/echopype/consolidate/__init__.py"], "/echopype/convert/api.py": ["/echopype/core.py", "/echopype/convert/parsed_to_zarr.py", "/echopype/echodata/echodata.py", "/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/utils/prov.py"], "/echopype/convert/set_groups_azfp.py": ["/echopype/utils/coding.py", "/echopype/convert/set_groups_base.py"], "/echopype/tests/clean/test_noise.py": ["/echopype/__init__.py"], "/echopype/convert/parse_base.py": ["/echopype/utils/log.py", "/echopype/convert/utils/ek_raw_io.py"], "/echopype/tests/convert/test_parsed_to_zarr.py": ["/echopype/__init__.py", 
"/echopype/echodata/echodata.py"], "/echopype/echodata/widgets/widgets.py": ["/echopype/echodata/widgets/utils.py"], "/echopype/tests/echodata/test_echodata_structure.py": ["/echopype/echodata/echodata.py", "/echopype/echodata/api.py"], "/echopype/tests/calibrate/test_range_integration.py": ["/echopype/__init__.py"], "/echopype/tests/calibrate/test_env_params_integration.py": ["/echopype/__init__.py"], "/echopype/tests/convert/test_convert_source_target_locs.py": ["/echopype/__init__.py", "/echopype/utils/coding.py"], "/echopype/tests/utils/test_utils_uwa.py": ["/echopype/utils/uwa.py"], "/echopype/utils/io.py": ["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/core.py"], "/echopype/tests/calibrate/test_ek80_complex.py": ["/echopype/calibrate/ek80_complex.py"], "/echopype/convert/set_groups_ek60.py": ["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/convert/set_groups_base.py"], "/echopype/convert/utils/ek_raw_io.py": ["/echopype/utils/log.py"], "/echopype/convert/parse_azfp.py": ["/echopype/utils/log.py", "/echopype/convert/parse_base.py"]}
73,869
OSOceanAcoustics/echopype
refs/heads/main
/echopype/tests/utils/test_utils_uwa.py
import pytest import numpy as np from echopype.utils.uwa import calc_absorption, calc_sound_speed # Tolerance used here are set empirically # so the test captures a sort of current status @pytest.mark.parametrize( "frequency, temperature, salinity, pressure, pH, tolerance, tolerance_AZFP", [ (18e3, 27, 35, 10, 8, 2.11e-5, 2.3e-4), (18e3, 27, 35, 100, 8, 3e-5, 2.2e-4), (38e3, 27, 35, 10, 8, 1.8e-4, 8.5e-4), (38e3, 10, 35, 10, 8, 2.1e-4, 2.4e-3), (120e3, 27, 35, 10, 8, 3e-5, 7.4e-3), (200e3, 27, 35, 10, 8, 3.1e-3, 0.02), (455e3, 20, 35, 10, 8, 7.4e-3, 2.1e-2), (1e6, 10, 35, 10, 8, 2.49e-2, 1.4e-2), ], ) def test_absorption(frequency, temperature, salinity, pressure, pH, tolerance, tolerance_AZFP): abs_dB_m = dict() for fm in ["AM", "FG", "AZFP"]: abs_dB_m[fm] = calc_absorption( frequency=frequency, temperature=temperature, salinity=salinity, pressure=pressure, pH=pH, formula_source=fm, ) assert np.abs(abs_dB_m["AM"] - abs_dB_m["FG"]) < tolerance # AZFP values are an order of magnitude larger than the other 2 assert np.all( np.abs( [abs_dB_m["AM"] - abs_dB_m["AZFP"], abs_dB_m["FG"] - abs_dB_m["AZFP"]] ) < tolerance_AZFP ) # These tests are just to make sure the code runs # and the difference between formula is not too dramatic, # instead of trying to verify the actual numbers # as we don't know the source of the AZFP formula. @pytest.mark.parametrize( "temperature, salinity, pressure, tolerance", [ (27, 35, 10, 0.07), (27, 35, 100, 0.07), (5, 35, 3500, 0.5), ], ) def test_sound_speed(temperature, salinity, pressure, tolerance): c = dict() for fm in ["Mackenzie", "AZFP"]: c[fm] = calc_sound_speed( temperature=temperature, salinity=salinity, pressure=pressure, formula_source=fm ) assert np.abs(c["Mackenzie"] - c["AZFP"]) < tolerance
{"/echopype/convert/set_groups_ad2cp.py": ["/echopype/__init__.py", "/echopype/utils/coding.py", "/echopype/convert/parse_ad2cp.py", "/echopype/convert/set_groups_base.py"], "/echopype/tests/utils/test_source_filenames.py": ["/echopype/utils/prov.py"], "/echopype/echodata/convention/__init__.py": ["/echopype/echodata/convention/conv.py"], "/echopype/consolidate/split_beam_angle.py": ["/echopype/calibrate/ek80_complex.py"], "/echopype/calibrate/ecs.py": ["/echopype/utils/log.py"], "/echopype/tests/calibrate/test_ecs_integration.py": ["/echopype/__init__.py", "/echopype/calibrate/ecs.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/cal_params.py"], "/echopype/calibrate/api.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/calibrate/calibrate_azfp.py", "/echopype/calibrate/calibrate_ek.py"], "/echopype/convert/parse_ad2cp.py": ["/echopype/convert/parse_base.py"], "/echopype/echodata/combine.py": ["/echopype/utils/io.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/echodata/echodata.py"], "/echopype/tests/consolidate/test_consolidate_integration.py": ["/echopype/__init__.py"], "/echopype/echodata/api.py": ["/echopype/echodata/echodata.py", "/echopype/core.py"], "/echopype/tests/utils/test_utils_io.py": ["/echopype/utils/io.py"], "/echopype/echodata/sensor_ep_version_mapping/ep_version_mapper.py": ["/echopype/echodata/sensor_ep_version_mapping/v05x_to_v06x.py"], "/echopype/tests/mask/test_mask.py": ["/echopype/__init__.py", "/echopype/mask/__init__.py", "/echopype/mask/api.py"], "/echopype/tests/conftest.py": ["/echopype/testing.py"], "/echopype/tests/echodata/utils.py": ["/echopype/convert/set_groups_base.py", "/echopype/echodata/echodata.py"], "/echopype/tests/commongrid/test_mvbs.py": ["/echopype/__init__.py", "/echopype/commongrid/mvbs.py"], "/echopype/tests/calibrate/test_cal_params_integration.py": ["/echopype/__init__.py"], 
"/echopype/tests/test_core.py": ["/echopype/core.py"], "/echopype/calibrate/calibrate_azfp.py": ["/echopype/echodata/__init__.py", "/echopype/calibrate/cal_params.py", "/echopype/calibrate/calibrate_ek.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/range.py"], "/echopype/echodata/widgets/utils.py": ["/echopype/echodata/convention/utils.py"], "/echopype/echodata/simrad.py": ["/echopype/echodata/echodata.py"], "/echopype/tests/convert/test_convert_ek60.py": ["/echopype/__init__.py"], "/echopype/convert/parsed_to_zarr_ek60.py": ["/echopype/convert/parsed_to_zarr.py"], "/echopype/mask/api.py": ["/echopype/utils/io.py", "/echopype/utils/prov.py"], "/echopype/qc/__init__.py": ["/echopype/qc/api.py"], "/echopype/tests/utils/test_processinglevels_integration.py": ["/echopype/__init__.py"], "/echopype/tests/metrics/test_metrics_summary_statistics.py": ["/echopype/metrics/summary_statistics.py"], "/echopype/__init__.py": ["/echopype/convert/api.py", "/echopype/echodata/api.py", "/echopype/echodata/combine.py", "/echopype/utils/io.py", "/echopype/utils/log.py"], "/.ci_helpers/check-version.py": ["/echopype/__init__.py"], "/echopype/calibrate/ek80_complex.py": ["/echopype/convert/set_groups_ek80.py"], "/echopype/consolidate/__init__.py": ["/echopype/consolidate/api.py"], "/echopype/commongrid/api.py": ["/echopype/utils/prov.py", "/echopype/commongrid/mvbs.py"], "/echopype/tests/echodata/test_echodata_combine.py": ["/echopype/__init__.py", "/echopype/utils/coding.py", "/echopype/echodata/__init__.py", "/echopype/echodata/combine.py"], "/echopype/tests/utils/test_utils_log.py": ["/echopype/__init__.py"], "/echopype/mask/__init__.py": ["/echopype/mask/api.py"], "/echopype/tests/convert/test_convert_ad2cp.py": ["/echopype/__init__.py", "/echopype/testing.py"], "/echopype/convert/parsed_to_zarr_ek80.py": ["/echopype/convert/parsed_to_zarr_ek60.py"], "/echopype/tests/visualize/test_plot.py": ["/echopype/__init__.py", "/echopype/visualize/__init__.py", 
"/echopype/testing.py", "/echopype/calibrate/calibrate_ek.py", "/echopype/echodata/__init__.py", "/echopype/visualize/api.py"], "/echopype/calibrate/env_params.py": ["/echopype/echodata/__init__.py", "/echopype/calibrate/cal_params.py"], "/echopype/convert/utils/ek_raw_parsers.py": ["/echopype/utils/log.py", "/echopype/convert/utils/ek_date_conversion.py"], "/echopype/tests/convert/test_convert_azfp.py": ["/echopype/__init__.py"], "/echopype/tests/calibrate/test_calibrate.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params_old.py"], "/echopype/calibrate/range.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/calibrate/env_params.py"], "/echopype/tests/calibrate/test_env_params.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params.py"], "/echopype/convert/set_groups_base.py": ["/echopype/echodata/convention/__init__.py", "/echopype/utils/coding.py", "/echopype/utils/prov.py"], "/echopype/clean/api.py": ["/echopype/utils/prov.py", "/echopype/clean/noise_est.py"], "/echopype/calibrate/calibrate_ek.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/log.py", "/echopype/calibrate/cal_params.py", "/echopype/calibrate/calibrate_base.py", "/echopype/calibrate/ecs.py", "/echopype/calibrate/ek80_complex.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/range.py"], "/echopype/calibrate/calibrate_base.py": ["/echopype/echodata/__init__.py", "/echopype/utils/log.py", "/echopype/calibrate/ecs.py"], "/echopype/echodata/echodata.py": ["/echopype/utils/coding.py", "/echopype/utils/io.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/echodata/convention/__init__.py", "/echopype/echodata/widgets/utils.py", "/echopype/echodata/widgets/widgets.py", "/echopype/core.py", "/echopype/convert/api.py"], "/echopype/visualize/api.py": ["/echopype/visualize/plot.py", "/echopype/echodata/__init__.py", "/echopype/calibrate/calibrate_ek.py", 
"/echopype/calibrate/calibrate_azfp.py", "/echopype/utils/log.py"], "/echopype/echodata/sensor_ep_version_mapping/v05x_to_v06x.py": ["/echopype/core.py", "/echopype/utils/log.py", "/echopype/echodata/convention/__init__.py"], "/echopype/clean/__init__.py": ["/echopype/clean/api.py"], "/echopype/visualize/plot.py": ["/echopype/visualize/cm.py", "/echopype/utils/log.py"], "/echopype/commongrid/__init__.py": ["/echopype/commongrid/api.py"], "/echopype/convert/parsed_to_zarr.py": ["/echopype/utils/io.py"], "/echopype/calibrate/__init__.py": ["/echopype/calibrate/api.py"], "/echopype/echodata/convention/utils.py": ["/echopype/echodata/convention/__init__.py"], "/echopype/tests/echodata/test_echodata.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params_old.py", "/echopype/echodata/__init__.py", "/echopype/calibrate/calibrate_ek.py"], "/echopype/qc/api.py": ["/echopype/echodata/__init__.py", "/echopype/utils/log.py"], "/echopype/visualize/__init__.py": ["/echopype/visualize/api.py"], "/echopype/tests/qc/test_qc.py": ["/echopype/qc/__init__.py", "/echopype/qc/api.py"], "/echopype/core.py": ["/echopype/convert/parse_ad2cp.py", "/echopype/convert/parse_azfp.py", "/echopype/convert/parse_ek60.py", "/echopype/convert/parse_ek80.py", "/echopype/convert/parsed_to_zarr_ek60.py", "/echopype/convert/parsed_to_zarr_ek80.py", "/echopype/convert/set_groups_ad2cp.py", "/echopype/convert/set_groups_azfp.py", "/echopype/convert/set_groups_ek60.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/convert/parse_ek60.py": ["/echopype/convert/parse_base.py"], "/echopype/tests/calibrate/test_cal_params.py": ["/echopype/calibrate/cal_params.py"], "/echopype/tests/calibrate/test_ecs.py": ["/echopype/calibrate/ecs.py"], "/echopype/echodata/__init__.py": ["/echopype/echodata/echodata.py"], "/echopype/tests/convert/test_convert_ek80.py": ["/echopype/__init__.py", "/echopype/testing.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/convert/set_groups_ek80.py": 
["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/convert/set_groups_base.py"], "/echopype/consolidate/api.py": ["/echopype/calibrate/ek80_complex.py", "/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/io.py", "/echopype/utils/prov.py", "/echopype/consolidate/split_beam_angle.py"], "/echopype/convert/__init__.py": ["/echopype/convert/parse_ad2cp.py", "/echopype/convert/parse_azfp.py", "/echopype/convert/parse_base.py", "/echopype/convert/parse_ek60.py", "/echopype/convert/parse_ek80.py", "/echopype/convert/set_groups_ad2cp.py", "/echopype/convert/set_groups_azfp.py", "/echopype/convert/set_groups_ek60.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/utils/prov.py": ["/echopype/utils/log.py"], "/echopype/convert/parse_ek80.py": ["/echopype/convert/parse_base.py"], "/echopype/tests/echodata/test_echodata_simrad.py": ["/echopype/echodata/simrad.py"], "/echopype/tests/utils/test_coding.py": ["/echopype/utils/coding.py"], "/echopype/echodata/convention/conv.py": ["/echopype/echodata/__init__.py"], "/echopype/metrics/__init__.py": ["/echopype/metrics/summary_statistics.py"], "/echopype/tests/calibrate/test_calibrate_ek80.py": ["/echopype/__init__.py"], "/echopype/tests/commongrid/test_nasc.py": ["/echopype/__init__.py", "/echopype/calibrate/__init__.py", "/echopype/commongrid/__init__.py", "/echopype/commongrid/nasc.py", "/echopype/consolidate/__init__.py"], "/echopype/convert/api.py": ["/echopype/core.py", "/echopype/convert/parsed_to_zarr.py", "/echopype/echodata/echodata.py", "/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/utils/prov.py"], "/echopype/convert/set_groups_azfp.py": ["/echopype/utils/coding.py", "/echopype/convert/set_groups_base.py"], "/echopype/tests/clean/test_noise.py": ["/echopype/__init__.py"], "/echopype/convert/parse_base.py": ["/echopype/utils/log.py", "/echopype/convert/utils/ek_raw_io.py"], "/echopype/tests/convert/test_parsed_to_zarr.py": ["/echopype/__init__.py", 
"/echopype/echodata/echodata.py"], "/echopype/echodata/widgets/widgets.py": ["/echopype/echodata/widgets/utils.py"], "/echopype/tests/echodata/test_echodata_structure.py": ["/echopype/echodata/echodata.py", "/echopype/echodata/api.py"], "/echopype/tests/calibrate/test_range_integration.py": ["/echopype/__init__.py"], "/echopype/tests/calibrate/test_env_params_integration.py": ["/echopype/__init__.py"], "/echopype/tests/convert/test_convert_source_target_locs.py": ["/echopype/__init__.py", "/echopype/utils/coding.py"], "/echopype/tests/utils/test_utils_uwa.py": ["/echopype/utils/uwa.py"], "/echopype/utils/io.py": ["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/core.py"], "/echopype/tests/calibrate/test_ek80_complex.py": ["/echopype/calibrate/ek80_complex.py"], "/echopype/convert/set_groups_ek60.py": ["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/convert/set_groups_base.py"], "/echopype/convert/utils/ek_raw_io.py": ["/echopype/utils/log.py"], "/echopype/convert/parse_azfp.py": ["/echopype/utils/log.py", "/echopype/convert/parse_base.py"]}
73,870
OSOceanAcoustics/echopype
refs/heads/main
/echopype/utils/io.py
""" echopype utilities for file handling """ import os import pathlib import platform import sys import uuid from pathlib import Path, WindowsPath from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union import fsspec import xarray as xr from fsspec import FSMap from fsspec.implementations.local import LocalFileSystem from ..utils.coding import set_storage_encodings from ..utils.log import _init_logger if TYPE_CHECKING: from ..core import PathHint SUPPORTED_ENGINES = { "netcdf4": { "ext": ".nc", }, "zarr": { "ext": ".zarr", }, } logger = _init_logger(__name__) ECHOPYPE_DIR = Path(os.path.expanduser("~")) / ".echopype" def init_ep_dir(): """Initialize hidden directory for echopype""" if not ECHOPYPE_DIR.exists(): ECHOPYPE_DIR.mkdir(exist_ok=True) def get_files_from_dir(folder): """Retrieves all Netcdf and Zarr files from a given folder""" valid_ext = [".nc", ".zarr"] return [f for f in os.listdir(folder) if os.path.splitext(f)[1] in valid_ext] def save_file(ds, path, mode, engine, group=None, compression_settings=None, **kwargs): """ Saves a dataset to netcdf or zarr depending on the engine If ``compression_settings`` are set, compress all variables with those settings """ # set zarr or netcdf specific encodings for each variable in ds encoding = set_storage_encodings(ds, compression_settings, engine) # Allows saving both NetCDF and Zarr files from an xarray dataset if engine == "netcdf4": ds.to_netcdf(path=path, mode=mode, group=group, encoding=encoding, **kwargs) elif engine == "zarr": # Ensure that encoding and chunks match for var, enc in encoding.items(): ds[var] = ds[var].chunk(enc.get("chunks", {})) ds.to_zarr(store=path, mode=mode, group=group, encoding=encoding, **kwargs) else: raise ValueError(f"{engine} is not a supported save format") def get_file_format(file): """Gets the file format (either Netcdf4 or Zarr) from the file extension""" if isinstance(file, list): file = file[0] elif isinstance(file, FSMap): file = file.root if isinstance(file, str) 
and file.endswith(".nc"): return "netcdf4" elif isinstance(file, str) and file.endswith(".zarr"): return "zarr" elif isinstance(file, pathlib.Path) and file.suffix == ".nc": return "netcdf4" elif isinstance(file, pathlib.Path) and file.suffix == ".zarr": return "zarr" else: raise ValueError(f"Unsupported file format: {os.path.splitext(file)[1]}") def _get_suffix(filepath: Union[str, Path, FSMap]) -> str: """Check if file type is supported.""" # TODO: handle multiple files through the same set of checks for combining files if isinstance(filepath, FSMap): suffix = Path(filepath.root).suffix else: suffix = Path(str(filepath)).suffix if suffix not in [".nc", ".zarr"]: raise ValueError("Input file type not supported!") return suffix def sanitize_file_path( file_path: "PathHint", storage_options: Dict[str, str] = {}, is_dir: bool = False, ) -> Union[Path, FSMap]: """ Cleans and checks the user output file path type to a standardized Path or FSMap type. Parameters ---------- file_path : str | Path | FSMap The source file path engine : str {'netcdf4', 'zarr'} The engine to be used for file output storage_options : dict Storage options for file path is_dir : bool Flag for the function to know if file_path is a directory or not. If not, suffix will be determined. 
""" if not is_dir: suffix = _get_suffix(file_path) else: suffix = "" if isinstance(file_path, Path): # Check for extension if ":/" in str(file_path): raise ValueError(f"{file_path} is not a valid posix path.") if suffix == ".zarr": return fsspec.get_mapper(str(file_path)) return file_path elif isinstance(file_path, str): if "://" in file_path: if suffix == ".nc": raise ValueError("Only local netcdf4 is supported.") return fsspec.get_mapper(file_path, **storage_options) elif suffix == ".zarr": return fsspec.get_mapper(file_path) else: return Path(file_path) elif isinstance(file_path, fsspec.FSMap): root = file_path.root if suffix == ".nc": if not isinstance(file_path.fs, LocalFileSystem): # For special case of netcdf. # netcdf4 engine can only read Path or string raise ValueError("Only local netcdf4 is supported.") return Path(root) return file_path else: raise ValueError( f"{type(file_path)} is not supported. Please pass posix path, path string, or FSMap." # noqa ) def validate_output_path( source_file: str, engine: str, output_storage_options: Dict = {}, save_path: Optional[Union[Path, str]] = None, ) -> str: """ Assembles output file names and path. The final resulting file will be saved as provided in save path. If a directory path is provided then the final file name will use the same name as the source file and saved within the directory path in `save_path` or echopype's `temp_output` directory. Example 1. source_file - test.raw engine - zarr save_path - /path/dir/ output is /path/dir/test.zarr Example 2. source_file - test.raw engine - zarr save_path - None output is ~/.echopype/temp_output/test.zarr Example 3. 
source_file - test.raw engine - zarr save_path - /path/dir/myzarr.zarr output is /path/dir/myzarr.zarr Parameters ---------- source_file : str The source file path engine : str {'netcdf4', 'zarr'} The engine to be used for file output output_storage_options : dict Storage options for remote output path save_path : str | Path | None Either a directory or a file path. If it's not provided, we will save output file(s) in the echopype's `temp_output` directory. Returns ------- str The final string path of the resulting file. Raises ------ ValueError If engine is not one of the supported output engine of zarr or netcdf TypeError If `save_path` is not of type Path or str """ if engine not in SUPPORTED_ENGINES: ValueError(f"Engine {engine} is not supported for file export.") file_ext = SUPPORTED_ENGINES[engine]["ext"] if save_path is None: logger.warning("A directory or file path is not provided!") out_dir = ECHOPYPE_DIR / "temp_output" if not out_dir.exists(): out_dir.mkdir(parents=True) logger.warning(f"Resulting converted file(s) will be available at {str(out_dir)}") out_path = str(out_dir / (Path(source_file).stem + file_ext)) elif not isinstance(save_path, Path) and not isinstance(save_path, str): raise TypeError("save_path must be a string or Path") else: # convert save_path into a nicely formatted Windows path if we are on # a Windows machine and the path is not a cloud storage path. Then convert back to a string. 
if platform.system() == "Windows": if isinstance(save_path, str) and ("://" not in save_path): save_path = str(WindowsPath(save_path).absolute()) if isinstance(save_path, str): # Clean folder path by stripping '/' at the end if save_path.endswith("/") or save_path.endswith("\\"): save_path = save_path[:-1] # Determine whether this is a directory or not is_dir = True if Path(save_path).suffix == "" else False else: is_dir = True if save_path.suffix == "" else False # Cleans path sanitized_path = sanitize_file_path( save_path, storage_options=output_storage_options, is_dir=is_dir ) # Check file permissions if is_dir: check_file_permissions(sanitized_path) out_path = os.path.join(save_path, Path(source_file).stem + file_ext) else: if isinstance(sanitized_path, Path): check_file_permissions(sanitized_path.parent) final_path = sanitized_path out_path = str(final_path.parent.joinpath(final_path.stem + file_ext).absolute()) else: path_dir = fsspec.get_mapper(os.path.dirname(save_path), **output_storage_options) check_file_permissions(path_dir) final_path = Path(save_path) out_path = save_path if final_path.suffix != file_ext: logger.warning( "Mismatch between specified engine and save_path found; forcing output format to engine." 
# noqa ) return out_path def check_file_existence(file_path: "PathHint", storage_options: Dict[str, str] = {}) -> bool: """ Checks if file exists in the specified path Parameters ---------- file_path : str or pathlib.Path or fsspec.FSMap path to file storage_options : dict options for cloud storage """ if isinstance(file_path, Path): # Check for extension if ":/" in str(file_path): raise ValueError(f"{file_path} is not a valid posix path.") if file_path.exists(): return True else: return False elif isinstance(file_path, str) or isinstance(file_path, FSMap): if isinstance(file_path, FSMap): fsmap = file_path else: fsmap = fsspec.get_mapper(file_path, **storage_options) if not fsmap.fs.exists(fsmap.root): return False else: return True else: raise ValueError( f"{type(file_path)} is not supported. Please pass posix path, path string, or FSMap." # noqa ) def check_file_permissions(FILE_DIR): try: fname = "." + str(uuid.uuid4()) if isinstance(FILE_DIR, FSMap): base_dir = os.path.dirname(FILE_DIR.root) if not base_dir: base_dir = FILE_DIR.root TEST_FILE = os.path.join(base_dir, fname).replace("\\", "/") with FILE_DIR.fs.open(TEST_FILE, "w") as f: f.write("testing\n") FILE_DIR.fs.delete(TEST_FILE) elif isinstance(FILE_DIR, (Path, str)): if isinstance(FILE_DIR, str): FILE_DIR = Path(FILE_DIR) if not FILE_DIR.exists(): logger.warning(f"{str(FILE_DIR)} does not exist. Attempting to create it.") FILE_DIR.mkdir(exist_ok=True, parents=True) TEST_FILE = FILE_DIR.joinpath(Path(fname)) TEST_FILE.write_text("testing\n") # Do python version check since missing_ok is for python 3.9 and up if sys.version_info >= (3, 9): TEST_FILE.unlink(missing_ok=True) else: TEST_FILE.unlink() except Exception: raise PermissionError("Writing to specified path is not permitted.") def env_indep_joinpath(*args: Tuple[str, ...]) -> str: """ Joins a variable number of paths taking into account the form of cloud storage paths. 
Parameters ---------- *args: tuple of str A variable number of strings that should be joined in the order they are provided Returns ------- joined_path: str Full path constructed by joining all input strings """ if "://" in args[0]: # join paths for cloud storage path joined_path = r"/".join(args) else: # join paths for non-cloud storage path joined_path = os.path.join(*args) return joined_path def validate_source_ds_da( source_ds_da: Union[xr.Dataset, xr.DataArray, str, Path], storage_options: Optional[dict] ) -> Tuple[Union[xr.Dataset, str, xr.DataArray], Optional[str]]: """ This function ensures that ``source_ds_da`` is of the correct type and validates the path of ``source_ds_da``, if it is provided. Parameters ---------- source_ds_da: xr.Dataset, xr.DataArray, str or pathlib.Path A source that points to a Dataset or DataArray. If the input is a path, it specifies the path to a zarr or netcdf file. storage_options: dict, optional Any additional parameters for the storage backend, corresponding to the path provided for ``source_ds_da`` Returns ------- source_ds_da: xr.Dataset or xr.DataArray or str A Dataset or DataArray which will be the same as the input ``source_ds_da`` or a validated path to a zarr or netcdf file file_type: {"netcdf4", "zarr"}, optional The file type of the input path if ``source_ds_da`` is a path, otherwise ``None`` """ # initialize file_type file_type = None # make sure that storage_options is of the appropriate type if not isinstance(storage_options, dict): raise TypeError("storage_options must be a dict!") # check that source_ds_da is of the correct type, if it is a path validate # the path and open the Dataset or DataArray using xarray if not isinstance(source_ds_da, (xr.Dataset, xr.DataArray, str, Path)): raise TypeError("source_ds_da must be a Dataset or DataArray or str or pathlib.Path!") elif isinstance(source_ds_da, (str, Path)): # determine if we obtained a zarr or netcdf file file_type = get_file_format(source_ds_da) # validate 
source_ds_da if it is a path source_ds_da = validate_output_path( source_file="blank", # will be unused since source_ds cannot be none engine=file_type, output_storage_options=storage_options, save_path=source_ds_da, ) # check that the path exists check_file_existence(file_path=source_ds_da, storage_options=storage_options) return source_ds_da, file_type
{"/echopype/convert/set_groups_ad2cp.py": ["/echopype/__init__.py", "/echopype/utils/coding.py", "/echopype/convert/parse_ad2cp.py", "/echopype/convert/set_groups_base.py"], "/echopype/tests/utils/test_source_filenames.py": ["/echopype/utils/prov.py"], "/echopype/echodata/convention/__init__.py": ["/echopype/echodata/convention/conv.py"], "/echopype/consolidate/split_beam_angle.py": ["/echopype/calibrate/ek80_complex.py"], "/echopype/calibrate/ecs.py": ["/echopype/utils/log.py"], "/echopype/tests/calibrate/test_ecs_integration.py": ["/echopype/__init__.py", "/echopype/calibrate/ecs.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/cal_params.py"], "/echopype/calibrate/api.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/calibrate/calibrate_azfp.py", "/echopype/calibrate/calibrate_ek.py"], "/echopype/convert/parse_ad2cp.py": ["/echopype/convert/parse_base.py"], "/echopype/echodata/combine.py": ["/echopype/utils/io.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/echodata/echodata.py"], "/echopype/tests/consolidate/test_consolidate_integration.py": ["/echopype/__init__.py"], "/echopype/echodata/api.py": ["/echopype/echodata/echodata.py", "/echopype/core.py"], "/echopype/tests/utils/test_utils_io.py": ["/echopype/utils/io.py"], "/echopype/echodata/sensor_ep_version_mapping/ep_version_mapper.py": ["/echopype/echodata/sensor_ep_version_mapping/v05x_to_v06x.py"], "/echopype/tests/mask/test_mask.py": ["/echopype/__init__.py", "/echopype/mask/__init__.py", "/echopype/mask/api.py"], "/echopype/tests/conftest.py": ["/echopype/testing.py"], "/echopype/tests/echodata/utils.py": ["/echopype/convert/set_groups_base.py", "/echopype/echodata/echodata.py"], "/echopype/tests/commongrid/test_mvbs.py": ["/echopype/__init__.py", "/echopype/commongrid/mvbs.py"], "/echopype/tests/calibrate/test_cal_params_integration.py": ["/echopype/__init__.py"], 
"/echopype/tests/test_core.py": ["/echopype/core.py"], "/echopype/calibrate/calibrate_azfp.py": ["/echopype/echodata/__init__.py", "/echopype/calibrate/cal_params.py", "/echopype/calibrate/calibrate_ek.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/range.py"], "/echopype/echodata/widgets/utils.py": ["/echopype/echodata/convention/utils.py"], "/echopype/echodata/simrad.py": ["/echopype/echodata/echodata.py"], "/echopype/tests/convert/test_convert_ek60.py": ["/echopype/__init__.py"], "/echopype/convert/parsed_to_zarr_ek60.py": ["/echopype/convert/parsed_to_zarr.py"], "/echopype/mask/api.py": ["/echopype/utils/io.py", "/echopype/utils/prov.py"], "/echopype/qc/__init__.py": ["/echopype/qc/api.py"], "/echopype/tests/utils/test_processinglevels_integration.py": ["/echopype/__init__.py"], "/echopype/tests/metrics/test_metrics_summary_statistics.py": ["/echopype/metrics/summary_statistics.py"], "/echopype/__init__.py": ["/echopype/convert/api.py", "/echopype/echodata/api.py", "/echopype/echodata/combine.py", "/echopype/utils/io.py", "/echopype/utils/log.py"], "/.ci_helpers/check-version.py": ["/echopype/__init__.py"], "/echopype/calibrate/ek80_complex.py": ["/echopype/convert/set_groups_ek80.py"], "/echopype/consolidate/__init__.py": ["/echopype/consolidate/api.py"], "/echopype/commongrid/api.py": ["/echopype/utils/prov.py", "/echopype/commongrid/mvbs.py"], "/echopype/tests/echodata/test_echodata_combine.py": ["/echopype/__init__.py", "/echopype/utils/coding.py", "/echopype/echodata/__init__.py", "/echopype/echodata/combine.py"], "/echopype/tests/utils/test_utils_log.py": ["/echopype/__init__.py"], "/echopype/mask/__init__.py": ["/echopype/mask/api.py"], "/echopype/tests/convert/test_convert_ad2cp.py": ["/echopype/__init__.py", "/echopype/testing.py"], "/echopype/convert/parsed_to_zarr_ek80.py": ["/echopype/convert/parsed_to_zarr_ek60.py"], "/echopype/tests/visualize/test_plot.py": ["/echopype/__init__.py", "/echopype/visualize/__init__.py", 
"/echopype/testing.py", "/echopype/calibrate/calibrate_ek.py", "/echopype/echodata/__init__.py", "/echopype/visualize/api.py"], "/echopype/calibrate/env_params.py": ["/echopype/echodata/__init__.py", "/echopype/calibrate/cal_params.py"], "/echopype/convert/utils/ek_raw_parsers.py": ["/echopype/utils/log.py", "/echopype/convert/utils/ek_date_conversion.py"], "/echopype/tests/convert/test_convert_azfp.py": ["/echopype/__init__.py"], "/echopype/tests/calibrate/test_calibrate.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params_old.py"], "/echopype/calibrate/range.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/calibrate/env_params.py"], "/echopype/tests/calibrate/test_env_params.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params.py"], "/echopype/convert/set_groups_base.py": ["/echopype/echodata/convention/__init__.py", "/echopype/utils/coding.py", "/echopype/utils/prov.py"], "/echopype/clean/api.py": ["/echopype/utils/prov.py", "/echopype/clean/noise_est.py"], "/echopype/calibrate/calibrate_ek.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/log.py", "/echopype/calibrate/cal_params.py", "/echopype/calibrate/calibrate_base.py", "/echopype/calibrate/ecs.py", "/echopype/calibrate/ek80_complex.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/range.py"], "/echopype/calibrate/calibrate_base.py": ["/echopype/echodata/__init__.py", "/echopype/utils/log.py", "/echopype/calibrate/ecs.py"], "/echopype/echodata/echodata.py": ["/echopype/utils/coding.py", "/echopype/utils/io.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/echodata/convention/__init__.py", "/echopype/echodata/widgets/utils.py", "/echopype/echodata/widgets/widgets.py", "/echopype/core.py", "/echopype/convert/api.py"], "/echopype/visualize/api.py": ["/echopype/visualize/plot.py", "/echopype/echodata/__init__.py", "/echopype/calibrate/calibrate_ek.py", 
"/echopype/calibrate/calibrate_azfp.py", "/echopype/utils/log.py"], "/echopype/echodata/sensor_ep_version_mapping/v05x_to_v06x.py": ["/echopype/core.py", "/echopype/utils/log.py", "/echopype/echodata/convention/__init__.py"], "/echopype/clean/__init__.py": ["/echopype/clean/api.py"], "/echopype/visualize/plot.py": ["/echopype/visualize/cm.py", "/echopype/utils/log.py"], "/echopype/commongrid/__init__.py": ["/echopype/commongrid/api.py"], "/echopype/convert/parsed_to_zarr.py": ["/echopype/utils/io.py"], "/echopype/calibrate/__init__.py": ["/echopype/calibrate/api.py"], "/echopype/echodata/convention/utils.py": ["/echopype/echodata/convention/__init__.py"], "/echopype/tests/echodata/test_echodata.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params_old.py", "/echopype/echodata/__init__.py", "/echopype/calibrate/calibrate_ek.py"], "/echopype/qc/api.py": ["/echopype/echodata/__init__.py", "/echopype/utils/log.py"], "/echopype/visualize/__init__.py": ["/echopype/visualize/api.py"], "/echopype/tests/qc/test_qc.py": ["/echopype/qc/__init__.py", "/echopype/qc/api.py"], "/echopype/core.py": ["/echopype/convert/parse_ad2cp.py", "/echopype/convert/parse_azfp.py", "/echopype/convert/parse_ek60.py", "/echopype/convert/parse_ek80.py", "/echopype/convert/parsed_to_zarr_ek60.py", "/echopype/convert/parsed_to_zarr_ek80.py", "/echopype/convert/set_groups_ad2cp.py", "/echopype/convert/set_groups_azfp.py", "/echopype/convert/set_groups_ek60.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/convert/parse_ek60.py": ["/echopype/convert/parse_base.py"], "/echopype/tests/calibrate/test_cal_params.py": ["/echopype/calibrate/cal_params.py"], "/echopype/tests/calibrate/test_ecs.py": ["/echopype/calibrate/ecs.py"], "/echopype/echodata/__init__.py": ["/echopype/echodata/echodata.py"], "/echopype/tests/convert/test_convert_ek80.py": ["/echopype/__init__.py", "/echopype/testing.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/convert/set_groups_ek80.py": 
["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/convert/set_groups_base.py"], "/echopype/consolidate/api.py": ["/echopype/calibrate/ek80_complex.py", "/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/io.py", "/echopype/utils/prov.py", "/echopype/consolidate/split_beam_angle.py"], "/echopype/convert/__init__.py": ["/echopype/convert/parse_ad2cp.py", "/echopype/convert/parse_azfp.py", "/echopype/convert/parse_base.py", "/echopype/convert/parse_ek60.py", "/echopype/convert/parse_ek80.py", "/echopype/convert/set_groups_ad2cp.py", "/echopype/convert/set_groups_azfp.py", "/echopype/convert/set_groups_ek60.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/utils/prov.py": ["/echopype/utils/log.py"], "/echopype/convert/parse_ek80.py": ["/echopype/convert/parse_base.py"], "/echopype/tests/echodata/test_echodata_simrad.py": ["/echopype/echodata/simrad.py"], "/echopype/tests/utils/test_coding.py": ["/echopype/utils/coding.py"], "/echopype/echodata/convention/conv.py": ["/echopype/echodata/__init__.py"], "/echopype/metrics/__init__.py": ["/echopype/metrics/summary_statistics.py"], "/echopype/tests/calibrate/test_calibrate_ek80.py": ["/echopype/__init__.py"], "/echopype/tests/commongrid/test_nasc.py": ["/echopype/__init__.py", "/echopype/calibrate/__init__.py", "/echopype/commongrid/__init__.py", "/echopype/commongrid/nasc.py", "/echopype/consolidate/__init__.py"], "/echopype/convert/api.py": ["/echopype/core.py", "/echopype/convert/parsed_to_zarr.py", "/echopype/echodata/echodata.py", "/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/utils/prov.py"], "/echopype/convert/set_groups_azfp.py": ["/echopype/utils/coding.py", "/echopype/convert/set_groups_base.py"], "/echopype/tests/clean/test_noise.py": ["/echopype/__init__.py"], "/echopype/convert/parse_base.py": ["/echopype/utils/log.py", "/echopype/convert/utils/ek_raw_io.py"], "/echopype/tests/convert/test_parsed_to_zarr.py": ["/echopype/__init__.py", 
"/echopype/echodata/echodata.py"], "/echopype/echodata/widgets/widgets.py": ["/echopype/echodata/widgets/utils.py"], "/echopype/tests/echodata/test_echodata_structure.py": ["/echopype/echodata/echodata.py", "/echopype/echodata/api.py"], "/echopype/tests/calibrate/test_range_integration.py": ["/echopype/__init__.py"], "/echopype/tests/calibrate/test_env_params_integration.py": ["/echopype/__init__.py"], "/echopype/tests/convert/test_convert_source_target_locs.py": ["/echopype/__init__.py", "/echopype/utils/coding.py"], "/echopype/tests/utils/test_utils_uwa.py": ["/echopype/utils/uwa.py"], "/echopype/utils/io.py": ["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/core.py"], "/echopype/tests/calibrate/test_ek80_complex.py": ["/echopype/calibrate/ek80_complex.py"], "/echopype/convert/set_groups_ek60.py": ["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/convert/set_groups_base.py"], "/echopype/convert/utils/ek_raw_io.py": ["/echopype/utils/log.py"], "/echopype/convert/parse_azfp.py": ["/echopype/utils/log.py", "/echopype/convert/parse_base.py"]}
73,871
OSOceanAcoustics/echopype
refs/heads/main
/echopype/tests/calibrate/test_ek80_complex.py
import pytest import numpy as np import xarray as xr from echopype.calibrate.ek80_complex import get_vend_filter_EK80 @pytest.fixture def ek80_path(test_path): return test_path['EK80'] def gen_mock_vend(ch_num, filter_len=10, has_nan=False): vend = xr.Dataset( data_vars={ "WBT_filter_r": (["channel", "WBT_filter_n"], np.random.rand(ch_num, filter_len)), "WBT_filter_i": (["channel", "WBT_filter_n"], np.random.rand(ch_num, filter_len)), "WBT_decimation": 6, "PC_filter_r": (["channel", "PC_filter_n"], np.random.rand(ch_num, filter_len*2)), "PC_filter_i": (["channel", "PC_filter_n"], np.random.rand(ch_num, filter_len*2)), "PC_decimation": 1, }, coords={ "channel": [f"ch_{ch}" for ch in np.arange(ch_num)], "WBT_filter_n": np.arange(filter_len), "PC_filter_n": np.arange(filter_len*2), } ) if has_nan: # replace some parts of filter coeff with NaN if filter_len != 1: vend["WBT_filter_r"].data[:, int(filter_len/2):] = np.nan vend["WBT_filter_i"].data[:, int(filter_len/2):] = np.nan vend["PC_filter_r"].data[:, filter_len:] = np.nan vend["PC_filter_i"].data[:, filter_len:] = np.nan else: raise ValueError("Cannot replace some parts of filter coeff with NaN") return vend @pytest.mark.parametrize( ("ch_num", "filter_len", "has_nan"), [ # filter coeff are of the same length for all channels (2, 10, False), # filter coeff are of different lengths across channels, so some parts are NaN-padded (2, 10, True), # filter coeff is of length=1 (2, 1, False), ], ids=[ "filter_coeff_filled", "filter_coeff_has_nan", "filter_coeff_len_1", ] ) def test_get_vend_filter_EK80(ch_num, filter_len, has_nan): vend = gen_mock_vend(ch_num, filter_len, has_nan) for ch in [f"ch_{ch}" for ch in np.arange(ch_num)]: for filter_name in ["WBT", "PC"]: var_imag = f"{filter_name}_filter_i" var_real = f"{filter_name}_filter_r" var_df = f"{filter_name}_decimation" sel_vend = vend.sel(channel=ch) assert np.all( (sel_vend[var_real] + 1j * sel_vend[var_imag]).dropna(dim=f"{filter_name}_filter_n").values == 
get_vend_filter_EK80(vend, channel_id=ch, filter_name=filter_name, param_type="coeff") ) assert sel_vend[var_df].values == get_vend_filter_EK80( vend, channel_id=ch, filter_name=filter_name, param_type="decimation" )
{"/echopype/convert/set_groups_ad2cp.py": ["/echopype/__init__.py", "/echopype/utils/coding.py", "/echopype/convert/parse_ad2cp.py", "/echopype/convert/set_groups_base.py"], "/echopype/tests/utils/test_source_filenames.py": ["/echopype/utils/prov.py"], "/echopype/echodata/convention/__init__.py": ["/echopype/echodata/convention/conv.py"], "/echopype/consolidate/split_beam_angle.py": ["/echopype/calibrate/ek80_complex.py"], "/echopype/calibrate/ecs.py": ["/echopype/utils/log.py"], "/echopype/tests/calibrate/test_ecs_integration.py": ["/echopype/__init__.py", "/echopype/calibrate/ecs.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/cal_params.py"], "/echopype/calibrate/api.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/calibrate/calibrate_azfp.py", "/echopype/calibrate/calibrate_ek.py"], "/echopype/convert/parse_ad2cp.py": ["/echopype/convert/parse_base.py"], "/echopype/echodata/combine.py": ["/echopype/utils/io.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/echodata/echodata.py"], "/echopype/tests/consolidate/test_consolidate_integration.py": ["/echopype/__init__.py"], "/echopype/echodata/api.py": ["/echopype/echodata/echodata.py", "/echopype/core.py"], "/echopype/tests/utils/test_utils_io.py": ["/echopype/utils/io.py"], "/echopype/echodata/sensor_ep_version_mapping/ep_version_mapper.py": ["/echopype/echodata/sensor_ep_version_mapping/v05x_to_v06x.py"], "/echopype/tests/mask/test_mask.py": ["/echopype/__init__.py", "/echopype/mask/__init__.py", "/echopype/mask/api.py"], "/echopype/tests/conftest.py": ["/echopype/testing.py"], "/echopype/tests/echodata/utils.py": ["/echopype/convert/set_groups_base.py", "/echopype/echodata/echodata.py"], "/echopype/tests/commongrid/test_mvbs.py": ["/echopype/__init__.py", "/echopype/commongrid/mvbs.py"], "/echopype/tests/calibrate/test_cal_params_integration.py": ["/echopype/__init__.py"], 
"/echopype/tests/test_core.py": ["/echopype/core.py"], "/echopype/calibrate/calibrate_azfp.py": ["/echopype/echodata/__init__.py", "/echopype/calibrate/cal_params.py", "/echopype/calibrate/calibrate_ek.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/range.py"], "/echopype/echodata/widgets/utils.py": ["/echopype/echodata/convention/utils.py"], "/echopype/echodata/simrad.py": ["/echopype/echodata/echodata.py"], "/echopype/tests/convert/test_convert_ek60.py": ["/echopype/__init__.py"], "/echopype/convert/parsed_to_zarr_ek60.py": ["/echopype/convert/parsed_to_zarr.py"], "/echopype/mask/api.py": ["/echopype/utils/io.py", "/echopype/utils/prov.py"], "/echopype/qc/__init__.py": ["/echopype/qc/api.py"], "/echopype/tests/utils/test_processinglevels_integration.py": ["/echopype/__init__.py"], "/echopype/tests/metrics/test_metrics_summary_statistics.py": ["/echopype/metrics/summary_statistics.py"], "/echopype/__init__.py": ["/echopype/convert/api.py", "/echopype/echodata/api.py", "/echopype/echodata/combine.py", "/echopype/utils/io.py", "/echopype/utils/log.py"], "/.ci_helpers/check-version.py": ["/echopype/__init__.py"], "/echopype/calibrate/ek80_complex.py": ["/echopype/convert/set_groups_ek80.py"], "/echopype/consolidate/__init__.py": ["/echopype/consolidate/api.py"], "/echopype/commongrid/api.py": ["/echopype/utils/prov.py", "/echopype/commongrid/mvbs.py"], "/echopype/tests/echodata/test_echodata_combine.py": ["/echopype/__init__.py", "/echopype/utils/coding.py", "/echopype/echodata/__init__.py", "/echopype/echodata/combine.py"], "/echopype/tests/utils/test_utils_log.py": ["/echopype/__init__.py"], "/echopype/mask/__init__.py": ["/echopype/mask/api.py"], "/echopype/tests/convert/test_convert_ad2cp.py": ["/echopype/__init__.py", "/echopype/testing.py"], "/echopype/convert/parsed_to_zarr_ek80.py": ["/echopype/convert/parsed_to_zarr_ek60.py"], "/echopype/tests/visualize/test_plot.py": ["/echopype/__init__.py", "/echopype/visualize/__init__.py", 
"/echopype/testing.py", "/echopype/calibrate/calibrate_ek.py", "/echopype/echodata/__init__.py", "/echopype/visualize/api.py"], "/echopype/calibrate/env_params.py": ["/echopype/echodata/__init__.py", "/echopype/calibrate/cal_params.py"], "/echopype/convert/utils/ek_raw_parsers.py": ["/echopype/utils/log.py", "/echopype/convert/utils/ek_date_conversion.py"], "/echopype/tests/convert/test_convert_azfp.py": ["/echopype/__init__.py"], "/echopype/tests/calibrate/test_calibrate.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params_old.py"], "/echopype/calibrate/range.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/calibrate/env_params.py"], "/echopype/tests/calibrate/test_env_params.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params.py"], "/echopype/convert/set_groups_base.py": ["/echopype/echodata/convention/__init__.py", "/echopype/utils/coding.py", "/echopype/utils/prov.py"], "/echopype/clean/api.py": ["/echopype/utils/prov.py", "/echopype/clean/noise_est.py"], "/echopype/calibrate/calibrate_ek.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/log.py", "/echopype/calibrate/cal_params.py", "/echopype/calibrate/calibrate_base.py", "/echopype/calibrate/ecs.py", "/echopype/calibrate/ek80_complex.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/range.py"], "/echopype/calibrate/calibrate_base.py": ["/echopype/echodata/__init__.py", "/echopype/utils/log.py", "/echopype/calibrate/ecs.py"], "/echopype/echodata/echodata.py": ["/echopype/utils/coding.py", "/echopype/utils/io.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/echodata/convention/__init__.py", "/echopype/echodata/widgets/utils.py", "/echopype/echodata/widgets/widgets.py", "/echopype/core.py", "/echopype/convert/api.py"], "/echopype/visualize/api.py": ["/echopype/visualize/plot.py", "/echopype/echodata/__init__.py", "/echopype/calibrate/calibrate_ek.py", 
"/echopype/calibrate/calibrate_azfp.py", "/echopype/utils/log.py"], "/echopype/echodata/sensor_ep_version_mapping/v05x_to_v06x.py": ["/echopype/core.py", "/echopype/utils/log.py", "/echopype/echodata/convention/__init__.py"], "/echopype/clean/__init__.py": ["/echopype/clean/api.py"], "/echopype/visualize/plot.py": ["/echopype/visualize/cm.py", "/echopype/utils/log.py"], "/echopype/commongrid/__init__.py": ["/echopype/commongrid/api.py"], "/echopype/convert/parsed_to_zarr.py": ["/echopype/utils/io.py"], "/echopype/calibrate/__init__.py": ["/echopype/calibrate/api.py"], "/echopype/echodata/convention/utils.py": ["/echopype/echodata/convention/__init__.py"], "/echopype/tests/echodata/test_echodata.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params_old.py", "/echopype/echodata/__init__.py", "/echopype/calibrate/calibrate_ek.py"], "/echopype/qc/api.py": ["/echopype/echodata/__init__.py", "/echopype/utils/log.py"], "/echopype/visualize/__init__.py": ["/echopype/visualize/api.py"], "/echopype/tests/qc/test_qc.py": ["/echopype/qc/__init__.py", "/echopype/qc/api.py"], "/echopype/core.py": ["/echopype/convert/parse_ad2cp.py", "/echopype/convert/parse_azfp.py", "/echopype/convert/parse_ek60.py", "/echopype/convert/parse_ek80.py", "/echopype/convert/parsed_to_zarr_ek60.py", "/echopype/convert/parsed_to_zarr_ek80.py", "/echopype/convert/set_groups_ad2cp.py", "/echopype/convert/set_groups_azfp.py", "/echopype/convert/set_groups_ek60.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/convert/parse_ek60.py": ["/echopype/convert/parse_base.py"], "/echopype/tests/calibrate/test_cal_params.py": ["/echopype/calibrate/cal_params.py"], "/echopype/tests/calibrate/test_ecs.py": ["/echopype/calibrate/ecs.py"], "/echopype/echodata/__init__.py": ["/echopype/echodata/echodata.py"], "/echopype/tests/convert/test_convert_ek80.py": ["/echopype/__init__.py", "/echopype/testing.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/convert/set_groups_ek80.py": 
["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/convert/set_groups_base.py"], "/echopype/consolidate/api.py": ["/echopype/calibrate/ek80_complex.py", "/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/io.py", "/echopype/utils/prov.py", "/echopype/consolidate/split_beam_angle.py"], "/echopype/convert/__init__.py": ["/echopype/convert/parse_ad2cp.py", "/echopype/convert/parse_azfp.py", "/echopype/convert/parse_base.py", "/echopype/convert/parse_ek60.py", "/echopype/convert/parse_ek80.py", "/echopype/convert/set_groups_ad2cp.py", "/echopype/convert/set_groups_azfp.py", "/echopype/convert/set_groups_ek60.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/utils/prov.py": ["/echopype/utils/log.py"], "/echopype/convert/parse_ek80.py": ["/echopype/convert/parse_base.py"], "/echopype/tests/echodata/test_echodata_simrad.py": ["/echopype/echodata/simrad.py"], "/echopype/tests/utils/test_coding.py": ["/echopype/utils/coding.py"], "/echopype/echodata/convention/conv.py": ["/echopype/echodata/__init__.py"], "/echopype/metrics/__init__.py": ["/echopype/metrics/summary_statistics.py"], "/echopype/tests/calibrate/test_calibrate_ek80.py": ["/echopype/__init__.py"], "/echopype/tests/commongrid/test_nasc.py": ["/echopype/__init__.py", "/echopype/calibrate/__init__.py", "/echopype/commongrid/__init__.py", "/echopype/commongrid/nasc.py", "/echopype/consolidate/__init__.py"], "/echopype/convert/api.py": ["/echopype/core.py", "/echopype/convert/parsed_to_zarr.py", "/echopype/echodata/echodata.py", "/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/utils/prov.py"], "/echopype/convert/set_groups_azfp.py": ["/echopype/utils/coding.py", "/echopype/convert/set_groups_base.py"], "/echopype/tests/clean/test_noise.py": ["/echopype/__init__.py"], "/echopype/convert/parse_base.py": ["/echopype/utils/log.py", "/echopype/convert/utils/ek_raw_io.py"], "/echopype/tests/convert/test_parsed_to_zarr.py": ["/echopype/__init__.py", 
"/echopype/echodata/echodata.py"], "/echopype/echodata/widgets/widgets.py": ["/echopype/echodata/widgets/utils.py"], "/echopype/tests/echodata/test_echodata_structure.py": ["/echopype/echodata/echodata.py", "/echopype/echodata/api.py"], "/echopype/tests/calibrate/test_range_integration.py": ["/echopype/__init__.py"], "/echopype/tests/calibrate/test_env_params_integration.py": ["/echopype/__init__.py"], "/echopype/tests/convert/test_convert_source_target_locs.py": ["/echopype/__init__.py", "/echopype/utils/coding.py"], "/echopype/tests/utils/test_utils_uwa.py": ["/echopype/utils/uwa.py"], "/echopype/utils/io.py": ["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/core.py"], "/echopype/tests/calibrate/test_ek80_complex.py": ["/echopype/calibrate/ek80_complex.py"], "/echopype/convert/set_groups_ek60.py": ["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/convert/set_groups_base.py"], "/echopype/convert/utils/ek_raw_io.py": ["/echopype/utils/log.py"], "/echopype/convert/parse_azfp.py": ["/echopype/utils/log.py", "/echopype/convert/parse_base.py"]}
73,872
OSOceanAcoustics/echopype
refs/heads/main
/echopype/utils/uwa.py
""" Utilities for calculating seawater acoustic properties. """ import numpy as np def calc_sound_speed(temperature=27, salinity=35, pressure=10, formula_source="Mackenzie"): """ Calculate sound speed in [m/s]. Parameters ---------- temperature: num temperature [deg C] salinity: num salinity [PSU, part per thousand] pressure: num pressure [dbars] formula_source: str, {"Mackenzie", "AZFP"} Source of formula used to calculate sound speed. "Mackenzie" (default) uses the formula from Mackenzie 1981 (see Notes below) as implemented in ``arlpy`` package. "AZFP" uses the formula supplied in the AZFP Matlab code. Returns ------- Sound speed [m/s] for each input temperature value. Notes ----- Mackenzie KV (1981) Nine‐term equation for sound speed in the oceans. The Journal of the Acoustical Society of America, 70(3), 807–812. https://doi.org/10.1121/1.386920 The ranges of validity encompass the following: temperature −2 to 30 °C, salinity 30 to 40 ppt, and depth 0 to 8000 m. """ if formula_source == "Mackenzie": ss = ( 1448.96 + 4.591 * temperature - 5.304e-2 * temperature**2 + 2.374e-4 * temperature**3 ) ss += 1.340 * (salinity - 35) + 1.630e-2 * pressure + 1.675e-7 * pressure**2 ss += -1.025e-2 * temperature * (salinity - 35) - 7.139e-13 * temperature * pressure**3 elif formula_source == "AZFP": z = temperature / 10 ss = ( 1449.05 + z * (45.7 + z * (-5.21 + 0.23 * z)) + (1.333 + z * (-0.126 + z * 0.009)) * (salinity - 35.0) + (pressure / 1000) * (16.3 + 0.18 * (pressure / 1000)) ) else: ValueError("Unknown formula source") return ss def calc_absorption( frequency, temperature=27, salinity=35, pressure=10, pH=8.1, sound_speed=None, formula_source="AM", ): """ Calculate sea water absorption in units [dB/m]. 
Parameters ---------- frequency: int or numpy array frequency [Hz] temperature: num temperature [deg C] salinity: num salinity [PSU, part per thousand] pressure: num pressure [dbars] pH: num pH of water formula_source: str, {"AM", "FG", "AZFP"} Source of formula used to calculate sound speed. "AM" (default) uses the formula from Ainslie and McColm (1998). "FG" uses the formula from Francois and Garrison (1982). "AZFP" uses the the formula supplied in the AZFP Matlab code. See Notes below for the references. Returns ------- Sea water absorption [dB/m]. Notes ----- Ainslie MA, McColm JG. (1998). A simplified formula for viscous and chemical absorption in sea water. The Journal of the Acoustical Society of America, 103(3), 1671–1672. https://doi.org/10.1121/1.421258 Francois RE, Garrison GR. (1982). Sound absorption based on ocean measurements. Part II: Boric acid contribution and equation for total absorption. The Journal of the Acoustical Society of America, 72(6), 1879–1890. https://doi.org/10.1121/1.388673 The accuracy of the simplified formula from Ainslie & McColm 1998 compared with the original complicated formula from Francois & Garrison 1982 was demonstrated between 100 Hz and 1 MHz. 
""" if formula_source == "FG": f = frequency / 1000.0 # convert from Hz to kHz due to formula if sound_speed is None: c = 1412.0 + 3.21 * temperature + 1.19 * salinity + 0.0167 * pressure else: c = sound_speed A1 = 8.86 / c * 10 ** (0.78 * pH - 5) P1 = 1.0 f1 = 2.8 * np.sqrt(salinity / 35) * 10 ** (4 - 1245 / (temperature + 273)) A2 = 21.44 * salinity / c * (1 + 0.025 * temperature) P2 = 1.0 - 1.37e-4 * pressure + 6.2e-9 * pressure**2 f2 = 8.17 * 10 ** (8 - 1990 / (temperature + 273)) / (1 + 0.0018 * (salinity - 35)) P3 = 1.0 - 3.83e-5 * pressure + 4.9e-10 * pressure**2 if np.all(temperature < 20): A3 = ( 4.937e-4 - 2.59e-5 * temperature + 9.11e-7 * temperature**2 - 1.5e-8 * temperature**3 ) else: A3 = ( 3.964e-4 - 1.146e-5 * temperature + 1.45e-7 * temperature**2 - 6.5e-10 * temperature**3 ) a = ( A1 * P1 * f1 * f**2 / (f**2 + f1**2) + A2 * P2 * f2 * f**2 / (f**2 + f2**2) + A3 * P3 * f**2 ) sea_abs = a / 1000 # formula output is in unit [dB/km] elif formula_source == "AM": freq = frequency / 1000 D = pressure / 1000 f1 = 0.78 * np.sqrt(salinity / 35) * np.exp(temperature / 26) f2 = 42 * np.exp(temperature / 17) a1 = 0.106 * (f1 * (freq**2)) / ((f1**2) + (freq**2)) * np.exp((pH - 8) / 0.56) a2 = ( 0.52 * (1 + temperature / 43) * (salinity / 35) * (f2 * (freq**2)) / ((f2**2) + (freq**2)) * np.exp(-D / 6) ) a3 = 0.00049 * freq**2 * np.exp(-(temperature / 27 + D)) sea_abs = (a1 + a2 + a3) / 1000 # convert to db/m from db/km elif formula_source == "AZFP": temp_k = temperature + 273.0 f1 = 1320.0 * temp_k * np.exp(-1700 / temp_k) f2 = 1.55e7 * temp_k * np.exp(-3052 / temp_k) # Coefficients for absorption calculations k = 1 + pressure / 10.0 a = 8.95e-8 * (1 + temperature * (2.29e-2 - 5.08e-4 * temperature)) b = ( (salinity / 35.0) * 4.88e-7 * (1 + 0.0134 * temperature) * (1 - 0.00103 * k + 3.7e-7 * k**2) ) c = ( 4.86e-13 * (1 + temperature * (-0.042 + temperature * (8.53e-4 - temperature * 6.23e-6))) * (1 + k * (-3.84e-4 + k * 7.57e-8)) ) if salinity == 0: sea_abs = c * 
frequency**2 else: sea_abs = ( (a * f1 * frequency**2) / (f1**2 + frequency**2) + (b * f2 * frequency**2) / (f2**2 + frequency**2) + c * frequency**2 ) else: ValueError("Unknown formula source") return sea_abs
{"/echopype/convert/set_groups_ad2cp.py": ["/echopype/__init__.py", "/echopype/utils/coding.py", "/echopype/convert/parse_ad2cp.py", "/echopype/convert/set_groups_base.py"], "/echopype/tests/utils/test_source_filenames.py": ["/echopype/utils/prov.py"], "/echopype/echodata/convention/__init__.py": ["/echopype/echodata/convention/conv.py"], "/echopype/consolidate/split_beam_angle.py": ["/echopype/calibrate/ek80_complex.py"], "/echopype/calibrate/ecs.py": ["/echopype/utils/log.py"], "/echopype/tests/calibrate/test_ecs_integration.py": ["/echopype/__init__.py", "/echopype/calibrate/ecs.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/cal_params.py"], "/echopype/calibrate/api.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/calibrate/calibrate_azfp.py", "/echopype/calibrate/calibrate_ek.py"], "/echopype/convert/parse_ad2cp.py": ["/echopype/convert/parse_base.py"], "/echopype/echodata/combine.py": ["/echopype/utils/io.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/echodata/echodata.py"], "/echopype/tests/consolidate/test_consolidate_integration.py": ["/echopype/__init__.py"], "/echopype/echodata/api.py": ["/echopype/echodata/echodata.py", "/echopype/core.py"], "/echopype/tests/utils/test_utils_io.py": ["/echopype/utils/io.py"], "/echopype/echodata/sensor_ep_version_mapping/ep_version_mapper.py": ["/echopype/echodata/sensor_ep_version_mapping/v05x_to_v06x.py"], "/echopype/tests/mask/test_mask.py": ["/echopype/__init__.py", "/echopype/mask/__init__.py", "/echopype/mask/api.py"], "/echopype/tests/conftest.py": ["/echopype/testing.py"], "/echopype/tests/echodata/utils.py": ["/echopype/convert/set_groups_base.py", "/echopype/echodata/echodata.py"], "/echopype/tests/commongrid/test_mvbs.py": ["/echopype/__init__.py", "/echopype/commongrid/mvbs.py"], "/echopype/tests/calibrate/test_cal_params_integration.py": ["/echopype/__init__.py"], 
"/echopype/tests/test_core.py": ["/echopype/core.py"], "/echopype/calibrate/calibrate_azfp.py": ["/echopype/echodata/__init__.py", "/echopype/calibrate/cal_params.py", "/echopype/calibrate/calibrate_ek.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/range.py"], "/echopype/echodata/widgets/utils.py": ["/echopype/echodata/convention/utils.py"], "/echopype/echodata/simrad.py": ["/echopype/echodata/echodata.py"], "/echopype/tests/convert/test_convert_ek60.py": ["/echopype/__init__.py"], "/echopype/convert/parsed_to_zarr_ek60.py": ["/echopype/convert/parsed_to_zarr.py"], "/echopype/mask/api.py": ["/echopype/utils/io.py", "/echopype/utils/prov.py"], "/echopype/qc/__init__.py": ["/echopype/qc/api.py"], "/echopype/tests/utils/test_processinglevels_integration.py": ["/echopype/__init__.py"], "/echopype/tests/metrics/test_metrics_summary_statistics.py": ["/echopype/metrics/summary_statistics.py"], "/echopype/__init__.py": ["/echopype/convert/api.py", "/echopype/echodata/api.py", "/echopype/echodata/combine.py", "/echopype/utils/io.py", "/echopype/utils/log.py"], "/.ci_helpers/check-version.py": ["/echopype/__init__.py"], "/echopype/calibrate/ek80_complex.py": ["/echopype/convert/set_groups_ek80.py"], "/echopype/consolidate/__init__.py": ["/echopype/consolidate/api.py"], "/echopype/commongrid/api.py": ["/echopype/utils/prov.py", "/echopype/commongrid/mvbs.py"], "/echopype/tests/echodata/test_echodata_combine.py": ["/echopype/__init__.py", "/echopype/utils/coding.py", "/echopype/echodata/__init__.py", "/echopype/echodata/combine.py"], "/echopype/tests/utils/test_utils_log.py": ["/echopype/__init__.py"], "/echopype/mask/__init__.py": ["/echopype/mask/api.py"], "/echopype/tests/convert/test_convert_ad2cp.py": ["/echopype/__init__.py", "/echopype/testing.py"], "/echopype/convert/parsed_to_zarr_ek80.py": ["/echopype/convert/parsed_to_zarr_ek60.py"], "/echopype/tests/visualize/test_plot.py": ["/echopype/__init__.py", "/echopype/visualize/__init__.py", 
"/echopype/testing.py", "/echopype/calibrate/calibrate_ek.py", "/echopype/echodata/__init__.py", "/echopype/visualize/api.py"], "/echopype/calibrate/env_params.py": ["/echopype/echodata/__init__.py", "/echopype/calibrate/cal_params.py"], "/echopype/convert/utils/ek_raw_parsers.py": ["/echopype/utils/log.py", "/echopype/convert/utils/ek_date_conversion.py"], "/echopype/tests/convert/test_convert_azfp.py": ["/echopype/__init__.py"], "/echopype/tests/calibrate/test_calibrate.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params_old.py"], "/echopype/calibrate/range.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/calibrate/env_params.py"], "/echopype/tests/calibrate/test_env_params.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params.py"], "/echopype/convert/set_groups_base.py": ["/echopype/echodata/convention/__init__.py", "/echopype/utils/coding.py", "/echopype/utils/prov.py"], "/echopype/clean/api.py": ["/echopype/utils/prov.py", "/echopype/clean/noise_est.py"], "/echopype/calibrate/calibrate_ek.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/log.py", "/echopype/calibrate/cal_params.py", "/echopype/calibrate/calibrate_base.py", "/echopype/calibrate/ecs.py", "/echopype/calibrate/ek80_complex.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/range.py"], "/echopype/calibrate/calibrate_base.py": ["/echopype/echodata/__init__.py", "/echopype/utils/log.py", "/echopype/calibrate/ecs.py"], "/echopype/echodata/echodata.py": ["/echopype/utils/coding.py", "/echopype/utils/io.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/echodata/convention/__init__.py", "/echopype/echodata/widgets/utils.py", "/echopype/echodata/widgets/widgets.py", "/echopype/core.py", "/echopype/convert/api.py"], "/echopype/visualize/api.py": ["/echopype/visualize/plot.py", "/echopype/echodata/__init__.py", "/echopype/calibrate/calibrate_ek.py", 
"/echopype/calibrate/calibrate_azfp.py", "/echopype/utils/log.py"], "/echopype/echodata/sensor_ep_version_mapping/v05x_to_v06x.py": ["/echopype/core.py", "/echopype/utils/log.py", "/echopype/echodata/convention/__init__.py"], "/echopype/clean/__init__.py": ["/echopype/clean/api.py"], "/echopype/visualize/plot.py": ["/echopype/visualize/cm.py", "/echopype/utils/log.py"], "/echopype/commongrid/__init__.py": ["/echopype/commongrid/api.py"], "/echopype/convert/parsed_to_zarr.py": ["/echopype/utils/io.py"], "/echopype/calibrate/__init__.py": ["/echopype/calibrate/api.py"], "/echopype/echodata/convention/utils.py": ["/echopype/echodata/convention/__init__.py"], "/echopype/tests/echodata/test_echodata.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params_old.py", "/echopype/echodata/__init__.py", "/echopype/calibrate/calibrate_ek.py"], "/echopype/qc/api.py": ["/echopype/echodata/__init__.py", "/echopype/utils/log.py"], "/echopype/visualize/__init__.py": ["/echopype/visualize/api.py"], "/echopype/tests/qc/test_qc.py": ["/echopype/qc/__init__.py", "/echopype/qc/api.py"], "/echopype/core.py": ["/echopype/convert/parse_ad2cp.py", "/echopype/convert/parse_azfp.py", "/echopype/convert/parse_ek60.py", "/echopype/convert/parse_ek80.py", "/echopype/convert/parsed_to_zarr_ek60.py", "/echopype/convert/parsed_to_zarr_ek80.py", "/echopype/convert/set_groups_ad2cp.py", "/echopype/convert/set_groups_azfp.py", "/echopype/convert/set_groups_ek60.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/convert/parse_ek60.py": ["/echopype/convert/parse_base.py"], "/echopype/tests/calibrate/test_cal_params.py": ["/echopype/calibrate/cal_params.py"], "/echopype/tests/calibrate/test_ecs.py": ["/echopype/calibrate/ecs.py"], "/echopype/echodata/__init__.py": ["/echopype/echodata/echodata.py"], "/echopype/tests/convert/test_convert_ek80.py": ["/echopype/__init__.py", "/echopype/testing.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/convert/set_groups_ek80.py": 
["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/convert/set_groups_base.py"], "/echopype/consolidate/api.py": ["/echopype/calibrate/ek80_complex.py", "/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/io.py", "/echopype/utils/prov.py", "/echopype/consolidate/split_beam_angle.py"], "/echopype/convert/__init__.py": ["/echopype/convert/parse_ad2cp.py", "/echopype/convert/parse_azfp.py", "/echopype/convert/parse_base.py", "/echopype/convert/parse_ek60.py", "/echopype/convert/parse_ek80.py", "/echopype/convert/set_groups_ad2cp.py", "/echopype/convert/set_groups_azfp.py", "/echopype/convert/set_groups_ek60.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/utils/prov.py": ["/echopype/utils/log.py"], "/echopype/convert/parse_ek80.py": ["/echopype/convert/parse_base.py"], "/echopype/tests/echodata/test_echodata_simrad.py": ["/echopype/echodata/simrad.py"], "/echopype/tests/utils/test_coding.py": ["/echopype/utils/coding.py"], "/echopype/echodata/convention/conv.py": ["/echopype/echodata/__init__.py"], "/echopype/metrics/__init__.py": ["/echopype/metrics/summary_statistics.py"], "/echopype/tests/calibrate/test_calibrate_ek80.py": ["/echopype/__init__.py"], "/echopype/tests/commongrid/test_nasc.py": ["/echopype/__init__.py", "/echopype/calibrate/__init__.py", "/echopype/commongrid/__init__.py", "/echopype/commongrid/nasc.py", "/echopype/consolidate/__init__.py"], "/echopype/convert/api.py": ["/echopype/core.py", "/echopype/convert/parsed_to_zarr.py", "/echopype/echodata/echodata.py", "/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/utils/prov.py"], "/echopype/convert/set_groups_azfp.py": ["/echopype/utils/coding.py", "/echopype/convert/set_groups_base.py"], "/echopype/tests/clean/test_noise.py": ["/echopype/__init__.py"], "/echopype/convert/parse_base.py": ["/echopype/utils/log.py", "/echopype/convert/utils/ek_raw_io.py"], "/echopype/tests/convert/test_parsed_to_zarr.py": ["/echopype/__init__.py", 
"/echopype/echodata/echodata.py"], "/echopype/echodata/widgets/widgets.py": ["/echopype/echodata/widgets/utils.py"], "/echopype/tests/echodata/test_echodata_structure.py": ["/echopype/echodata/echodata.py", "/echopype/echodata/api.py"], "/echopype/tests/calibrate/test_range_integration.py": ["/echopype/__init__.py"], "/echopype/tests/calibrate/test_env_params_integration.py": ["/echopype/__init__.py"], "/echopype/tests/convert/test_convert_source_target_locs.py": ["/echopype/__init__.py", "/echopype/utils/coding.py"], "/echopype/tests/utils/test_utils_uwa.py": ["/echopype/utils/uwa.py"], "/echopype/utils/io.py": ["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/core.py"], "/echopype/tests/calibrate/test_ek80_complex.py": ["/echopype/calibrate/ek80_complex.py"], "/echopype/convert/set_groups_ek60.py": ["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/convert/set_groups_base.py"], "/echopype/convert/utils/ek_raw_io.py": ["/echopype/utils/log.py"], "/echopype/convert/parse_azfp.py": ["/echopype/utils/log.py", "/echopype/convert/parse_base.py"]}
73,873
OSOceanAcoustics/echopype
refs/heads/main
/echopype/convert/set_groups_ek60.py
from collections import defaultdict from typing import List import numpy as np import xarray as xr from ..utils.coding import set_time_encodings from ..utils.log import _init_logger # fmt: off from .set_groups_base import SetGroupsBase # fmt: on logger = _init_logger(__name__) class SetGroupsEK60(SetGroupsBase): """Class for saving groups to netcdf or zarr from EK60 data files.""" # The sets beam_only_names, ping_time_only_names, and # beam_ping_time_names are used in set_groups_base and # in converting from v0.5.x to v0.6.0. The values within # these sets are applied to all Sonar/Beam_groupX groups. # 2023-07-24: # PRs: # - https://github.com/OSOceanAcoustics/echopype/pull/1056 # - https://github.com/OSOceanAcoustics/echopype/pull/1083 # The artificially added beam and ping_time dimensions at v0.6.0 # were reverted at v0.8.0, due to concerns with efficiency and code clarity # (see https://github.com/OSOceanAcoustics/echopype/issues/684 and # https://github.com/OSOceanAcoustics/echopype/issues/978). # However, the mechanisms to expand these dimensions were preserved for # flexibility and potential later use. # Note such expansion is still applied on AZFP data for 2 variables # (see set_groups_azfp.py). # Variables that need only the beam dimension added to them. beam_only_names = set() # Variables that need only the ping_time dimension added to them. ping_time_only_names = set() # Variables that need beam and ping_time dimensions added to them. beam_ping_time_names = set() beamgroups_possible = [ { "name": "Beam_group1", "descr": ( "contains backscatter power (uncalibrated) and other beam or" " channel-specific data, including split-beam angle data when they exist." 
), } ] def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # obtain sorted channel dict in ascending order channels = list(self.parser_obj.config_datagram["transceivers"].keys()) channel_ids = { ch: self.parser_obj.config_datagram["transceivers"][ch]["channel_id"] for ch in channels } # example sorted_channel from a 5-channel data file for future reference: # 1: 'GPT 18 kHz 009072034d45 1-1 ES18-11' # 2: 'GPT 38 kHz 009072033fa2 2-1 ES38B' # 3: 'GPT 70 kHz 009072058c6c 3-1 ES70-7C' # 4: 'GPT 120 kHz 00907205794e 4-1 ES120-7C' # 5: 'GPT 200 kHz 0090720346a8 5-1 ES200-7C' # In some examples the channels may not be ordered, thus sorting is required self.sorted_channel = dict(sorted(channel_ids.items(), key=lambda item: item[1])) # obtain corresponding frequency dict from sorted channels self.freq = [ self.parser_obj.config_datagram["transceivers"][ch]["frequency"] for ch in self.sorted_channel.keys() ] def set_env(self) -> xr.Dataset: """Set the Environment group.""" # Loop over channels ds_env = [] for ch in self.sorted_channel.keys(): ds_tmp = xr.Dataset( { "absorption_indicative": ( ["time1"], self.parser_obj.ping_data_dict["absorption_coefficient"][ch], { "long_name": "Indicative acoustic absorption", "units": "dB/m", "valid_min": 0.0, }, ), "sound_speed_indicative": ( ["time1"], self.parser_obj.ping_data_dict["sound_velocity"][ch], { "long_name": "Indicative sound speed", "standard_name": "speed_of_sound_in_sea_water", "units": "m/s", "valid_min": 0.0, }, ), }, coords={ "time1": ( ["time1"], self.parser_obj.ping_time[ch], { "axis": "T", "long_name": "Timestamps for NMEA position datagrams", "standard_name": "time", "comment": "Time coordinate corresponding to environmental variables.", }, ) }, ) # Attach channel dimension/coordinate ds_tmp = ds_tmp.expand_dims({"channel": [self.sorted_channel[ch]]}) ds_tmp["channel"] = ds_tmp["channel"].assign_attrs( self._varattrs["beam_coord_default"]["channel"] ) ds_tmp["frequency_nominal"] = ( ["channel"], 
[self.parser_obj.config_datagram["transceivers"][ch]["frequency"]], { "units": "Hz", "long_name": "Transducer frequency", "valid_min": 0.0, "standard_name": "sound_frequency", }, ) ds_env.append(ds_tmp) # Merge data from all channels ds = xr.merge(ds_env) return set_time_encodings(ds) def set_sonar(self) -> xr.Dataset: """Set the Sonar group.""" # Add beam_group and beam_group_descr variables sharing a common dimension # (beam_group), using the information from self._beamgroups self._beamgroups = self.beamgroups_possible beam_groups_vars, beam_groups_coord = self._beam_groups_vars() ds = xr.Dataset(beam_groups_vars, coords=beam_groups_coord) # Assemble sonar group global attribute dictionary sonar_attr_dict = { "sonar_manufacturer": "Simrad", "sonar_model": self.sonar_model, # transducer (sonar) serial number is not stored in the EK60 raw data file, # so sonar_serial_number can't be populated from the raw datagrams "sonar_serial_number": "", "sonar_software_name": self.parser_obj.config_datagram["sounder_name"], "sonar_software_version": self.parser_obj.config_datagram["version"], "sonar_type": "echosounder", } ds = ds.assign_attrs(sonar_attr_dict) return ds def set_platform(self) -> xr.Dataset: """Set the Platform group.""" # Collect variables # Read lat/long from NMEA datagram time1, msg_type, lat, lon = self._extract_NMEA_latlon() # NMEA dataset: variables filled with np.nan if they do not exist platform_dict = {"platform_name": "", "platform_type": "", "platform_code_ICES": ""} # Values for the variables below having a channel (ch) dependence # are identical across channels ch = list(self.sorted_channel.keys())[0] ds = xr.Dataset( { "latitude": ( ["time1"], lat, self._varattrs["platform_var_default"]["latitude"], ), "longitude": ( ["time1"], lon, self._varattrs["platform_var_default"]["longitude"], ), "sentence_type": ( ["time1"], msg_type, self._varattrs["platform_var_default"]["sentence_type"], ), "pitch": ( ["time2"], 
self.parser_obj.ping_data_dict["pitch"][ch], self._varattrs["platform_var_default"]["pitch"], ), "roll": ( ["time2"], self.parser_obj.ping_data_dict["roll"][ch], self._varattrs["platform_var_default"]["roll"], ), "vertical_offset": ( ["time2"], self.parser_obj.ping_data_dict["heave"][ch], self._varattrs["platform_var_default"]["vertical_offset"], ), "water_level": ( [], # a scalar, assumed to be a constant in the source transducer_depth data self.parser_obj.ping_data_dict["transducer_depth"][ch][0], self._varattrs["platform_var_default"]["water_level"], ), **{ var: ([], np.nan, self._varattrs["platform_var_default"][var]) for var in [ "MRU_offset_x", "MRU_offset_y", "MRU_offset_z", "MRU_rotation_x", "MRU_rotation_y", "MRU_rotation_z", "position_offset_x", "position_offset_y", "position_offset_z", ] }, }, coords={ "time1": ( ["time1"], time1, { **self._varattrs["platform_coord_default"]["time1"], "comment": "Time coordinate corresponding to NMEA position data.", }, ), "time2": ( ["time2"], self.parser_obj.ping_time[ch], { "axis": "T", "long_name": "Timestamps for platform motion and orientation data", "standard_name": "time", "comment": "Time coordinate corresponding to platform motion and " "orientation data.", }, ), }, ) # Loop over channels and merge all ds_plat = [] for ch in self.sorted_channel.keys(): ds_tmp = xr.Dataset( { "transducer_offset_x": ( [], self.parser_obj.config_datagram["transceivers"][ch].get("pos_x", np.nan), self._varattrs["platform_var_default"]["transducer_offset_x"], ), "transducer_offset_y": ( [], self.parser_obj.config_datagram["transceivers"][ch].get("pos_y", np.nan), self._varattrs["platform_var_default"]["transducer_offset_y"], ), "transducer_offset_z": ( [], self.parser_obj.config_datagram["transceivers"][ch].get("pos_z", np.nan), self._varattrs["platform_var_default"]["transducer_offset_z"], ), }, ) # Attach channel dimension/coordinate ds_tmp = ds_tmp.expand_dims({"channel": [self.sorted_channel[ch]]}) ds_tmp["frequency_nominal"] = 
( ["channel"], [self.parser_obj.config_datagram["transceivers"][ch]["frequency"]], { "units": "Hz", "long_name": "Transducer frequency", "valid_min": 0.0, "standard_name": "sound_frequency", }, ) ds_plat.append(ds_tmp) # Merge data from all channels # TODO: for current test data we see all # pitch/roll/heave are the same for all freq channels # consider only saving those from the first channel ds_plat = xr.merge(ds_plat) ds_plat["channel"] = ds_plat["channel"].assign_attrs( self._varattrs["beam_coord_default"]["channel"] ) # Merge with NMEA data ds = xr.merge([ds, ds_plat], combine_attrs="override") ds = ds.assign_attrs(platform_dict) return set_time_encodings(ds) def _set_beam_group1_zarr_vars(self, ds: xr.Dataset) -> xr.Dataset: """ Modifies ds by setting all variables associated with ``Beam_group1``, that were directly written to a temporary zarr file. Parameters ---------- ds : xr.Dataset Dataset representing ``Beam_group1`` filled with all variables, besides those written to zarr Returns ------- A modified version of ``ds`` with the zarr variables added to it. """ # TODO: In the future it would be nice to have a dictionary of # attributes stored in one place for all of the variables. # This would reduce unnecessary code duplication in the # functions below. 
# obtain DataArrays using zarr variables zarr_path = self.parsed2zarr_obj.zarr_file_name backscatter_r = self._get_power_dataarray(zarr_path) angle_athwartship, angle_alongship = self._get_angle_dataarrays(zarr_path) # append DataArrays created from zarr file ds = ds.assign( backscatter_r=backscatter_r, angle_athwartship=angle_athwartship, angle_alongship=angle_alongship, ) return ds def set_beam(self) -> List[xr.Dataset]: """Set the /Sonar/Beam_group1 group.""" # Channel-specific variables params = [ "beam_type", "beamwidth_alongship", "beamwidth_athwartship", "dir_x", "dir_y", "dir_z", "angle_offset_alongship", "angle_offset_athwartship", "angle_sensitivity_alongship", "angle_sensitivity_athwartship", "pos_x", "pos_y", "pos_z", "equivalent_beam_angle", "gpt_software_version", "gain", ] beam_params = defaultdict() for param in params: beam_params[param] = [ self.parser_obj.config_datagram["transceivers"][ch_seq].get(param, np.nan) for ch_seq in self.sorted_channel.keys() ] for i, ch in enumerate(self.sorted_channel.keys()): if ( np.isclose(beam_params["dir_x"][i], 0.00) and np.isclose(beam_params["dir_y"][i], 0.00) and np.isclose(beam_params["dir_z"][i], 0.00) ): beam_params["dir_x"][i] = np.nan beam_params["dir_y"][i] = np.nan beam_params["dir_z"][i] = np.nan # TODO: Need to discuss if to remove INDEX2POWER factor from the backscatter_r # currently this factor is multiplied to the raw data before backscatter_r is saved. # This is if we are encoding only raw data to the .nc/zarr file. # Need discussion since then the units won't match # with convention (though it didn't match already...). 
# Assemble variables into a dataset ds = xr.Dataset( { "frequency_nominal": ( ["channel"], self.freq, { "units": "Hz", "long_name": "Transducer frequency", "valid_min": 0.0, "standard_name": "sound_frequency", }, ), "beam_type": ( "channel", beam_params["beam_type"], {"long_name": "type of transducer (0-single, 1-split)"}, ), "beamwidth_twoway_alongship": ( ["channel"], beam_params["beamwidth_alongship"], { "long_name": "Half power two-way beam width along alongship axis of beam", # noqa "units": "arc_degree", "valid_range": (0.0, 360.0), "comment": ( "Introduced in echopype for Simrad echosounders to avoid potential confusion with convention definitions. " # noqa "The alongship angle corresponds to the minor angle in SONAR-netCDF4 vers 2. " # noqa "The convention defines one-way transmit or receive beamwidth (beamwidth_receive_minor and beamwidth_transmit_minor), but Simrad echosounders record two-way beamwidth in the data." # noqa ), }, ), "beamwidth_twoway_athwartship": ( ["channel"], beam_params["beamwidth_athwartship"], { "long_name": "Half power two-way beam width along athwartship axis of beam", # noqa "units": "arc_degree", "valid_range": (0.0, 360.0), "comment": ( "Introduced in echopype for Simrad echosounders to avoid potential confusion with convention definitions. " # noqa "The athwartship angle corresponds to the major angle in SONAR-netCDF4 vers 2. " # noqa "The convention defines one-way transmit or receive beamwidth (beamwidth_receive_major and beamwidth_transmit_major), but Simrad echosounders record two-way beamwidth in the data." 
# noqa ), }, ), "beam_direction_x": ( ["channel"], beam_params["dir_x"], { "long_name": "x-component of the vector that gives the pointing " "direction of the beam, in sonar beam coordinate " "system", "units": "1", "valid_range": (-1.0, 1.0), }, ), "beam_direction_y": ( ["channel"], beam_params["dir_y"], { "long_name": "y-component of the vector that gives the pointing " "direction of the beam, in sonar beam coordinate " "system", "units": "1", "valid_range": (-1.0, 1.0), }, ), "beam_direction_z": ( ["channel"], beam_params["dir_z"], { "long_name": "z-component of the vector that gives the pointing " "direction of the beam, in sonar beam coordinate " "system", "units": "1", "valid_range": (-1.0, 1.0), }, ), "angle_offset_alongship": ( ["channel"], beam_params["angle_offset_alongship"], { "long_name": "electrical alongship angle offset of the transducer", "comment": ( "Introduced in echopype for Simrad echosounders. " # noqa "The alongship angle corresponds to the minor angle in SONAR-netCDF4 vers 2. " # noqa ), }, ), "angle_offset_athwartship": ( ["channel"], beam_params["angle_offset_athwartship"], { "long_name": "electrical athwartship angle offset of the transducer", "comment": ( "Introduced in echopype for Simrad echosounders. " # noqa "The athwartship angle corresponds to the major angle in SONAR-netCDF4 vers 2. " # noqa ), }, ), "angle_sensitivity_alongship": ( ["channel"], beam_params["angle_sensitivity_alongship"], { "long_name": "alongship angle sensitivity of the transducer", "comment": ( "Introduced in echopype for Simrad echosounders. " # noqa "The alongship angle corresponds to the minor angle in SONAR-netCDF4 vers 2. " # noqa ), }, ), "angle_sensitivity_athwartship": ( ["channel"], beam_params["angle_sensitivity_athwartship"], { "long_name": "athwartship angle sensitivity of the transducer", "comment": ( "Introduced in echopype for Simrad echosounders. " # noqa "The athwartship angle corresponds to the major angle in SONAR-netCDF4 vers 2. 
" # noqa ), }, ), "equivalent_beam_angle": ( ["channel"], beam_params["equivalent_beam_angle"], { "long_name": "Equivalent beam angle", "units": "sr", "valid_range": (0.0, 4 * np.pi), }, ), "gain_correction": ( ["channel"], beam_params["gain"], {"long_name": "Gain correction", "units": "dB"}, ), "gpt_software_version": ( ["channel"], beam_params["gpt_software_version"], ), "transmit_frequency_start": ( ["channel"], self.freq, self._varattrs["beam_var_default"]["transmit_frequency_start"], ), "transmit_frequency_stop": ( ["channel"], self.freq, self._varattrs["beam_var_default"]["transmit_frequency_stop"], ), "transmit_type": ( [], "CW", { "long_name": "Type of transmitted pulse", "flag_values": ["CW"], "flag_meanings": [ "Continuous Wave – a pulse nominally of one frequency", ], }, ), "beam_stabilisation": ( [], np.array(0, np.byte), { "long_name": "Beam stabilisation applied (or not)", "flag_values": [0, 1], "flag_meanings": ["not stabilised", "stabilised"], }, ), "non_quantitative_processing": ( [], np.array(0, np.int16), { "long_name": "Presence or not of non-quantitative processing applied" " to the backscattering data (sonar specific)", "flag_values": [0], "flag_meanings": ["None"], }, ), }, coords={ "channel": ( ["channel"], list(self.sorted_channel.values()), self._varattrs["beam_coord_default"]["channel"], ), }, attrs={"beam_mode": "vertical", "conversion_equation_t": "type_3"}, ) # Construct Dataset with ping-by-ping data from all channels ds_backscatter = [] for ch in self.sorted_channel.keys(): var_dict = { "sample_interval": ( ["ping_time"], self.parser_obj.ping_data_dict["sample_interval"][ch], { "long_name": "Interval between recorded raw data samples", "units": "s", "valid_min": 0.0, }, ), "transmit_bandwidth": ( ["ping_time"], self.parser_obj.ping_data_dict["bandwidth"][ch], { "long_name": "Nominal bandwidth of transmitted pulse", "units": "Hz", "valid_min": 0.0, }, ), "transmit_duration_nominal": ( ["ping_time"], 
self.parser_obj.ping_data_dict["pulse_length"][ch], { "long_name": "Nominal bandwidth of transmitted pulse", "units": "s", "valid_min": 0.0, }, ), "transmit_power": ( ["ping_time"], self.parser_obj.ping_data_dict["transmit_power"][ch], { "long_name": "Nominal transmit power", "units": "W", "valid_min": 0.0, }, ), "data_type": ( ["ping_time"], np.array(self.parser_obj.ping_data_dict["mode"][ch], dtype=np.byte), { "long_name": "recorded data type (1=power only, 2=angle only, 3=power and angle)", # noqa "flag_values": [1, 2, 3], "flag_meanings": ["power only", "angle only", "power and angle"], }, ), "sample_time_offset": ( ["ping_time"], ( np.array(self.parser_obj.ping_data_dict["offset"][ch]) * np.array(self.parser_obj.ping_data_dict["sample_interval"][ch]) ), { "long_name": "Time offset that is subtracted from the timestamp" " of each sample", "units": "s", }, ), "channel_mode": ( ["ping_time"], np.array(self.parser_obj.ping_data_dict["transmit_mode"][ch], dtype=np.byte), { "long_name": "Transceiver mode", "flag_values": [-1, 0, 1, 2], "flag_meanings": ["Unknown", "Active", "Passive", "Test"], "comment": "From transmit_mode in the EK60 datagram", }, ), } if not self.parsed2zarr_obj.temp_zarr_dir: var_dict["backscatter_r"] = ( ["ping_time", "range_sample"], self.parser_obj.ping_data_dict["power"][ch], { "long_name": self._varattrs["beam_var_default"]["backscatter_r"][ "long_name" ], "units": "dB", }, ) ds_tmp = xr.Dataset( var_dict, coords={ "ping_time": ( ["ping_time"], self.parser_obj.ping_time[ch], self._varattrs["beam_coord_default"]["ping_time"], ), "range_sample": ( ["range_sample"], np.arange(self.parser_obj.ping_data_dict["power"][ch].shape[1]), self._varattrs["beam_coord_default"]["range_sample"], ), }, ) else: ds_tmp = xr.Dataset( var_dict, coords={ "ping_time": ( ["ping_time"], self.parser_obj.ping_time[ch], self._varattrs["beam_coord_default"]["ping_time"], ), }, ) if not self.parsed2zarr_obj.temp_zarr_dir: # Save angle data if exist based on values in # 
self.parser_obj.ping_data_dict['mode'][ch] # Assume the mode of all pings are identical # 1 = Power only, 2 = Angle only 3 = Power & Angle if np.all(np.array(self.parser_obj.ping_data_dict["mode"][ch]) != 1): ds_tmp = ds_tmp.assign( { "angle_athwartship": ( ["ping_time", "range_sample"], self.parser_obj.ping_data_dict["angle"][ch][:, :, 0], { "long_name": "electrical athwartship angle", "comment": ( "Introduced in echopype for Simrad echosounders. " # noqa + "The athwartship angle corresponds to the major angle in SONAR-netCDF4 vers 2. " # noqa ), }, ), "angle_alongship": ( ["ping_time", "range_sample"], self.parser_obj.ping_data_dict["angle"][ch][:, :, 1], { "long_name": "electrical alongship angle", "comment": ( "Introduced in echopype for Simrad echosounders. " # noqa + "The alongship angle corresponds to the minor angle in SONAR-netCDF4 vers 2. " # noqa ), }, ), } ) # Attach frequency dimension/coordinate ds_tmp = ds_tmp.expand_dims({"channel": [self.sorted_channel[ch]]}) ds_tmp["channel"] = ds_tmp["channel"].assign_attrs( self._varattrs["beam_coord_default"]["channel"] ) ds_backscatter.append(ds_tmp) # Merge data from all channels ds = xr.merge( [ds, xr.merge(ds_backscatter)], combine_attrs="override" ) # override keeps the Dataset attributes if self.parsed2zarr_obj.temp_zarr_dir: ds = self._set_beam_group1_zarr_vars(ds) # Manipulate some Dataset dimensions to adhere to convention self.beam_groups_to_convention( ds, self.beam_only_names, self.beam_ping_time_names, self.ping_time_only_names ) return [set_time_encodings(ds)] def set_vendor(self) -> xr.Dataset: # Retrieve pulse length, gain, and sa correction pulse_length = np.array( [ self.parser_obj.config_datagram["transceivers"][ch]["pulse_length_table"] for ch in self.sorted_channel.keys() ] ) gain = np.array( [ self.parser_obj.config_datagram["transceivers"][ch]["gain_table"] for ch in self.sorted_channel.keys() ] ) sa_correction = [ self.parser_obj.config_datagram["transceivers"][ch]["sa_correction_table"] 
for ch in self.sorted_channel.keys() ] # Save pulse length and sa correction ds = xr.Dataset( { "frequency_nominal": ( ["channel"], self.freq, { "units": "Hz", "long_name": "Transducer frequency", "valid_min": 0.0, "standard_name": "sound_frequency", }, ), "sa_correction": (["channel", "pulse_length_bin"], sa_correction), "gain_correction": (["channel", "pulse_length_bin"], gain), "pulse_length": (["channel", "pulse_length_bin"], pulse_length), }, coords={ "channel": ( ["channel"], list(self.sorted_channel.values()), self._varattrs["beam_coord_default"]["channel"], ), "pulse_length_bin": ( ["pulse_length_bin"], np.arange(pulse_length.shape[1]), ), }, ) return ds
{"/echopype/convert/set_groups_ad2cp.py": ["/echopype/__init__.py", "/echopype/utils/coding.py", "/echopype/convert/parse_ad2cp.py", "/echopype/convert/set_groups_base.py"], "/echopype/tests/utils/test_source_filenames.py": ["/echopype/utils/prov.py"], "/echopype/echodata/convention/__init__.py": ["/echopype/echodata/convention/conv.py"], "/echopype/consolidate/split_beam_angle.py": ["/echopype/calibrate/ek80_complex.py"], "/echopype/calibrate/ecs.py": ["/echopype/utils/log.py"], "/echopype/tests/calibrate/test_ecs_integration.py": ["/echopype/__init__.py", "/echopype/calibrate/ecs.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/cal_params.py"], "/echopype/calibrate/api.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/calibrate/calibrate_azfp.py", "/echopype/calibrate/calibrate_ek.py"], "/echopype/convert/parse_ad2cp.py": ["/echopype/convert/parse_base.py"], "/echopype/echodata/combine.py": ["/echopype/utils/io.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/echodata/echodata.py"], "/echopype/tests/consolidate/test_consolidate_integration.py": ["/echopype/__init__.py"], "/echopype/echodata/api.py": ["/echopype/echodata/echodata.py", "/echopype/core.py"], "/echopype/tests/utils/test_utils_io.py": ["/echopype/utils/io.py"], "/echopype/echodata/sensor_ep_version_mapping/ep_version_mapper.py": ["/echopype/echodata/sensor_ep_version_mapping/v05x_to_v06x.py"], "/echopype/tests/mask/test_mask.py": ["/echopype/__init__.py", "/echopype/mask/__init__.py", "/echopype/mask/api.py"], "/echopype/tests/conftest.py": ["/echopype/testing.py"], "/echopype/tests/echodata/utils.py": ["/echopype/convert/set_groups_base.py", "/echopype/echodata/echodata.py"], "/echopype/tests/commongrid/test_mvbs.py": ["/echopype/__init__.py", "/echopype/commongrid/mvbs.py"], "/echopype/tests/calibrate/test_cal_params_integration.py": ["/echopype/__init__.py"], 
"/echopype/tests/test_core.py": ["/echopype/core.py"], "/echopype/calibrate/calibrate_azfp.py": ["/echopype/echodata/__init__.py", "/echopype/calibrate/cal_params.py", "/echopype/calibrate/calibrate_ek.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/range.py"], "/echopype/echodata/widgets/utils.py": ["/echopype/echodata/convention/utils.py"], "/echopype/echodata/simrad.py": ["/echopype/echodata/echodata.py"], "/echopype/tests/convert/test_convert_ek60.py": ["/echopype/__init__.py"], "/echopype/convert/parsed_to_zarr_ek60.py": ["/echopype/convert/parsed_to_zarr.py"], "/echopype/mask/api.py": ["/echopype/utils/io.py", "/echopype/utils/prov.py"], "/echopype/qc/__init__.py": ["/echopype/qc/api.py"], "/echopype/tests/utils/test_processinglevels_integration.py": ["/echopype/__init__.py"], "/echopype/tests/metrics/test_metrics_summary_statistics.py": ["/echopype/metrics/summary_statistics.py"], "/echopype/__init__.py": ["/echopype/convert/api.py", "/echopype/echodata/api.py", "/echopype/echodata/combine.py", "/echopype/utils/io.py", "/echopype/utils/log.py"], "/.ci_helpers/check-version.py": ["/echopype/__init__.py"], "/echopype/calibrate/ek80_complex.py": ["/echopype/convert/set_groups_ek80.py"], "/echopype/consolidate/__init__.py": ["/echopype/consolidate/api.py"], "/echopype/commongrid/api.py": ["/echopype/utils/prov.py", "/echopype/commongrid/mvbs.py"], "/echopype/tests/echodata/test_echodata_combine.py": ["/echopype/__init__.py", "/echopype/utils/coding.py", "/echopype/echodata/__init__.py", "/echopype/echodata/combine.py"], "/echopype/tests/utils/test_utils_log.py": ["/echopype/__init__.py"], "/echopype/mask/__init__.py": ["/echopype/mask/api.py"], "/echopype/tests/convert/test_convert_ad2cp.py": ["/echopype/__init__.py", "/echopype/testing.py"], "/echopype/convert/parsed_to_zarr_ek80.py": ["/echopype/convert/parsed_to_zarr_ek60.py"], "/echopype/tests/visualize/test_plot.py": ["/echopype/__init__.py", "/echopype/visualize/__init__.py", 
"/echopype/testing.py", "/echopype/calibrate/calibrate_ek.py", "/echopype/echodata/__init__.py", "/echopype/visualize/api.py"], "/echopype/calibrate/env_params.py": ["/echopype/echodata/__init__.py", "/echopype/calibrate/cal_params.py"], "/echopype/convert/utils/ek_raw_parsers.py": ["/echopype/utils/log.py", "/echopype/convert/utils/ek_date_conversion.py"], "/echopype/tests/convert/test_convert_azfp.py": ["/echopype/__init__.py"], "/echopype/tests/calibrate/test_calibrate.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params_old.py"], "/echopype/calibrate/range.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/calibrate/env_params.py"], "/echopype/tests/calibrate/test_env_params.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params.py"], "/echopype/convert/set_groups_base.py": ["/echopype/echodata/convention/__init__.py", "/echopype/utils/coding.py", "/echopype/utils/prov.py"], "/echopype/clean/api.py": ["/echopype/utils/prov.py", "/echopype/clean/noise_est.py"], "/echopype/calibrate/calibrate_ek.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/log.py", "/echopype/calibrate/cal_params.py", "/echopype/calibrate/calibrate_base.py", "/echopype/calibrate/ecs.py", "/echopype/calibrate/ek80_complex.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/range.py"], "/echopype/calibrate/calibrate_base.py": ["/echopype/echodata/__init__.py", "/echopype/utils/log.py", "/echopype/calibrate/ecs.py"], "/echopype/echodata/echodata.py": ["/echopype/utils/coding.py", "/echopype/utils/io.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/echodata/convention/__init__.py", "/echopype/echodata/widgets/utils.py", "/echopype/echodata/widgets/widgets.py", "/echopype/core.py", "/echopype/convert/api.py"], "/echopype/visualize/api.py": ["/echopype/visualize/plot.py", "/echopype/echodata/__init__.py", "/echopype/calibrate/calibrate_ek.py", 
"/echopype/calibrate/calibrate_azfp.py", "/echopype/utils/log.py"], "/echopype/echodata/sensor_ep_version_mapping/v05x_to_v06x.py": ["/echopype/core.py", "/echopype/utils/log.py", "/echopype/echodata/convention/__init__.py"], "/echopype/clean/__init__.py": ["/echopype/clean/api.py"], "/echopype/visualize/plot.py": ["/echopype/visualize/cm.py", "/echopype/utils/log.py"], "/echopype/commongrid/__init__.py": ["/echopype/commongrid/api.py"], "/echopype/convert/parsed_to_zarr.py": ["/echopype/utils/io.py"], "/echopype/calibrate/__init__.py": ["/echopype/calibrate/api.py"], "/echopype/echodata/convention/utils.py": ["/echopype/echodata/convention/__init__.py"], "/echopype/tests/echodata/test_echodata.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params_old.py", "/echopype/echodata/__init__.py", "/echopype/calibrate/calibrate_ek.py"], "/echopype/qc/api.py": ["/echopype/echodata/__init__.py", "/echopype/utils/log.py"], "/echopype/visualize/__init__.py": ["/echopype/visualize/api.py"], "/echopype/tests/qc/test_qc.py": ["/echopype/qc/__init__.py", "/echopype/qc/api.py"], "/echopype/core.py": ["/echopype/convert/parse_ad2cp.py", "/echopype/convert/parse_azfp.py", "/echopype/convert/parse_ek60.py", "/echopype/convert/parse_ek80.py", "/echopype/convert/parsed_to_zarr_ek60.py", "/echopype/convert/parsed_to_zarr_ek80.py", "/echopype/convert/set_groups_ad2cp.py", "/echopype/convert/set_groups_azfp.py", "/echopype/convert/set_groups_ek60.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/convert/parse_ek60.py": ["/echopype/convert/parse_base.py"], "/echopype/tests/calibrate/test_cal_params.py": ["/echopype/calibrate/cal_params.py"], "/echopype/tests/calibrate/test_ecs.py": ["/echopype/calibrate/ecs.py"], "/echopype/echodata/__init__.py": ["/echopype/echodata/echodata.py"], "/echopype/tests/convert/test_convert_ek80.py": ["/echopype/__init__.py", "/echopype/testing.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/convert/set_groups_ek80.py": 
["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/convert/set_groups_base.py"], "/echopype/consolidate/api.py": ["/echopype/calibrate/ek80_complex.py", "/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/io.py", "/echopype/utils/prov.py", "/echopype/consolidate/split_beam_angle.py"], "/echopype/convert/__init__.py": ["/echopype/convert/parse_ad2cp.py", "/echopype/convert/parse_azfp.py", "/echopype/convert/parse_base.py", "/echopype/convert/parse_ek60.py", "/echopype/convert/parse_ek80.py", "/echopype/convert/set_groups_ad2cp.py", "/echopype/convert/set_groups_azfp.py", "/echopype/convert/set_groups_ek60.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/utils/prov.py": ["/echopype/utils/log.py"], "/echopype/convert/parse_ek80.py": ["/echopype/convert/parse_base.py"], "/echopype/tests/echodata/test_echodata_simrad.py": ["/echopype/echodata/simrad.py"], "/echopype/tests/utils/test_coding.py": ["/echopype/utils/coding.py"], "/echopype/echodata/convention/conv.py": ["/echopype/echodata/__init__.py"], "/echopype/metrics/__init__.py": ["/echopype/metrics/summary_statistics.py"], "/echopype/tests/calibrate/test_calibrate_ek80.py": ["/echopype/__init__.py"], "/echopype/tests/commongrid/test_nasc.py": ["/echopype/__init__.py", "/echopype/calibrate/__init__.py", "/echopype/commongrid/__init__.py", "/echopype/commongrid/nasc.py", "/echopype/consolidate/__init__.py"], "/echopype/convert/api.py": ["/echopype/core.py", "/echopype/convert/parsed_to_zarr.py", "/echopype/echodata/echodata.py", "/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/utils/prov.py"], "/echopype/convert/set_groups_azfp.py": ["/echopype/utils/coding.py", "/echopype/convert/set_groups_base.py"], "/echopype/tests/clean/test_noise.py": ["/echopype/__init__.py"], "/echopype/convert/parse_base.py": ["/echopype/utils/log.py", "/echopype/convert/utils/ek_raw_io.py"], "/echopype/tests/convert/test_parsed_to_zarr.py": ["/echopype/__init__.py", 
"/echopype/echodata/echodata.py"], "/echopype/echodata/widgets/widgets.py": ["/echopype/echodata/widgets/utils.py"], "/echopype/tests/echodata/test_echodata_structure.py": ["/echopype/echodata/echodata.py", "/echopype/echodata/api.py"], "/echopype/tests/calibrate/test_range_integration.py": ["/echopype/__init__.py"], "/echopype/tests/calibrate/test_env_params_integration.py": ["/echopype/__init__.py"], "/echopype/tests/convert/test_convert_source_target_locs.py": ["/echopype/__init__.py", "/echopype/utils/coding.py"], "/echopype/tests/utils/test_utils_uwa.py": ["/echopype/utils/uwa.py"], "/echopype/utils/io.py": ["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/core.py"], "/echopype/tests/calibrate/test_ek80_complex.py": ["/echopype/calibrate/ek80_complex.py"], "/echopype/convert/set_groups_ek60.py": ["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/convert/set_groups_base.py"], "/echopype/convert/utils/ek_raw_io.py": ["/echopype/utils/log.py"], "/echopype/convert/parse_azfp.py": ["/echopype/utils/log.py", "/echopype/convert/parse_base.py"]}
73,874
OSOceanAcoustics/echopype
refs/heads/main
/echopype/convert/utils/ek_raw_io.py
""" Code originally developed for pyEcholab (https://github.com/CI-CMG/pyEcholab) by Rick Towler <rick.towler@noaa.gov> at NOAA AFSC. Contains low-level functions called by ./ek_raw_parsers.py """ import struct from io import SEEK_CUR, SEEK_END, SEEK_SET, BufferedReader, FileIO import fsspec from fsspec.implementations.local import LocalFileSystem from ...utils.log import _init_logger from . import ek_raw_parsers as parsers __all__ = ["RawSimradFile"] logger = _init_logger(__name__) class SimradEOF(Exception): def __init__(self, message="EOF Reached!"): self.message = message def __str__(self): return self.message class DatagramSizeError(Exception): def __init__(self, message, expected_size_tuple, file_pos=(None, None)): self.message = message self.expected_size = expected_size_tuple[0] self.retrieved_size = expected_size_tuple[1] self.file_pos_bytes = file_pos[0] self.file_pos_dgrams = file_pos[1] def __str__(self): errstr = self.message + "%s != %s @ (%s, %s)" % ( self.expected_size, self.retrieved_size, self.file_pos_bytes, self.file_pos_dgrams, ) return errstr class DatagramReadError(Exception): def __init__(self, message, expected_size_tuple, file_pos=(None, None)): self.message = message self.expected_size = expected_size_tuple[0] self.retrieved_size = expected_size_tuple[1] self.file_pos_bytes = file_pos[0] self.file_pos_dgrams = file_pos[1] def __str__(self): errstr = [self.message] if self.expected_size is not None: errstr.append("%s != %s" % (self.expected_size, self.retrieved_size)) if self.file_pos_bytes is not None: errstr.append("@ (%sL, %s)" % (self.file_pos_bytes, self.file_pos_dgrams)) return " ".join(errstr) class RawSimradFile(BufferedReader): """ A low-level extension of the built in python file object allowing the reading/writing of SIMRAD RAW files on datagram by datagram basis (instead of at the byte level.) Calls to the read method return parse datagrams as dicts. 
""" #: Dict object with datagram header/python class key/value pairs DGRAM_TYPE_KEY = { "RAW": parsers.SimradRawParser(), "CON": parsers.SimradConfigParser(), "TAG": parsers.SimradAnnotationParser(), "NME": parsers.SimradNMEAParser(), "BOT": parsers.SimradBottomParser(), "DEP": parsers.SimradDepthParser(), "XML": parsers.SimradXMLParser(), "FIL": parsers.SimradFILParser(), "MRU": parsers.SimradMRUParser(), } def __init__( self, name, mode="rb", closefd=True, return_raw=False, buffer_size=1024 * 1024, storage_options={}, ): # 9-28-18 RHT: Changed RawSimradFile to implement BufferedReader instead of # io.FileIO to increase performance. # create a raw file object for the buffered reader fmap = fsspec.get_mapper(name, **storage_options) if isinstance(fmap.fs, LocalFileSystem): fio = FileIO(name, mode=mode, closefd=closefd) else: fio = fmap.fs.open(fmap.root) # initialize the superclass super().__init__(fio, buffer_size=buffer_size) self._current_dgram_offset = 0 self._total_dgram_count = None self._return_raw = return_raw def _seek_bytes(self, bytes_, whence=0): """ :param bytes_: byte offset :type bytes_: int :param whence: Seeks a file by bytes instead of datagrams. """ super().seek(bytes_, whence) def _tell_bytes(self): """ Returns the file pointer position in bytes. """ return super().tell() def _read_dgram_size(self): """ Attempts to read the size of the next datagram in the file. """ buf = self._read_bytes(4) if len(buf) != 4: self._seek_bytes(-len(buf), SEEK_CUR) raise DatagramReadError( "Short read while getting dgram size", (4, len(buf)), file_pos=(self._tell_bytes(), self.tell()), ) else: return struct.unpack("=l", buf)[0] # This return value is an int object. def _bytes_remaining(self): old_pos = self._tell_bytes() self._seek_bytes(0, SEEK_END) end_pos = self._tell_bytes() offset = end_pos - old_pos self._seek_bytes(old_pos, SEEK_SET) return offset def _read_timestamp(self): """ Attempts to read the datagram timestamp. 
""" buf = self._read_bytes(8) if len(buf) != 8: self._seek_bytes(-len(buf), SEEK_CUR) raise DatagramReadError( "Short read while getting timestamp", (8, len(buf)), file_pos=(self._tell_bytes(), self.tell()), ) else: lowDateField, highDateField = struct.unpack("=2L", buf) # 11/26/19 - RHT - modified to return the raw bytes return lowDateField, highDateField, buf def _read_dgram_header(self): """ :returns: dgram_size, dgram_type, (low_date, high_date) Attempts to read the datagram header consisting of: long dgram_size char[4] type long lowDateField long highDateField """ try: dgram_size = self._read_dgram_size() except Exception: if self.at_eof(): raise SimradEOF() else: raise # get the datagram type buf = self._read_bytes(4) if len(buf) != 4: if self.at_eof(): raise SimradEOF() else: self._seek_bytes(-len(buf), SEEK_CUR) raise DatagramReadError( "Short read while getting dgram type", (4, len(buf)), file_pos=(self._tell_bytes(), self.tell()), ) else: dgram_type = buf dgram_type = dgram_type.decode("latin_1") # 11/26/19 - RHT # As part of the rewrite of read to remove the reverse seeking, # store the raw header bytes so we can prepend them to the raw # data bytes and pass it all to the parser. raw_bytes = buf # read the timestamp - this method was also modified to return # the raw bytes lowDateField, highDateField, buf = self._read_timestamp() # add the timestamp bytes to the raw_bytes string raw_bytes += buf return dict( size=dgram_size, type=dgram_type, low_date=lowDateField, high_date=highDateField, raw_bytes=raw_bytes, ) def _read_bytes(self, k): """ Reads raw bytes from the file """ return super().read(k) def _read_next_dgram(self): """ Attempts to read the next datagram from the file. Returns the datagram as a raw string """ # 11/26/19 - RHT - Modified this method so it doesn't "peek" # at the next datagram before reading which was inefficient. 
# To minimize changes to the code, methods to read the header # and timestamp were modified to return the raw bytes which # allows us to pass them onto the parser without having to # rewind and read again as was previously done. # store our current location in the file old_file_pos = self._tell_bytes() # try to read the header of the next datagram try: header = self._read_dgram_header() except DatagramReadError as e: e.message = "Short read while getting raw file datagram header" raise e # check for invalid time data if (header["low_date"], header["high_date"]) == (0, 0): logger.warning( "Skipping %s datagram w/ timestamp of (0, 0) at %sL:%d", header["type"], str(self._tell_bytes()), self.tell(), ) self.skip() return self._read_next_dgram() # basic sanity check on size if header["size"] < 16: # size can't be smaller than the header size logger.warning( "Invalid datagram header: size: %d, type: %s, nt_date: %s. dgram_size < 16", header["size"], header["type"], str((header["low_date"], header["high_date"])), ) # see if we can find the next datagram self._find_next_datagram() # and then return that return self._read_next_dgram() # get the raw bytes from the header raw_dgram = header["raw_bytes"] # and append the rest of the datagram - we subtract 12 # since we have already read 12 bytes: 4 for type and # 8 for time. 
raw_dgram += self._read_bytes(header["size"] - 12) # determine the size of the payload in bytes bytes_read = len(raw_dgram) # and make sure it checks out if bytes_read < header["size"]: logger.warning( "Datagram %d (@%d) shorter than expected length: %d < %d", self.tell(), old_file_pos, bytes_read, header["size"], ) self._find_next_datagram() return self._read_next_dgram() # now read the trailing size value try: dgram_size_check = self._read_dgram_size() except DatagramReadError as e: self._seek_bytes(old_file_pos, SEEK_SET) e.message = "Short read while getting trailing raw file datagram size for check" raise e # make sure they match if header["size"] != dgram_size_check: # self._seek_bytes(old_file_pos, SEEK_SET) logger.warning( "Datagram failed size check: %d != %d @ (%d, %d)", header["size"], dgram_size_check, self._tell_bytes(), self.tell(), ) logger.warning("Skipping to next datagram...") self._find_next_datagram() return self._read_next_dgram() # add the header (16 bytes) and repeated size (4 bytes) to the payload # bytes to get the total bytes read for this datagram. bytes_read = bytes_read + 20 if self._return_raw: self._current_dgram_offset += 1 return raw_dgram else: nice_dgram = self._convert_raw_datagram(raw_dgram, bytes_read) self._current_dgram_offset += 1 return nice_dgram def _convert_raw_datagram(self, raw_datagram_string, bytes_read): """ :param raw_datagram_string: bytestring containing datagram (first 4 bytes indicate datagram type, such as 'RAW0') :type raw_datagram_string: str :param bytes_read: integer specifying the datagram size, including header in bytes, :type bytes_read: int Returns a formatted datagram object using the data in raw_datagram_string """ # 11/26/19 - RHT - Modified this method to pass through the number of # bytes read so we can bubble that up to the user. 
dgram_type = raw_datagram_string[:3].decode() try: parser = self.DGRAM_TYPE_KEY[dgram_type] except KeyError: # raise KeyError('Unknown datagram type %s, # valid types: %s' % (str(dgram_type), # str(self.DGRAM_TYPE_KEY.keys()))) return raw_datagram_string nice_dgram = parser.from_string(raw_datagram_string, bytes_read) return nice_dgram def _set_total_dgram_count(self): """ Skips quickly through the file counting datagrams and stores the resulting number in self._total_dgram_count :raises: ValueError if self._total_dgram_count is not None (it has been set before) """ if self._total_dgram_count is not None: raise ValueError( "self._total_dgram_count has already been set. Call .reset() first if you really want to recount" # noqa ) # Save current position for later old_file_pos = self._tell_bytes() old_dgram_offset = self.tell() self._current_dgram_offset = 0 self._seek_bytes(0, SEEK_SET) while True: try: self.skip() except (DatagramReadError, SimradEOF): self._total_dgram_count = self.tell() break # Return to where we started self._seek_bytes(old_file_pos, SEEK_SET) self._current_dgram_offset = old_dgram_offset def at_eof(self): old_pos = self._tell_bytes() self._seek_bytes(0, SEEK_END) eof_pos = self._tell_bytes() # Check to see if we're at the end of file and raise EOF if old_pos == eof_pos: return True # Othereise, go back to where we were and re-raise the original # exception else: offset = old_pos - eof_pos self._seek_bytes(offset, SEEK_END) return False def read(self, k): """ :param k: Number of datagrams to read :type k: int Reads the next k datagrams. A list of datagrams is returned if k > 1. The entire file is read from the CURRENT POSITION if k < 0. 
(does not necessarily read from beginning of file if previous datagrams were read) """ if k == 1: try: return self._read_next_dgram() except Exception: if self.at_eof(): raise SimradEOF() else: raise elif k > 0: dgram_list = [] for m in range(k): try: dgram = self._read_next_dgram() dgram_list.append(dgram) except Exception: break return dgram_list elif k < 0: return self.readall() def readall(self): """ Reads the entire file from the beginning and returns a list of datagrams. """ self.seek(0, SEEK_SET) dgram_list = [] for raw_dgram in self.iter_dgrams(): dgram_list.append(raw_dgram) return dgram_list def _find_next_datagram(self): old_file_pos = self._tell_bytes() logger.warning("Attempting to find next valid datagram...") try: while self.peek()["type"][:3] not in list(self.DGRAM_TYPE_KEY.keys()): self._seek_bytes(1, 1) except DatagramReadError: logger.warning("No next datagram found. Ending reading of file.") raise SimradEOF() else: logger.warning("Found next datagram: %s", self.peek()) logger.warning("Skipped ahead %d bytes", self._tell_bytes() - old_file_pos) def tell(self): """ Returns the current file pointer offset by datagram number """ return self._current_dgram_offset def peek(self): """ Returns the header of the next datagram in the file. The file position is reset back to the original location afterwards. 
:returns: [dgram_size, dgram_type, (low_date, high_date)] """ dgram_header = self._read_dgram_header() if dgram_header["type"].startswith("RAW0"): dgram_header["channel"] = struct.unpack("h", self._read_bytes(2))[0] self._seek_bytes(-18, SEEK_CUR) elif dgram_header["type"].startswith("RAW3"): chan_id = struct.unpack("128s", self._read_bytes(128)) dgram_header["channel_id"] = chan_id.strip("\x00") self._seek_bytes(-(16 + 128), SEEK_CUR) else: self._seek_bytes(-16, SEEK_CUR) return dgram_header def __next__(self): """ Returns the next datagram (synonymous with self.read(1)) """ return self.read(1) def prev(self): """ Returns the previous datagram 'behind' the current file pointer position """ self.skip_back() raw_dgram = self.read(1) self.skip_back() return raw_dgram def skip(self): """ Skips forward to the next datagram without reading the contents of the current one """ # dgram_size, dgram_type, (low_date, high_date) = self.peek()[:3] header = self.peek() if header["size"] < 16: logger.warning( "Invalid datagram header: size: %d, type: %s, nt_date: %s. dgram_size < 16", header["size"], header["type"], str((header["low_date"], header["high_date"])), ) self._find_next_datagram() else: self._seek_bytes(header["size"] + 4, SEEK_CUR) dgram_size_check = self._read_dgram_size() if header["size"] != dgram_size_check: logger.warning( "Datagram failed size check: %d != %d @ (%d, %d)", header["size"], dgram_size_check, self._tell_bytes(), self.tell(), ) logger.warning("Skipping to next datagram... 
(in skip)") self._find_next_datagram() self._current_dgram_offset += 1 def skip_back(self): """ Skips backwards to the previous datagram without reading it's contents """ old_file_pos = self._tell_bytes() try: self._seek_bytes(-4, SEEK_CUR) except IOError: raise dgram_size_check = self._read_dgram_size() # Seek to the beginning of the datagram and read as normal try: self._seek_bytes(-(8 + dgram_size_check), SEEK_CUR) except IOError: raise DatagramSizeError try: dgram_size = self._read_dgram_size() except DatagramSizeError: logger.info("Error reading the datagram") self._seek_bytes(old_file_pos, SEEK_SET) raise if dgram_size_check != dgram_size: self._seek_bytes(old_file_pos, SEEK_SET) raise DatagramSizeError else: self._seek_bytes(-4, SEEK_CUR) self._current_dgram_offset -= 1 def iter_dgrams(self): """ Iterates through the file, repeatedly calling self.next() until the end of file is reached """ while True: # new_dgram = self.next() # yield new_dgram try: new_dgram = next(self) except Exception: logger.debug("Caught EOF?") raise StopIteration yield new_dgram # Unsupported members def readline(self): """ aliased to self.next() """ return next(self) def readlines(self): """ aliased to self.read(-1) """ return self.read(-1) def seek(self, offset, whence): """ Performs the familiar 'seek' operation using datagram offsets instead of raw bytes. """ if whence == SEEK_SET: if offset < 0: raise ValueError("Cannot seek backwards from beginning of file") else: self._seek_bytes(0, SEEK_SET) self._current_dgram_offset = 0 elif whence == SEEK_END: if offset > 0: raise ValueError("Use negative offsets when seeking backward from end of file") # Do we need to generate the total number of datagrams w/in the file? try: self._set_total_dgram_count() # Throws a value error if _total_dgram_count has already been set. 
We can ignore it except ValueError: pass self._seek_bytes(0, SEEK_END) self._current_dgram_offset = self._total_dgram_count elif whence == SEEK_CUR: pass else: raise ValueError( "Illegal value for 'whence' (%s), use 0 (beginning), 1 (current), or 2 (end)" % (str(whence)) ) if offset > 0: for k in range(offset): self.skip() elif offset < 0: for k in range(-offset): self.skip_back() def reset(self): self._current_dgram_offset = 0 self._total_dgram_count = None self._seek_bytes(0, SEEK_SET)
{"/echopype/convert/set_groups_ad2cp.py": ["/echopype/__init__.py", "/echopype/utils/coding.py", "/echopype/convert/parse_ad2cp.py", "/echopype/convert/set_groups_base.py"], "/echopype/tests/utils/test_source_filenames.py": ["/echopype/utils/prov.py"], "/echopype/echodata/convention/__init__.py": ["/echopype/echodata/convention/conv.py"], "/echopype/consolidate/split_beam_angle.py": ["/echopype/calibrate/ek80_complex.py"], "/echopype/calibrate/ecs.py": ["/echopype/utils/log.py"], "/echopype/tests/calibrate/test_ecs_integration.py": ["/echopype/__init__.py", "/echopype/calibrate/ecs.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/cal_params.py"], "/echopype/calibrate/api.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/calibrate/calibrate_azfp.py", "/echopype/calibrate/calibrate_ek.py"], "/echopype/convert/parse_ad2cp.py": ["/echopype/convert/parse_base.py"], "/echopype/echodata/combine.py": ["/echopype/utils/io.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/echodata/echodata.py"], "/echopype/tests/consolidate/test_consolidate_integration.py": ["/echopype/__init__.py"], "/echopype/echodata/api.py": ["/echopype/echodata/echodata.py", "/echopype/core.py"], "/echopype/tests/utils/test_utils_io.py": ["/echopype/utils/io.py"], "/echopype/echodata/sensor_ep_version_mapping/ep_version_mapper.py": ["/echopype/echodata/sensor_ep_version_mapping/v05x_to_v06x.py"], "/echopype/tests/mask/test_mask.py": ["/echopype/__init__.py", "/echopype/mask/__init__.py", "/echopype/mask/api.py"], "/echopype/tests/conftest.py": ["/echopype/testing.py"], "/echopype/tests/echodata/utils.py": ["/echopype/convert/set_groups_base.py", "/echopype/echodata/echodata.py"], "/echopype/tests/commongrid/test_mvbs.py": ["/echopype/__init__.py", "/echopype/commongrid/mvbs.py"], "/echopype/tests/calibrate/test_cal_params_integration.py": ["/echopype/__init__.py"], 
"/echopype/tests/test_core.py": ["/echopype/core.py"], "/echopype/calibrate/calibrate_azfp.py": ["/echopype/echodata/__init__.py", "/echopype/calibrate/cal_params.py", "/echopype/calibrate/calibrate_ek.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/range.py"], "/echopype/echodata/widgets/utils.py": ["/echopype/echodata/convention/utils.py"], "/echopype/echodata/simrad.py": ["/echopype/echodata/echodata.py"], "/echopype/tests/convert/test_convert_ek60.py": ["/echopype/__init__.py"], "/echopype/convert/parsed_to_zarr_ek60.py": ["/echopype/convert/parsed_to_zarr.py"], "/echopype/mask/api.py": ["/echopype/utils/io.py", "/echopype/utils/prov.py"], "/echopype/qc/__init__.py": ["/echopype/qc/api.py"], "/echopype/tests/utils/test_processinglevels_integration.py": ["/echopype/__init__.py"], "/echopype/tests/metrics/test_metrics_summary_statistics.py": ["/echopype/metrics/summary_statistics.py"], "/echopype/__init__.py": ["/echopype/convert/api.py", "/echopype/echodata/api.py", "/echopype/echodata/combine.py", "/echopype/utils/io.py", "/echopype/utils/log.py"], "/.ci_helpers/check-version.py": ["/echopype/__init__.py"], "/echopype/calibrate/ek80_complex.py": ["/echopype/convert/set_groups_ek80.py"], "/echopype/consolidate/__init__.py": ["/echopype/consolidate/api.py"], "/echopype/commongrid/api.py": ["/echopype/utils/prov.py", "/echopype/commongrid/mvbs.py"], "/echopype/tests/echodata/test_echodata_combine.py": ["/echopype/__init__.py", "/echopype/utils/coding.py", "/echopype/echodata/__init__.py", "/echopype/echodata/combine.py"], "/echopype/tests/utils/test_utils_log.py": ["/echopype/__init__.py"], "/echopype/mask/__init__.py": ["/echopype/mask/api.py"], "/echopype/tests/convert/test_convert_ad2cp.py": ["/echopype/__init__.py", "/echopype/testing.py"], "/echopype/convert/parsed_to_zarr_ek80.py": ["/echopype/convert/parsed_to_zarr_ek60.py"], "/echopype/tests/visualize/test_plot.py": ["/echopype/__init__.py", "/echopype/visualize/__init__.py", 
"/echopype/testing.py", "/echopype/calibrate/calibrate_ek.py", "/echopype/echodata/__init__.py", "/echopype/visualize/api.py"], "/echopype/calibrate/env_params.py": ["/echopype/echodata/__init__.py", "/echopype/calibrate/cal_params.py"], "/echopype/convert/utils/ek_raw_parsers.py": ["/echopype/utils/log.py", "/echopype/convert/utils/ek_date_conversion.py"], "/echopype/tests/convert/test_convert_azfp.py": ["/echopype/__init__.py"], "/echopype/tests/calibrate/test_calibrate.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params_old.py"], "/echopype/calibrate/range.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/calibrate/env_params.py"], "/echopype/tests/calibrate/test_env_params.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params.py"], "/echopype/convert/set_groups_base.py": ["/echopype/echodata/convention/__init__.py", "/echopype/utils/coding.py", "/echopype/utils/prov.py"], "/echopype/clean/api.py": ["/echopype/utils/prov.py", "/echopype/clean/noise_est.py"], "/echopype/calibrate/calibrate_ek.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/log.py", "/echopype/calibrate/cal_params.py", "/echopype/calibrate/calibrate_base.py", "/echopype/calibrate/ecs.py", "/echopype/calibrate/ek80_complex.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/range.py"], "/echopype/calibrate/calibrate_base.py": ["/echopype/echodata/__init__.py", "/echopype/utils/log.py", "/echopype/calibrate/ecs.py"], "/echopype/echodata/echodata.py": ["/echopype/utils/coding.py", "/echopype/utils/io.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/echodata/convention/__init__.py", "/echopype/echodata/widgets/utils.py", "/echopype/echodata/widgets/widgets.py", "/echopype/core.py", "/echopype/convert/api.py"], "/echopype/visualize/api.py": ["/echopype/visualize/plot.py", "/echopype/echodata/__init__.py", "/echopype/calibrate/calibrate_ek.py", 
"/echopype/calibrate/calibrate_azfp.py", "/echopype/utils/log.py"], "/echopype/echodata/sensor_ep_version_mapping/v05x_to_v06x.py": ["/echopype/core.py", "/echopype/utils/log.py", "/echopype/echodata/convention/__init__.py"], "/echopype/clean/__init__.py": ["/echopype/clean/api.py"], "/echopype/visualize/plot.py": ["/echopype/visualize/cm.py", "/echopype/utils/log.py"], "/echopype/commongrid/__init__.py": ["/echopype/commongrid/api.py"], "/echopype/convert/parsed_to_zarr.py": ["/echopype/utils/io.py"], "/echopype/calibrate/__init__.py": ["/echopype/calibrate/api.py"], "/echopype/echodata/convention/utils.py": ["/echopype/echodata/convention/__init__.py"], "/echopype/tests/echodata/test_echodata.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params_old.py", "/echopype/echodata/__init__.py", "/echopype/calibrate/calibrate_ek.py"], "/echopype/qc/api.py": ["/echopype/echodata/__init__.py", "/echopype/utils/log.py"], "/echopype/visualize/__init__.py": ["/echopype/visualize/api.py"], "/echopype/tests/qc/test_qc.py": ["/echopype/qc/__init__.py", "/echopype/qc/api.py"], "/echopype/core.py": ["/echopype/convert/parse_ad2cp.py", "/echopype/convert/parse_azfp.py", "/echopype/convert/parse_ek60.py", "/echopype/convert/parse_ek80.py", "/echopype/convert/parsed_to_zarr_ek60.py", "/echopype/convert/parsed_to_zarr_ek80.py", "/echopype/convert/set_groups_ad2cp.py", "/echopype/convert/set_groups_azfp.py", "/echopype/convert/set_groups_ek60.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/convert/parse_ek60.py": ["/echopype/convert/parse_base.py"], "/echopype/tests/calibrate/test_cal_params.py": ["/echopype/calibrate/cal_params.py"], "/echopype/tests/calibrate/test_ecs.py": ["/echopype/calibrate/ecs.py"], "/echopype/echodata/__init__.py": ["/echopype/echodata/echodata.py"], "/echopype/tests/convert/test_convert_ek80.py": ["/echopype/__init__.py", "/echopype/testing.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/convert/set_groups_ek80.py": 
["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/convert/set_groups_base.py"], "/echopype/consolidate/api.py": ["/echopype/calibrate/ek80_complex.py", "/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/io.py", "/echopype/utils/prov.py", "/echopype/consolidate/split_beam_angle.py"], "/echopype/convert/__init__.py": ["/echopype/convert/parse_ad2cp.py", "/echopype/convert/parse_azfp.py", "/echopype/convert/parse_base.py", "/echopype/convert/parse_ek60.py", "/echopype/convert/parse_ek80.py", "/echopype/convert/set_groups_ad2cp.py", "/echopype/convert/set_groups_azfp.py", "/echopype/convert/set_groups_ek60.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/utils/prov.py": ["/echopype/utils/log.py"], "/echopype/convert/parse_ek80.py": ["/echopype/convert/parse_base.py"], "/echopype/tests/echodata/test_echodata_simrad.py": ["/echopype/echodata/simrad.py"], "/echopype/tests/utils/test_coding.py": ["/echopype/utils/coding.py"], "/echopype/echodata/convention/conv.py": ["/echopype/echodata/__init__.py"], "/echopype/metrics/__init__.py": ["/echopype/metrics/summary_statistics.py"], "/echopype/tests/calibrate/test_calibrate_ek80.py": ["/echopype/__init__.py"], "/echopype/tests/commongrid/test_nasc.py": ["/echopype/__init__.py", "/echopype/calibrate/__init__.py", "/echopype/commongrid/__init__.py", "/echopype/commongrid/nasc.py", "/echopype/consolidate/__init__.py"], "/echopype/convert/api.py": ["/echopype/core.py", "/echopype/convert/parsed_to_zarr.py", "/echopype/echodata/echodata.py", "/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/utils/prov.py"], "/echopype/convert/set_groups_azfp.py": ["/echopype/utils/coding.py", "/echopype/convert/set_groups_base.py"], "/echopype/tests/clean/test_noise.py": ["/echopype/__init__.py"], "/echopype/convert/parse_base.py": ["/echopype/utils/log.py", "/echopype/convert/utils/ek_raw_io.py"], "/echopype/tests/convert/test_parsed_to_zarr.py": ["/echopype/__init__.py", 
"/echopype/echodata/echodata.py"], "/echopype/echodata/widgets/widgets.py": ["/echopype/echodata/widgets/utils.py"], "/echopype/tests/echodata/test_echodata_structure.py": ["/echopype/echodata/echodata.py", "/echopype/echodata/api.py"], "/echopype/tests/calibrate/test_range_integration.py": ["/echopype/__init__.py"], "/echopype/tests/calibrate/test_env_params_integration.py": ["/echopype/__init__.py"], "/echopype/tests/convert/test_convert_source_target_locs.py": ["/echopype/__init__.py", "/echopype/utils/coding.py"], "/echopype/tests/utils/test_utils_uwa.py": ["/echopype/utils/uwa.py"], "/echopype/utils/io.py": ["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/core.py"], "/echopype/tests/calibrate/test_ek80_complex.py": ["/echopype/calibrate/ek80_complex.py"], "/echopype/convert/set_groups_ek60.py": ["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/convert/set_groups_base.py"], "/echopype/convert/utils/ek_raw_io.py": ["/echopype/utils/log.py"], "/echopype/convert/parse_azfp.py": ["/echopype/utils/log.py", "/echopype/convert/parse_base.py"]}
73,875
OSOceanAcoustics/echopype
refs/heads/main
/echopype/convert/parse_azfp.py
import os
import xml.dom.minidom
from collections import defaultdict
from datetime import datetime as dt
from struct import unpack

import fsspec
import numpy as np

from ..utils.log import _init_logger
from .parse_base import ParseBase

FILENAME_DATETIME_AZFP = "\\w+.01A"

# XML tag name -> internal parameter name (integer-valued instrument params)
XML_INT_PARAMS = {
    "NumFreq": "num_freq",
    "SerialNumber": "serial_number",
    "BurstInterval": "burst_interval",
    "PingsPerBurst": "pings_per_burst",
    "AverageBurstPings": "average_burst_pings",
    "SensorsFlag": "sensors_flag",
}
XML_FLOAT_PARAMS = [
    # Temperature coeffs
    "ka",
    "kb",
    "kc",
    "A",
    "B",
    "C",
    # Tilt coeffs
    "X_a",
    "X_b",
    "X_c",
    "X_d",
    "Y_a",
    "Y_b",
    "Y_c",
    "Y_d",
]
# XML tag name -> internal parameter name (one value per frequency channel)
XML_FREQ_PARAMS = {
    "RangeSamples": "range_samples",
    "RangeAveragingSamples": "range_averaging_samples",
    "DigRate": "dig_rate",
    "LockOutIndex": "lockout_index",
    "Gain": "gain",
    "PulseLen": "pulse_length",
    "DS": "DS",
    "EL": "EL",
    "TVR": "TVR",
    "VTX0": "VTX",
    "BP": "BP",
}
# Binary header layout: (field name, numpy-style dtype code[, count]).
# Fields with a trailing count occupy that many consecutive unpacked values.
HEADER_FIELDS = (
    ("profile_flag", "u2"),
    ("profile_number", "u2"),
    ("serial_number", "u2"),
    ("ping_status", "u2"),
    ("burst_int", "u4"),
    ("year", "u2"),  # Year
    ("month", "u2"),  # Month
    ("day", "u2"),  # Day
    ("hour", "u2"),  # Hour
    ("minute", "u2"),  # Minute
    ("second", "u2"),  # Second
    ("hundredths", "u2"),  # Hundredths of a second
    ("dig_rate", "u2", 4),  # Digitalization rate for each channel
    ("lockout_index", "u2", 4),  # Lockout index for each channel
    ("num_bins", "u2", 4),  # Number of bins for each channel
    (
        "range_samples_per_bin",
        "u2",
        4,
    ),  # Range samples per bin for each channel
    ("ping_per_profile", "u2"),  # Number of pings per profile
    ("avg_pings", "u2"),  # Flag indicating whether the pings average in time
    ("num_acq_pings", "u2"),  # Pings acquired in the burst
    ("ping_period", "u2"),  # Ping period in seconds
    ("first_ping", "u2"),
    ("last_ping", "u2"),
    (
        "data_type",
        "u1",
        4,
    ),  # Datatype for each channel 1=Avg unpacked_data (5bytes), 0=raw (2bytes)
    ("data_error", "u2"),  # Error number is an error occurred
    ("phase", "u1"),  # Phase number used to acquire this profile
    ("overrun", "u1"),  # 1 if an overrun occurred
    ("num_chan", "u1"),  # 1, 2, 3, or 4
    ("gain", "u1", 4),  # gain channel 1-4
    ("spare_chan", "u1"),  # spare channel
    ("pulse_length", "u2", 4),  # Pulse length chan 1-4 uS
    ("board_num", "u2", 4),  # The board the data came from channel 1-4
    ("frequency", "u2", 4),  # frequency for channel 1-4 in kHz
    (
        "sensor_flag",
        "u2",
    ),  # Flag indicating if pressure sensor or temperature sensor is available
    ("ancillary", "u2", 5),  # Tilt-X, Y, Battery, Pressure, Temperature
    ("ad", "u2", 2),  # AD channel 6 and 7
)

logger = _init_logger(__name__)


class ParseAZFP(ParseBase):
    """Class for converting data from ASL Environmental Sciences AZFP echosounder."""

    # Instrument specific constants
    HEADER_SIZE = 124
    HEADER_FORMAT = ">HHHHIHHHHHHHHHHHHHHHHHHHHHHHHHHHHHBBBBHBBBBBBBBHHHHHHHHHHHHHHHHHHHH"
    FILE_TYPE = 64770

    def __init__(self, file, params, storage_options={}, dgram_zarr_vars={}):
        super().__init__(file, storage_options)
        # Parent class attributes
        #  regex pattern used to grab datetime embedded in filename
        self.timestamp_pattern = FILENAME_DATETIME_AZFP
        self.xml_path = params

        # Class attributes
        self.parameters = dict()  # instrument params parsed from the XML file
        self.unpacked_data = defaultdict(list)  # per-ping data parsed from the .01A file
        self.sonar_type = "AZFP"

    def load_AZFP_xml(self):
        """Parse the AZFP XML file to populate ``self.parameters``.

        Reads integer, float, and per-frequency parameters according to
        ``XML_INT_PARAMS``, ``XML_FLOAT_PARAMS``, and ``XML_FREQ_PARAMS``.
        """

        def get_value_by_tag_name(tag_name, element=0):
            """Returns the value in an XML tag given the tag name and the number of occurrences."""
            return px.getElementsByTagName(tag_name)[element].childNodes[0].data

        xmlmap = fsspec.get_mapper(self.xml_path, **self.storage_options)
        px = xml.dom.minidom.parse(xmlmap.fs.open(xmlmap.root))

        # Retrieve integer parameters from the xml file
        for old_name, new_name in XML_INT_PARAMS.items():
            self.parameters[new_name] = int(get_value_by_tag_name(old_name))
        # Retrieve floating point parameters from the xml file
        for param in XML_FLOAT_PARAMS:
            self.parameters[param] = float(get_value_by_tag_name(param))
        # Retrieve frequency dependent parameters from the xml file
        for old_name, new_name in XML_FREQ_PARAMS.items():
            self.parameters[new_name] = [
                float(get_value_by_tag_name(old_name, ch))
                for ch in range(self.parameters["num_freq"])
            ]

    def _compute_temperature(self, ping_num, is_valid):
        """
        Compute temperature in celsius.

        Parameters
        ----------
        ping_num
            ping number
        is_valid
            whether the associated parameters have valid values
        """
        if not is_valid:
            return np.nan

        counts = self.unpacked_data["ancillary"][ping_num][4]
        v_in = 2.5 * (counts / 65535)
        R = (self.parameters["ka"] + self.parameters["kb"] * v_in) / (self.parameters["kc"] - v_in)

        # fmt: off
        T = 1 / (
            self.parameters["A"]
            + self.parameters["B"] * (np.log(R))
            + self.parameters["C"] * (np.log(R) ** 3)
        ) - 273
        # fmt: on
        return T

    def _compute_tilt(self, ping_num, xy, is_valid):
        """
        Compute instrument tilt.

        Parameters
        ----------
        ping_num
            ping number
        xy
            either "X" or "Y"
        is_valid
            whether the associated parameters have valid values
        """
        if not is_valid:
            return np.nan
        else:
            idx = 0 if xy == "X" else 1
            N = self.unpacked_data["ancillary"][ping_num][idx]
            a = self.parameters[f"{xy}_a"]
            b = self.parameters[f"{xy}_b"]
            c = self.parameters[f"{xy}_c"]
            d = self.parameters[f"{xy}_d"]
            return a + b * N + c * N**2 + d * N**3

    def _compute_battery(self, ping_num, battery_type):
        """
        Compute battery voltage.

        Parameters
        ----------
        ping_num
            ping number
        battery_type
            either "main" or "tx"

        Raises
        ------
        ValueError
            If ``battery_type`` is neither "main" nor "tx".
        """
        USL5_BAT_CONSTANT = (2.5 / 65536.0) * (86.6 + 475.0) / 86.6

        if battery_type == "main":
            N = self.unpacked_data["ancillary"][ping_num][2]
        elif battery_type == "tx":
            N = self.unpacked_data["ad"][ping_num][0]
        else:
            # Previously an unknown battery_type fell through and raised an
            # opaque NameError on the next line; fail explicitly instead.
            raise ValueError(f"battery_type must be 'main' or 'tx', got {battery_type!r}")

        return N * USL5_BAT_CONSTANT

    def parse_raw(self):
        """
        Parse raw data file from AZFP echosounder.
        """

        # Read xml file into dict
        self.load_AZFP_xml()
        fmap = fsspec.get_mapper(self.source_file, **self.storage_options)

        # Set flags for presence of valid parameters for temperature and tilt
        def _test_valid_params(params):
            # A parameter set is considered invalid if all coefficients are ~0
            if all([np.isclose(self.parameters[p], 0) for p in params]):
                return False
            else:
                return True

        temperature_is_valid = _test_valid_params(["ka", "kb", "kc"])
        tilt_x_is_valid = _test_valid_params(["X_a", "X_b", "X_c"])
        tilt_y_is_valid = _test_valid_params(["Y_a", "Y_b", "Y_c"])

        with fmap.fs.open(fmap.root, "rb") as file:
            ping_num = 0
            eof = False
            while not eof:
                header_chunk = file.read(self.HEADER_SIZE)
                if header_chunk:
                    header_unpacked = unpack(self.HEADER_FORMAT, header_chunk)

                    # Reading will stop if the file contains an unexpected flag
                    if self._split_header(file, header_unpacked):
                        # Appends the actual 'data values' to unpacked_data
                        self._add_counts(file, ping_num)
                        if ping_num == 0:
                            # Display information about the file that was loaded in
                            self._print_status()
                        # Compute temperature from unpacked_data[ii]['ancillary][4]
                        self.unpacked_data["temperature"].append(
                            self._compute_temperature(ping_num, temperature_is_valid)
                        )
                        # compute x tilt from unpacked_data[ii]['ancillary][0]
                        self.unpacked_data["tilt_x"].append(
                            self._compute_tilt(ping_num, "X", tilt_x_is_valid)
                        )
                        # Compute y tilt from unpacked_data[ii]['ancillary][1]
                        self.unpacked_data["tilt_y"].append(
                            self._compute_tilt(ping_num, "Y", tilt_y_is_valid)
                        )
                        # Compute cos tilt magnitude from tilt x and y values
                        self.unpacked_data["cos_tilt_mag"].append(
                            np.cos(
                                (
                                    np.sqrt(
                                        self.unpacked_data["tilt_x"][ping_num] ** 2
                                        + self.unpacked_data["tilt_y"][ping_num] ** 2
                                    )
                                )
                                * np.pi
                                / 180
                            )
                        )
                        # Calculate voltage of main battery pack
                        self.unpacked_data["battery_main"].append(
                            self._compute_battery(ping_num, battery_type="main")
                        )
                        # If there is a Tx battery pack
                        self.unpacked_data["battery_tx"].append(
                            self._compute_battery(ping_num, battery_type="tx")
                        )
                    else:
                        break
                else:
                    # End of file
                    eof = True
                ping_num += 1

        self._check_uniqueness()
        self._get_ping_time()

        # Explicitly cast frequency to a float in accordance with the SONAR-netCDF4 convention
        self.unpacked_data["frequency"] = self.unpacked_data["frequency"].astype(np.float64)

        # cast unpacked_data values to np arrays, so they are easier to reference
        for key, val in self.unpacked_data.items():
            # if it is not a nested list, make the value into a ndarray
            if isinstance(val, list) and (not isinstance(val[0], list)):
                self.unpacked_data[key] = np.asarray(val)

        # cast all list parameter values to np array, so they are easier to reference
        for key, val in self.parameters.items():
            if isinstance(val, list):
                self.parameters[key] = np.asarray(val)

    def _print_status(self):
        """Prints message to console giving information about the raw file being parsed."""
        filename = os.path.basename(self.source_file)
        timestamp = dt(
            self.unpacked_data["year"][0],
            self.unpacked_data["month"][0],
            self.unpacked_data["day"][0],
            self.unpacked_data["hour"][0],
            self.unpacked_data["minute"][0],
            int(self.unpacked_data["second"][0] + self.unpacked_data["hundredths"][0] / 100),
        )
        timestr = timestamp.strftime("%Y-%b-%d %H:%M:%S")
        pathstr, xml_name = os.path.split(self.xml_path)
        # BUGFIX: previously logged the literal "(unknown)" and left `filename` unused
        logger.info(f"parsing file {filename} with {xml_name}, " f"time of first ping: {timestr}")

    def _split_header(self, raw, header_unpacked):
        """Splits the header information into a dictionary.
        Modifies self.unpacked_data

        Parameters
        ----------
        raw
            open binary file
        header_unpacked
            output of struct unpack of raw file

        Returns
        -------
            True or False depending on whether the unpacking was successful
        """
        if (
            header_unpacked[0] != self.FILE_TYPE
        ):  # first field should match hard-coded FILE_TYPE from manufacturer
            check_eof = raw.read(1)
            if check_eof:
                logger.error("Unknown file type")
            return False
        header_byte_cnt = 0

        # fields with num_freq data still takes 4 bytes,
        # the extra bytes contain random numbers
        firmware_freq_len = 4

        field_w_freq = (
            "dig_rate",
            "lockout_index",
            "num_bins",
            "range_samples_per_bin",  # fields with num_freq data
            "data_type",
            "gain",
            "pulse_length",
            "board_num",
            "frequency",
        )
        for field in HEADER_FIELDS:
            if field[0] in field_w_freq:  # fields with num_freq data
                self.unpacked_data[field[0]].append(
                    header_unpacked[header_byte_cnt : header_byte_cnt + self.parameters["num_freq"]]
                )
                header_byte_cnt += firmware_freq_len
            elif len(field) == 3:  # other longer fields ('ancillary' and 'ad')
                self.unpacked_data[field[0]].append(
                    header_unpacked[header_byte_cnt : header_byte_cnt + field[2]]
                )
                header_byte_cnt += field[2]
            else:
                self.unpacked_data[field[0]].append(header_unpacked[header_byte_cnt])
                header_byte_cnt += 1
        return True

    def _add_counts(self, raw, ping_num):
        """Unpacks the echosounder raw data. Modifies self.unpacked_data."""
        vv_tmp = [[]] * self.unpacked_data["num_chan"][ping_num]
        for freq_ch in range(self.unpacked_data["num_chan"][ping_num]):
            counts_byte_size = self.unpacked_data["num_bins"][ping_num][freq_ch]
            if self.unpacked_data["data_type"][ping_num][freq_ch]:
                if self.unpacked_data["avg_pings"][ping_num]:  # if pings are averaged over time
                    divisor = (
                        self.unpacked_data["ping_per_profile"][ping_num]
                        * self.unpacked_data["range_samples_per_bin"][ping_num][freq_ch]
                    )
                else:
                    divisor = self.unpacked_data["range_samples_per_bin"][ping_num][freq_ch]
                ls = unpack(
                    ">" + "I" * counts_byte_size, raw.read(counts_byte_size * 4)
                )  # Linear sum
                lso = unpack(
                    ">" + "B" * counts_byte_size, raw.read(counts_byte_size * 1)
                )  # linear sum overflow
                v = (np.array(ls) + np.array(lso) * 4294967295) / divisor
                v = (np.log10(v) - 2.5) * (8 * 65535) * self.parameters["DS"][freq_ch]
                v[np.isinf(v)] = 0
                vv_tmp[freq_ch] = v
            else:
                counts_chunk = raw.read(counts_byte_size * 2)
                counts_unpacked = unpack(">" + "H" * counts_byte_size, counts_chunk)
                vv_tmp[freq_ch] = counts_unpacked
        self.unpacked_data["counts"].append(vv_tmp)

    def _check_uniqueness(self):
        """Check for ping-by-ping consistency of sampling parameters and reduce if identical."""
        if not self.unpacked_data:
            self.parse_raw()

        if np.array(self.unpacked_data["profile_flag"]).size != 1:  # Only check uniqueness once.
            # fields with num_freq data
            field_w_freq = (
                "dig_rate",
                "lockout_index",
                "num_bins",
                "range_samples_per_bin",
                "data_type",
                "gain",
                "pulse_length",
                "board_num",
                "frequency",
            )
            # fields to reduce size if the same for all pings
            field_include = (
                "profile_flag",
                "serial_number",
                "burst_int",
                "ping_per_profile",
                "avg_pings",
                "ping_period",
                "phase",
                "num_chan",
                "spare_chan",
            )
            for field in field_w_freq:
                uniq = np.unique(self.unpacked_data[field], axis=0)
                if uniq.shape[0] == 1:
                    self.unpacked_data[field] = uniq.squeeze()
                else:
                    raise ValueError(f"Header value {field} is not constant for each ping")
            for field in field_include:
                uniq = np.unique(self.unpacked_data[field])
                if uniq.shape[0] == 1:
                    self.unpacked_data[field] = uniq.squeeze()
                else:
                    raise ValueError(f"Header value {field} is not constant for each ping")

    def _get_ping_time(self):
        """Assemble ping time from parsed values."""
        if not self.unpacked_data:
            self.parse_raw()

        ping_time = []
        for ping_num, year in enumerate(self.unpacked_data["year"]):
            ping_time.append(
                np.datetime64(
                    dt(
                        year,
                        self.unpacked_data["month"][ping_num],
                        self.unpacked_data["day"][ping_num],
                        self.unpacked_data["hour"][ping_num],
                        self.unpacked_data["minute"][ping_num],
                        int(
                            self.unpacked_data["second"][ping_num]
                            + self.unpacked_data["hundredths"][ping_num] / 100
                        ),
                    ).replace(tzinfo=None),
                    "[ns]",
                )
            )
        self.ping_time = ping_time

    @staticmethod
    def _calc_Sv_offset(f, pulse_length):
        """Calculate the compensation factor for Sv calculation."""
        # TODO: this method seems should be in echopype.process
        if f > 38000:
            if pulse_length == 300:
                return 1.1
            elif pulse_length == 500:
                return 0.8
            elif pulse_length == 700:
                return 0.5
            elif pulse_length == 900:
                return 0.3
            elif pulse_length == 1000:
                return 0.3
        else:
            if pulse_length == 500:
                return 1.1
            elif pulse_length == 1000:
                return 0.7
{"/echopype/convert/set_groups_ad2cp.py": ["/echopype/__init__.py", "/echopype/utils/coding.py", "/echopype/convert/parse_ad2cp.py", "/echopype/convert/set_groups_base.py"], "/echopype/tests/utils/test_source_filenames.py": ["/echopype/utils/prov.py"], "/echopype/echodata/convention/__init__.py": ["/echopype/echodata/convention/conv.py"], "/echopype/consolidate/split_beam_angle.py": ["/echopype/calibrate/ek80_complex.py"], "/echopype/calibrate/ecs.py": ["/echopype/utils/log.py"], "/echopype/tests/calibrate/test_ecs_integration.py": ["/echopype/__init__.py", "/echopype/calibrate/ecs.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/cal_params.py"], "/echopype/calibrate/api.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/calibrate/calibrate_azfp.py", "/echopype/calibrate/calibrate_ek.py"], "/echopype/convert/parse_ad2cp.py": ["/echopype/convert/parse_base.py"], "/echopype/echodata/combine.py": ["/echopype/utils/io.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/echodata/echodata.py"], "/echopype/tests/consolidate/test_consolidate_integration.py": ["/echopype/__init__.py"], "/echopype/echodata/api.py": ["/echopype/echodata/echodata.py", "/echopype/core.py"], "/echopype/tests/utils/test_utils_io.py": ["/echopype/utils/io.py"], "/echopype/echodata/sensor_ep_version_mapping/ep_version_mapper.py": ["/echopype/echodata/sensor_ep_version_mapping/v05x_to_v06x.py"], "/echopype/tests/mask/test_mask.py": ["/echopype/__init__.py", "/echopype/mask/__init__.py", "/echopype/mask/api.py"], "/echopype/tests/conftest.py": ["/echopype/testing.py"], "/echopype/tests/echodata/utils.py": ["/echopype/convert/set_groups_base.py", "/echopype/echodata/echodata.py"], "/echopype/tests/commongrid/test_mvbs.py": ["/echopype/__init__.py", "/echopype/commongrid/mvbs.py"], "/echopype/tests/calibrate/test_cal_params_integration.py": ["/echopype/__init__.py"], 
"/echopype/tests/test_core.py": ["/echopype/core.py"], "/echopype/calibrate/calibrate_azfp.py": ["/echopype/echodata/__init__.py", "/echopype/calibrate/cal_params.py", "/echopype/calibrate/calibrate_ek.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/range.py"], "/echopype/echodata/widgets/utils.py": ["/echopype/echodata/convention/utils.py"], "/echopype/echodata/simrad.py": ["/echopype/echodata/echodata.py"], "/echopype/tests/convert/test_convert_ek60.py": ["/echopype/__init__.py"], "/echopype/convert/parsed_to_zarr_ek60.py": ["/echopype/convert/parsed_to_zarr.py"], "/echopype/mask/api.py": ["/echopype/utils/io.py", "/echopype/utils/prov.py"], "/echopype/qc/__init__.py": ["/echopype/qc/api.py"], "/echopype/tests/utils/test_processinglevels_integration.py": ["/echopype/__init__.py"], "/echopype/tests/metrics/test_metrics_summary_statistics.py": ["/echopype/metrics/summary_statistics.py"], "/echopype/__init__.py": ["/echopype/convert/api.py", "/echopype/echodata/api.py", "/echopype/echodata/combine.py", "/echopype/utils/io.py", "/echopype/utils/log.py"], "/.ci_helpers/check-version.py": ["/echopype/__init__.py"], "/echopype/calibrate/ek80_complex.py": ["/echopype/convert/set_groups_ek80.py"], "/echopype/consolidate/__init__.py": ["/echopype/consolidate/api.py"], "/echopype/commongrid/api.py": ["/echopype/utils/prov.py", "/echopype/commongrid/mvbs.py"], "/echopype/tests/echodata/test_echodata_combine.py": ["/echopype/__init__.py", "/echopype/utils/coding.py", "/echopype/echodata/__init__.py", "/echopype/echodata/combine.py"], "/echopype/tests/utils/test_utils_log.py": ["/echopype/__init__.py"], "/echopype/mask/__init__.py": ["/echopype/mask/api.py"], "/echopype/tests/convert/test_convert_ad2cp.py": ["/echopype/__init__.py", "/echopype/testing.py"], "/echopype/convert/parsed_to_zarr_ek80.py": ["/echopype/convert/parsed_to_zarr_ek60.py"], "/echopype/tests/visualize/test_plot.py": ["/echopype/__init__.py", "/echopype/visualize/__init__.py", 
"/echopype/testing.py", "/echopype/calibrate/calibrate_ek.py", "/echopype/echodata/__init__.py", "/echopype/visualize/api.py"], "/echopype/calibrate/env_params.py": ["/echopype/echodata/__init__.py", "/echopype/calibrate/cal_params.py"], "/echopype/convert/utils/ek_raw_parsers.py": ["/echopype/utils/log.py", "/echopype/convert/utils/ek_date_conversion.py"], "/echopype/tests/convert/test_convert_azfp.py": ["/echopype/__init__.py"], "/echopype/tests/calibrate/test_calibrate.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params_old.py"], "/echopype/calibrate/range.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/calibrate/env_params.py"], "/echopype/tests/calibrate/test_env_params.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params.py"], "/echopype/convert/set_groups_base.py": ["/echopype/echodata/convention/__init__.py", "/echopype/utils/coding.py", "/echopype/utils/prov.py"], "/echopype/clean/api.py": ["/echopype/utils/prov.py", "/echopype/clean/noise_est.py"], "/echopype/calibrate/calibrate_ek.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/log.py", "/echopype/calibrate/cal_params.py", "/echopype/calibrate/calibrate_base.py", "/echopype/calibrate/ecs.py", "/echopype/calibrate/ek80_complex.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/range.py"], "/echopype/calibrate/calibrate_base.py": ["/echopype/echodata/__init__.py", "/echopype/utils/log.py", "/echopype/calibrate/ecs.py"], "/echopype/echodata/echodata.py": ["/echopype/utils/coding.py", "/echopype/utils/io.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/echodata/convention/__init__.py", "/echopype/echodata/widgets/utils.py", "/echopype/echodata/widgets/widgets.py", "/echopype/core.py", "/echopype/convert/api.py"], "/echopype/visualize/api.py": ["/echopype/visualize/plot.py", "/echopype/echodata/__init__.py", "/echopype/calibrate/calibrate_ek.py", 
"/echopype/calibrate/calibrate_azfp.py", "/echopype/utils/log.py"], "/echopype/echodata/sensor_ep_version_mapping/v05x_to_v06x.py": ["/echopype/core.py", "/echopype/utils/log.py", "/echopype/echodata/convention/__init__.py"], "/echopype/clean/__init__.py": ["/echopype/clean/api.py"], "/echopype/visualize/plot.py": ["/echopype/visualize/cm.py", "/echopype/utils/log.py"], "/echopype/commongrid/__init__.py": ["/echopype/commongrid/api.py"], "/echopype/convert/parsed_to_zarr.py": ["/echopype/utils/io.py"], "/echopype/calibrate/__init__.py": ["/echopype/calibrate/api.py"], "/echopype/echodata/convention/utils.py": ["/echopype/echodata/convention/__init__.py"], "/echopype/tests/echodata/test_echodata.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params_old.py", "/echopype/echodata/__init__.py", "/echopype/calibrate/calibrate_ek.py"], "/echopype/qc/api.py": ["/echopype/echodata/__init__.py", "/echopype/utils/log.py"], "/echopype/visualize/__init__.py": ["/echopype/visualize/api.py"], "/echopype/tests/qc/test_qc.py": ["/echopype/qc/__init__.py", "/echopype/qc/api.py"], "/echopype/core.py": ["/echopype/convert/parse_ad2cp.py", "/echopype/convert/parse_azfp.py", "/echopype/convert/parse_ek60.py", "/echopype/convert/parse_ek80.py", "/echopype/convert/parsed_to_zarr_ek60.py", "/echopype/convert/parsed_to_zarr_ek80.py", "/echopype/convert/set_groups_ad2cp.py", "/echopype/convert/set_groups_azfp.py", "/echopype/convert/set_groups_ek60.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/convert/parse_ek60.py": ["/echopype/convert/parse_base.py"], "/echopype/tests/calibrate/test_cal_params.py": ["/echopype/calibrate/cal_params.py"], "/echopype/tests/calibrate/test_ecs.py": ["/echopype/calibrate/ecs.py"], "/echopype/echodata/__init__.py": ["/echopype/echodata/echodata.py"], "/echopype/tests/convert/test_convert_ek80.py": ["/echopype/__init__.py", "/echopype/testing.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/convert/set_groups_ek80.py": 
["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/convert/set_groups_base.py"], "/echopype/consolidate/api.py": ["/echopype/calibrate/ek80_complex.py", "/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/io.py", "/echopype/utils/prov.py", "/echopype/consolidate/split_beam_angle.py"], "/echopype/convert/__init__.py": ["/echopype/convert/parse_ad2cp.py", "/echopype/convert/parse_azfp.py", "/echopype/convert/parse_base.py", "/echopype/convert/parse_ek60.py", "/echopype/convert/parse_ek80.py", "/echopype/convert/set_groups_ad2cp.py", "/echopype/convert/set_groups_azfp.py", "/echopype/convert/set_groups_ek60.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/utils/prov.py": ["/echopype/utils/log.py"], "/echopype/convert/parse_ek80.py": ["/echopype/convert/parse_base.py"], "/echopype/tests/echodata/test_echodata_simrad.py": ["/echopype/echodata/simrad.py"], "/echopype/tests/utils/test_coding.py": ["/echopype/utils/coding.py"], "/echopype/echodata/convention/conv.py": ["/echopype/echodata/__init__.py"], "/echopype/metrics/__init__.py": ["/echopype/metrics/summary_statistics.py"], "/echopype/tests/calibrate/test_calibrate_ek80.py": ["/echopype/__init__.py"], "/echopype/tests/commongrid/test_nasc.py": ["/echopype/__init__.py", "/echopype/calibrate/__init__.py", "/echopype/commongrid/__init__.py", "/echopype/commongrid/nasc.py", "/echopype/consolidate/__init__.py"], "/echopype/convert/api.py": ["/echopype/core.py", "/echopype/convert/parsed_to_zarr.py", "/echopype/echodata/echodata.py", "/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/utils/prov.py"], "/echopype/convert/set_groups_azfp.py": ["/echopype/utils/coding.py", "/echopype/convert/set_groups_base.py"], "/echopype/tests/clean/test_noise.py": ["/echopype/__init__.py"], "/echopype/convert/parse_base.py": ["/echopype/utils/log.py", "/echopype/convert/utils/ek_raw_io.py"], "/echopype/tests/convert/test_parsed_to_zarr.py": ["/echopype/__init__.py", 
"/echopype/echodata/echodata.py"], "/echopype/echodata/widgets/widgets.py": ["/echopype/echodata/widgets/utils.py"], "/echopype/tests/echodata/test_echodata_structure.py": ["/echopype/echodata/echodata.py", "/echopype/echodata/api.py"], "/echopype/tests/calibrate/test_range_integration.py": ["/echopype/__init__.py"], "/echopype/tests/calibrate/test_env_params_integration.py": ["/echopype/__init__.py"], "/echopype/tests/convert/test_convert_source_target_locs.py": ["/echopype/__init__.py", "/echopype/utils/coding.py"], "/echopype/tests/utils/test_utils_uwa.py": ["/echopype/utils/uwa.py"], "/echopype/utils/io.py": ["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/core.py"], "/echopype/tests/calibrate/test_ek80_complex.py": ["/echopype/calibrate/ek80_complex.py"], "/echopype/convert/set_groups_ek60.py": ["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/convert/set_groups_base.py"], "/echopype/convert/utils/ek_raw_io.py": ["/echopype/utils/log.py"], "/echopype/convert/parse_azfp.py": ["/echopype/utils/log.py", "/echopype/convert/parse_base.py"]}
73,876
OSOceanAcoustics/echopype
refs/heads/main
/echopype/metrics/summary_statistics.py
""" "echo metrics" functions Reference: Urmy et al. 2012. Measuring the vertical distributional variability of pelagic fauna in Monterey Bay. ICES Journal of Marine Science 69 (2): 184-196. https://doi.org/10.1093/icesjms/fsr205 Original implementation: https://github.com/ElOceanografo/EchoMetrics/blob/master/echometrics/echometrics.py """ import numpy as np import xarray as xr def delta_z(ds: xr.Dataset, range_label="echo_range") -> xr.DataArray: """Helper function to calculate widths between range samples (dz) for discretized integral. Parameters ---------- ds : xr.Dataset range_label : str Name of an xarray DataArray in ``ds`` containing ``echo_range`` information. Returns ------- xr.DataArray """ if range_label not in ds: raise ValueError(f"{range_label} not in the input Dataset!") dz = ds[range_label].diff(dim="range_sample") return dz.where(dz != 0, other=np.nan) def convert_to_linear(ds: xr.Dataset, Sv_label="Sv") -> xr.DataArray: """Helper function to convert volume backscattering strength (Sv) values to linear domain. Parameters ---------- ds : xr.Dataset Sv_label : str Name of an xarray DataArray in `ds` containing volume backscattering strength (Sv). Returns ------- xr.DataArray """ return 10 ** (ds[Sv_label] / 10) def abundance(ds: xr.Dataset, range_label="echo_range") -> xr.DataArray: """Calculates the area-backscattering strength (Sa) [unit: dB re 1 m^2 m^-2]. This quantity is the integral of volumetric backscatter over range (``echo_range``). Parameters ---------- ds : xr.Dataset range_label : str Name of an xarray DataArray in ``ds`` containing ``echo_range`` information. Returns ------- xr.DataArray """ dz = delta_z(ds, range_label=range_label) sv = convert_to_linear(ds, "Sv") return 10 * np.log10((sv * dz).sum(dim="range_sample")) def center_of_mass(ds: xr.Dataset, range_label="echo_range") -> xr.DataArray: """Calculates the mean backscatter location [unit: m]. This quantity is the weighted average of backscatter along range (``echo_range``). 
Parameters ---------- ds : xr.Dataset range_label : str Name of an xarray DataArray in ``ds`` containing ``echo_range`` information. Returns ------- xr.DataArray """ dz = delta_z(ds, range_label=range_label) sv = convert_to_linear(ds, "Sv") return (ds[range_label] * sv * dz).sum(dim="range_sample") / (sv * dz).sum(dim="range_sample") def dispersion(ds: xr.Dataset, range_label="echo_range") -> xr.DataArray: """Calculates the inertia (I) [unit: m^-2]. This quantity measures dispersion or spread of backscatter from the center of mass. Parameters ---------- ds : xr.Dataset range_label : str Name of an xarray DataArray in ``ds`` containing ``echo_range`` information. Returns ------- xr.DataArray """ dz = delta_z(ds, range_label=range_label) sv = convert_to_linear(ds, "Sv") cm = center_of_mass(ds) return ((ds[range_label] - cm) ** 2 * sv * dz).sum(dim="range_sample") / (sv * dz).sum( dim="range_sample" ) def evenness(ds: xr.Dataset, range_label="echo_range") -> xr.DataArray: """Calculates the equivalent area (EA) [unit: m]. This quantity represents the area that would be occupied if all datacells contained the mean density. Parameters ---------- ds : xr.Dataset range_label : str Name of an xarray DataArray in ``ds`` containing ``echo_range`` information. Returns ------- xr.DataArray """ dz = delta_z(ds, range_label=range_label) sv = convert_to_linear(ds, "Sv") return ((sv * dz).sum(dim="range_sample")) ** 2 / (sv**2 * dz).sum(dim="range_sample") def aggregation(ds: xr.Dataset, range_label="echo_range") -> xr.DataArray: """Calculated the index of aggregation (IA) [unit: m^-1]. This quantity is reciprocal of the equivalent area. IA is high when small areas are much denser than the rest of the distribution. Parameters ---------- ds : xr.Dataset range_label : str Name of an xarray DataArray in ``ds`` containing ``echo_range`` information. Returns ------- xr.DataArray """ return 1 / evenness(ds, range_label=range_label)
{"/echopype/convert/set_groups_ad2cp.py": ["/echopype/__init__.py", "/echopype/utils/coding.py", "/echopype/convert/parse_ad2cp.py", "/echopype/convert/set_groups_base.py"], "/echopype/tests/utils/test_source_filenames.py": ["/echopype/utils/prov.py"], "/echopype/echodata/convention/__init__.py": ["/echopype/echodata/convention/conv.py"], "/echopype/consolidate/split_beam_angle.py": ["/echopype/calibrate/ek80_complex.py"], "/echopype/calibrate/ecs.py": ["/echopype/utils/log.py"], "/echopype/tests/calibrate/test_ecs_integration.py": ["/echopype/__init__.py", "/echopype/calibrate/ecs.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/cal_params.py"], "/echopype/calibrate/api.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/calibrate/calibrate_azfp.py", "/echopype/calibrate/calibrate_ek.py"], "/echopype/convert/parse_ad2cp.py": ["/echopype/convert/parse_base.py"], "/echopype/echodata/combine.py": ["/echopype/utils/io.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/echodata/echodata.py"], "/echopype/tests/consolidate/test_consolidate_integration.py": ["/echopype/__init__.py"], "/echopype/echodata/api.py": ["/echopype/echodata/echodata.py", "/echopype/core.py"], "/echopype/tests/utils/test_utils_io.py": ["/echopype/utils/io.py"], "/echopype/echodata/sensor_ep_version_mapping/ep_version_mapper.py": ["/echopype/echodata/sensor_ep_version_mapping/v05x_to_v06x.py"], "/echopype/tests/mask/test_mask.py": ["/echopype/__init__.py", "/echopype/mask/__init__.py", "/echopype/mask/api.py"], "/echopype/tests/conftest.py": ["/echopype/testing.py"], "/echopype/tests/echodata/utils.py": ["/echopype/convert/set_groups_base.py", "/echopype/echodata/echodata.py"], "/echopype/tests/commongrid/test_mvbs.py": ["/echopype/__init__.py", "/echopype/commongrid/mvbs.py"], "/echopype/tests/calibrate/test_cal_params_integration.py": ["/echopype/__init__.py"], 
"/echopype/tests/test_core.py": ["/echopype/core.py"], "/echopype/calibrate/calibrate_azfp.py": ["/echopype/echodata/__init__.py", "/echopype/calibrate/cal_params.py", "/echopype/calibrate/calibrate_ek.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/range.py"], "/echopype/echodata/widgets/utils.py": ["/echopype/echodata/convention/utils.py"], "/echopype/echodata/simrad.py": ["/echopype/echodata/echodata.py"], "/echopype/tests/convert/test_convert_ek60.py": ["/echopype/__init__.py"], "/echopype/convert/parsed_to_zarr_ek60.py": ["/echopype/convert/parsed_to_zarr.py"], "/echopype/mask/api.py": ["/echopype/utils/io.py", "/echopype/utils/prov.py"], "/echopype/qc/__init__.py": ["/echopype/qc/api.py"], "/echopype/tests/utils/test_processinglevels_integration.py": ["/echopype/__init__.py"], "/echopype/tests/metrics/test_metrics_summary_statistics.py": ["/echopype/metrics/summary_statistics.py"], "/echopype/__init__.py": ["/echopype/convert/api.py", "/echopype/echodata/api.py", "/echopype/echodata/combine.py", "/echopype/utils/io.py", "/echopype/utils/log.py"], "/.ci_helpers/check-version.py": ["/echopype/__init__.py"], "/echopype/calibrate/ek80_complex.py": ["/echopype/convert/set_groups_ek80.py"], "/echopype/consolidate/__init__.py": ["/echopype/consolidate/api.py"], "/echopype/commongrid/api.py": ["/echopype/utils/prov.py", "/echopype/commongrid/mvbs.py"], "/echopype/tests/echodata/test_echodata_combine.py": ["/echopype/__init__.py", "/echopype/utils/coding.py", "/echopype/echodata/__init__.py", "/echopype/echodata/combine.py"], "/echopype/tests/utils/test_utils_log.py": ["/echopype/__init__.py"], "/echopype/mask/__init__.py": ["/echopype/mask/api.py"], "/echopype/tests/convert/test_convert_ad2cp.py": ["/echopype/__init__.py", "/echopype/testing.py"], "/echopype/convert/parsed_to_zarr_ek80.py": ["/echopype/convert/parsed_to_zarr_ek60.py"], "/echopype/tests/visualize/test_plot.py": ["/echopype/__init__.py", "/echopype/visualize/__init__.py", 
"/echopype/testing.py", "/echopype/calibrate/calibrate_ek.py", "/echopype/echodata/__init__.py", "/echopype/visualize/api.py"], "/echopype/calibrate/env_params.py": ["/echopype/echodata/__init__.py", "/echopype/calibrate/cal_params.py"], "/echopype/convert/utils/ek_raw_parsers.py": ["/echopype/utils/log.py", "/echopype/convert/utils/ek_date_conversion.py"], "/echopype/tests/convert/test_convert_azfp.py": ["/echopype/__init__.py"], "/echopype/tests/calibrate/test_calibrate.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params_old.py"], "/echopype/calibrate/range.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/calibrate/env_params.py"], "/echopype/tests/calibrate/test_env_params.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params.py"], "/echopype/convert/set_groups_base.py": ["/echopype/echodata/convention/__init__.py", "/echopype/utils/coding.py", "/echopype/utils/prov.py"], "/echopype/clean/api.py": ["/echopype/utils/prov.py", "/echopype/clean/noise_est.py"], "/echopype/calibrate/calibrate_ek.py": ["/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/log.py", "/echopype/calibrate/cal_params.py", "/echopype/calibrate/calibrate_base.py", "/echopype/calibrate/ecs.py", "/echopype/calibrate/ek80_complex.py", "/echopype/calibrate/env_params.py", "/echopype/calibrate/range.py"], "/echopype/calibrate/calibrate_base.py": ["/echopype/echodata/__init__.py", "/echopype/utils/log.py", "/echopype/calibrate/ecs.py"], "/echopype/echodata/echodata.py": ["/echopype/utils/coding.py", "/echopype/utils/io.py", "/echopype/utils/log.py", "/echopype/utils/prov.py", "/echopype/echodata/convention/__init__.py", "/echopype/echodata/widgets/utils.py", "/echopype/echodata/widgets/widgets.py", "/echopype/core.py", "/echopype/convert/api.py"], "/echopype/visualize/api.py": ["/echopype/visualize/plot.py", "/echopype/echodata/__init__.py", "/echopype/calibrate/calibrate_ek.py", 
"/echopype/calibrate/calibrate_azfp.py", "/echopype/utils/log.py"], "/echopype/echodata/sensor_ep_version_mapping/v05x_to_v06x.py": ["/echopype/core.py", "/echopype/utils/log.py", "/echopype/echodata/convention/__init__.py"], "/echopype/clean/__init__.py": ["/echopype/clean/api.py"], "/echopype/visualize/plot.py": ["/echopype/visualize/cm.py", "/echopype/utils/log.py"], "/echopype/commongrid/__init__.py": ["/echopype/commongrid/api.py"], "/echopype/convert/parsed_to_zarr.py": ["/echopype/utils/io.py"], "/echopype/calibrate/__init__.py": ["/echopype/calibrate/api.py"], "/echopype/echodata/convention/utils.py": ["/echopype/echodata/convention/__init__.py"], "/echopype/tests/echodata/test_echodata.py": ["/echopype/__init__.py", "/echopype/calibrate/env_params_old.py", "/echopype/echodata/__init__.py", "/echopype/calibrate/calibrate_ek.py"], "/echopype/qc/api.py": ["/echopype/echodata/__init__.py", "/echopype/utils/log.py"], "/echopype/visualize/__init__.py": ["/echopype/visualize/api.py"], "/echopype/tests/qc/test_qc.py": ["/echopype/qc/__init__.py", "/echopype/qc/api.py"], "/echopype/core.py": ["/echopype/convert/parse_ad2cp.py", "/echopype/convert/parse_azfp.py", "/echopype/convert/parse_ek60.py", "/echopype/convert/parse_ek80.py", "/echopype/convert/parsed_to_zarr_ek60.py", "/echopype/convert/parsed_to_zarr_ek80.py", "/echopype/convert/set_groups_ad2cp.py", "/echopype/convert/set_groups_azfp.py", "/echopype/convert/set_groups_ek60.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/convert/parse_ek60.py": ["/echopype/convert/parse_base.py"], "/echopype/tests/calibrate/test_cal_params.py": ["/echopype/calibrate/cal_params.py"], "/echopype/tests/calibrate/test_ecs.py": ["/echopype/calibrate/ecs.py"], "/echopype/echodata/__init__.py": ["/echopype/echodata/echodata.py"], "/echopype/tests/convert/test_convert_ek80.py": ["/echopype/__init__.py", "/echopype/testing.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/convert/set_groups_ek80.py": 
["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/convert/set_groups_base.py"], "/echopype/consolidate/api.py": ["/echopype/calibrate/ek80_complex.py", "/echopype/echodata/__init__.py", "/echopype/echodata/simrad.py", "/echopype/utils/io.py", "/echopype/utils/prov.py", "/echopype/consolidate/split_beam_angle.py"], "/echopype/convert/__init__.py": ["/echopype/convert/parse_ad2cp.py", "/echopype/convert/parse_azfp.py", "/echopype/convert/parse_base.py", "/echopype/convert/parse_ek60.py", "/echopype/convert/parse_ek80.py", "/echopype/convert/set_groups_ad2cp.py", "/echopype/convert/set_groups_azfp.py", "/echopype/convert/set_groups_ek60.py", "/echopype/convert/set_groups_ek80.py"], "/echopype/utils/prov.py": ["/echopype/utils/log.py"], "/echopype/convert/parse_ek80.py": ["/echopype/convert/parse_base.py"], "/echopype/tests/echodata/test_echodata_simrad.py": ["/echopype/echodata/simrad.py"], "/echopype/tests/utils/test_coding.py": ["/echopype/utils/coding.py"], "/echopype/echodata/convention/conv.py": ["/echopype/echodata/__init__.py"], "/echopype/metrics/__init__.py": ["/echopype/metrics/summary_statistics.py"], "/echopype/tests/calibrate/test_calibrate_ek80.py": ["/echopype/__init__.py"], "/echopype/tests/commongrid/test_nasc.py": ["/echopype/__init__.py", "/echopype/calibrate/__init__.py", "/echopype/commongrid/__init__.py", "/echopype/commongrid/nasc.py", "/echopype/consolidate/__init__.py"], "/echopype/convert/api.py": ["/echopype/core.py", "/echopype/convert/parsed_to_zarr.py", "/echopype/echodata/echodata.py", "/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/utils/prov.py"], "/echopype/convert/set_groups_azfp.py": ["/echopype/utils/coding.py", "/echopype/convert/set_groups_base.py"], "/echopype/tests/clean/test_noise.py": ["/echopype/__init__.py"], "/echopype/convert/parse_base.py": ["/echopype/utils/log.py", "/echopype/convert/utils/ek_raw_io.py"], "/echopype/tests/convert/test_parsed_to_zarr.py": ["/echopype/__init__.py", 
"/echopype/echodata/echodata.py"], "/echopype/echodata/widgets/widgets.py": ["/echopype/echodata/widgets/utils.py"], "/echopype/tests/echodata/test_echodata_structure.py": ["/echopype/echodata/echodata.py", "/echopype/echodata/api.py"], "/echopype/tests/calibrate/test_range_integration.py": ["/echopype/__init__.py"], "/echopype/tests/calibrate/test_env_params_integration.py": ["/echopype/__init__.py"], "/echopype/tests/convert/test_convert_source_target_locs.py": ["/echopype/__init__.py", "/echopype/utils/coding.py"], "/echopype/tests/utils/test_utils_uwa.py": ["/echopype/utils/uwa.py"], "/echopype/utils/io.py": ["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/core.py"], "/echopype/tests/calibrate/test_ek80_complex.py": ["/echopype/calibrate/ek80_complex.py"], "/echopype/convert/set_groups_ek60.py": ["/echopype/utils/coding.py", "/echopype/utils/log.py", "/echopype/convert/set_groups_base.py"], "/echopype/convert/utils/ek_raw_io.py": ["/echopype/utils/log.py"], "/echopype/convert/parse_azfp.py": ["/echopype/utils/log.py", "/echopype/convert/parse_base.py"]}
73,877
sebleier/stega
refs/heads/master
/stega/__init__.py
class Block(object):
    """One embedding site: the pixel at (x, y) plus its 8 neighbours.

    Iterating a Block yields the red channel of the 8 surrounding pixels
    (centre excluded); ``value`` reads/writes the red channel of the centre
    pixel, leaving the other channels untouched.

    Bug fix: the module was Python-2 only (true ``/`` floor division on
    ints, subscriptable ``map``).  Integer divisions now use ``//`` so the
    original Py2 arithmetic is preserved under Python 3.
    """

    def __init__(self, img, x, y):
        self.img = img
        self.x = x
        self.y = y
        self.bits = 8  # number of neighbour pixels / bits per block

    def __iter__(self):
        x, y = self.x, self.y
        # Red channel of the 8-neighbourhood, row by row, centre excluded.
        yield self.img.getpixel((x - 1, y - 1))[0]
        yield self.img.getpixel((x, y - 1))[0]
        yield self.img.getpixel((x + 1, y - 1))[0]
        yield self.img.getpixel((x - 1, y))[0]
        yield self.img.getpixel((x + 1, y))[0]
        yield self.img.getpixel((x - 1, y + 1))[0]
        yield self.img.getpixel((x, y + 1))[0]
        yield self.img.getpixel((x + 1, y + 1))[0]

    def __len__(self):
        return self.bits

    def get_value(self):
        """Red channel of the centre pixel."""
        return self.img.getpixel((self.x, self.y))[0]

    def set_value(self, value):
        """Write the red channel of the centre pixel, keeping other bands."""
        color = list(self.img.getpixel((self.x, self.y)))
        color[0] = value
        self.img.putpixel((self.x, self.y), tuple(color))

    value = property(get_value, set_value)

    @property
    def avg(self):
        # Integer (floor) division, matching the original Python 2 `/`.
        average = sum(value for value in self) // len(self)
        return average

    @property
    def avg_diff(self):
        """Signed difference between the centre pixel and its neighbour mean."""
        return self.value - self.avg

    @property
    def delta(self):
        # Spread of the neighbourhood on the side the centre pixel deviates to.
        if self.avg_diff < 0:
            d = min(self) - self.avg
        else:
            d = max(self) - self.avg
        return d

    @property
    def s(self):
        """Embedding-capacity bound contributed by this block.

        Returns |delta| when shifting the centre pixel by 2*avg_diff would
        leave the valid intensity range [0, 2**bits - 2], otherwise +inf
        (i.e. this block imposes no bound).
        """
        if self.avg + 2 * self.avg_diff < 0 or pow(2, self.bits) - 2 < self.avg + 2 * self.avg_diff:
            return abs(self.delta)
        else:
            return float("inf")


class BlockSpace(object):
    """Iterable of all usable Blocks in an image.

    Steps of 2 in x and y keep neighbouring blocks from sharing their
    centre pixels; the -7 margin presumably guards the image border —
    TODO confirm against the original algorithm's intent.
    """

    def __init__(self, img):
        self.img = img
        self.width = self.img.size[0]
        self.height = self.img.size[1]

    def __iter__(self):
        for x in range(1, self.width - 7, 2):
            for y in range(1, self.height - 7, 2):
                yield Block(self.img, x, y)


class BitMessage(object):
    """
    Object to allow an index to select the nth bit in a message.

    Bits are addressed least-significant-first within each byte.
    """

    def __init__(self, msg=""):
        # list(...) is required on Python 3, where map() returns an iterator
        # that cannot be indexed or appended to.
        self.msg = list(map(ord, msg))

    def __getitem__(self, index):
        # // keeps the byte index an int (was Py2 integer `/`).
        return self.msg[index // 8] >> (index % 8) & 1

    def __setitem__(self, index, value):
        try:
            self.msg[index // 8] = self.msg[index // 8] | (value << index % 8)
        except IndexError:
            # Grow the buffer by one zero byte, then retry the same OR.
            self.msg.append(0)
            self.msg[index // 8] = self.msg[index // 8] | (value << index % 8)

    @property
    def message(self):
        """The message decoded back into a string."""
        return "".join(map(chr, self.msg))

    def __len__(self):
        # Length in bits, not bytes.
        return len(self.msg) * 8

    def __str__(self):
        return self.message


class Stega(object):
    """Embed/extract a bit message in an image via pixel-value differencing."""

    def __init__(self, img):
        # Work on a copy so the caller's image is never mutated.
        self.img = img.copy()
        self.width = self.img.size[0]
        self.height = self.img.size[1]
        self.B = BlockSpace(self.img)

    @property
    def s(self):
        """Minimum capacity bound over all blocks (cached after first use)."""
        if getattr(self, '_s', None) is not None:
            return self._s
        S = []
        for block in self.B:
            S.append(block.s)
        self._s = min(S)
        return self._s

    def add_message(self, msg):
        """Embed msg, one bit per eligible block, until all bits are stored."""
        n = 0
        W = BitMessage(msg)
        for b in self.B:
            if abs(b.delta) < self.s:
                # Shift the centre pixel by twice its deviation and add the bit.
                b.value = b.avg + 2 * b.delta + W[n]
                n += 1
                if n == len(W):
                    break

    def save(self, name):
        self.img.save(name)

    def close(self):
        self.img.close()

    def extract(self):
        """Recover the embedded message and undo the pixel shifts.

        Returns (restored image, message with trailing NUL padding removed).
        """
        W = BitMessage()
        n = 0
        for b in self.B:
            # Recompute delta from the (shifted) centre pixel.
            if b.value - b.avg < 0:
                delta = min(b) - b.avg
            else:
                delta = max(b) - b.avg
            if delta < self.s:
                bit = (b.value - b.avg) % 2
                W[n] = bit
                if abs(delta) < self.s:
                    # // matches the Py2 integer division used when embedding.
                    b.value = (b.value + b.avg - bit) // 2
                n += 1
        return self.img, "".join(filter(lambda c: ord(c) != 0, W.message))
{"/tests/__init__.py": ["/stega/__init__.py"]}
73,878
sebleier/stega
refs/heads/master
/tests/__init__.py
import os
import sys
import unittest

# Make the package under test importable when the suite is run in place.
path = os.path.join(os.path.dirname(__file__), '..')
sys.path.insert(0, path)

from stega import BitMessage, Block, BlockSpace, Stega
import Image


class TestBitMessage(unittest.TestCase):
    """Bit-level addressing of a BitMessage (least-significant bit first)."""

    def assert_bits(self, msg, expected_bits):
        # Compare msg[i] against expected_bits[i] for every listed index.
        for index, expected in enumerate(expected_bits):
            self.assertEqual(msg[index], expected)

    def test_usage_64(self):
        # chr(64) == 0b01000000 read LSB-first.
        self.assert_bits(BitMessage(chr(64)), [0, 0, 0, 0, 0, 0, 1, 0])

    def test_usage_63(self):
        # chr(63) == 0b00111111 read LSB-first.
        self.assert_bits(BitMessage(chr(63)), [1, 1, 1, 1, 1, 1, 0, 0])

    def test_usage_two_chars(self):
        # Second byte's bits follow the first byte's bits.
        expected = [1, 0, 0, 0, 0, 0, 0, 0] + [1] * 8
        self.assert_bits(BitMessage(chr(1) + chr(255)), expected)

    def test_inputing_message(self):
        # Setting bits one by one reassembles into the expected character.
        msg = BitMessage()
        for index, bit in enumerate([0, 0, 0, 0, 0, 0, 1, 0]):
            msg[index] = bit
        self.assertEqual(msg.message, chr(64))

    def test_reassembling(self):
        self.assertEqual(str(BitMessage("sean")), "sean")


def paint_color(image, width, height, color):
    """Fill the width x height region of image with a single color."""
    for x in range(width):
        for y in range(height):
            image.putpixel((x, y), color)


class BlockTest(unittest.TestCase):
    def setUp(self):
        self.image = Image.new('RGB', (9, 9))
        paint_color(self.image, 9, 9, (200, 0, 0))

    def test_block(self):
        # A block on a uniform image yields eight identical neighbour values.
        neighbours = list(Block(self.image, 1, 1))
        self.assertEqual(len(neighbours), 8)
        for color in neighbours:
            self.assertEqual(color, 200)


class BlockSpaceTest(unittest.TestCase):
    def setUp(self):
        self.image = Image.new('RGB', (11, 11))
        paint_color(self.image, 11, 11, (200, 0, 0))

    def test_blockspace(self):
        # An 11x11 image yields exactly 4 blocks, each with 8 neighbours.
        blocks = list(BlockSpace(self.image))
        self.assertEqual(len(blocks), 4)
        for block in blocks:
            neighbours = list(block)
            self.assertEqual(len(neighbours), 8)
            for color in neighbours:
                self.assertEqual(color, 200)


class StegaTest(unittest.TestCase):
    def setUp(self):
        self.image = Image.new('RGB', (111, 111))
        self.original = self.image.copy()

    """
    Borked for me right now because of a PIL-inside-a-virtualenv problem.
    def test_stega(self):
        " Test steganography by saving image to disk, then reopening to extract message "
        msg = "daniellindsleyrocksdahouse"
        s = Stega(self.image)
        s.add_message(msg)
        s.save("test_image.png")
        s.close()
        saved_image = Image.open('test_image.png')
        s = Stega(saved_image)
        im, message = s.extract()
        self.assertEqual(message, msg)
        self.assertEqual(im.tostring(), self.original.tostring())
    """

    def test_stega(self):
        """
        Test stega with in memory image representation, i.e. not saving
        to disk and reopening later.
        """
        msg = "daniellindsleyrocksdahouse"
        s = Stega(self.image)
        s.add_message(msg)
        im, message = s.extract()
        self.assertEqual(message, msg)
        self.assertEqual(im.tostring(), self.original.tostring())


if __name__ == '__main__':
    unittest.main()
{"/tests/__init__.py": ["/stega/__init__.py"]}
73,879
sebleier/stega
refs/heads/master
/setup.py
from setuptools import setup setup( name = "stega", url = "http://github.com/sebleier/stega/", author = "Sean Bleier", author_email = "sebleier@gmail.com", version = "0.0.1", packages = ["stega"], description = "A steganography tool for lossless message hiding in images.", install_requires=['PIL'], )
{"/tests/__init__.py": ["/stega/__init__.py"]}
73,880
drea713/Flask_Registration_Functionality
refs/heads/main
/app/__init__.py
from config import Config from flask import Flask from flask_sqlalchemy import SQLAlchemy from flask_migrate import Migrate from flask_login import LoginManager app = Flask(__name__) app.config.from_object(Config) # setup database connection db = SQLAlchemy(app) migrate = Migrate(app, db, compare_type=True) login_manager = LoginManager(app) from app import routes, models
{"/app/models.py": ["/app/__init__.py"], "/app/routes.py": ["/app/__init__.py", "/app/models.py"]}
73,881
drea713/Flask_Registration_Functionality
refs/heads/main
/app/models.py
from app import db from datetime import datetime as dt from app import login_manager from flask_login import UserMixin from werkzeug.security import generate_password_hash, check_password_hash class Post(db.Model): id = db.Column(db.Integer, primary_key=True) body = db.Column(db.Text()) date_created = db.Column(db.DateTime(), default=dt.utcnow) user_id = db.Column(db.Integer, db.ForeignKey('user.id')) class User(UserMixin, db.Model): id = db.Column(db.Integer, primary_key=True) first_name = db.Column(db.String(50)) last_name = db.Column(db.String(50)) email = db.Column(db.String(50)) password = db.Column(db.String(250)) posts = db.relationship('Post', backref='user', lazy='dynamic') def create_password_hash(self, new_password): self.password = generate_password_hash(new_password) def check_password(self, current_password): return check_password_hash(self.password, current_password) @login_manager.user_loader def load_user(user_id): return User.query.get(int(user_id)) # HASHING AND SALTING # lucas-hash # derek-hash # HASHING # password = abc123 # translation => er7p98789arhuo8bozufjn2 # SALTING # real password for password 1 = abc123 # original = er7p98789arhuo8bozufjn2 # salt = 2q480we89b801dfuuoijsriodfuo # real password for password 2 = abc123 # original = er7p98789arhuo8bozufjn2 # salt = 84yar8h90fd9n80uO2YAH09 # REAL_PASSWORD = ABC123 # salt = 84yar8h90fd9n80uO2YAH09
{"/app/models.py": ["/app/__init__.py"], "/app/routes.py": ["/app/__init__.py", "/app/models.py"]}
73,882
drea713/Flask_Registration_Functionality
refs/heads/main
/app/routes.py
from flask.helpers import url_for
import flask_login
from app import app, db
from flask import render_template, request, redirect, url_for, flash
from app.models import Post, User
from flask_login import login_user, logout_user, current_user


@app.route('/', methods=['GET', 'POST'])
def home():
    """Feed page: list all posts; on POST create a new post."""
    print(current_user if current_user else None)
    if request.method == 'POST':
        # NOTE(review): author is hard-coded to user 1 — confirm whether this
        # should become current_user.id once login is enforced here.
        p = Post(
            body=request.form.get('body'),
            user_id=1
        )
        db.session.add(p)
        db.session.commit()
        flash('Post created successfully', 'success')
        return redirect(url_for('home'))
    context = {
        'posts': Post.query.order_by(Post.date_created.desc()).all()
    }
    return render_template('home.html', **context)


@app.route('/about')
def about():
    return render_template('about.html')


@app.route('/contact')
def contact():
    return render_template('contact.html')


@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user by email/password and start a session."""
    if request.method == 'POST':
        email = request.form.get('email')
        password = request.form.get('password')
        # look for the user in our database
        user = User.query.filter_by(email=email).first()
        # if the email and/or password don't match,
        if user is None or not user.check_password(password):
            # show an error message and redirect back to the login page
            flash('You typed in either an incorrect email or password', 'danger')
            return redirect(url_for('login'))
        # otherwise log the user in
        login_user(user)
        flash('You have logged in successfully!', 'info')
        return redirect(url_for('home'))
    return render_template('login.html')


@app.route('/logout')
def logout():
    logout_user()
    flash('You have logged out successfully', 'primary')
    return redirect(url_for('home'))


@app.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new account, then send the user to the login page.

    Bug fix: this view was previously a second ``def login()`` — Flask raises
    "View function mapping is overwriting an existing endpoint" on the
    duplicate name at import time — and its body was a copy of the login
    logic, so it never actually created a user.
    """
    if request.method == 'POST':
        # NOTE(review): the original read keys with spaces ('first name',
        # 'last name'); kept as-is — confirm against the template's field names.
        first_name = request.form.get('first name')
        last_name = request.form.get('last name')
        email = request.form.get('email')
        password = request.form.get('password')
        # Refuse to register an email that already has an account.
        if User.query.filter_by(email=email).first() is not None:
            flash('An account with that email already exists', 'danger')
            return redirect(url_for('register'))
        user = User(first_name=first_name, last_name=last_name, email=email)
        # Store only the salted hash of the password (see models.User).
        user.create_password_hash(password)
        db.session.add(user)
        db.session.commit()
        flash('You have registered successfully! Please log in.', 'info')
        return redirect(url_for('login'))
    # NOTE(review): the original rendered login.html here; swap to a
    # dedicated register.html template once it exists.
    return render_template('login.html')
{"/app/models.py": ["/app/__init__.py"], "/app/routes.py": ["/app/__init__.py", "/app/models.py"]}
73,883
sjtuwyf/label_with_osvos
refs/heads/master
/main.py
# fix opengl load error on linux platform # according to https://bugs.launchpad.net/ubuntu/+source/python-qt4/+bug/941826 import os os.environ['QT_QUICK_CONTROLS_STYLE'] = 'Material' import ctypes from ctypes import util ctypes.CDLL(util.find_library('GL'), ctypes.RTLD_GLOBAL) import sys from PyQt5.QtCore import QUrl, QObject, pyqtSignal, pyqtSlot from PyQt5.QtWidgets import QApplication, QMainWindow from PyQt5.QtQml import QQmlEngine, QQmlComponent, QQmlApplicationEngine from PyQt5.QtQuick import QQuickView from osvos_demo import run_osvos if __name__ == '__main__': myApp = QApplication(sys.argv) engine = QQmlApplicationEngine() # context = engine.rootContext() # context.setContextProperty() engine.load(QUrl('qml/main.qml')) main_window = engine.rootObjects()[0] def osvos(message): main_window.setStatusBarContent("Training") # run_osvos() print("run osvos") main_window.setStatusBarContent("Finished") main_window.runOSVOS.connect(osvos) sys.exit(myApp.exec_())
{"/main.py": ["/osvos_demo.py"]}
73,884
sjtuwyf/label_with_osvos
refs/heads/master
/osvos_demo.py
import os import tensorflow as tf import osvos from dataset import Dataset def run_osvos(): # User defined parameters gpu_id = 0 train_model = True result_path = os.path.join('data', 'result') # Train parameters seq_name = 'osvos' parent_path = os.path.join('models', 'OSVOS_parent', 'OSVOS_parent.ckpt-50000') logs_path = os.path.join('models', 'osvos') max_training_iters = 10 # Define Dataset test_frames = sorted(os.listdir(os.path.join('data', 'imgs'))) test_imgs = [os.path.join('data', 'imgs', frame) for frame in test_frames] if train_model: train_imgs = [os.path.join('data', 'imgs', '00000.jpg')+' '+ os.path.join('data', 'annotations', '00000.png')] dataset = Dataset(train_imgs, test_imgs, './', data_aug=True) else: dataset = Dataset(None, test_imgs, './') # Train the network if train_model: # More training parameters learning_rate = 1e-8 save_step = max_training_iters side_supervision = 3 display_step = 10 with tf.Graph().as_default(): with tf.device('/gpu:' + str(gpu_id)): global_step = tf.Variable(0, name='global_step', trainable=False) osvos.train_finetune(dataset, parent_path, side_supervision, learning_rate, logs_path, max_training_iters, save_step, display_step, global_step, iter_mean_grad=1, ckpt_name=seq_name) # Test the network with tf.Graph().as_default(): with tf.device('/gpu:' + str(gpu_id)): checkpoint_path = os.path.join('models', seq_name, seq_name+'.ckpt-'+str(max_training_iters)) osvos.test(dataset, checkpoint_path, result_path)
{"/main.py": ["/osvos_demo.py"]}
73,885
mantissa-aidan/TwoUp
refs/heads/master
/blackjack_game.py
import gym
env = gym.make('Blackjack-v0')
from tkinter import *
import numpy as np
from PIL import Image, ImageTk

observation = env.reset()

### blacjack environment shit
# observation = env.reset()
# action = env.action_space.sample()
# observation, reward, done, info = env.step(action)
###

### setup gui structure
root = Tk()
root.title("HitAgent_io")
root.geometry('{}x{}'.format(500, 350))
dealer_title = Label(root, text='Dealers Hand', font="Arial")
dealer_title.pack()
frame = Frame(root)
frame.pack()
middleframe = Frame(root)
middleframe.pack(side=BOTTOM)
bottomframe = Frame(middleframe)
bottomframe.pack(side=BOTTOM)

## dealer images
dealer_images = []
card_size = (75, 125)
img = Image.open('cards/JPEG/blue_back.jpg')
img = img.resize(card_size, Image.ANTIALIAS)
dealer_card = ImageTk.PhotoImage(img)
dealer_imgLabel = Label(frame, image=dealer_card)
dealer_images.append(dealer_imgLabel)

# ai_hand
ai_images = []
ai_title = Label(middleframe, text='Ai\'s hand', font="Arial")
ai_title.pack()
img = Image.open('cards/JPEG/blue_back.jpg')
img = img.resize(card_size, Image.ANTIALIAS)
temp_image = ImageTk.PhotoImage(img)
ai_imgLabel = Label(middleframe, image=temp_image)
ai_images.append(ai_imgLabel)

obs_label = Label(bottomframe, text='none', font="Arial")
obs_label.pack(side=BOTTOM)


### Game functions
def getAiHand():
    """Return the agent's current hand (list of card values) from the env."""
    print("hand:", env.player)
    return env.player


### Button scaffold - needs to go here because this must be defined before being called
playRoundBtn = Button(bottomframe, text='Next',
                      command=lambda: onclick(frame, middleframe, ai_images, dealer_images))

game_switcher = {
    "getAiHand": getAiHand
}


# Gui functions
def addCardImage(path, frame, img_list):
    """Load a card image, show it as a Label in frame, and record the Label.

    Bug fixes: the PhotoImage is now attached to the Label (otherwise it is
    garbage-collected and the card renders blank), and the Label is packed
    (previously new cards were created but never displayed).
    """
    img = Image.open(path)
    img = img.resize(card_size, Image.ANTIALIAS)
    temp_image = ImageTk.PhotoImage(img)
    image_label = Label(frame, image=temp_image)
    image_label.image = temp_image  # keep a reference so Tk can draw it
    image_label.pack(side=LEFT)
    img_list.append(image_label)
    return img_list


def updateLabelText(label, text):
    """Replace a Label's displayed text."""
    label["text"] = text


def getCardPath(card_index):
    """Map a Blackjack card value (1=ace, 2-9 pip, 10=ten/face) to a JPEG path.

    Value 10 covers 10/J/Q/K, so a face is drawn at random for display; the
    suit is also random, since the env does not track suits.

    Bug fix: np.random.randint(3) yields 0-2 and could never select the
    fourth suit ("S") or the fourth face ("10"); sample the full lists.
    """
    suite = ["H", "D", "C", "S"]
    tens = ["K", "Q", "J", "10"]
    if card_index > 1:
        if card_index == 10:
            t_i = np.random.randint(len(tens))
            s_i = np.random.randint(len(suite))
            return "cards/JPEG/" + tens[t_i] + suite[s_i] + ".jpg"
        s_i = np.random.randint(len(suite))
        return "cards/JPEG/" + str(card_index) + suite[s_i] + ".jpg"
    # Ace (card_index == 1).
    # NOTE(review): this reproduces the original "1A<suit>.jpg" filename —
    # confirm the assets are really named this way (not "A<suit>.jpg").
    s_i = np.random.randint(len(suite))
    return "cards/JPEG/" + str(card_index) + "A" + suite[s_i] + ".jpg"


def updateAI(curr, middleframe, ai_images):
    """Append the card `curr` to the AI's displayed hand."""
    path = getCardPath(curr)
    addCardImage(path, middleframe, ai_images)
    return ai_images


def updateDealer(curr, frame, dealer_images):
    """Append the card `curr` to the dealer's displayed hand.

    Bug fix: the parameter was previously named ai_images while the body
    used the global dealer_images; the name now matches what is used.
    """
    path = getCardPath(curr)
    addCardImage(path, frame, dealer_images)
    return dealer_images


# Bug fix: this dict was previously defined twice; keep the complete one.
gui_switcher = {
    "updateLabelText": updateLabelText,
    "addCardImage": addCardImage,
    "updateAI": updateAI,
    "updateDealer": updateDealer
}


def onclick(frame, middleframe, ai_images, dealer_images):
    """Button handler: show the first card of each current hand."""
    curr_ai = env.player
    curr_dealer = env.dealer
    ai_images = updateAI(curr_ai[0], middleframe, ai_images)
    dealer_images = updateDealer(curr_dealer[0], frame, dealer_images)
    return (ai_images, dealer_images)


### displaying the button
for image in dealer_images:
    image.pack(side=LEFT)
for image in ai_images:
    image.pack(side=LEFT)
playRoundBtn.pack(side=LEFT)

root.mainloop()

# for i_episode in range(20):
#     observation = env.reset()
#     for t in range(100):
#         # env.render()
#         print(observation)
#         action = env.action_space.sample()
#         observation, reward, done, info = env.step(action)
#         print("obs", observation, "reward", reward, "done", done,"info",info)
#         if done:
#             print("Episode finished after {} timesteps".format(t+1))
#             break
# env.close()
{"/twoup_gamev2.py": ["/DQN.py"]}
73,886
mantissa-aidan/TwoUp
refs/heads/master
/test.py
def subset_sum(numbers, target, partial=[], partial_sum=0): if partial_sum <= target: yield partial if partial_sum >= target: return for i, n in enumerate(numbers): remaining = numbers[i + 1:] yield from subset_sum(remaining, target, partial + [n], partial_sum + n) bets = [] for bet in list(subset_sum([5,10,20,50,100],250)): bets.append(bet) print(len(bets))
{"/twoup_gamev2.py": ["/DQN.py"]}
73,887
mantissa-aidan/TwoUp
refs/heads/master
/gym/gym/envs/aidan_envs/two_up.py
import numpy as np
import gym
from gym import spaces
from gym.utils import seeding


def flip(np_random):
    """Return 1 (heads) or 0 (tails) with equal probability via np_random."""
    return 1 if np_random.uniform() < 0.5 else 0


def subset_sum(numbers, target, partial=[], partial_sum=0):
    """Yield every subset of `numbers` whose running sum stays <= target.

    The default `partial` list is never mutated, so the mutable default is
    harmless here (kept for signature compatibility).
    """
    if partial_sum <= target:
        yield partial
    if partial_sum >= target:
        return
    for i, n in enumerate(numbers):
        remaining = numbers[i + 1:]
        yield from subset_sum(remaining, target, partial + [n], partial_sum + n)


def bets_list(notes, max):
    """Return the dollar total of every subset of `notes` summing to <= max.

    Note: the parameter name `max` shadows the builtin; kept for
    backward compatibility with existing callers.
    """
    bets = []
    for bet in list(subset_sum(notes, max)):
        bets.append(sum(bet))
    return bets


class TwoUp(gym.Env):
    """Two up: two coin=s, one heads up, one tails up, if it lands one heads
    up one tails up noone wins. If you get two tails or two heads someone
    wins. Lets say 10 players plus the agent, all initialized with random
    amounts between $10-$250. Can choose to bid heads or tails, or match a
    bid of another player.
    """
    metadata = {'render.modes': ['human']}

    def __init__(self, initial_wealth=200, max_wealth=10000, max_rounds=10):
        super(TwoUp, self).__init__()
        # Actions 0-15 bet heads, 16-31 bet tails; within each half the
        # index selects a dollar amount from the precomputed bet table.
        self.action_space = spaces.Discrete(32)
        self.reward_range = (0, max_wealth)
        self.wealth = initial_wealth
        self.initial_wealth = initial_wealth
        self.winnings = 0
        # NOTE(review): _get_obs returns a scalar, not a (1, 1) array —
        # confirm whether the observation space should be simplified.
        self.observation_space = spaces.Box(low=-1000, high=1000, shape=(1, 1), dtype=np.float32)
        self.max_rounds = max_rounds
        self.max_wealth = max_wealth
        self.rounds = max_rounds
        self.side = ""
        self.coin1 = ""
        self.coin2 = ""
        self.round_result = ""
        self.heads_tails = ["H", "T"]
        self.curr_bet = 0
        # Compute the 32-entry bet table once (16 heads amounts + the same
        # 16 tails amounts); it was previously rebuilt on every step().
        bet_amounts = bets_list([10, 20, 50, 100], 250)
        self.bets = bet_amounts + bet_amounts

    def seed(self, seed=None):
        """Seed the env's private RNG; returns the seed actually used."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def step(self, action):
        """Place the bet encoded by `action`, toss both coins, settle up.

        Returns (observation, reward, done, info); reward is the final
        round's winnings when the episode ends, else 0.
        """
        bet_in_dollars = self.bets[action]
        self.curr_bet = bet_in_dollars
        # First half of the action space bets heads, second half tails.
        self.side = "H" if action <= 15 else "T"
        self.rounds -= 1
        self.coin1 = self.heads_tails[flip(self.np_random)]
        self.coin2 = self.heads_tails[flip(self.np_random)]
        if self.coin1 == self.coin2 and self.coin1 == self.side:
            self.round_result = "WIN"
            self.wealth += bet_in_dollars
            self.winnings = bet_in_dollars
        elif self.coin1 == self.coin2 and self.coin1 != self.side:
            self.round_result = "LOSE"
            self.wealth -= bet_in_dollars
            self.winnings = bet_in_dollars * -1
        else:
            # Odds (one head, one tail): nobody wins this toss.
            # NOTE(review): self.winnings keeps its previous value here, so
            # a terminal no-winners round reports the prior round's winnings
            # as reward — confirm whether it should be reset to 0.
            self.round_result = "NO WINNERS"
        done = self.wealth < 0.01 or self.wealth == self.max_wealth or not self.rounds
        reward = self.winnings if done else 0.0
        return self._get_obs(), reward, done, {}

    def get_bets(self):
        """Return the dollar amounts the agent can bet, indexed by action.

        Bug fix: previously returned self.betting_pool, an attribute that
        was never assigned (AttributeError on every call).
        """
        return self.bets

    def reset(self):
        """Restore wealth, rounds and winnings to their initial values.

        Bug fix: reset() was defined twice; the later definition called
        self.__init__() with no arguments, silently discarding any custom
        initial_wealth/max_wealth/max_rounds the env was constructed with.
        """
        self.rounds = self.max_rounds
        self.wealth = self.initial_wealth
        self.winnings = 0
        return self._get_obs()

    def render(self, mode='human'):
        """Print the last toss and the running tally to stdout."""
        print("Side bet", self.side, "Amount: $", self.curr_bet)
        print("Flip:", self.coin1, self.coin2, "Side bet", self.side,
              "Result:", self.round_result, "Winnings: ", self.winnings,
              "Current wealth: ", self.wealth, "; Rounds left: ", self.rounds)
        print()

    def _get_obs(self):
        # Observation is just the last round's winnings.
        return (self.winnings)
{"/twoup_gamev2.py": ["/DQN.py"]}
73,888
mantissa-aidan/TwoUp
refs/heads/master
/DQN.py
import h5py
import numpy as np
from collections import deque
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
from keras.models import load_model
import os
import datetime

#For GPU training on windows
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))

### For gpu training on WSL
# import tensorflow.compat.v1 as tf
# tf.enable_eager_execution(tf.ConfigProto(log_device_placement=True))


class DQNAgent:
    """Deep Q-learning agent with an experience-replay buffer and an
    epsilon-greedy policy, built on a small dense Keras network."""

    def __init__(self, environment, trained_model=None):
        # Initialize constant
        self.environment = environment
        # Assumes a Box observation space (uses .shape[0]) and a Discrete
        # action space (uses .n) — TODO confirm for every env used.
        self.obs_size = environment.observation_space.shape[0]
        self.action_size = environment.action_space.n
        # Window length for the running average reward printout.
        self.consecutive_episodes = 100
        # Hyperparameters of the training
        self.learning_rate = 0.0005
        self.gamma = 0.99 # discount factor
        self.replay_memory = 50000
        self.replay_size = 128
        # Initialize neural network model: either load a saved model from
        # disk or build a fresh one.
        if trained_model:
            self.model = self.load_model(filename=trained_model)
        else:
            self.model = self.build_model()
        # Exploration/exploitations parameters
        self.epsilon = 1.0
        self.epsilon_decay = 0.995
        self.epsilon_min = 0.001
        # Number of episodes to collect before replay training begins.
        self.episode_b4_replay = 32
        # Define variable: bounded replay buffer and reward bookkeeping.
        self.storage = deque(maxlen=self.replay_memory)
        self.sum_reward, self.rewards_lst = 0.0, []

    def build_model(self):
        """Build the Q-network: two hidden relu layers sized by the
        observation width, linear output of one Q-value per action."""
        # Neural Network for Deep-Q learning Model
        model = Sequential()
        model.add(Dense(64 * self.obs_size, input_dim=self.obs_size, use_bias=True, activation='relu'))
        model.add(Dense(64 * self.obs_size, use_bias=True, activation='relu'))
        model.add(Dense(self.action_size, use_bias=True, activation='linear'))
        # NOTE(review): `lr=` is the legacy Keras argument name; newer Keras
        # versions expect `learning_rate=` — confirm the pinned version.
        model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
        return model

    def store(self, state, action, reward, next_state, done):
        """Append one (s, a, r, s', done) transition to the replay buffer,
        with states reshaped to (1, obs_size) rows."""
        # Save history to storage for replay
        cnt_state = np.reshape(state, [1, self.obs_size])
        new_state = np.reshape(next_state, [1, self.obs_size])
        self.storage.append((cnt_state, np.array([action]), np.array([reward]), new_state, np.array([done])))

    def action(self, state, reward, done, episode, training=True):
        """Advance bookkeeping and choose the next action.

        Returns -1 at the end of an episode (after logging the reward and
        decaying epsilon); otherwise an action index — epsilon-greedy while
        training, pure argmax when running a trained agent.
        """
        # Update cumulative reward
        self.sum_reward += reward
        # Episode ends
        if done:
            self.rewards_lst.append(self.sum_reward)
            avg_reward = np.mean(self.rewards_lst[-self.consecutive_episodes: ])
            print ('Episode %4d, Reward: %5d, Average rewards %5d' %(episode, self.sum_reward, avg_reward))
            self.sum_reward = 0.0
            # Decay exploration once per episode, never below epsilon_min.
            self.epsilon = max(self.epsilon_decay * self.epsilon, self.epsilon_min)
            return -1
        # Episode not ends: return next action
        else:
            cnt_state = np.reshape(state, [1, self.obs_size])
            # Train agent
            if training:
                if episode >= self.episode_b4_replay:
                    # Enough episodes collected: learn from a minibatch,
                    # then act epsilon-greedily.
                    self.replay()
                    if np.random.random() < self.epsilon:
                        action = self.environment.action_space.sample()
                    else:
                        act_values = self.model.predict(cnt_state)
                        action = np.argmax(act_values[0])
                else:
                    # Warm-up phase: act randomly to fill the buffer.
                    action = self.environment.action_space.sample()
            # Run trained agent
            else:
                act_values = self.model.predict(cnt_state)
                action = np.argmax(act_values[0])
            return action

    def replay(self):
        """Sample a random minibatch from storage and fit the Q-network on
        the standard one-step TD targets r + gamma * max_a' Q(s', a')."""
        minibatch_idx = np.random.permutation(len(self.storage))[: self.replay_size]
        states = np.concatenate([self.storage[i][0] for i in minibatch_idx], axis=0)
        actions = np.concatenate([self.storage[i][1] for i in minibatch_idx], axis=0)
        rewards = np.concatenate([self.storage[i][2] for i in minibatch_idx], axis=0)
        next_states = np.concatenate([self.storage[i][3] for i in minibatch_idx], axis=0)
        dones = np.concatenate([self.storage[i][4] for i in minibatch_idx], axis=0)
        X_batch = np.copy(states)
        Y_batch = np.zeros((self.replay_size, self.action_size), dtype=np.float64)
        # Current Q-values (training targets start from these) and the
        # next-state Q-values used to bootstrap the TD target.
        qValues_batch = self.model.predict(states)
        qValuesNewState_batch = self.model.predict(next_states)
        targetValue_batch = np.copy(rewards)
        # Terminal transitions (dones == 1) keep the bare reward as target.
        targetValue_batch += (1 - dones) * self.gamma * np.amax(qValuesNewState_batch, axis=1)
        for idx in range(self.replay_size):
            targetValue = targetValue_batch[idx]
            # Only the taken action's Q-value is moved toward the target.
            Y_sample = qValues_batch[idx]
            Y_sample[actions[idx]] = targetValue
            Y_batch[idx] = Y_sample
            if dones[idx]:
                # Extra training row for terminal next-states: every action
                # maps to the raw reward.
                X_batch = np.append(X_batch, np.reshape(np.copy(next_states[idx]), (1, self.obs_size)), axis=0)
                Y_batch = np.append(Y_batch, np.array([[rewards[idx]] * self.action_size]), axis=0)
        self.model.fit(X_batch, Y_batch, batch_size=len(X_batch), epochs=1, verbose=0)

    def save_model(self, filename):
        """Persist the Q-network (architecture + weights) to filename."""
        self.model.save(filename)

    def load_model(self, filename):
        """Load a previously saved Q-network from filename."""
        return load_model(filename)
{"/twoup_gamev2.py": ["/DQN.py"]}
73,889
mantissa-aidan/TwoUp
refs/heads/master
/twoup_gamev2.py
import shutil
import gym
import tempfile
from DQN import DQNAgent
from tqdm import *
import os
import datetime
import numpy as np
from keras.callbacks import TensorBoard
import tensorflow as tf


def train(environment, model_name=None, key=None):
    """Train a DQNAgent on `environment` for 5000 episodes.

    Parameters
    ----------
    environment : str
        Gym environment id to train on.
    model_name : str, optional
        If given, the Keras model is saved to this file every 10 episodes
        and at the end of training.
    key : str, optional
        If given, the monitored run is uploaded via gym.upload.
    """
    tdir = tempfile.mkdtemp()
    env = gym.make(environment)
    env = gym.wrappers.Monitor(env, tdir, force=True)
    agent = DQNAgent(env)
    env.seed(0)
    # agent.load_model("TwoUp-v0_model.h5")
    EPISODES = 5000
    current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    log_dir = 'logs/dqn/' + current_time
    # Bug fix: create the TensorBoard writer ONCE, before the loop.  The
    # original re-created it inside every episode, spawning a new event
    # file / file handle 5000 times.
    summary_writer = tf.summary.create_file_writer(log_dir)
    total_rewards = np.empty(EPISODES)
    for episode in trange(EPISODES):
        state, reward, done = env.reset(), 0.0, False
        action = agent.action(state, reward, done, episode)
        total_rewards[episode] = reward
        # Running 100-episode average; kept for the optional scalar below.
        avg_rewards = total_rewards[max(0, episode - 100):(episode + 1)].mean()
        while not done:
            # env.render()
            next_state, reward, done, _ = env.step(action)
            agent.store(state, action, reward, next_state, done)
            state = next_state
            action = agent.action(state, reward, done, episode)
        with summary_writer.as_default():
            tf.summary.scalar('episode reward', reward, step=episode)
            # tf.summary.scalar('running avg reward(100)', avg_rewards, step=episode)
        # Checkpoint every 10 episodes and on the final episode.
        if model_name and (episode == EPISODES - 1 or episode % 10 == 0):
            agent.save_model(filename=model_name)
    env.close()
    if key:
        gym.upload(tdir, api_key=key)
    shutil.rmtree(tdir)


def run(environment, model_name, key=None):
    """Run a previously trained model (no learning, greedy policy) for 10 episodes."""
    tdir = tempfile.mkdtemp()
    env = gym.make(environment)
    env = gym.wrappers.Monitor(env, tdir, force=True)
    agent = DQNAgent(env, trained_model=model_name)
    EPISODES = 10
    env.seed(0)
    for episode in range(EPISODES):
        state, reward, done = env.reset(), 0.0, False
        action = agent.action(state, reward, done, episode, training=False)
        while not done:
            env.render()
            next_state, reward, done, _ = env.step(action)
            state = next_state
            action = agent.action(state, reward, done, episode, training=False)
    env.close()
    if key:
        gym.upload(tdir, api_key=key)
    shutil.rmtree(tdir)


if __name__ == "__main__":
    environment = 'TwoUp-v0'
    api_key = ""
    my_model = environment + '_model_initial_wealth_200_rounds_per_episode_10.h5'
    # train(environment=environment, key=api_key, model_name=my_model)
    run(environment=environment, key=api_key, model_name=my_model)
{"/twoup_gamev2.py": ["/DQN.py"]}
73,890
mantissa-aidan/TwoUp
refs/heads/master
/gym/gym/envs/aidan_envs/__init__.py
from gym.envs.aidan_envs.two_up import TwoUp
{"/twoup_gamev2.py": ["/DQN.py"]}
73,891
mantissa-aidan/TwoUp
refs/heads/master
/blackjack_game_v2.py
from tkinter import *
from PIL import Image, ImageTk
import numpy as np
import gym
import time
import gym
import numpy as np
import pandas as pd
from collections import namedtuple
from collections import defaultdict
from tqdm import trange

# Global keep-alive list for PhotoImage objects (Tk discards images that
# are garbage-collected, so references must be held somewhere).
images = []

# http://ernie55ernie.github.io/assets/blackjack.html


class Window(Frame):
    """A green card panel (player or dealer hand) rendered in a Tk Text widget."""

    def __init__(self, master, pack):
        self.frame = Frame.__init__(self, master, height=300, width=150, relief="raised", background="dark green")
        self.master = master
        self.player_cards = Text(self, height=10, width=50, background="dark green")
        self.start_window()
        self.pack = pack

    def reset(self):
        # Replace the card strip with a fresh empty Text widget.
        self.player_cards.destroy()
        self.player_cards = Text(self, height=10, width=50, background="dark green")
        # NOTE(review): this binds a LOCAL `images`, it does not clear the
        # module-level list above -- old PhotoImages accumulate. Confirm intent.
        images = []

    def getCardPath(self, card_index):
        """Map a Blackjack card value (0..10) to a card image file path."""
        suite = ["H", "D", "C", "S"]
        tens = ["K", "Q", "J", "10"]
        path = ""
        if card_index == 0:
            # Face-down / placeholder card.
            path = "cards/JPEG/0.jpg"
            return path
        if card_index > 1:
            if card_index == 10:
                # NOTE(review): np.random.randint(3) draws from {0,1,2}, so
                # "10" and the "S" suite are never chosen -- likely meant
                # randint(4); confirm which image files exist.
                t_i = np.random.randint(3)
                s_i = np.random.randint(3)
                path = "cards/JPEG/" + tens[t_i] + suite[s_i] + ".jpg"
                return path
            else:
                s_i = np.random.randint(3)
                path = "cards/JPEG/" + str(card_index) + suite[s_i] + ".jpg"
                return path
        else:
            # card_index == 1: ace.
            s_i = np.random.randint(3)
            path = "cards/JPEG/" + str(card_index) + "A" + suite[s_i] + ".jpg"
            return path
        return "unexpected item in the bagging area"

    def update(self, card):
        """Append the image for `card` to this panel and redraw it read-only."""
        im = []
        im.append(self.getCardPath(card))
        x = 0
        for i in (im):
            card_size = (75, 125)
            imgs = Image.open(im[x])
            imgs = imgs.resize(card_size, Image.ANTIALIAS)
            mi = ImageTk.PhotoImage(imgs)
            # Keep a global reference so Tk does not drop the image.
            images.append(mi)
            self.player_cards.image_create(END, image=mi)
            x = x + 1
        self.player_cards.pack()
        self.player_cards.config(state=DISABLED)

    def start_window(self):
        self.pack()


# ----- script-level GUI setup -----
root = Tk()
root.title("HitAgent_io")
env = gym.make('Blackjack-v0')
label1 = Label(root, text="Player", font="bold").pack()
sum1 = Label(root, text="Sum:")
sum1.pack()
player = Window(root, (1, 0))
label2 = Label(root, text="Dealer", font="bold").pack()
sum2 = Label(root, text="Sum:")
sum2.pack()
dealer = Window(root, (1, 1))
# Show the initial deal.
player.update(env.player[0])
dealer.update(env.dealer[0])


def updateLabelText(label, text):
    # Replace a Tk label's text in place.
    label["text"] = text


def sample_policy(observation):
    # Fixed policy: stick (0) on 20 or 21, otherwise hit (1).
    score, dealer_score, usable_ace = observation
    return 0 if score >= 20 else 1


def next(sum1, sum2):
    """Run first-visit Monte-Carlo evaluation of sample_policy with live GUI updates.

    NOTE(review): `next` shadows the builtin, and 100000 episodes with a
    0.5 s sleep each will effectively never finish -- presumably a demo knob.
    Returns the state-value estimate V as a defaultdict.
    """
    returns_sum = defaultdict(float)
    returns_count = defaultdict(float)
    V = defaultdict(float)
    discount_factor = 1.0
    for i in trange(100000):
        time.sleep(0.5)
        observation = env.reset()
        episodes = []
        for t in range(100):
            player.reset()
            dealer.reset()
            root.update_idletasks()
            action = sample_policy(observation)
            next_observation, reward, done, _ = env.step(action)
            # Redraw both hands after the step.
            for card in env.player:
                player.update(card)
            for card in env.dealer:
                dealer.update(card)
            episodes.append((observation, action, reward))
            updateLabelText(sum1, "Sum:" + str(observation[0]))
            updateLabelText(sum2, "Sum:" + str(np.sum(env.dealer)))
            root.update_idletasks()
            if done:
                break
            observation = next_observation
        # obtain unique observation set
        observations = set([x[0] for x in episodes])
        for i, observation in enumerate(observations):
            # first occurence of the observation
            idx = episodes.index([episode for episode in episodes if episode[0] == observation][0])
            # First-visit return from that occurrence onward.
            Q = sum([episode[2] * discount_factor ** i for episode in episodes[idx:]])
            returns_sum[observation] += Q
            returns_count[observation] += 1.0
            V[observation] = returns_sum[observation] / returns_count[observation]
    return V


playRoundBtn = Button(root, text='Next', command=lambda: next(sum1, sum2)).pack()
# resetDealerBtn = Button(root, text='Reset Dealer', command=lambda:resetDealer()).pack()
# resetPlayerBtn = Button(root, text='Reset Player', command=lambda:resetPlayer()).pack()
root.mainloop()
{"/twoup_gamev2.py": ["/DQN.py"]}
73,915
Saxena611/bp_real_estate
refs/heads/master
/listings/migrations/0001_initial.py
# Generated by Django 3.0.3 on 2020-08-28 19:09
# NOTE(review): auto-generated migration -- do not hand-edit field definitions;
# schema changes belong in a new migration.

import datetime
from django.db import migrations, models
import django.db.models.deletion
import multiselectfield.db.fields


class Migration(migrations.Migration):
    """Initial schema for the listings app: creates the Listing table."""

    initial = True

    dependencies = [
        ('realtors', '0001_initial'),
        ('areaprops', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Listing',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('builder', models.CharField(max_length=200)),
                ('rera_id', models.CharField(max_length=200)),
                ('project_id', models.CharField(max_length=200)),
                ('address', models.CharField(max_length=200)),
                ('city', models.CharField(default='bhopal', max_length=30)),
                ('state', models.CharField(default='MP', max_length=30)),
                ('zipcode', models.CharField(max_length=20)),
                ('description', models.TextField(blank=True)),
                ('amenities', multiselectfield.db.fields.MultiSelectField(choices=[('security', 'security'), ('gymnasium', 'gymnasium'), ('waste disposal', 'waste disposal'), ('reserved parking', 'reserved_parking'), ('lift', 'lift'), ('club house', 'club house'), ('shopping center', 'shopping center'), ('rain water harvesting', 'rain water harvesting'), ('water plant', 'water plant'), ('landscape garden', 'landscape garden'), ('kids play area', 'kids play area'), ('cctv', 'cctv'), ('cycle track', 'cycle track')], max_length=165)),
                ('price_start', models.IntegerField()),
                ('price_end', models.IntegerField()),
                ('area_start', models.IntegerField()),
                ('area_end', models.IntegerField()),
                ('property_type', models.CharField(choices=[('1/2/3 BHK APARTMENT', '1/2/3 BHK APARTMENT'), ('1/2 BHK APARTMENT', '1/2 BHK APARTMENT'), ('1 BHK APARTMENT', '1 BHK APARTMENT'), ('2 BHK APARTMENT', '2 BHK APARTMENT'), ('3 BHK APARTMENT', '3 BHK APARTMENT'), ('3 BHK DUPLEX', '3 BHK DUPLEX'), ('2 BHK DUPLEX', '2 BHK DUPLEX'), ('VILLA', 'VILLA'), ('BUNGALOW', 'BUNGALOW'), ('PLOT', 'PLOT'), ('PENTHOUSE', 'PENTHOUSE')], max_length=30)),
                ('possesion', models.CharField(max_length=20)),
                ('photo_main', models.ImageField(upload_to='photos/%Y/%m/%d/')),
                ('photo_1', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
                ('photo_2', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
                ('photo_3', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
                ('photo_4', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
                ('photo_5', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
                ('photo_6', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
                ('is_published', models.BooleanField(default=True)),
                ('list_date', models.DateTimeField(blank=True, default=datetime.datetime.now)),
                ('area', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='areaprops.Area')),
                ('realtor', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='realtors.Realtor')),
            ],
        ),
    ]
{"/pages/views.py": ["/listings/models.py"], "/listings/views.py": ["/listings/models.py"]}
73,916
Saxena611/bp_real_estate
refs/heads/master
/areaprops/apps.py
from django.apps import AppConfig class AreapropsConfig(AppConfig): name = 'areaprops'
{"/pages/views.py": ["/listings/models.py"], "/listings/views.py": ["/listings/models.py"]}
73,917
Saxena611/bp_real_estate
refs/heads/master
/pages/views.py
from django.shortcuts import render from realtors.models import Realtor from listings.models import Listing from areaprops.models import Area from django.core.paginator import EmptyPage,PageNotAnInteger,Paginator # Create your views here. def index(request): listings = Listing.objects.order_by('-list_date').filter(is_published=True)[:3] areas = Area.objects.order_by('-area_name') context = { "listings":listings, 'arealist':areas } print(context) return render(request,'pages/index.html',context) def about(request): realtors = Realtor.objects.order_by('-hire_date') mvp_realtors = Realtor.objects.all().filter(is_mvp=True) areas = Area.objects.order_by('-area_name') context = { "realtors":realtors, "mvp_realtors":mvp_realtors, 'arealist':areas } return render(request,'pages/about.html',context) def propertyin(request,area_id): property_in = Listing.objects.filter(area=area_id) areas = Area.objects.order_by('-area_name') paginator = Paginator(property_in,2) page = request.GET.get('page') paged_listings = paginator.get_page(page) context = { 'listings':paged_listings, 'arealist':areas } return render(request,'listings/listings.html',context)
{"/pages/views.py": ["/listings/models.py"], "/listings/views.py": ["/listings/models.py"]}
73,918
Saxena611/bp_real_estate
refs/heads/master
/pages/urls.py
from django.urls import path from . import views urlpatterns = [ path('',views.index,name='index'), path('<int:area_id>',views.propertyin,name='propertyin'), path('about',views.about,name='about'), ]
{"/pages/views.py": ["/listings/models.py"], "/listings/views.py": ["/listings/models.py"]}
73,919
Saxena611/bp_real_estate
refs/heads/master
/listings/models.py
from django.db import models from datetime import datetime from multiselectfield import MultiSelectField from realtors.models import Realtor from areaprops.models import Area # Create your models here # Choices for amenities amenities_choices = ( ('security','security'), ('gymnasium','gymnasium'), ('waste disposal','waste disposal'), ('reserved parking','reserved_parking'), ('lift','lift'), ('club house','club house'), ('shopping center','shopping center'), ('rain water harvesting','rain water harvesting'), ('water plant','water plant'), ('landscape garden','landscape garden'), ('kids play area','kids play area'), ('cctv','cctv'), ('cycle track','cycle track') ) # Type of property type_of_property = ( ("1/2/3 BHK APARTMENT","1/2/3 BHK APARTMENT"), ("1/2 BHK APARTMENT","1/2 BHK APARTMENT"), ("1 BHK APARTMENT","1 BHK APARTMENT"), ("2 BHK APARTMENT","2 BHK APARTMENT"), ("3 BHK APARTMENT","3 BHK APARTMENT"), ("3 BHK DUPLEX","3 BHK DUPLEX"), ("2 BHK DUPLEX","2 BHK DUPLEX"), ("VILLA","VILLA"), ("BUNGALOW","BUNGALOW"), ("PLOT","PLOT"), ("PENTHOUSE","PENTHOUSE") ) # Create your models here. 
class Listing(models.Model): realtor = models.ForeignKey(Realtor, on_delete=models.DO_NOTHING) title = models.CharField(max_length=200) builder = models.CharField(max_length=200) rera_id = models.CharField(max_length=200) project_id = models.CharField(max_length=200) address = models.CharField(max_length=200) area = models.ForeignKey(Area, on_delete=models.DO_NOTHING) city = models.CharField(max_length=30,default='bhopal') state = models.CharField(max_length=30,default='MP') zipcode = models.CharField(max_length=20) description = models.TextField(blank=True) amenities = MultiSelectField(choices=amenities_choices) price_start = models.IntegerField() price_end = models.IntegerField() area_start = models.IntegerField() area_end = models.IntegerField() property_type = models.CharField(max_length=30,choices=type_of_property) possesion = models.CharField(max_length=20) photo_main = models.ImageField(upload_to='photos/%Y/%m/%d/') photo_1 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True) photo_2 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True) photo_3 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True) photo_4 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True) photo_5 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True) photo_6 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True) is_published = models.BooleanField(default=True) list_date = models.DateTimeField(default=datetime.now, blank=True) def __str__(self): return self.title
{"/pages/views.py": ["/listings/models.py"], "/listings/views.py": ["/listings/models.py"]}
73,920
Saxena611/bp_real_estate
refs/heads/master
/listings/views.py
from django.shortcuts import render from .models import Listing from areaprops.models import Area from django.shortcuts import render,get_object_or_404 from django.core.paginator import EmptyPage,PageNotAnInteger,Paginator from .forms import ListingForm # Create your views here. def index(request): listings = Listing.objects.order_by('-list_date').filter(is_published=True) areas = Area.objects.order_by('-area_name') paginator = Paginator(listings,10) page = request.GET.get('page') paged_listings = paginator.get_page(page) context = { 'listings':paged_listings, 'arealist':areas } return render(request,'listings/listings.html',context) def listing(request,listing_id): listing = get_object_or_404(Listing,pk=listing_id) areas = Area.objects.order_by('-area_name') context = { 'listing':listing, 'arealist':areas } return render(request,'listings/listing.html',context) def search(request): return render(request,'listings/search.html') def postproperty(request): prop = ListingForm() return render(request,'listings/postproperty.html',{'form':prop})
{"/pages/views.py": ["/listings/models.py"], "/listings/views.py": ["/listings/models.py"]}
73,921
Saxena611/bp_real_estate
refs/heads/master
/contacts/urls.py
from django.urls import path from . import views urlpatterns = [ path('contact',views.contact,name='contact'), path('consulation',views.consulation,name='consulation'), path('postproperty',views.postproperty,name='postproperty'), ]
{"/pages/views.py": ["/listings/models.py"], "/listings/views.py": ["/listings/models.py"]}
73,922
Saxena611/bp_real_estate
refs/heads/master
/contacts/views.py
from django.shortcuts import render,redirect from django.contrib import messages from .models import Contact # Create your views here. def postproperty(request): if request.method == "POST": request_name = request.POST['request_name'] request_id = 2 name = request.POST['name'] email = request.POST['email'] phone = request.POST['phone'] message = request.POST['message'] if not request_name == "Post Property & Collaborate": request_name = "Post Property & Collaborate" contact = Contact(request=request_name,request_id=request_id,name=name,email=email,phone=phone,message=message) contact.save() messages.success(request,'Your request has been submitted, arealtor will get back to you soon.') return redirect('index') return def contact(request): if request.method == "POST": request_name = request.POST['request_name'] request_id = request.POST['request_id'] name = request.POST['name'] email = request.POST['email'] phone = request.POST['phone'] message = request.POST['message'] user_id = request.POST['user_id'] contact = Contact(request=request_name,request_id=request_id,name=name,email=email,phone=phone,message=message,user_id=user_id) contact.save() messages.success(request,'Your request has been submitted, arealtor will get back to you soon.') return redirect('index') return def consulation(request): if request.method == "POST": request_name = request.POST['request_name'] request_id = 1 name = request.POST['name'] email = request.POST['email'] phone = request.POST['phone'] message = request.POST['message'] if not request_name == "Free Property Consulation": request_name = "Free Property Consulation" contact = Contact(request=request_name,request_id=request_id,name=name,email=email,phone=phone,message=message) contact.save() messages.success(request,'Your request has been submitted, arealtor will get back to you soon.') return redirect('index') return
{"/pages/views.py": ["/listings/models.py"], "/listings/views.py": ["/listings/models.py"]}
73,927
n-kb/honest-rankings-api
refs/heads/master
/main.py
from flask import Flask
from flask import jsonify
from flask import request
from flask_pymongo import PyMongo
from flask_cors import CORS
from bson.objectid import ObjectId
from os import environ
from make_rankings import make_indicator

app = Flask(__name__)
port = int(environ.get('PORT', 33507))

# Use the hosted MongoDB when MONGODB_URI is set, otherwise a local dev db.
if (environ.get('MONGODB_URI')):
    mongo_uri = environ.get('MONGODB_URI')
    # NOTE(review): split("/")[1] on a mongodb:// URI yields the empty string
    # (the db name is after the LAST slash) -- confirm whether MONGO_DBNAME
    # is actually consumed when MONGO_URI is provided.
    app.config['MONGO_DBNAME'] = mongo_uri.split("/")[1]
    app.config['MONGO_URI'] = mongo_uri
else:
    app.config['MONGO_DBNAME'] = 'restdb'
    app.config['MONGO_URI'] = 'mongodb://localhost:27017/restdb'

cors = CORS(app)
mongo = PyMongo(app)


@app.route('/rankings', methods=['GET'])
def get_all_rankings():
    """Return the 30 most recent named rankings (summary fields only)."""
    rankings = mongo.db.rankings
    output = []
    for s in rankings.find({"years_number": {"$exists": True}, "name": {"$exists": True}}).sort([("_id", -1)]).limit(30):
        output.append(
            {'id': str(s['_id']),
             'name': s['name'],
             'countries_num': s['countries_num'],
             'years_number': s['years_number'],
             'last_year': s['last_year'],
             'lead_name': s['lead_name']})
    return jsonify({'result': output})


@app.route('/ranking/<ranking_id>', methods=['GET'])
def get_one_ranking(ranking_id):
    """Return name and full data for one ranking by its ObjectId string."""
    rankings = mongo.db.rankings
    s = rankings.find_one({'_id': ObjectId(ranking_id)})
    if s:
        output = {'name': s['name'], 'data': s['data']}
    else:
        output = "No such ranking"
    return jsonify({'result': output})


@app.route('/new_ranking', methods=['POST'])
def add_ranking():
    """Compute a ranking from the posted indicator list and store it.

    Returns the new document id as a string.
    """
    rankings = mongo.db.rankings
    indicators = request.json['indicators']
    data, lead_name, last_year, years_number, countries_num = make_indicator(indicators)
    # Bug fix: Collection.insert is deprecated/removed in pymongo 3+ (this
    # file already uses the pymongo-3 update_one API below); use insert_one
    # and read the id from the result object.
    result = rankings.insert_one(
        {'data': data,
         'last_year': last_year,
         'years_number': years_number,
         'countries_num': countries_num,
         'lead_name': lead_name})
    return jsonify({'ranking_id': str(result.inserted_id)})


@app.route('/name_ranking', methods=['POST'])
def add_name():
    """Attach a user-supplied name to an existing ranking."""
    rankings = mongo.db.rankings
    name = request.json['name']
    ranking_id = request.json['ranking_id']
    rankings.update_one({'_id': ObjectId(ranking_id)},
                        {'$set': {'name': name}}, upsert=False)
    return jsonify({'result': "success"})


if __name__ == '__main__':
    # NOTE(review): debug=True must not be enabled in production.
    app.run(debug=True, port=port)
{"/main.py": ["/make_rankings.py"]}
73,928
n-kb/honest-rankings-api
refs/heads/master
/make_rankings.py
import pandas as pd
import numpy as np
from hashlib import md5
import json


def harmonize(x, max_value, min_value, year):
    # Min-max scale a raw value onto [0, 1] for the given bounds.
    # `year` is accepted (callers pass it) but unused here.
    return (x - min_value) / (max_value - min_value)


def make_year_avg(x):
    # Mean of a year-column, but return NaN when more than 3 values
    # are missing (too sparse to average meaningfully).
    existing_values = x.count()
    total_values = x.fillna(-1).count()
    missing_values = total_values - existing_values
    if missing_values > 3:
        return np.nan
    else:
        return x.mean()


def make_avg(x):
    # Column-wise application of make_year_avg over a per-country group.
    return x.apply(make_year_avg)


def make_indicator(indicators_from_app):
    """Build a composite country ranking from World Bank indicator CSVs.

    Each element of `indicators_from_app` is a dict with keys "code", "name"
    and "is_reverse".  Returns (json_data, lead_name, last_year, years_number,
    countries_num) where json_data is the full per-country payload.
    """
    indicators = []
    indicators_names = []
    indicators_harmonized = []
    years = ["2017", "2016", "2015", "2014", "2013", "2012", "2011", "2010", "2009", "2008", "2007", "2006"]
    indicators_df = pd.read_csv("./data/indicators-list.csv")
    countries_df = pd.read_csv("./data/countries-list.csv")
    for indicator_from_app in indicators_from_app:
        # I added an empty space by mistake in the code names
        # (hence the literal "%20" in the filename pattern below).
        indicator_code = indicator_from_app["code"]
        indicator_name = indicator_from_app["name"]
        indicators_names.append(indicator_name)
        indicator_df = pd.read_csv("./data/wb-data/API_" + indicator_code + "%20_DS2_en_csv_v2.csv",
                                   skiprows=4,
                                   index_col="Country Name",
                                   usecols=years + ["Country Name"])
        # Remove all rows which are not countries (aggregates, regions...).
        indicator_df = indicator_df.loc[indicator_df.index.isin(countries_df["name"])]
        # Find max and min prior to harmonization
        max_value = indicator_df.max(numeric_only=True).max()
        min_value = indicator_df.min(numeric_only=True).min()
        # For negative series, use the opposite (swapping bounds flips the scale).
        if indicator_from_app["is_reverse"] == 1:
            max_value = indicator_df.min(numeric_only=True).min()
            min_value = indicator_df.max(numeric_only=True).max()
        indicator_harmonized_df = pd.DataFrame()
        for year in years:
            indicator_harmonized_df[year] = indicator_df[year].apply(harmonize, max_value=max_value, min_value=min_value, year=year)
        indicators.append(indicator_df)
        indicators_harmonized.append(indicator_harmonized_df)
    # Average all harmonized indicators per country; drop years with fewer
    # than 70 countries and countries with no data at all.
    index_df = pd.concat(indicators_harmonized)
    index_df = index_df.groupby(level=0).apply(make_avg).dropna(axis=1, thresh=70).dropna(axis=0, thresh=1)
    # Rank by the most recent surviving year (first column).
    index_df = index_df.sort_values(index_df.columns[0], ascending=False)
    last_year = index_df.iloc[:, 0].name
    lead_name = index_df.iloc[:, 0].index[0]
    years_number = len(list(index_df.columns.values))
    countries_num = len(list(index_df.iloc[:, 0].dropna()))
    json_data = {
        "last_year": index_df.iloc[:, 0].name,
        "years": list(index_df.columns.values),
        "top_country": lead_name,
        "bottom_country": index_df.iloc[:, 0].dropna().index[-1],
        "indicators": [],
        "data": []
    }
    # Split "Name (unit)" or "Name, unit" labels into name/unit pairs.
    for indicator in indicators_names:
        try:
            indicator_name = indicator.split("(")[0].strip()
            indicator_unit = indicator.split("(")[1].strip().replace(")", "")
        except IndexError:
            try:
                indicator_name = indicator.split(",")[0].strip()
                indicator_unit = indicator.split(",")[1].strip()
            except IndexError:
                indicator_name = indicator
                indicator_unit = ""
        json_data["indicators"].append({"name": indicator_name, "unit": indicator_unit})
    ranking = list(index_df.iloc[:, 0].dropna().index)
    for country_name in ranking:
        rank = ranking.index(country_name) + 1
        scores = index_df.loc[index_df.index == country_name].values[0]
        country_code = countries_df.loc[countries_df["name"] == country_name, "code"].tolist()[0]
        components = []
        for indicator in indicators_names:
            indicator_index = indicators_names.index(indicator)
            indicator_df = indicators[indicator_index]
            indicator_normalized_df = indicators_harmonized[indicator_index]
            # NOTE(review): Series[...][0] positional access is deprecated in
            # recent pandas (use .iloc[0]) -- confirm the pinned version.
            score = indicator_df.loc[indicator_df.index == country_name][last_year][0]
            score_normalized = indicator_normalized_df.loc[indicator_df.index == country_name][last_year][0]
            components.append({"score": score, "score_normalized": score_normalized})
        json_data["data"].append({"country_name": country_name, "country_code": country_code, "rank": rank, "scores": list(scores), "components": components})
    return json_data, lead_name, last_year, years_number, countries_num
{"/main.py": ["/make_rankings.py"]}
73,951
JamisonHolt/SrDesign
refs/heads/master
/vtkutils.py
from __future__ import print_function import sys import traceback import vtk # Isosurface extraction def extractSurface(vol, isovalue=0.0): try: iso = vtk.vtkContourFilter() if vtk.vtkVersion.GetVTKMajorVersion() >= 6: iso.SetInputData(vol) else: iso.SetInput(vol) iso.SetValue(0, isovalue) iso.Update() print("Surface extracted") mesh = iso.GetOutput() print(" ", mesh.GetNumberOfPolys(), "polygons") return mesh except Exception as e: print(e) print("Iso-surface extraction failed") exc_type, exc_value, exc_traceback = sys.exc_info() traceback.print_exception( exc_type, exc_value, exc_traceback, limit=2, file=sys.stdout) return None # Mesh filtering def cleanMesh(mesh, connectivityFilter=False): try: connect = vtk.vtkPolyDataConnectivityFilter() clean = vtk.vtkCleanPolyData() if (connectivityFilter): if vtk.vtkVersion.GetVTKMajorVersion() >= 6: connect.SetInputData(mesh) else: connect.SetInput(mesh) connect.SetExtractionModeToLargestRegion() clean.SetInputConnection(connect.GetOutputPort()) else: if vtk.vtkVersion.GetVTKMajorVersion() >= 6: clean.SetInputData(mesh) else: clean.SetInput(mesh) clean.Update() print("Surface cleaned") m2 = clean.GetOutput() print(" ", m2.GetNumberOfPolys(), "polygons") return m2 except: print("Surface cleaning failed") exc_type, exc_value, exc_traceback = sys.exc_info() traceback.print_exception( exc_type, exc_value, exc_traceback, limit=2, file=sys.stdout) return None def smoothMesh(mesh, nIterations=10): try: smooth = vtk.vtkWindowedSincPolyDataFilter() smooth.SetNumberOfIterations(nIterations) if vtk.vtkVersion.GetVTKMajorVersion() >= 6: smooth.SetInputData(mesh) else: smooth.SetInput(mesh) smooth.Update() print("Surface smoothed") m2 = smooth.GetOutput() print(" ", m2.GetNumberOfPolys(), "polygons") return m2 except: print("Surface smoothing failed") exc_type, exc_value, exc_traceback = sys.exc_info() traceback.print_exception( exc_type, exc_value, exc_traceback, limit=2, file=sys.stdout) return None def rotateMesh(mesh, axis=1, 
angle=0): try: print("Rotating surface: axis=", axis, "angle=", angle) matrix = vtk.vtkTransform() if axis == 0: matrix.RotateX(angle) if axis == 1: matrix.RotateY(angle) if axis == 2: matrix.RotateZ(angle) tfilter = vtk.vtkTransformPolyDataFilter() tfilter.SetTransform(matrix) if vtk.vtkVersion.GetVTKMajorVersion() >= 6: tfilter.SetInputData(mesh) else: tfilter.SetInput(mesh) tfilter.Update() mesh2 = tfilter.GetOutput() return mesh2 except: print("Surface rotating failed") exc_type, exc_value, exc_traceback = sys.exc_info() traceback.print_exception( exc_type, exc_value, exc_traceback, limit=2, file=sys.stdout) return None # @profile def reduceMesh(mymesh, reductionFactor): try: deci = vtk.vtkQuadricDecimation() deci.SetTargetReduction(reductionFactor) if vtk.vtkVersion.GetVTKMajorVersion() >= 6: deci.SetInputData(mymesh) else: deci.SetInput(mymesh) deci.Update() print("Surface reduced") m2 = deci.GetOutput() del deci print(" ", m2.GetNumberOfPolys(), "polygons") return m2 except: print("Surface reduction failed") exc_type, exc_value, exc_traceback = sys.exc_info() traceback.print_exception( exc_type, exc_value, exc_traceback, limit=2, file=sys.stdout) return None # Mesh I/O def readMesh(name): if name.endswith(".vtk"): return readVTKMesh(name) if name.endswith(".ply"): return readPLY(name) if name.endswith(".stl"): return readSTL(name) print("Unknown file type: ", name) return None def readVTKMesh(name): try: reader = vtk.vtkPolyDataReader() reader.SetFileName(name) reader.Update() print("Input mesh:", name) mesh = reader.GetOutput() del reader return mesh except: print("VTK mesh reader failed") exc_type, exc_value, exc_traceback = sys.exc_info() traceback.print_exception( exc_type, exc_value, exc_traceback, limit=2, file=sys.stdout) return None def readSTL(name): try: reader = vtk.vtkSTLReader() reader.SetFileName(name) reader.Update() print("Input mesh:", name) mesh = reader.GetOutput() del reader return mesh except: print("STL Mesh reader failed") exc_type, 
exc_value, exc_traceback = sys.exc_info() traceback.print_exception( exc_type, exc_value, exc_traceback, limit=2, file=sys.stdout) return None def readPLY(name): try: reader = vtk.vtkPLYReader() reader.SetFileName(name) reader.Update() print("Input mesh:", name) mesh = reader.GetOutput() del reader return mesh except: print("PLY Mesh reader failed") exc_type, exc_value, exc_traceback = sys.exc_info() traceback.print_exception( exc_type, exc_value, exc_traceback, limit=2, file=sys.stdout) return None def writeMesh(mesh, name): print("Writing", mesh.GetNumberOfPolys(), "polygons to", name) if name.endswith(".vtk"): writeVTKMesh(mesh, name) return if name.endswith(".ply"): writePLY(mesh, name) return if name.endswith(".stl"): writeSTL(mesh, name) return print("Unknown file type: ", name) def writeVTKMesh(mesh, name): try: writer = vtk.vtkPolyDataWriter() if vtk.vtkVersion.GetVTKMajorVersion() >= 6: writer.SetInputData(mesh) else: writer.SetInput(mesh) writer.SetFileTypeToBinary() writer.SetFileName(name) writer.Write() print("Output mesh:", name) except: print("VTK mesh writer failed") exc_type, exc_value, exc_traceback = sys.exc_info() traceback.print_exception( exc_type, exc_value, exc_traceback, limit=2, file=sys.stdout) return None def writeSTL(mesh, name): try: writer = vtk.vtkSTLWriter() if vtk.vtkVersion.GetVTKMajorVersion() >= 6: print("writeSTL 1") writer.SetInputData(mesh) else: print("writeSTL 2") writer.SetInput(mesh) writer.SetFileTypeToBinary() writer.SetFileName(name) writer.Write() print("Output mesh:", name) except: print("STL mesh writer failed") exc_type, exc_value, exc_traceback = sys.exc_info() traceback.print_exception( exc_type, exc_value, exc_traceback, limit=2, file=sys.stdout) return None def writePLY(mesh, name): try: writer = vtk.vtkPLYWriter() if vtk.vtkVersion.GetVTKMajorVersion() >= 6: writer.SetInputData(mesh) else: writer.SetInput(mesh) writer.SetFileTypeToBinary() writer.SetFileName(name) writer.Write() print("Output mesh:", name) 
except: print("PLY mesh writer failed") exc_type, exc_value, exc_traceback = sys.exc_info() traceback.print_exception( exc_type, exc_value, exc_traceback, limit=2, file=sys.stdout) return None # Volume I/O def readVTKVolume(name): try: reader = vtk.vtkStructuredPointsReader() reader.SetFileName(name) reader.Update() print("Input volume:", name) vol = reader.GetOutput() return vol except: print("VTK volume reader failed") exc_type, exc_value, exc_traceback = sys.exc_info() traceback.print_exception( exc_type, exc_value, exc_traceback, limit=2, file=sys.stdout) return None # @profile def memquery1(): print("Hiya 1") # @profile def memquery2(): print("Hiya 2") # @profile def memquery3(): print("Hiya 3")
{"/dicom2stl.py": ["/sitk2vtk.py", "/vtkutils.py"]}
73,952
JamisonHolt/SrDesign
refs/heads/master
/Main.py
import numpy as np import pandas as pd import SimpleITK as sitk import matplotlib.pyplot as plt from sklearn import cluster def show_one(img): """ Display a single 2D image without calling plt.show() to open in the browser :param img: The 2D image to be shown :return: None """ dpi = 40 margin = 0.05 nda = sitk.GetArrayFromImage(img) spacing = img.GetSpacing() extent = (0, nda.shape[1] * spacing[1], nda.shape[0] * spacing[0], 0) figsize = (5, 5) fig = plt.figure(figsize=figsize, dpi=dpi) ax = fig.add_axes([margin, margin, 1 - 2 * margin, 1 - 2 * margin]) plt.set_cmap("gray") ax.imshow(nda, extent=extent, interpolation=None) def show_all(img, overlay=None, axis='z'): """ Take in all images and display them in the browser on any given axis :param img: The image to be displayed :param overlay: Any overlay of labels that one might want displayed. Defaults to none :param axis: The axis in which to graph each image. Defaults to z :return: None """ xlen, ylen, zlen = img.GetSize() all_images = [] all_overlays = [] if axis == 'z': all_images = [img[:, :, z] for z in xrange(zlen)] if overlay: all_overlays = [overlay[:, :, z] for z in xrange(zlen)] elif axis == 'y': all_images = [img[:, y, :] for y in xrange(ylen)] if overlay: all_overlays = [overlay[:, y, :] for y in xrange(ylen)] elif axis == 'x': all_images = [img[x, :, :] for x in xrange(xlen)] if overlay: all_overlays = [overlay[x, :, :] for x in xrange(xlen)] else: raise Exception('invalid axis') for i, image in enumerate(all_images): if overlay: show_one(sitk.LabelOverlay(image, all_overlays[i])) else: show_one(image) plt.show() def make_empty_img_from_img(img, dimensions=3): """ Take an exising itk image and create a new, empty image from its dimensions :param img: The image to find dimensions for :param dimensions: The number of dimensions in the image :return: The new image """ xlen, ylen, zlen = img.GetSize() dupe = img[:, :, :] for x in xrange(xlen): for y in xrange(ylen): if dimensions == 3: for z in 
xrange(zlen): dupe.SetPixel(x, y, z, 0) else: dupe.SetPixel(x, y, 0) return dupe def read_image(path): """ Read in a list of dcm images in a given directory :param path: system path towards the directory :return: sitk image with the origin reset to 0, 0, 0 """ reader = sitk.ImageSeriesReader() dicom_filenames = reader.GetGDCMSeriesFileNames(path) reader.SetFileNames(dicom_filenames) reader.LoadPrivateTagsOn() img = reader.Execute() img.SetOrigin((0, 0, 0)) return img def retrieve_overlap(img1, img2, lbl1=1, lbl2=1): """ Take in two images of labels and return an image with only the overlap of the labels :param img1: The first image of labels :param img2: The second image of labels :param lbl1: The label to retrieve from the first image :param lbl2: The label to retrieve from the second image :return: A new image of labels where overlap exists """ xlen, ylen, zlen = img1.GetSize() # Make sure that our images are equal in size to prevent weird invisible bugs xlen2, ylen2, zlen2 = img2.GetSize() assert xlen == xlen2 and ylen == ylen2 and zlen == zlen2 # Copy our image as to not alter the original data new_image = img1[:, :, :] for z in xrange(zlen): for y in xrange(ylen): for x in xrange(xlen): # Set any bit with overlap to 1, else set it to 0 overlap = img1.GetPixel(x, y, z) == lbl1 and img2.GetPixel(x, y, z) == lbl2 if overlap: new_image.SetPixel(x, y, z, 1) else: new_image.SetPixel(x, y, z, 0) return new_image def get_df_from_img(img, dimensions=3): """ Create a pandas dataframe from any given image - useful for statistics operations such as clustering :param img: The image to be converted into a dataframe :param dimensions: The number of dimensions of the image - only supports 2D and 3D images at the moment :return: A pandas dataframe containing the x, y, and z coordinates that exist in the image """ if dimensions == 3: df_dict = {'x': [], 'y': [], 'z': []} xlen, ylen, zlen = img.GetSize() for x in xrange(xlen): for y in xrange(ylen): for z in xrange(zlen): if 
img.GetPixel(x, y, z): df_dict['x'].append(x) df_dict['y'].append(y) df_dict['z'].append(z) df = pd.DataFrame.from_dict(df_dict) return df elif dimensions == 2: df_dict = {'x': [], 'y': []} xlen, ylen = img.GetSize() for x in xrange(xlen): for y in xrange(ylen): if img.GetPixel(x, y): df_dict['x'].append(x) df_dict['y'].append(y) df = pd.DataFrame.from_dict(df_dict) return df else: raise Exception('Unsupported number of dimensions') def update_img_from_df(df, image, keep=0, dimensions=3, colname='label', inside_value=1, outside_value=0): """ Take a given dataframe and itk image to be written over and update the image to only contain the labeled coordinates :param df: The dataframe to read labels from :param image: The image to be overwritten :param keep: The label in the dattaframe to keep (since there may be multiple labels, e.g. clustering :param dimensions: The number of dimensions in the image :param colname: The name of the column containing the labels :param inside_value: What to update labeled pixels to :param outside_value: What to update unlabeled pixels to :return: None """ for index, row in df.iterrows(): if dimensions == 2: x, y, label = (row['x'], row['y'], row[colname]) if label == keep: image.SetPixel(x, y, inside_value) else: image.SetPixel(x, y, outside_value) elif dimensions == 3: x, y, z, label = (row['x'], row['y'], row['z'], row[colname]) if label == keep: image.SetPixel(x, y, z, inside_value) else: image.SetPixel(x, y, z, outside_value) else: raise Exception('Unsupported number of dimensions') def dbscan_filter(img, eps, use_z=True): df = get_df_from_img(img) df_new = df if not use_z: df_new = df.drop('z', axis=1) fit = cluster.DBSCAN(eps=eps).fit(df_new) labels = fit.labels_ df['label'] = pd.Series(labels) counts = df['label'].value_counts().to_dict() # Remove all non-clusters df = df[df.label != -1] largest_cluster = max(counts.iterkeys(), key=(lambda key: counts[key])) img_filtered = make_empty_img_from_img(img) update_img_from_df(df, 
img_filtered, keep=largest_cluster) return img_filtered def kmeans_segment(img, num_segments=2, use_z=True): df = get_df_from_img(img) df_new = df if not use_z: df_new = df.drop('z', axis=1) fit = cluster.KMeans(n_clusters=num_segments).fit(df_new) labels = fit.labels_ df['label'] = pd.Series(labels) all_images = [make_empty_img_from_img(img) for i in xrange(num_segments)] x_max = [0 for i in xrange(num_segments)] for index, row in df.iterrows(): x, y, z, label = (row['x'], row['y'], row['z'], row['label']) all_images[label].SetPixel(x, y, z, 1) x_max[label] = max((x_max[label], x)) return all_images, x_max def count_labels(img): xlen, ylen = img.GetSize() count = 0 for x in xrange(xlen): for y in xrange(ylen): if img.GetPixel(x, y): count += 1 return count def filter_by_label_count(img, threshold): start = 0 arr = sitk.GetArrayFromImage(img) end = len(arr) for z in xrange(end): img_single = img[:, :, z] if count_labels(img_single) < threshold: if z == start: start += 1 for z in reversed(xrange(end)): img_single = img[:, :, z] if count_labels(img_single) < threshold: if z == end - 1: end -= 1 return start, end def main(): """ Main function of our program. Executes all of the main steps written in our final paper :return: None """ # Directory where the DICOM files are being stored (in this input_path = './Inputs/valve' # Original image from the filepath img_original = read_image(input_path) # Image with smoothing applied to reduce noise img_smooth = sitk.CurvatureFlow(image1=img_original, timeStep=0.125, numberOfIterations=10) # Create labels on our smoothed image for cardiac tissue and tissue with blood labels_tissue = sitk.BinaryThreshold(image1=img_smooth, lowerThreshold=325, upperThreshold=470, insideValue=1) labels_blood = sitk.BinaryThreshold(image1=img_smooth, lowerThreshold=450, upperThreshold=800, insideValue=1, outsideValue=0) # IMPORTANT STEP: essentially, this is the key to our algorithm. 
By finding the "blood" without cardiac tissue, # and then using binary hole filling with a fairly large radius, we are able to label a lot of the mitral valve # area without labeling too much of the other cardiac tissue. Thus, THIS is what lets us single out the mitral # valve tissue from the rest - all we need is the overlap of the two labels labels_tissue_no_holes = sitk.VotingBinaryHoleFilling(image1=labels_tissue, radius=[2] * 3, majorityThreshold=1, backgroundValue=0, foregroundValue=1) labels_blood_no_holes = sitk.VotingBinaryHoleFilling(image1=labels_blood, radius=[4] * 3, majorityThreshold=1, backgroundValue=0, foregroundValue=1) labels_valve = retrieve_overlap(labels_blood_no_holes, labels_tissue_no_holes) labels_valve_no_holes = sitk.VotingBinaryHoleFilling(image1=labels_valve, radius=[2] * 3, majorityThreshold=1, backgroundValue=0, foregroundValue=1) labels_valve_no_holes = sitk.VotingBinaryHoleFilling(image1=labels_valve_no_holes, radius=[1] * 3, majorityThreshold=0, backgroundValue=1, foregroundValue=0) # Fix intensity scaling on our original smoothed image for pretty diagram purposes img_smooth = sitk.Cast(sitk.RescaleIntensity(img_smooth), labels_tissue_no_holes.GetPixelID()) # Use a density-based clustering algorithm to attempt to remove as much noise as possible labels_valve_filtered = dbscan_filter(labels_valve_no_holes, eps=2, use_z=False) labels_valve_filtered = dbscan_filter(labels_valve_filtered, eps=4) # Find likely start and end points of our image by setting a mininum number of labeled pixels start, end = filter_by_label_count(labels_valve_filtered, 10) img_smooth = img_smooth[:, :, start:end] labels_valve_filtered = labels_valve_filtered[:, :, start:end] # Remove all values distant from the center of our starting location by taking advantage of kmeans df = get_df_from_img(labels_valve_filtered[:, :, 0], dimensions=2) x_mid = df['x'].mean() y_mid = df['y'].mean() df = get_df_from_img(labels_valve_filtered) distance_df = df.drop('z', axis=1) 
distance_df['x_dist'] = abs(distance_df['x'] - x_mid) distance_df['y_dist'] = abs(distance_df['y'] - y_mid) fit = cluster.KMeans(n_clusters=2).fit(distance_df.drop(['x', 'y'], axis=1)) labels = fit.labels_ df['label'] = pd.Series(labels) counts = df['label'].value_counts().to_dict() largest_cluster = max(counts.iterkeys(), key=(lambda key: counts[key])) update_img_from_df(df, labels_valve_filtered, keep=largest_cluster) # Find likely start and end points of our image by setting a mininum number of labeled pixels start, end = filter_by_label_count(labels_valve_filtered, 10) img_smooth = img_smooth[:, :, start:end] labels_valve_filtered = labels_valve_filtered[:, :, start:end] # Use a segmentation-based clustering algorithm to attempt to find each valve label_segments, x_max = kmeans_segment(labels_valve_filtered, use_z=False) left, right = (label_segments[0], label_segments[1]) if x_max[0] > x_max[1]: left, right = right, left # Finally, we can simply take the furthest point from the likely start/end points in order to get our annulus # this can be done by every z value left_points = {'x': [], 'y': [], 'z': []} right_points = {'x': [], 'y': [], 'z': []} zlen = len(sitk.GetArrayFromImage(left)) for z in xrange(zlen): left_df = get_df_from_img(left[:, :, z], dimensions=2) if len(left_df['y']) > 0: index = left_df['y'].idxmin() row = left_df.iloc[index] left_points['x'].append(int(row['x'])) left_points['y'].append(int(row['y'])) left_points['z'].append(z) right_df = get_df_from_img(right[:, :, z], dimensions=2) if len(right_df['x']) > 0: index = right_df['x'].idxmax() row = right_df.iloc[index] right_points['x'].append(int(row['x'])) right_points['y'].append(int(row['y'])) right_points['z'].append(z) # These both represent the coordinates of our annulus ring. 
A simple spline can be used for interpolation between # points final_left = pd.DataFrame.from_dict(left_points) final_right = pd.DataFrame.from_dict(right_points) print('Coordinates for one side of the ring') print(final_left) print('\n\nCoordinates for the other side of the ring') print(final_right) final_image = make_empty_img_from_img(left) x = left_points['x'] + right_points['x'] y = left_points['y'] + right_points['y'] z = left_points['z'] + right_points['z'] for x, y, z in zip(x, y, z): final_image.SetPixel(x, y, z, 1) show_all(img_smooth, final_image) if __name__ == '__main__': main()
{"/dicom2stl.py": ["/sitk2vtk.py", "/vtkutils.py"]}
73,953
JamisonHolt/SrDesign
refs/heads/master
/Final.py
import numpy as np import pandas as pd import time import SimpleITK as sitk import matplotlib.pyplot as plt from sklearn import cluster def sitk_show(img, margin=0.05, dpi=40): nda = sitk.GetArrayFromImage(img) spacing = img.GetSpacing() extent = (0, nda.shape[1] * spacing[1], nda.shape[0] * spacing[0], 0) figsize = (5, 5) fig = plt.figure(figsize=figsize, dpi=dpi) ax = fig.add_axes([margin, margin, 1 - 2 * margin, 1 - 2 * margin]) plt.set_cmap("gray") ax.imshow(nda, extent=extent, interpolation=None) def show_img(img, overlay=None): for z in range(img.GetHeight()): img_single = img[:, :, z] if overlay: overlay_single = overlay[:, :, z] sitk_show(sitk.LabelOverlay(img_single, overlay_single)) else: sitk_show(img_single) def read_image(path): # Create a DICOM reader and load in all images reader = sitk.ImageSeriesReader() series_id = reader.GetGDCMSeriesIDs(path)[0] series_file_names = reader.GetGDCMSeriesFileNames(path, series_id) reader.SetFileNames(series_file_names) reader.LoadPrivateTagsOn() img = reader.Execute() # Some files have arbitrary origins - normalize this img.SetOrigin((0, 0, 0)) return img def main(): # Read in our test file input_path = './Inputs/valve' img_original = read_image(input_path) # Save our image dimensions for future use xlen, ylen, zlen = img_original.GetSize() # Smooth image to remove abundance of pixels img_smooth = sitk.CurvatureFlow(image1=img_original, timeStep=0.125, numberOfIterations=10) imgWhiteMatter = sitk.ConnectedThreshold(image1=img_smooth, seedList=[(28, 31, 37)], lower=200, upper=470, replaceValue=1) img_smooth = sitk.Cast(sitk.RescaleIntensity(img_smooth), imgWhiteMatter.GetPixelID()) # Segment image to tissue and blood samples # label_tissue = sitk.BinaryThreshold(image1=img_smooth, lowerThreshold=200, upperThreshold=470, insideValue=1) label_blood = sitk.BinaryThreshold(image1=img_smooth, lowerThreshold=300, upperThreshold=800, insideValue=1) show_img(img_smooth, overlay=label_blood) for y in 
range(img_original.GetHeight()): imgSmoothIntSingle = img_smooth[:, y, :] imgBloodSingle = label_blood[:, y, :] sitk_show(sitk.LabelOverlay(imgSmoothIntSingle, imgBloodSingle)) if __name__ == '__main__': main()
{"/dicom2stl.py": ["/sitk2vtk.py", "/vtkutils.py"]}
73,954
JamisonHolt/SrDesign
refs/heads/master
/dicom2stl.py
from __future__ import print_function import SimpleITK as sitk import sitk2vtk import vtkutils def img_to_stl(img): # Pad black to the boundaries of the image pad = [5, 5, 5] img = sitk.ConstantPad(img, pad, pad) vtkimg = sitk2vtk.sitk2vtk(img) isovalue = 0 mesh = vtkutils.extractSurface(vtkimg, isovalue) connectivityFilter = False mesh = vtkutils.cleanMesh(mesh, connectivityFilter) smoothIterations = 25 mesh = vtkutils.smoothMesh(mesh, smoothIterations) quad = .90 mesh = vtkutils.reduceMesh(mesh, quad) vtkutils.writeMesh(mesh, "result.stl")
{"/dicom2stl.py": ["/sitk2vtk.py", "/vtkutils.py"]}
73,955
JamisonHolt/SrDesign
refs/heads/master
/Main2.py
import numpy as np import pydicom import os import matplotlib.pyplot as plt from glob import glob from mpl_toolkits.mplot3d.art3d import Poly3DCollection import scipy.ndimage from skimage import morphology from skimage import measure from skimage.transform import resize from sklearn.cluster import KMeans from plotly import __version__ from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot from plotly.tools import FigureFactory as FF from plotly.graph_objs import * def load_scan(path): """ Loop over the image files and store everything into a list :param path: String of the input to be taken in :return: list of DICOM filedataset types encompassing the scan """ slices = [pydicom.read_file(path + '/' + s) for s in os.listdir(path)] slices.sort(key=lambda x: int(x.InstanceNumber)) try: slice_thickness = np.abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2]) except: slice_thickness = np.abs(slices[0].SliceLocation - slices[1].SliceLocation) for s in slices: s.SliceThickness = slice_thickness print("Slice Thickness: %f" % slices[0].SliceThickness) print("Pixel Spacing (row, col): (%f, %f) " % (slices[0].PixelSpacing[0], slices[0].PixelSpacing[1])) return slices def get_pixels_hu(scans): """ Read in a list of scans and parse the pixels from thems :param scans: list of scans to be taken in :return: np.array of the image pixels """ image = np.stack([s.pixel_array for s in scans]) # Convert to int16 (from sometimes int16), # should be possible as values should always be low enough (<32k) image = image.astype(np.int16) # Set outside-of-scan pixels to 1 # The intercept is usually -1024, so air is approximately 0 image[image == -2000] = 0 # Convert to Hounsfield units (HU) intercept = scans[0].RescaleIntercept slope = scans[0].RescaleSlope if slope != 1: image = slope * image.astype(np.float64) image = image.astype(np.int16) image += np.int16(intercept) return np.array(image, dtype=np.int16) def pre_process(data_path, output_path): 
patient = load_scan(data_path) imgs = get_pixels_hu(patient) np.save(output_path + "fullimages.npy", imgs) return patient def show_houndsfield_hist(imgs_to_process): """ Graphs a histogram of the different houndsfield units contained in our images :param imgs_to_process: :return: """ plt.hist(imgs_to_process.flatten(), bins=50, color='c') plt.xlabel("Hounsfield Units (HU)") plt.ylabel("Frequency") plt.show() def sample_stack(stack, rows=8, cols=8, start_with=0, show_every=1): fig,ax = plt.subplots(rows,cols,figsize=[12,12]) for i in range(rows*cols): ind = start_with + i*show_every ax[int(i/rows),int(i % rows)].set_title('slice %d' % ind) ax[int(i/rows),int(i % rows)].imshow(stack[ind],cmap='gray') ax[int(i/rows),int(i % rows)].axis('off') plt.show() def resample(image, scan, new_spacing=[1, 1, 1]): print("Shape before resampling\t", image.shape) # Determine current pixel spacing toFloat = lambda item: float(str(item)) thicc = scan[0].SliceThickness pxl = scan[0].PixelSpacing spacing = np.array([toFloat(thicc), toFloat(pxl[0]), toFloat(pxl[1])]) resize_factor = spacing / new_spacing new_real_shape = image.shape * resize_factor new_shape = np.round(new_real_shape) real_resize_factor = new_shape / image.shape new_spacing = spacing / real_resize_factor image = scipy.ndimage.interpolation.zoom(image, real_resize_factor) print("Shape after resampling\t", image.shape) return image, new_spacing def make_mesh(image, threshold=150, step_size=1): print "Transposing surface" p = image.transpose(2, 1, 0) print "Calculating surface" verts, faces, norm, val = measure.marching_cubes_lewiner(p, threshold, step_size=step_size, allow_degenerate=True) return verts, faces def plotly_3d(verts, faces): x, y, z = zip(*verts) print "Drawing" # Make the colormap single color since the axes are positional not intensity. 
# colormap=['rgb(255,105,180)','rgb(255,255,51)','rgb(0,191,255)'] colormap = ['rgb(236, 236, 212)', 'rgb(236, 236, 212)'] fig = FF.create_trisurf(x=x, y=y, z=z, plot_edges=False, colormap=colormap, simplices=faces, backgroundcolor='rgb(64, 64, 64)', title="Interactive Visualization") iplot(fig) def plt_3d(verts, faces): print "Drawing" x, y, z = zip(*verts) fig = plt.figure(figsize=(10, 10)) ax = fig.add_subplot(111, projection='3d') # Fancy indexing: `verts[faces]` to generate a collection of triangles mesh = Poly3DCollection(verts[faces], linewidths=0.05, alpha=1) face_color = [1, 1, 0.9] mesh.set_facecolor(face_color) ax.add_collection3d(mesh) ax.set_xlim(0, max(x)) ax.set_ylim(0, max(y)) ax.set_zlim(0, max(z)) ax.set_facecolor((0.7, 0.7, 0.7)) plt.show() def main(): data_path = "./Inputs/full/" output_path = "./Outputs/full/" g = glob(data_path + '/*.dcm') patient = pre_process(data_path, output_path) file_used = output_path + "fullimages.npy" imgs_to_process = np.load(file_used).astype(np.float64) imgs_after_resamp, spacing = resample(imgs_to_process, patient,) v, f = make_mesh(imgs_after_resamp) plt_3d(v, f) #slice thickness: 2.000 # pixel spacing (row, col): (0.402344, 0.402344) if __name__ == '__main__': main()
{"/dicom2stl.py": ["/sitk2vtk.py", "/vtkutils.py"]}
73,956
JamisonHolt/SrDesign
refs/heads/master
/sitk2vtk.py
import SimpleITK as sitk import vtk # dictionary to convert SimpleITK pixel types to VTK pixelmap = {sitk.sitkUInt8: vtk.VTK_UNSIGNED_CHAR, sitk.sitkInt8: vtk.VTK_CHAR, sitk.sitkUInt16: vtk.VTK_UNSIGNED_SHORT, sitk.sitkInt16: vtk.VTK_SHORT, sitk.sitkUInt32: vtk.VTK_UNSIGNED_INT, sitk.sitkInt32: vtk.VTK_INT, sitk.sitkUInt64: vtk.VTK_UNSIGNED_LONG, sitk.sitkInt64: vtk.VTK_LONG, sitk.sitkFloat32: vtk.VTK_FLOAT, sitk.sitkFloat64: vtk.VTK_DOUBLE, sitk.sitkVectorUInt8: vtk.VTK_UNSIGNED_CHAR, sitk.sitkVectorInt8: vtk.VTK_CHAR, sitk.sitkVectorUInt16: vtk.VTK_UNSIGNED_SHORT, sitk.sitkVectorInt16: vtk.VTK_SHORT, sitk.sitkVectorUInt32: vtk.VTK_UNSIGNED_INT, sitk.sitkVectorInt32: vtk.VTK_INT, sitk.sitkVectorUInt64: vtk.VTK_UNSIGNED_LONG, sitk.sitkVectorInt64: vtk.VTK_LONG, sitk.sitkVectorFloat32: vtk.VTK_FLOAT, sitk.sitkVectorFloat64: vtk.VTK_DOUBLE, sitk.sitkLabelUInt8: vtk.VTK_UNSIGNED_CHAR, sitk.sitkLabelUInt16: vtk.VTK_UNSIGNED_SHORT, sitk.sitkLabelUInt32: vtk.VTK_UNSIGNED_INT, sitk.sitkLabelUInt64: vtk.VTK_UNSIGNED_LONG, } # A function that converts a SimpleITK image to a VTK image, via numpy def sitk2vtk(img, outVol=None, debugOn=False): size = list(img.GetSize()) origin = list(img.GetOrigin()) spacing = list(img.GetSpacing()) sitktype = img.GetPixelID() vtktype = pixelmap[sitktype] ncomp = img.GetNumberOfComponentsPerPixel() # there doesn't seem to be a way to specify the image orientation in VTK # convert the SimpleITK image to a numpy array i2 = sitk.GetArrayFromImage(img) i2_string = i2.tostring() if debugOn: print("data string address inside sitk2vtk", hex(id(i2_string))) # send the numpy array to VTK with a vtkImageImport object dataImporter = vtk.vtkImageImport() dataImporter.CopyImportVoidPointer(i2_string, len(i2_string)) dataImporter.SetDataScalarType(vtktype) dataImporter.SetNumberOfScalarComponents(ncomp) # VTK expects 3-dimensional parameters if len(size) == 2: size.append(1) if len(origin) == 2: origin.append(0.0) if len(spacing) == 2: 
spacing.append(spacing[0]) # Set the new VTK image's parameters # dataImporter.SetDataExtent(0, size[0]-1, 0, size[1]-1, 0, size[2]-1) dataImporter.SetWholeExtent(0, size[0]-1, 0, size[1]-1, 0, size[2]-1) dataImporter.SetDataOrigin(origin) dataImporter.SetDataSpacing(spacing) dataImporter.Update() vtk_image = dataImporter.GetOutput() # outVol and this DeepCopy are a work-around to avoid a crash on Windows if outVol is not None: outVol.DeepCopy(vtk_image) if debugOn: print("Volume object inside sitk2vtk") print(vtk_image) print("type = ", vtktype) print("num components = ", ncomp) print(size) print(origin) print(spacing) print(vtk_image.GetScalarComponentAsFloat(0, 0, 0, 0)) return vtk_image
{"/dicom2stl.py": ["/sitk2vtk.py", "/vtkutils.py"]}
73,958
billonalex/Mars-Photonic-Simulator
refs/heads/main
/model.py
#from astropy import * """ This class will represent our model. We can define our parameters, make the calculations and plot the results """ class MarsModel: #Constructor def __init__(self, baseline, diameter): #We initiate our parameters here self.baseline = baseline self.diameter = diameter #We can declare and calculate different paramaters that we may need in the future def calculate(self): print('Function to make the simulation') """ The idea is to put the biggest calculations here, and to add optional parameters to personalize our model Maybe we will want to make calculation many times 2 possibilities in the main script : - Declare only one model and personalize it with parameters required - Declare one model per personalization """ #Show result def show(self): print('Function to plot the result') """ When the calculs are done, this function allows us to show the results. """
{"/main.py": ["/model.py"]}
73,959
billonalex/Mars-Photonic-Simulator
refs/heads/main
/main.py
from model import MarsModel


if __name__ == "__main__":
    # First option to declare and run a simulation: build one model,
    # compute, then display the result.
    my_model = MarsModel(baseline=10, diameter=1)
    my_model.calculate()
    my_model.show()
{"/main.py": ["/model.py"]}
73,960
billonalex/Mars-Photonic-Simulator
refs/heads/main
/model/diffraction.py
""" Has three functions, one for single slit diffraction intensity, one for double slit diffraction intensity and one for diffraction grating. The functions are named single_slit_diffraction_intensity, double_slit_diffraction_intensity and grated_diffraction_intensity """ import numpy as np # single slit diffraction def single_slit_diffraction_intensity (slit_width, wavelength, screen_distance, X): """ Takes in slit_width, wavelength, screen distance and a numpy array X(an array of distances from the center). Outputs an array of normalized intensities corresponding to X. """ return ((np.sin((np.pi*slit_width*X)/(wavelength*screen_distance)))/((np.pi*slit_width*X)/(wavelength*screen_distance)))**2 def double_slit_diffraction_intensity (slit_width, wavelength, screen_distance, distance_between_slits, X) : """ Takes in slit_width, wavelength, screen distance, distance between the two strings and a numpy array X(an array of distances from the center). Outputs an array of normalized intensities corresponding to X. """ return (((np.sin((np.pi*slit_width*X)/(wavelength*screen_distance)))/((np.pi*slit_width*X)/(wavelength*screen_distance)))**2)*((np.cos((np.pi*distance_between_slits*X)/(wavelength*screen_distance)))**2) def grated_diffraction_intensity (slit_width, wavelength, screen_distance, distance_between_slits, number_of_slits, X): """ Takes in slit_width, wavelength, screen distance, distance between the two strings, the total number of slits and a numpy array X(an array of distances from the center). Outputs an array of normalized intensities corresponding to X. """ term1 = np.sin(np.pi*X*slit_width/(wavelength*screen_distance))/(np.pi*X*slit_width/(wavelength*screen_distance)) term2 = (np.sin(number_of_slits*np.pi*distance_between_slits*X/(wavelength*screen_distance)))/(number_of_slits*np.sin((np.pi*distance_between_slits*X)/(wavelength*screen_distance))) return (term1**2)*(term2**2)
{"/main.py": ["/model.py"]}
73,964
SMesser/book-bot
refs/heads/master
/story/seed_data.py
# Seed data for populating the story database.
# NOTE(review): 'gender' appears to be an integer code; the initial migration
# only declares choices (-1, 'Male') and (1, 'Female'), while values 0, 2 and
# 3 below fall outside that set — confirm the intended encoding.
CHARACTERS = [
    {'name': 'Allison', 'gender': 1},
    {'name': 'Ananda', 'gender': -1},
    {'name': 'Charisma', 'gender': 2},
    {'name': 'Clark', 'gender': -1},
    {'name': 'Clakka', 'gender': 1},
    {'name': 'Ender', 'gender': -1},
    {'name': 'Entropy', 'gender': -1},
    {'name': 'Ffyrnig Aderyn', 'gender': 1},
    {'name': 'Haditha', 'gender': 1},
    {'name': 'Kitten Deathpounce', 'gender': 1},
    {'name': 'Jess Couteau', 'gender': 1},
    {'name': 'Magaid Rhoswyn', 'gender': 1},
    {'name': 'Maggie', 'gender': 1},
    {'name': 'Marelli', 'gender': 1},
    {'name': 'Nog', 'gender': 3},
    {'name': 'Nosferatu', 'gender': -1},
    {'name': 'Schlock', 'gender': 0},
    {'name': 'Surolam', 'gender': 1},
    {'name': 'Tachyon', 'gender': 1},
    {'name': 'Val Deathspeaker', 'gender': 1},
]

GROUPS = [
    {'name': 'Blood Legion'},
    {'name': 'Grey Council'},
    {'name': 'LLC, Incorporated'},
    # Bug fix: the original 'Professional Bowlers'' Association' was implicit
    # string concatenation ('Professional Bowlers' + ' Association'), which
    # silently dropped the apostrophe.  Double quotes preserve it.
    {'name': "Professional Bowlers' Association"},
    {'name': 'Q Continuum'},
    {'name': 'Universal Animation Society'},
]

LOCATIONS = [
    {'name': 'Axanar'},
    {'name': 'Bluebell Park'},
    {'name': 'Dis'},
    {'name': 'Earth'},
    {'name': 'Eros'},
    {'name': 'Makemake'},
    {'name': 'Mars'},
    {'name': 'Mercury'},
    {'name': 'Jupiter'},
    {'name': 'Neptune'},
    {'name': 'Oisri'},
    {'name': 'Philadelphia'},
    {'name': 'Pluto'},
    {'name': 'Saturn'},
    {'name': 'Tam Elbrun'},
    {'name': 'Texas'},
    {'name': 'Battle Room'},
    {'name': 'Moon'},
    {'name': 'Eiffel Tower'},
    {'name': 'Locust Court'},
    {'name': 'Statue of Liberty'},
    {'name': 'Titan'},
    {'name': 'Uranus'},
    {'name': 'Venus'},
]

TITLES = [
    {'name': 'Captain'},
    {'name': 'Feast-master'},
    {'name': 'Head chef'},
    {'name': 'General'},
    {'name': 'Leader'},
    {'name': 'President'},
    {'name': 'Press Secretary'},
    {'name': 'Principal'},
    {'name': 'Prime Minister'},
    {'name': 'Premier'},
    {'name': 'Supreme Leader'},
    {'name': 'Surgeon General'},
    {'name': 'Tribune'},
    {'name': 'Tsar'},
]
{"/story/tests/actions/test_group_creation.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_group_decay.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_travel.py": ["/story/actions.py", "/story/models.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/management/commands/tweet.py": ["/story/actions.py"], "/story/tests/actions/test_group_spread.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/action_test_case.py": ["/story/actions.py"], "/story/tests/actions/test_join_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_seed_data.py": ["/story/seed_data.py"], "/story/tests/factories.py": ["/story/models.py"], "/story/tests/actions/test_discovery.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/admin.py": ["/story/models.py"], "/story/tests/actions/test_leave_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_character_creation.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_pathological_dbs.py": ["/story/management/commands/tweet.py", "/story/models.py", "/story/seed_data.py"], "/story/actions.py": ["/story/models.py", "/story/seed_data.py"]}
73,965
SMesser/book-bot
refs/heads/master
/story/tests/actions/test_group_creation.py
from django.test import TestCase

from story.actions import GroupCreationAction
from story.tests.actions.action_test_case import GenericActionTestMixin


class GroupCreationTestCase(GenericActionTestMixin, TestCase):
    """Run the generic action checks (verb structure, punctuation,
    ACTION_LIST membership) against GroupCreationAction."""

    # Consumed by the GenericActionTestMixin test methods.
    action_class = GroupCreationAction
{"/story/tests/actions/test_group_creation.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_group_decay.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_travel.py": ["/story/actions.py", "/story/models.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/management/commands/tweet.py": ["/story/actions.py"], "/story/tests/actions/test_group_spread.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/action_test_case.py": ["/story/actions.py"], "/story/tests/actions/test_join_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_seed_data.py": ["/story/seed_data.py"], "/story/tests/factories.py": ["/story/models.py"], "/story/tests/actions/test_discovery.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/admin.py": ["/story/models.py"], "/story/tests/actions/test_leave_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_character_creation.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_pathological_dbs.py": ["/story/management/commands/tweet.py", "/story/models.py", "/story/seed_data.py"], "/story/actions.py": ["/story/models.py", "/story/seed_data.py"]}
73,966
SMesser/book-bot
refs/heads/master
/story/tests/actions/test_group_decay.py
from django.test import TestCase

from story.actions import GroupDecayAction
from story.tests.actions.action_test_case import GenericActionTestMixin


class GroupDecayTestCase(GenericActionTestMixin, TestCase):
    """Run the generic action checks (verb structure, punctuation,
    ACTION_LIST membership) against GroupDecayAction."""

    # Consumed by the GenericActionTestMixin test methods.
    action_class = GroupDecayAction
{"/story/tests/actions/test_group_creation.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_group_decay.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_travel.py": ["/story/actions.py", "/story/models.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/management/commands/tweet.py": ["/story/actions.py"], "/story/tests/actions/test_group_spread.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/action_test_case.py": ["/story/actions.py"], "/story/tests/actions/test_join_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_seed_data.py": ["/story/seed_data.py"], "/story/tests/factories.py": ["/story/models.py"], "/story/tests/actions/test_discovery.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/admin.py": ["/story/models.py"], "/story/tests/actions/test_leave_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_character_creation.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_pathological_dbs.py": ["/story/management/commands/tweet.py", "/story/models.py", "/story/seed_data.py"], "/story/actions.py": ["/story/models.py", "/story/seed_data.py"]}
73,967
SMesser/book-bot
refs/heads/master
/story/tests/actions/test_travel.py
from django.test import TestCase

from story.actions import TravelAction
from story.models import Character
from story.tests.actions.action_test_case import GenericActionTestMixin
from story.tests.factories import CharacterFactory, LocationFactory


class TravelTestCase(GenericActionTestMixin, TestCase):
    """Generic action checks plus travel-specific scenarios."""

    action_class = TravelAction

    def test_single_location_blocks_motion(self):
        """If there is only one location, characters can't travel"""
        place = LocationFactory()
        CharacterFactory(location=place)
        self.assertEqual(self.action_class.weight_available(), 0)

    def test_travel_predictable_with_single_character_and_two_locations(self):
        """With exactly two locations, the lone character must travel to the
        other one.  (Original docstring was a copy-paste of the previous
        test's.)"""
        old_place, new_place = LocationFactory.create_batch(2)
        char = CharacterFactory(location=old_place)
        # get_kwargs() appears to perform the travel as a side effect —
        # TODO confirm against TravelAction.
        self.action_class.get_kwargs()
        # Reload from the DB to observe the persisted location change.
        char = Character.objects.get(pk=char.pk)
        self.assertEqual(char.location, new_place)
{"/story/tests/actions/test_group_creation.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_group_decay.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_travel.py": ["/story/actions.py", "/story/models.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/management/commands/tweet.py": ["/story/actions.py"], "/story/tests/actions/test_group_spread.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/action_test_case.py": ["/story/actions.py"], "/story/tests/actions/test_join_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_seed_data.py": ["/story/seed_data.py"], "/story/tests/factories.py": ["/story/models.py"], "/story/tests/actions/test_discovery.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/admin.py": ["/story/models.py"], "/story/tests/actions/test_leave_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_character_creation.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_pathological_dbs.py": ["/story/management/commands/tweet.py", "/story/models.py", "/story/seed_data.py"], "/story/actions.py": ["/story/models.py", "/story/seed_data.py"]}
73,968
SMesser/book-bot
refs/heads/master
/story/migrations/0001_initial.py
# -*- coding: utf-8 -*- # Generated by Django 1.9.7 on 2016-07-17 22:43 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Character', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.TextField(unique=True)), ('gender', models.SmallIntegerField(choices=[(-1, 'Male'), (1, 'Female')])), ], ), migrations.CreateModel( name='Group', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.TextField(unique=True)), ], ), migrations.CreateModel( name='Location', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.TextField(unique=True)), ], ), migrations.CreateModel( name='Office', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='story.Group')), ('officer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='story.Character')), ], ), migrations.CreateModel( name='Title', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.TextField(unique=True)), ], ), migrations.AddField( model_name='office', name='title', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='story.Title'), ), migrations.AddField( model_name='character', name='location', field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='story.Location'), ), migrations.AlterUniqueTogether( name='office', unique_together=set([('group', 'title')]), ), ]
{"/story/tests/actions/test_group_creation.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_group_decay.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_travel.py": ["/story/actions.py", "/story/models.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/management/commands/tweet.py": ["/story/actions.py"], "/story/tests/actions/test_group_spread.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/action_test_case.py": ["/story/actions.py"], "/story/tests/actions/test_join_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_seed_data.py": ["/story/seed_data.py"], "/story/tests/factories.py": ["/story/models.py"], "/story/tests/actions/test_discovery.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/admin.py": ["/story/models.py"], "/story/tests/actions/test_leave_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_character_creation.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_pathological_dbs.py": ["/story/management/commands/tweet.py", "/story/models.py", "/story/seed_data.py"], "/story/actions.py": ["/story/models.py", "/story/seed_data.py"]}
73,969
SMesser/book-bot
refs/heads/master
/story/management/commands/tweet.py
import tweepy
import secrets
import sys

from traceback import print_exc

from django.core.management.base import BaseCommand
from random import randint

from story.actions import ACTION_LIST

# Hard limit imposed by Twitter.
MAX_TWEET_LENGTH = 140

# Length of the longest (unformatted) verb template across all actions; used
# as a rough upper bound when deciding whether another sentence still fits.
MAX_ESTIMATE_ACTION_LENGTH = 1
for action_class in ACTION_LIST:
    for verb in action_class.VERBS:
        if len(verb) >= MAX_ESTIMATE_ACTION_LENGTH:
            MAX_ESTIMATE_ACTION_LENGTH = len(verb)


class Command(BaseCommand):
    """Management command: perform random story actions and tweet them."""

    def handle(self, *args, **options):
        """Build a multi-sentence message and post it."""
        msg = ''
        parts = []
        while len(msg) + 2 < MAX_TWEET_LENGTH - MAX_ESTIMATE_ACTION_LENGTH:
            # Construct multiple sentences if we can do so while staying under
            # tweet length. This isn't guaranteed in either direction, but it's
            # a decent rough estimate.
            parts.append(self.construct_message())
            msg = ' '.join(parts)
        self.tweet(msg)

    def tweet(self, message):
        """Post the given message."""
        auth = tweepy.OAuthHandler(secrets.consumer_key, secrets.consumer_secret)
        auth.set_access_token(secrets.access_token, secrets.access_token_secret)
        api = tweepy.API(auth)
        auth.secure = True
        print('Posting message "{}"'.format(message))
        api.update_status(status=message)

    def choose_action_class(self):
        """Choose a class of random DB actions.

        Returns None when no action has any weight available (e.g. an
        empty database).
        """
        action_weights = {
            action: action.weight_available() for action in ACTION_LIST
        }
        total_weight = sum(action_weights.values())
        if total_weight <= 0:
            # Bug fix: randint(1, 0) raises ValueError, so an empty database
            # used to crash the command instead of tweeting a fallback.
            return None
        # Weighted random selection: walk the weights until the draw lands.
        item = randint(1, total_weight)
        for action_class, action_weight in action_weights.items():
            if action_weight < item:
                item -= action_weight
            else:
                return action_class
        return None

    def construct_message(self):
        """Execute a random DB action and return a string describing it."""
        action_class = self.choose_action_class()
        if action_class is None:
            return 'Nothing much happened.'
        try:
            return action_class.execute()
        except Exception:
            print_exc(file=sys.stdout)
            # Bug fix: str.strip('Action') removes *characters* from both
            # ends, mangling names like 'GroupCreationAction' -> 'GroupCrea'.
            # Remove the literal 'Action' suffix instead.
            name = action_class.__name__
            if name.endswith('Action'):
                name = name[:-len('Action')]
            mode = name.title()
            return 'Chaos raged across the universe of {}.'.format(mode)
{"/story/tests/actions/test_group_creation.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_group_decay.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_travel.py": ["/story/actions.py", "/story/models.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/management/commands/tweet.py": ["/story/actions.py"], "/story/tests/actions/test_group_spread.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/action_test_case.py": ["/story/actions.py"], "/story/tests/actions/test_join_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_seed_data.py": ["/story/seed_data.py"], "/story/tests/factories.py": ["/story/models.py"], "/story/tests/actions/test_discovery.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/admin.py": ["/story/models.py"], "/story/tests/actions/test_leave_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_character_creation.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_pathological_dbs.py": ["/story/management/commands/tweet.py", "/story/models.py", "/story/seed_data.py"], "/story/actions.py": ["/story/models.py", "/story/seed_data.py"]}
73,970
SMesser/book-bot
refs/heads/master
/story/migrations/0003_auto_20160811_1259.py
# -*- coding: utf-8 -*- # Generated by Django 1.9.7 on 2016-08-11 12:59 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('story', '0002_auto_20160723_1419'), ] operations = [ migrations.AddField( model_name='group', name='influences', field=models.ManyToManyField(to='story.Location'), ), migrations.AddField( model_name='group', name='members', field=models.ManyToManyField(to='story.Character'), ), ]
{"/story/tests/actions/test_group_creation.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_group_decay.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_travel.py": ["/story/actions.py", "/story/models.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/management/commands/tweet.py": ["/story/actions.py"], "/story/tests/actions/test_group_spread.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/action_test_case.py": ["/story/actions.py"], "/story/tests/actions/test_join_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_seed_data.py": ["/story/seed_data.py"], "/story/tests/factories.py": ["/story/models.py"], "/story/tests/actions/test_discovery.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/admin.py": ["/story/models.py"], "/story/tests/actions/test_leave_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_character_creation.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_pathological_dbs.py": ["/story/management/commands/tweet.py", "/story/models.py", "/story/seed_data.py"], "/story/actions.py": ["/story/models.py", "/story/seed_data.py"]}
73,971
SMesser/book-bot
refs/heads/master
/story/tests/actions/test_group_spread.py
from django.test import TestCase

from story.actions import GroupSpreadAction
from story.tests.actions.action_test_case import GenericActionTestMixin


class GroupSpreadTestCase(GenericActionTestMixin, TestCase):
    """Run the generic action checks (verb structure, punctuation,
    ACTION_LIST membership) against GroupSpreadAction."""

    # Consumed by the GenericActionTestMixin test methods.
    action_class = GroupSpreadAction
{"/story/tests/actions/test_group_creation.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_group_decay.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_travel.py": ["/story/actions.py", "/story/models.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/management/commands/tweet.py": ["/story/actions.py"], "/story/tests/actions/test_group_spread.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/action_test_case.py": ["/story/actions.py"], "/story/tests/actions/test_join_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_seed_data.py": ["/story/seed_data.py"], "/story/tests/factories.py": ["/story/models.py"], "/story/tests/actions/test_discovery.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/admin.py": ["/story/models.py"], "/story/tests/actions/test_leave_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_character_creation.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_pathological_dbs.py": ["/story/management/commands/tweet.py", "/story/models.py", "/story/seed_data.py"], "/story/actions.py": ["/story/models.py", "/story/seed_data.py"]}
73,972
SMesser/book-bot
refs/heads/master
/story/tests/actions/action_test_case.py
from story.actions import ACTION_LIST


class GenericActionTestMixin(object):
    """Shared checks applied to every Action subclass under test.

    Subclasses set ``action_class`` to the Action being verified and mix
    this in alongside a TestCase.
    """

    longMessage = True
    action_class = None

    @staticmethod
    def extract_arguments_from_verb(verb):
        """Utility to find the expected keyword arguments for formatting"""
        found = set()
        text = str(verb)
        while '{' in text:
            # Skip up to and past the next opening brace, then capture
            # everything until the matching closing brace.
            text = text[text.index('{') + 1:]
            close = text.index('}')
            found.add(text[:close])
        return found

    def test_verb_structure(self):
        """Check basic structure of each Action's VERBS constant"""
        reference_args = None
        for verb in self.action_class.VERBS:
            self.assertIsInstance(verb, basestring)
            verb_args = self.extract_arguments_from_verb(verb)
            if reference_args is None:
                # First verb establishes the expected argument set.
                reference_args = verb_args
            else:
                self.assertEqual(
                    reference_args,
                    verb_args,
                    msg='Inconsistent arguments for "{}" in {}'.format(
                        verb, self.action_class
                    )
                )

    def test_verb_terminators(self):
        """Confirm every sentence ends with proper punctuation"""
        allowed_terminators = '.'
        for verb in self.action_class.VERBS:
            self.assertIn(
                verb[-1],
                allowed_terminators,
                msg='{} verb "{}" does not end in an allowed character'.format(
                    self.action_class.__name__, verb
                )
            )

    def test_action_class_in_action_list(self):
        """Confirm this class of action is available"""
        self.assertIn(self.action_class, ACTION_LIST)
{"/story/tests/actions/test_group_creation.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_group_decay.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_travel.py": ["/story/actions.py", "/story/models.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/management/commands/tweet.py": ["/story/actions.py"], "/story/tests/actions/test_group_spread.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/action_test_case.py": ["/story/actions.py"], "/story/tests/actions/test_join_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_seed_data.py": ["/story/seed_data.py"], "/story/tests/factories.py": ["/story/models.py"], "/story/tests/actions/test_discovery.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/admin.py": ["/story/models.py"], "/story/tests/actions/test_leave_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_character_creation.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_pathological_dbs.py": ["/story/management/commands/tweet.py", "/story/models.py", "/story/seed_data.py"], "/story/actions.py": ["/story/models.py", "/story/seed_data.py"]}
73,973
SMesser/book-bot
refs/heads/master
/story/tests/actions/test_join_group.py
from django.test import TestCase

from story.actions import JoinGroupAction
from story.tests.actions.action_test_case import GenericActionTestMixin
from story.tests.factories import (
    CharacterFactory,
    GroupFactory,
    LocationFactory
)


class JoinGroupTestCase(GenericActionTestMixin, TestCase):
    """Generic action checks plus JoinGroup weighting/selection scenarios."""

    action_class = JoinGroupAction

    def test_empty_database_implies_zero_weight(self):
        """Confirm an empty database has zero weight for this action."""
        self.assertEqual(self.action_class.weight_available(), 0)

    def test_characters_without_groups_implies_zero_weight(self):
        """A characters with no Group gives zero weight."""
        new_place = LocationFactory()
        CharacterFactory(location=new_place)
        self.assertEqual(self.action_class.weight_available(), 0)

    def test_no_characters_implies_zero_weight(self):
        """A location with no character gives zero weight."""
        new_group = GroupFactory()
        new_place = LocationFactory()
        new_group.influences.add(new_place)
        self.assertEqual(self.action_class.weight_available(), 0)

    def test_only_char_at_group_location_in_group_implies_zero_weight(self):
        """Can't add existing members to a group"""
        place = LocationFactory()
        group = GroupFactory()
        char = CharacterFactory(location=place)
        group.members.add(char)
        group.influences.add(place)
        self.assertEqual(self.action_class.weight_available(), 0)

    def test_character_not_at_group_location_implies_zero_weight(self):
        """Can't join a distant group"""
        char_place, group_place = LocationFactory.create_batch(2)
        group = GroupFactory()
        CharacterFactory(location=char_place)
        group.influences.add(group_place)
        self.assertEqual(self.action_class.weight_available(), 0)

    def test_nonmember_at_group_location_single_choice(self):
        """Can join a local group"""
        place = LocationFactory()
        group = GroupFactory()
        CharacterFactory(location=place)
        group.influences.add(place)
        self.assertEqual(self.action_class.weight_available(), 1)

    def test_nonmember_at_group_location_yields_predicted_choice(self):
        """Acceptable character can join local group"""
        place = LocationFactory()
        group = GroupFactory()
        char, member = CharacterFactory.create_batch(2, location=place)
        group.influences.add(place)
        group.members.add(member)
        self.assertEqual(
            self.action_class.get_kwargs(),
            {
                'character': char.name,
                'group': group.name
            }
        )

    # Bug fix: this method originally reused the name
    # test_nonmember_at_group_location_yields_predicted_choice, silently
    # shadowing the test above so it never ran.  Renamed so both execute.
    def test_joining_adds_character_to_group_members(self):
        """Acceptable character can join local group"""
        place = LocationFactory()
        group = GroupFactory()
        char = CharacterFactory(location=place)
        group.influences.add(place)
        self.action_class.get_kwargs()
        self.assertEqual(
            {char},
            set(group.members.all())
        )
{"/story/tests/actions/test_group_creation.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_group_decay.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_travel.py": ["/story/actions.py", "/story/models.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/management/commands/tweet.py": ["/story/actions.py"], "/story/tests/actions/test_group_spread.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/action_test_case.py": ["/story/actions.py"], "/story/tests/actions/test_join_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_seed_data.py": ["/story/seed_data.py"], "/story/tests/factories.py": ["/story/models.py"], "/story/tests/actions/test_discovery.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/admin.py": ["/story/models.py"], "/story/tests/actions/test_leave_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_character_creation.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_pathological_dbs.py": ["/story/management/commands/tweet.py", "/story/models.py", "/story/seed_data.py"], "/story/actions.py": ["/story/models.py", "/story/seed_data.py"]}
73,974
SMesser/book-bot
refs/heads/master
/story/tests/test_seed_data.py
# Sequence moved out of ``collections`` in Python 3.3 and the alias was
# removed in 3.10 — import from ``collections.abc``.
from collections.abc import Sequence

from django.test import TestCase

from story.seed_data import CHARACTERS, GROUPS, LOCATIONS, TITLES

# Every seed list this project ships; each must share one dict schema.
DATA_LISTS = [CHARACTERS, GROUPS, LOCATIONS, TITLES]


class SeedDataTests(TestCase):
    """Structural checks on the static seed-data lists."""

    def test_data_structure(self):
        """Confirm consistency and format of seed data lists.

        For each list: it is a Sequence of dicts, every dict has string
        keys, and every dict shares the key set of the first entry.
        """
        for data_list in DATA_LISTS:
            expected_keys = None
            self.assertIsInstance(data_list, Sequence)
            for variant in data_list:
                self.assertIsInstance(variant, dict)
                if expected_keys is None:
                    expected_keys = set(variant.keys())
                    for key in expected_keys:
                        # BUG FIX: ``basestring`` exists only on Python 2
                        # and raises NameError on Python 3; ``str`` is the
                        # correct text type here.
                        self.assertIsInstance(key, str)
                else:
                    self.assertEqual(expected_keys, set(variant.keys()))
{"/story/tests/actions/test_group_creation.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_group_decay.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_travel.py": ["/story/actions.py", "/story/models.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/management/commands/tweet.py": ["/story/actions.py"], "/story/tests/actions/test_group_spread.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/action_test_case.py": ["/story/actions.py"], "/story/tests/actions/test_join_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_seed_data.py": ["/story/seed_data.py"], "/story/tests/factories.py": ["/story/models.py"], "/story/tests/actions/test_discovery.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/admin.py": ["/story/models.py"], "/story/tests/actions/test_leave_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_character_creation.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_pathological_dbs.py": ["/story/management/commands/tweet.py", "/story/models.py", "/story/seed_data.py"], "/story/actions.py": ["/story/models.py", "/story/seed_data.py"]}
73,975
SMesser/book-bot
refs/heads/master
/story/tests/factories.py
from factory import DjangoModelFactory, Faker
from factory.fuzzy import FuzzyChoice

from story.models import Character, Group, Location, Office, Title

# Valid integer codes for Character.gender, pulled from the model's
# (value, label) choices so the factory can never pick an invalid code.
GENDER_CHOICES = [number for number, name in Character.GENDERS]


class CharacterFactory(DjangoModelFactory):
    """Build a Character with a fake person name and a random gender."""

    class Meta:
        model = Character

    name = Faker('name')
    gender = FuzzyChoice(GENDER_CHOICES)


class GroupFactory(DjangoModelFactory):
    """Build a Group with a fake company name."""

    class Meta:
        model = Group

    name = Faker('company')


class LocationFactory(DjangoModelFactory):
    """Build a Location.

    NOTE(review): uses the 'name' (person-name) Faker provider for a place
    name — presumably good enough for tests; confirm if realistic place
    names ever matter.
    """

    class Meta:
        model = Location

    name = Faker('name')


class OfficeFactory(DjangoModelFactory):
    """Build an Office; caller must supply officer/group/title."""

    class Meta:
        model = Office


class TitleFactory(DjangoModelFactory):
    """Build a Title with a fake business buzz-phrase as its name."""

    class Meta:
        model = Title

    name = Faker('bs')
{"/story/tests/actions/test_group_creation.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_group_decay.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_travel.py": ["/story/actions.py", "/story/models.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/management/commands/tweet.py": ["/story/actions.py"], "/story/tests/actions/test_group_spread.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/action_test_case.py": ["/story/actions.py"], "/story/tests/actions/test_join_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_seed_data.py": ["/story/seed_data.py"], "/story/tests/factories.py": ["/story/models.py"], "/story/tests/actions/test_discovery.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/admin.py": ["/story/models.py"], "/story/tests/actions/test_leave_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_character_creation.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_pathological_dbs.py": ["/story/management/commands/tweet.py", "/story/models.py", "/story/seed_data.py"], "/story/actions.py": ["/story/models.py", "/story/seed_data.py"]}
73,976
SMesser/book-bot
refs/heads/master
/story/tests/actions/test_discovery.py
from django.test import TestCase

from story.actions import DiscoveryAction
from story.models import Location
from story.seed_data import LOCATIONS
from story.tests.actions.action_test_case import GenericActionTestMixin
from story.tests.factories import LocationFactory


class DiscoveryTestCase(GenericActionTestMixin, TestCase):
    """Checks for DiscoveryAction, which introduces new Locations."""

    action_class = DiscoveryAction

    def test_no_remaining_locations_means_no_discovery(self):
        """If all pre-generated locations exist, don't make more"""
        # Exhaust the seed data up front so no discovery remains possible.
        for seed_kwargs in LOCATIONS:
            Location.objects.create(**seed_kwargs)
        self.assertEqual(0, self.action_class.weight_available())

    def test_do_not_re_create_existing_location(self):
        """Existing Locations should not be re-discovered."""
        existing = LocationFactory()
        reported_name = self.action_class.get_kwargs()['place']
        self.assertNotEqual(reported_name, existing.name.title())

    def test_get_kwargs_makes_location(self):
        """get_kwargs() should instantiate the Location"""
        reported_name = self.action_class.get_kwargs()['place']
        self.assertEqual(
            Location.objects.get().name.title(),
            reported_name
        )
{"/story/tests/actions/test_group_creation.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_group_decay.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_travel.py": ["/story/actions.py", "/story/models.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/management/commands/tweet.py": ["/story/actions.py"], "/story/tests/actions/test_group_spread.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/action_test_case.py": ["/story/actions.py"], "/story/tests/actions/test_join_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_seed_data.py": ["/story/seed_data.py"], "/story/tests/factories.py": ["/story/models.py"], "/story/tests/actions/test_discovery.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/admin.py": ["/story/models.py"], "/story/tests/actions/test_leave_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_character_creation.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_pathological_dbs.py": ["/story/management/commands/tweet.py", "/story/models.py", "/story/seed_data.py"], "/story/actions.py": ["/story/models.py", "/story/seed_data.py"]}
73,977
SMesser/book-bot
refs/heads/master
/story/admin.py
from django.contrib.admin import site

from story.models import Character, Group, Location, Title

# Register every story model with the default admin site.
for model in (Character, Group, Location, Title):
    site.register(model)
{"/story/tests/actions/test_group_creation.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_group_decay.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_travel.py": ["/story/actions.py", "/story/models.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/management/commands/tweet.py": ["/story/actions.py"], "/story/tests/actions/test_group_spread.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/action_test_case.py": ["/story/actions.py"], "/story/tests/actions/test_join_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_seed_data.py": ["/story/seed_data.py"], "/story/tests/factories.py": ["/story/models.py"], "/story/tests/actions/test_discovery.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/admin.py": ["/story/models.py"], "/story/tests/actions/test_leave_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_character_creation.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_pathological_dbs.py": ["/story/management/commands/tweet.py", "/story/models.py", "/story/seed_data.py"], "/story/actions.py": ["/story/models.py", "/story/seed_data.py"]}
73,978
SMesser/book-bot
refs/heads/master
/story/tests/actions/test_leave_group.py
from django.test import TestCase

from story.actions import LeaveGroupAction
from story.tests.actions.action_test_case import GenericActionTestMixin


class LeaveGroupTestCase(GenericActionTestMixin, TestCase):
    """Run the generic action checks against LeaveGroupAction.

    All test methods come from GenericActionTestMixin; this subclass only
    binds the action class under test.
    """

    action_class = LeaveGroupAction
{"/story/tests/actions/test_group_creation.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_group_decay.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_travel.py": ["/story/actions.py", "/story/models.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/management/commands/tweet.py": ["/story/actions.py"], "/story/tests/actions/test_group_spread.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/action_test_case.py": ["/story/actions.py"], "/story/tests/actions/test_join_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_seed_data.py": ["/story/seed_data.py"], "/story/tests/factories.py": ["/story/models.py"], "/story/tests/actions/test_discovery.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/admin.py": ["/story/models.py"], "/story/tests/actions/test_leave_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_character_creation.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_pathological_dbs.py": ["/story/management/commands/tweet.py", "/story/models.py", "/story/seed_data.py"], "/story/actions.py": ["/story/models.py", "/story/seed_data.py"]}
73,979
SMesser/book-bot
refs/heads/master
/story/tests/actions/test_character_creation.py
from django.test import TestCase

from story.actions import CharacterCreationAction
from story.models import Character
from story.seed_data import CHARACTERS
from story.tests.actions.action_test_case import GenericActionTestMixin
from story.tests.factories import LocationFactory


class CharacterCreationTestCase(GenericActionTestMixin, TestCase):
    """Checks for CharacterCreationAction."""

    action_class = CharacterCreationAction

    def test_no_characters_made_gives_positive_weight(self):
        """Must be able to create characters when none are present."""
        LocationFactory()
        self.assertGreaterEqual(self.action_class.weight_available(), 1)

    def test_no_locations_made_gives_zero_weight(self):
        """Must NOT be able to create characters when there are no places."""
        self.assertEqual(self.action_class.weight_available(), 0)

    def test_all_pregen_characters_made_gives_zero_weight(self):
        """Only Create new characters as long as seed data is available."""
        home = LocationFactory()
        # Instantiate every seed character, leaving none to create.
        for seed_kwargs in CHARACTERS:
            Character.objects.create(location=home, **seed_kwargs)
        self.assertEqual(self.action_class.weight_available(), 0)

    def test_single_location_predicts_reported_location(self):
        """get_kwargs() must report the single available location"""
        home = LocationFactory()
        reported = self.action_class.get_kwargs()['place']
        self.assertEqual(str(reported), str(home))

    def test_single_location_predicts_character_location(self):
        """get_kwargs() puts new characters at the single available location"""
        home = LocationFactory()
        self.action_class.get_kwargs()
        self.assertEqual(Character.objects.get().location, home)
{"/story/tests/actions/test_group_creation.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_group_decay.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_travel.py": ["/story/actions.py", "/story/models.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/management/commands/tweet.py": ["/story/actions.py"], "/story/tests/actions/test_group_spread.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/action_test_case.py": ["/story/actions.py"], "/story/tests/actions/test_join_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_seed_data.py": ["/story/seed_data.py"], "/story/tests/factories.py": ["/story/models.py"], "/story/tests/actions/test_discovery.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/admin.py": ["/story/models.py"], "/story/tests/actions/test_leave_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_character_creation.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_pathological_dbs.py": ["/story/management/commands/tweet.py", "/story/models.py", "/story/seed_data.py"], "/story/actions.py": ["/story/models.py", "/story/seed_data.py"]}
73,980
SMesser/book-bot
refs/heads/master
/story/models.py
from __future__ import unicode_literals

from django.db.models import (
    CASCADE,
    ForeignKey,
    ManyToManyField,
    Model,
    SmallIntegerField,
    TextField
)


class Character(Model):
    """An actor in the story, optionally located at one Location."""

    # Integer codes stored in the ``gender`` column.
    MALE = -1
    NEUTER = 0
    FEMALE = 1
    OTHER = 2
    MUFTALE = 3
    GENDERS = (
        (MALE, 'Male'),
        (FEMALE, 'Female'),
        (NEUTER, 'Neuter'),
        (MUFTALE, 'Muftale'),
        (OTHER, 'Varies / Other')
    )

    name = TextField(unique=True)
    gender = SmallIntegerField(choices=GENDERS)
    # on_delete=CASCADE was the implicit default before Django 2.0 and is a
    # required argument from 2.0 onward; spelling it out preserves behavior
    # and keeps the model forward-compatible.
    # NOTE(review): for a nullable FK, SET_NULL may be the intended policy —
    # confirm before changing; CASCADE matches the previous implicit default.
    location = ForeignKey(
        'story.Location',
        null=True,
        blank=True,
        default=None,
        on_delete=CASCADE
    )

    def __str__(self):
        return self.name


class Office(Model):
    """A titled position held by a Character within a Group."""

    class Meta:
        # One holder per (group, title) pair.
        unique_together = (('group', 'title'),)

    # Explicit on_delete=CASCADE: see note on Character.location.
    officer = ForeignKey('story.Character', on_delete=CASCADE)
    group = ForeignKey('story.Group', on_delete=CASCADE)
    title = ForeignKey('story.Title', on_delete=CASCADE)

    def __str__(self):
        return '{} {}: {}'.format(
            self.group.name,
            self.title.name,
            self.officer.name
        )


class Group(Model):
    """An organization with member Characters and influenced Locations."""

    name = TextField(unique=True)
    influences = ManyToManyField('story.Location')
    members = ManyToManyField('story.Character')

    def __str__(self):
        return self.name


class Location(Model):
    """A named place in the story world."""

    name = TextField(unique=True)

    def __str__(self):
        return self.name


class Title(Model):
    """A named office/rank that a Character can hold in a Group."""

    name = TextField(unique=True)

    def __str__(self):
        return self.name
{"/story/tests/actions/test_group_creation.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_group_decay.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_travel.py": ["/story/actions.py", "/story/models.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/management/commands/tweet.py": ["/story/actions.py"], "/story/tests/actions/test_group_spread.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/action_test_case.py": ["/story/actions.py"], "/story/tests/actions/test_join_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_seed_data.py": ["/story/seed_data.py"], "/story/tests/factories.py": ["/story/models.py"], "/story/tests/actions/test_discovery.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/admin.py": ["/story/models.py"], "/story/tests/actions/test_leave_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_character_creation.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_pathological_dbs.py": ["/story/management/commands/tweet.py", "/story/models.py", "/story/seed_data.py"], "/story/actions.py": ["/story/models.py", "/story/seed_data.py"]}
73,981
SMesser/book-bot
refs/heads/master
/twitterbot/settings.py
from dotenv import load_dotenv
from os import environ
from os.path import abspath, dirname, join
# SECURITY FIX: key material must come from a CSPRNG — ``secrets.choice``
# replaces ``random.choice``, which is predictable.
from secrets import choice
from string import printable

BASE_DIR = dirname(dirname(abspath(__file__)))
load_dotenv(abspath(join(BASE_DIR, '.env')))

DEBUG = bool(int(environ.get('DEBUG', '0')))

# NOTE(review): this branch looks inverted — conventionally production
# (DEBUG off) reads a stable SECRET_KEY from the environment and only
# development uses a throwaway key. As written, every non-DEBUG process
# gets a fresh random key, invalidating sessions/signed cookies on each
# restart. Logic preserved as-is pending confirmation of intent.
if DEBUG:
    SECRET_KEY = environ.get('SECRET_KEY')
else:
    SECRET_KEY = ''.join(choice(printable) for _ in range(30))

ALLOWED_HOSTS = ['127.0.0.1']

# Application definition

INSTALLED_APPS = [
    'story',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'twitterbot.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'twitterbot.wsgi.application'

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': join(BASE_DIR, 'db.sqlite3'),
    }
}

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

STATIC_URL = '/static/'

RESOURCES = abspath(join(BASE_DIR, 'resources'))

STATIC_ROOT = abspath(join(RESOURCES, 'collected_static_files'))
{"/story/tests/actions/test_group_creation.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_group_decay.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_travel.py": ["/story/actions.py", "/story/models.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/management/commands/tweet.py": ["/story/actions.py"], "/story/tests/actions/test_group_spread.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/action_test_case.py": ["/story/actions.py"], "/story/tests/actions/test_join_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_seed_data.py": ["/story/seed_data.py"], "/story/tests/factories.py": ["/story/models.py"], "/story/tests/actions/test_discovery.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/admin.py": ["/story/models.py"], "/story/tests/actions/test_leave_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_character_creation.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_pathological_dbs.py": ["/story/management/commands/tweet.py", "/story/models.py", "/story/seed_data.py"], "/story/actions.py": ["/story/models.py", "/story/seed_data.py"]}
73,982
SMesser/book-bot
refs/heads/master
/story/migrations/0002_auto_20160723_1419.py
# -*- coding: utf-8 -*- # Generated by Django 1.9.7 on 2016-07-23 14:19 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('story', '0001_initial'), ] operations = [ migrations.AlterField( model_name='character', name='gender', field=models.SmallIntegerField(choices=[(-1, 'Male'), (1, 'Female'), (0, 'Neuter'), (3, 'Muftale'), (2, 'Varies / Other')]), ), ]
{"/story/tests/actions/test_group_creation.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_group_decay.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_travel.py": ["/story/actions.py", "/story/models.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/management/commands/tweet.py": ["/story/actions.py"], "/story/tests/actions/test_group_spread.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/action_test_case.py": ["/story/actions.py"], "/story/tests/actions/test_join_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_seed_data.py": ["/story/seed_data.py"], "/story/tests/factories.py": ["/story/models.py"], "/story/tests/actions/test_discovery.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/admin.py": ["/story/models.py"], "/story/tests/actions/test_leave_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_character_creation.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_pathological_dbs.py": ["/story/management/commands/tweet.py", "/story/models.py", "/story/seed_data.py"], "/story/actions.py": ["/story/models.py", "/story/seed_data.py"]}
73,983
SMesser/book-bot
refs/heads/master
/story/tests/test_pathological_dbs.py
from django.test import TestCase

from story.management.commands.tweet import ACTION_LIST
from story.models import Character, Group, Location, Title
from story.seed_data import CHARACTERS, GROUPS, LOCATIONS, TITLES


class StoryTestCase(TestCase):
    """Superclass for testing with custom asserts and basic structural tests.

    Since this class has an empty setUp(), it confirms an empty database is
    sane.
    """

    longMessage = True

    def assertWeightsNonnegative(self):
        """Assert all weights are non-negative."""
        for action_class in ACTION_LIST:
            weight = action_class.weight_available()
            self.assertGreaterEqual(weight, 0)
            self.assertIsInstance(weight, int)

    def assertSomeActionPossible(self):
        """Confirm the sum of weights is positive."""
        total = sum(cls.weight_available() for cls in ACTION_LIST)
        self.assertGreater(total, 0)

    def test_setup_database_consistent(self):
        """Whatever the initial database is, confirm no negative weights"""
        self.assertWeightsNonnegative()

    def test_some_action_possible(self):
        """Whatever the initial database is, confirm some action is available"""
        self.assertSomeActionPossible()

    def test_spot_check_actions(self):
        for cls in ACTION_LIST:
            if cls.weight_available() <= 0:
                continue
            self.assertIsInstance(
                cls.get_kwargs(),
                dict,
                msg='Failure for {}'.format(cls.__name__)
            )


class FullDBTests(StoryTestCase):
    """Confirm database filled with all pre-gen records is sane.

    Test methods are inherited from StoryTestCase.
    """

    def setUp(self):
        # Instantiate every seed record for every model.
        seed_pairs = (
            (Character, CHARACTERS),
            (Group, GROUPS),
            (Location, LOCATIONS),
            (Title, TITLES),
        )
        for model, seed_list in seed_pairs:
            for kwargs in seed_list:
                model.objects.create(**kwargs)
{"/story/tests/actions/test_group_creation.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_group_decay.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_travel.py": ["/story/actions.py", "/story/models.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/management/commands/tweet.py": ["/story/actions.py"], "/story/tests/actions/test_group_spread.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/action_test_case.py": ["/story/actions.py"], "/story/tests/actions/test_join_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_seed_data.py": ["/story/seed_data.py"], "/story/tests/factories.py": ["/story/models.py"], "/story/tests/actions/test_discovery.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/admin.py": ["/story/models.py"], "/story/tests/actions/test_leave_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_character_creation.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_pathological_dbs.py": ["/story/management/commands/tweet.py", "/story/models.py", "/story/seed_data.py"], "/story/actions.py": ["/story/models.py", "/story/seed_data.py"]}
73,984
SMesser/book-bot
refs/heads/master
/story/actions.py
from django.db.models import Count
from random import choice

from story.models import Character, Group, Location, Title
from story.seed_data import CHARACTERS, GROUPS, LOCATIONS, TITLES


def create_from_seed_list(model, seed_list):
    """Pick one dict of kwargs from a static list and instantiate it.

    Parameters
    ----------
    model : a Django model class
    seed_list : list of dicts of constructor kwargs

    Returns
    -------
    The newly created model instance.

    Raises
    ------
    ValueError
        If every seed already has a matching record.
    """
    if model.objects.count() >= len(seed_list):
        # BUG FIX: was `model.name`, which does not exist on a Django model
        # class and raised AttributeError instead of the intended ValueError.
        raise ValueError('Cannot make more {} records.'.format(model.__name__))
    # Make a copy of the original list. Remove items from the duplicate list
    # as we try them. This prevents repeated attempts at duplicates and
    # speeds finding new kwargs if seed_list is large.
    remaining = list(seed_list)
    created = False
    while not created:
        # BUG FIX: choose from `remaining`, not `seed_list`. Choosing from
        # the full list could draw the same kwargs twice, which both defeats
        # the de-duplication above and makes remaining.remove() raise
        # ValueError on the second draw.
        kwargs = choice(remaining)
        record, created = model.objects.get_or_create(**kwargs)
        remaining.remove(kwargs)
    return record


def get_related_model_from_through_model(through, fieldname):
    """Get the model class referenced by a through model's field."""
    field_attr = getattr(through, fieldname).field
    try:
        return field_attr.related_model
    except AttributeError:
        # Handle older versions of Django's non-public API
        return field_attr.related.parent_model


def create_from_through_table(through, field1, field2):
    """Create and return a random new record for a through table.

    Inefficient if the through table is near fully-populated. Assumes
    there are not-yet-instantiated relationships (otherwise this loops
    forever -- callers gate on weight_available() first).
    """
    model1 = get_related_model_from_through_model(through, field1)
    model2 = get_related_model_from_through_model(through, field2)
    created = False
    while not created:
        arg1 = model1.objects.order_by('?')[0]
        arg2 = model2.objects.order_by('?')[0]
        record, created = through.objects.get_or_create(
            **{field1: arg1, field2: arg2}
        )
    return record


class Action(object):
    """Base class for broadly-structured actions that may happen."""

    @classmethod
    def weight_available(cls):
        """Given DB state, estimate number of possibilities for this action"""
        return 0

    @classmethod
    def execute(cls):
        """Update database and return a string to be tweeted."""
        kwargs = cls.get_kwargs()
        verb = choice(cls.VERBS)
        return verb.format(**kwargs)

    @classmethod
    def get_kwargs(cls):
        """Determine the records involved in the action and update them.

        Returns a dictionary of keyword arguments to format <cls>.VERBS.
        """
        raise NotImplementedError


class TravelAction(Action):
    """A located character moves to a different location."""

    VERBS = [
        '{character} went from {origin} to {destination}.',
        '{character} drove from {origin} to {destination}.',
        '{character} flew from {origin} to {destination}.',
        '{character} walked from {origin} to {destination}.',
        '{character} traveled from {origin} to {destination}.',
        '{character} moved from {origin} to {destination}.',
    ]

    @classmethod
    def weight_available(cls):
        # Each located character may move to any *other* location; clamp at
        # zero for the degenerate single-location case.
        return max(
            Character.objects.filter(location__isnull=False).count()
            * (Location.objects.count() - 1),
            0
        )

    @classmethod
    def get_kwargs(cls):
        character = Character.objects.filter(location__isnull=False).order_by(
            '?'
        )[0]
        location = Location.objects.exclude(id=character.location.id).order_by(
            '?'
        )[0]
        origin = character.location.name
        character.location = location
        character.save()
        return {
            'character': character.name,
            'origin': origin,
            'destination': location.name,
        }


class CharacterCreationAction(Action):
    """A not-yet-instantiated seed character appears at a random location."""

    VERBS = [
        '{character} arrived at {place}.',
        '{character} drove to {place}.',
        '{character} flew to {place}.',
        '{character} walked to {place}.',
        '{character} traveled to {place}.',
        '{character} materialized at {place}.',
        '{character} appeared at {place}.',
        '{character} was discovered at {place}.',
    ]

    @classmethod
    def weight_available(cls):
        # Needs at least one Location and at least one unused seed; the
        # weight is capped at 7 to keep this action from dominating.
        if Character.objects.count() <= len(CHARACTERS):
            return min([
                Location.objects.count(),
                7,
                len(CHARACTERS) - Character.objects.count()
            ])
        else:
            return 0

    @classmethod
    def get_kwargs(cls):
        character = create_from_seed_list(Character, CHARACTERS)
        character.location = Location.objects.order_by('?')[0]
        character.save()
        return {
            'character': character.name,
            'place': character.location.name,
        }


class DiscoveryAction(Action):
    """A not-yet-instantiated seed location comes into existence."""

    VERBS = [
        '{place} was discovered.',
        '{place} materialized.',
        '{place} appeared.'
    ]

    @classmethod
    def weight_available(cls):
        if len(LOCATIONS) > Location.objects.count():
            return 1
        else:
            return 0

    @classmethod
    def get_kwargs(cls):
        location = create_from_seed_list(Location, LOCATIONS)
        return {'place': location.name.title()}


class GroupCreationAction(Action):
    """A new seed group arises, influencing one random location."""

    VERBS = [
        'The {group} arose at {place}.',
        '{place} was the birthplace of {group}.',
    ]

    @classmethod
    def weight_available(cls):
        # A group must arise somewhere, so a Location is a prerequisite.
        if not Location.objects.exists():
            return 0
        if len(GROUPS) > Group.objects.count():
            return 1
        else:
            return 0

    @classmethod
    def get_kwargs(cls):
        group = create_from_seed_list(Group, GROUPS)
        location = Location.objects.order_by('?')[0]
        group.influences.add(location)
        return {'group': group.name, 'place': location.name}


class GroupSpreadAction(Action):
    """An existing group gains influence over one more location."""

    VERBS = [
        '{group} gained influence in {place}.',
        '{place} fell under the sway of {group}.',
    ]

    @classmethod
    def weight_available(cls):
        # Number of (group, location) pairs not yet in the influence table,
        # capped at 7.
        return min(
            7,
            Group.objects.count() * Location.objects.count()
            - Group.influences.through.objects.count(),
        )

    @classmethod
    def get_kwargs(cls):
        influence = create_from_through_table(
            Group.influences.through,
            'group',
            'location'
        )
        return {
            'group': influence.group.name,
            'place': influence.location.name
        }


class GroupDecayAction(Action):
    """An existing group loses influence over one location."""

    VERBS = [
        '{group} lost influence in {place}.',
        '{group} left {place}.',
    ]

    @classmethod
    def weight_available(cls):
        return Group.influences.through.objects.count()

    @classmethod
    def get_kwargs(cls):
        influence = Group.influences.through.objects.order_by('?')[0]
        influence.delete()
        # The deleted row's related objects are still readable in memory.
        return {
            'group': influence.group.name,
            'place': influence.location.name
        }


class JoinGroupAction(Action):
    """A character joins a group that has influence where the character is."""

    VERBS = [
        '{character} was inducted into {group}.',
        '{group} recruited {character}.',
        '{character} joined {group}.',
    ]

    @classmethod
    def find_possible_joins(cls):
        """Find possible Join-Group Locations

        Character must be in a place where the group has influence to join
        the group.
        """
        # NOTE(review): annotating with two Counts over different joins can
        # inflate the counts (cross-join); harmless here because only the
        # zero / non-zero distinction (num_*__gte=1) is used.
        annotated_locations = Location.objects.annotate(
            num_char=Count('character'),
            num_group=Count('group')
        )
        join_locations = set(annotated_locations.filter(
            num_char__gte=1,
            num_group__gte=1
        ))
        # Restrict join_locations if all characters there belong to all
        # groups at that location. Copy the set so that we don't edit the
        # set over which we iterate.
        for loc in set(join_locations):
            possible_characters = set(loc.character_set.all())
            location_groups = set(loc.group_set.all())
            for char in set(possible_characters):
                possible_groups = location_groups - set(char.group_set.all())
                if len(possible_groups) == 0:
                    possible_characters.remove(char)
            if len(possible_characters) == 0:
                join_locations.remove(loc)
        return join_locations

    @classmethod
    def weight_available(cls):
        return min(7, len(cls.find_possible_joins()))

    @classmethod
    def get_kwargs(cls):
        event_location = choice(list(cls.find_possible_joins()))
        # Re-filter characters at the chosen location: only those that do
        # not already belong to every group present there may join.
        possible_characters = set(event_location.character_set.all())
        location_groups = set(event_location.group_set.all())
        for char in set(possible_characters):
            possible_groups = location_groups - set(char.group_set.all())
            if len(possible_groups) == 0:
                possible_characters.remove(char)
        character = choice(list(possible_characters))
        group = choice(list(location_groups - set(character.group_set.all())))
        group.members.add(character)
        return {
            'character': character.name,
            'group': group.name
        }


class LeaveGroupAction(Action):
    """A random group membership is dissolved."""

    VERBS = [
        '{character} retired from {group}.',
        '{group} expelled {character}.',
        '{character} left {group}.'
    ]

    @classmethod
    def weight_available(cls):
        return min(
            7,
            Group.members.through.objects.count()
        )

    @classmethod
    def get_kwargs(cls):
        membership = Group.members.through.objects.order_by('?')[0]
        membership.delete()
        return {
            'character': membership.character.name,
            'group': membership.group.name
        }


# All action classes the tweet command may pick from, in no particular order.
ACTION_LIST = [
    CharacterCreationAction,
    DiscoveryAction,
    GroupCreationAction,
    GroupDecayAction,
    GroupSpreadAction,
    JoinGroupAction,
    LeaveGroupAction,
    TravelAction,
]
{"/story/tests/actions/test_group_creation.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_group_decay.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_travel.py": ["/story/actions.py", "/story/models.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/management/commands/tweet.py": ["/story/actions.py"], "/story/tests/actions/test_group_spread.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/action_test_case.py": ["/story/actions.py"], "/story/tests/actions/test_join_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_seed_data.py": ["/story/seed_data.py"], "/story/tests/factories.py": ["/story/models.py"], "/story/tests/actions/test_discovery.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/admin.py": ["/story/models.py"], "/story/tests/actions/test_leave_group.py": ["/story/actions.py", "/story/tests/actions/action_test_case.py"], "/story/tests/actions/test_character_creation.py": ["/story/actions.py", "/story/models.py", "/story/seed_data.py", "/story/tests/actions/action_test_case.py", "/story/tests/factories.py"], "/story/tests/test_pathological_dbs.py": ["/story/management/commands/tweet.py", "/story/models.py", "/story/seed_data.py"], "/story/actions.py": ["/story/models.py", "/story/seed_data.py"]}
73,985
bitsauce/RoadFollowing-ddpg
refs/heads/master
/utils.py
from collections import deque
import numpy as np
import tensorflow as tf
import types
import cv2
import scipy.signal


def preprocess_frame(frame):
    """Crop, grayscale and normalize an RGB frame to shape (84, 84, 1).

    Output values are floats in [0, 1].
    """
    frame = frame[:-12, 6:-6]  # Crop to 84x84
    # Standard luma weights for RGB -> grayscale conversion.
    frame = np.dot(frame[..., 0:3], [0.299, 0.587, 0.114])
    frame = frame / 255.0
    return np.expand_dims(frame, axis=-1)


class FrameStack():
    """Rolling stack of the most recent frames, stacked along the last axis."""

    def __init__(self, initial_frame, stack_size=4, preprocess_fn=None):
        """Fill the stack with `stack_size` copies of the initial frame.

        preprocess_fn, if given, is applied to every frame before stacking.
        """
        self.preprocess_fn = preprocess_fn
        self.frame_stack = deque(maxlen=stack_size)
        initial_frame = self._apply_preprocess(initial_frame)
        for _ in range(stack_size):
            self.frame_stack.append(initial_frame)
        self.state = np.stack(self.frame_stack, axis=-1)

    def _apply_preprocess(self, frame):
        # Apply the preprocessing function only when one was provided.
        return self.preprocess_fn(frame) if self.preprocess_fn else frame

    def add_frame(self, frame):
        """Push a new frame (oldest one is evicted by the deque's maxlen)."""
        # BUG FIX: previously called self.preprocess_fn(frame)
        # unconditionally, raising TypeError when preprocess_fn=None was
        # passed to __init__ (which __init__ explicitly allows).
        self.frame_stack.append(self._apply_preprocess(frame))
        self.state = np.stack(self.frame_stack, axis=-1)

    def get_state(self):
        """Return the current stacked state array."""
        return self.state


class VideoRecorder():
    """Thin cv2.VideoWriter wrapper writing RGB frames as an MPEG video."""

    def __init__(self, filename, frame_size):
        # cv2 expects (width, height); frame_size is (height, width, ...).
        self.video_writer = cv2.VideoWriter(
            filename, cv2.VideoWriter_fourcc(*"MPEG"), 30,
            (frame_size[1], frame_size[0]))

    def add_frame(self, frame):
        # cv2 stores frames as BGR, so convert from RGB first.
        self.video_writer.write(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))

    def release(self):
        self.video_writer.release()

    def __del__(self):
        # Releasing an already-released writer is a harmless no-op.
        self.release()


def build_mlp(x, hidden_sizes=(32,), activation=tf.tanh, output_activation=None):
    """Build a dense MLP; the last entry of hidden_sizes is the output width."""
    for h in hidden_sizes[:-1]:
        x = tf.layers.dense(x, units=h, activation=activation)
    return tf.layers.dense(x, units=hidden_sizes[-1], activation=output_activation)


def create_polyak_update_ops(source_scope, target_scope, polyak=0.995):
    """Return (polyak_update_op, assign_op) syncing target vars from source.

    polyak_update_op: tgt <- polyak * tgt + (1 - polyak) * src
    assign_op:        tgt <- src (hard copy, for initialization)
    """
    source_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=source_scope)
    target_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=target_scope)
    assert len(source_params) == len(target_params), "Source and target param count are different"
    for src, tgt in zip(source_params, target_params):
        assert src.shape == tgt.shape, "Source and target param shapes are different"
    polyak_update_op = tf.group([tgt.assign(polyak * tgt + (1.0 - polyak) * src)
                                 for src, tgt in zip(source_params, target_params)])
    assign_op = tf.group([tgt.assign(src) for src, tgt in zip(source_params, target_params)])
    return polyak_update_op, assign_op


def create_counter_variable(name):
    """Return a namespace with a non-trainable int counter `var` and `inc_op`."""
    counter = types.SimpleNamespace()
    counter.var = tf.Variable(0, name=name, trainable=False)
    counter.inc_op = tf.assign(counter.var, counter.var + 1)
    return counter


def create_mean_metrics_from_dict(metrics):
    """Merge {name: (value, update_op)} metrics into one summary + update op."""
    update_metrics_ops = []
    summaries = []
    for name, (value, update_op) in metrics.items():
        summaries.append(tf.summary.scalar(name, value))
        update_metrics_ops.append(update_op)
    return tf.summary.merge(summaries), tf.group(update_metrics_ops)


def clip_grad(optimizer, params, loss, grad_clip):
    """Apply gradients for `loss` w.r.t. `params`, element-clipped to +-grad_clip.

    NOTE(review): tf.clip_by_value fails on None gradients; assumes every
    param receives a gradient -- confirm for the losses used here.
    """
    gvs = optimizer.compute_gradients(loss, var_list=params)
    capped_gvs = [(tf.clip_by_value(grad, -grad_clip, grad_clip), var) for grad, var in gvs]
    return optimizer.apply_gradients(capped_gvs)


def discount(x, gamma):
    """Discounted cumulative sum along axis 0 (reversed IIR filter trick)."""
    return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]


def compute_v_and_adv(rewards, values, bootstrapped_value, gamma, lam=1.0):
    """Return (discounted returns, GAE advantages) for one trajectory segment."""
    rewards = np.array(rewards)
    values = np.array(list(values) + [bootstrapped_value])
    v = discount(np.array(list(rewards) + [bootstrapped_value]), gamma)[:-1]
    delta = rewards + gamma * values[1:] - values[:-1]  # TD residuals
    adv = discount(delta, gamma * lam)
    return v, adv


def compute_returns(rewards, bootstrap_value, terminals, gamma):
    """Return discounted returns, zeroing the bootstrap across terminals."""
    returns = []
    R = bootstrap_value
    for i in reversed(range(len(rewards))):
        R = rewards[i] + (1.0 - terminals[i]) * gamma * R
        returns.append(R)
    returns = reversed(returns)
    return np.array(list(returns))


def compute_gae(rewards, values, bootstrap_values, terminals, gamma, lam):
    """Generalized Advantage Estimation via a reversed linear filter.

    NOTE(review): unlike compute_gae_old, the terminal mask here is applied
    only to the TD residuals, not to the advantage propagation -- confirm
    this is intended for episodes that terminate mid-batch.
    """
    rewards = np.array(rewards)
    values = np.array(list(values) + [bootstrap_values])
    terminals = np.array(terminals)
    deltas = rewards + (1.0 - terminals) * gamma * values[1:] - values[:-1]
    return scipy.signal.lfilter([1], [1, -gamma * lam], deltas[::-1], axis=0)[::-1]


def compute_gae_old(rewards, values, bootstrap_value, terminals, gamma, lam):
    """Reference GAE implementation using an explicit backward loop."""
    values = np.array(list(values) + [bootstrap_value])
    last_gae_lam = 0
    advantages = np.zeros_like(rewards)
    for i in reversed(range(len(rewards))):
        delta = rewards[i] + gamma * values[i + 1] * (1.0 - terminals[i]) - values[i]
        advantages[i] = last_gae_lam = delta + gamma * lam * (1.0 - terminals[i]) * last_gae_lam
    return advantages
{"/vae/inspect_vae.py": ["/utils.py"], "/ddpg.py": ["/utils.py"], "/vae/collect_data.py": ["/utils.py"], "/train.py": ["/ddpg.py", "/vae/models.py", "/utils.py"]}
73,986
bitsauce/RoadFollowing-ddpg
refs/heads/master
/vae/inspect_vae.py
# Interactive tool to visualize what a trained VAE has learned.
# Two modes: with -reconstruct, drive the car env and compare live frames to
# their VAE reconstructions; otherwise, open a tkinter UI with one slider per
# latent dimension and render the decoded image as the sliders move.
from tkinter import *
from tkinter.ttk import *
import os, sys
# Make the repo root importable so `utils` and the env package resolve.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import numpy as np
import argparse
from PIL import Image, ImageTk
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from models import MlpVAE, ConvVAE

parser = argparse.ArgumentParser(description="Visualizes the features learned by the VAE")
parser.add_argument("-reconstruct", action="store_true")
parser.add_argument("--model_name", type=str, required=True)
parser.add_argument("--models_dir", type=str, default=".")
parser.add_argument("--model_type", type=str, default="mlp")
parser.add_argument("--z_dim", type=int, default=10)
args = parser.parse_args()

# Fixed input shape matching preprocess_frame's output (84x84 grayscale).
input_shape = (84, 84, 1)

# Select the VAE architecture by name.
if args.model_type == "cnn":
    VAEClass = ConvVAE
elif args.model_type == "mlp":
    VAEClass = MlpVAE
else:
    raise Exception("No model type \"{}\"".format(args.model_type))
vae = VAEClass(input_shape,
               z_dim=args.z_dim,
               model_name=args.model_name,
               models_dir=args.models_dir,
               training=False)
vae.init_session(init_logging=False)
# NOTE(review): failure to load a checkpoint only prints a warning; the
# script then proceeds with untrained weights -- confirm this is intended.
if not vae.load_latest_checkpoint():
    print("Failed to load latest checkpoint for model \"{}\"".format(args.model_name))

if args.reconstruct:
    # Live reconstruction mode: deferred imports because the environment and
    # pyglet are only needed here.
    from RoadFollowingEnv.car_racing import RoadFollowingEnv
    from utils import preprocess_frame
    from pyglet.window import key

    def make_env(title=None, frame_skip=0):
        # Env states are preprocessed frames so they match the VAE's input.
        env = RoadFollowingEnv(title=title,
                               encode_state_fn=lambda x: preprocess_frame(x.frame),
                               throttle_scale=0.1,
                               max_speed=30.0,
                               frame_skip=frame_skip)
        return env

    env = make_env()
    action = np.zeros(env.action_space.shape[0])
    restart = False

    # Keyboard handlers mutate the shared `action` array in place.
    def key_press(k, mod):
        global restart
        if k==0xff0d: restart = True  # Enter key restarts the episode
        if k==key.LEFT: action[0] = -1.0
        if k==key.RIGHT: action[0] = +1.0
        if k==key.UP: action[1] = +1.0
        if k==key.DOWN: action[1] = -1.0

    def key_release(k, mod):
        # Only clear steering if the released key is the one that set it.
        if k==key.LEFT and action[0]==-1.0: action[0] = 0
        if k==key.RIGHT and action[0]==+1.0: action[0] = 0
        if k==key.UP: action[1] = 0
        if k==key.DOWN: action[1] = 0

    # Render once to create the viewer window before attaching handlers.
    env.render()
    env.viewer.window.on_key_press = key_press
    env.viewer.window.on_key_release = key_release
    done = True

    def update(*args):
        """Animation callback: step the env, show frame vs. reconstruction."""
        global done, restart, action, env, im1, im2
        if done or restart: env.reset()
        s, r, done, info = env.step(action)
        env.render(mode="human")
        reconstruted_state = vae.reconstruct([s])
        im1.set_array(s[:, :, 0])
        im2.set_array(reconstruted_state[0].reshape(84, 84))
        return im1, im2

    # Side-by-side: live (left) vs. reconstructed (right) grayscale frames.
    fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
    im1 = ax1.imshow(np.zeros((84,84)), cmap="gray", animated=True, vmin=0, vmax=1)
    im2 = ax2.imshow(np.zeros((84,84)), cmap="gray", animated=True, vmin=0, vmax=1)
    ani = animation.FuncAnimation(fig, update, interval=16, blit=True)
    plt.show()
    env.close()
else:
    class UI():
        """tkinter window with a decoded-image preview and one slider per z."""

        def __init__(self, z_dim, generate_fn, slider_range=3, image_scale=4):
            # Setup tkinter window
            self.window = Tk()
            self.window.title("VAE Inspector")
            self.window.style = Style()
            self.window.style.theme_use("clam") # ('clam', 'alt', 'default', 'classic')

            # Setup image
            self.image = Label(self.window)
            self.image.pack(side=LEFT, padx=50, pady=20)
            self.image_scale = image_scale
            self.update_image(np.ones((84, 84)) * 127)  # gray placeholder
            self.generate_fn = generate_fn

            # Setup sliders for latent vector z
            slider_frames = []
            self.z_vars = [DoubleVar() for _ in range(z_dim)]
            for i in range(z_dim):
                # On slider change event. Factory binds i/z_i/label per
                # iteration, avoiding the late-binding closure pitfall.
                def create_slider_event(i, z_i, label):
                    def event(_=None, generate=True):
                        label.configure(text="z[{}]={}{:.2f}".format(i, "" if z_i.get() < 0 else " ", z_i.get()))
                        if generate:
                            self.generate_fn(np.array([z_i.get() for z_i in self.z_vars]))
                    return event
                # Start a new column of sliders every 20 entries.
                if i % 20 == 0:
                    sliders_frame = Frame(self.window)
                    slider_frames.append(sliders_frame)
                # Create widgets
                inner_frame = Frame(sliders_frame) # Frame for side-by-side label and slider layout
                label = Label(inner_frame, font="TkFixedFont")
                # Create event function
                on_value_changed = create_slider_event(i, self.z_vars[i], label)
                on_value_changed(generate=False) # Call once to set label text
                # Create slider
                slider = Scale(inner_frame, value=0.0, variable=self.z_vars[i],
                               orient=HORIZONTAL, length=200,
                               from_=-slider_range, to=slider_range,
                               command=on_value_changed)
                # Pack
                slider.pack(side=RIGHT, pady=10)
                label.pack(side=LEFT, padx=10)
                inner_frame.pack(side=TOP)
            for f in reversed(slider_frames):
                f.pack(side=RIGHT, padx=20, pady=20)

        def update_image(self, image_array):
            """Display a (84, 84) array, upscaled with nearest-neighbor."""
            image_size = (84 * self.image_scale, 84 * self.image_scale)
            pil_image = Image.fromarray(image_array).resize(image_size, resample=Image.NEAREST)
            # Keep a reference so tkinter's image is not garbage-collected.
            self.tkimage = ImageTk.PhotoImage(image=pil_image)
            self.image.configure(image=self.tkimage)

        def mainloop(self):
            """Render the initial z vector, then hand control to tkinter."""
            self.generate_fn(np.array([z_i.get() for z_i in self.z_vars]))
            self.window.mainloop()

    def generate(z):
        # Decode z to an image in [0, 255] and show its single channel.
        generated_image = vae.generate_from_latent([z])[0] * 255
        ui.update_image(generated_image.reshape(input_shape)[:, :, 0])

    # vae.sample.shape[1] is the latent dimensionality of the loaded model.
    ui = UI(vae.sample.shape[1], generate, slider_range=10)
    ui.mainloop()
{"/vae/inspect_vae.py": ["/utils.py"], "/ddpg.py": ["/utils.py"], "/vae/collect_data.py": ["/utils.py"], "/train.py": ["/ddpg.py", "/vae/models.py", "/utils.py"]}
73,987
bitsauce/RoadFollowing-ddpg
refs/heads/master
/ddpg.py
import os
import re
import shutil
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from utils import build_mlp, create_polyak_update_ops, create_counter_variable, create_mean_metrics_from_dict, clip_grad

class DDPG():
    """
    Deep deterministic policy gradient implementation (TF1 graph mode).

    Builds actor/critic MLPs plus polyak-averaged target copies, an
    importance-weighted MSBE critic loss and the DDPG policy loss, with
    TensorBoard metrics/summaries and checkpointing under `output_dir`.
    """

    def __init__(self, input_shape, action_space, action_noise,
                 initial_actor_lr=3e-4, initial_critic_lr=3e-4,
                 lr_decay=0.998, discount_factor=0.99, polyak=0.995,
                 grad_norm=5e-3, output_dir="./"):
        """
        input_shape (tuple):
            Shape of a single observation, e.g. (width, height, depth)
        action_space (gym.Box):
            A class where:
            action_space.low = Array of minimum possible value for each action
            action_space.high = Array of maximum possible value for each action
            action_space.shape = Number of actions in the action space
        action_noise (callable):
            Exploration noise; called as action_noise() and reset() per episode
        initial_actor_lr / initial_critic_lr (float):
            Starting learning rates, decayed by lr_decay each episode
        discount_factor (float): gamma for the Bellman target
        polyak (float): target-network averaging coefficient
        grad_norm (float): element-wise gradient clipping bound
        output_dir (string):
            Directory receiving checkpoints/, logs/ and videos/
        """
        # Verify action space
        assert len(action_space.shape) == 1, "The implementation supports only 1D action, continous spaces"
        num_actions = action_space.shape[0]
        self.action_noise = action_noise
        self.action_space = action_space

        # Create counters (persisted in the graph, so they survive restores)
        self.train_step_counter = create_counter_variable(name="train_step_counter")
        self.predict_step_counter = create_counter_variable(name="predict_step_counter")
        self.episode_counter = create_counter_variable(name="episode_counter")

        # Create placeholders
        self.input_states = tf.placeholder(shape=(None, *input_shape), dtype=tf.float32, name="input_state_placeholder")           # s
        self.input_states_next = tf.placeholder(shape=(None, *input_shape), dtype=tf.float32, name="input_next_state_placeholder") # s'
        self.taken_actions = tf.placeholder(shape=(None, num_actions), dtype=tf.float32, name="taken_action_placeholder")          # a
        self.rewards = tf.placeholder(shape=(None,), dtype=tf.float32, name="rewards_placeholder")                                 # r
        self.terminals = tf.placeholder(shape=(None,), dtype=tf.float32, name="terminals_placeholder")                             # d
        self.is_weights = tf.placeholder(shape=(None,), dtype=tf.float32, name="is_weights_placeholder")                           # w

        states = self.input_states
        states_next = self.input_states_next
        hidden_sizes=(500,300)

        # Disabled RDN (random network distillation) intrinsic-reward
        # experiment; kept for reference.
        #with tf.variable_scope("rdn_target"):
        #    target = build_mlp(states_next, [500, 300], tf.nn.leaky_relu, tf.nn.sigmoid, trainable=False)
        #with tf.variable_scope("rdn"):
        #    prediction = build_mlp(states_next, [10, 300], tf.nn.leaky_relu, tf.nn.sigmoid, trainable=True)
        #self.rdn_diff = tf.reduce_mean((target - prediction)**2, axis=-1)
        #self.rdn_diff_norm = tf.clip_by_value(self.rdn_diff / 1e-4, 0, 1.0)
        #intrinsic_reward = self.rdn_diff_norm
        #self.rdn_loss = tf.reduce_mean(self.rdn_diff)

        with tf.variable_scope("main"):
            # μ(s; θ), Q(s, a; ϕ), Q(s, μ(s; θ); ϕ)
            self.actor_mean, self.Q_value, self.Q_value_of_actor = self.build_mlp_actor_critic(states, self.taken_actions, action_space, hidden_sizes=hidden_sizes)
        with tf.variable_scope("target"):
            # μ(s'; θ_{targ}), _, Q(s', μ(s'; θ_{targ}); ϕ_{targ})
            self.target_actor_mean, _, self.Q_target_value = self.build_mlp_actor_critic(states_next, self.taken_actions, action_space, hidden_sizes=hidden_sizes)

        # Create polyak update ops
        self.update_target_params_op, init_target_params_op = create_polyak_update_ops("main/", "target/", polyak=polyak)

        # Critic (MSBE) loss = min_θ mse(Q(s, a), r + gamma * Q(s', μ(s'; θ_{targ}); ϕ_{targ}))
        self.total_reward = self.rewards# + intrinsic_reward
        # stop_gradient: the Bellman target is treated as a constant.
        self.Q_target = tf.stop_gradient(self.total_reward + discount_factor * (1.0 - self.terminals) * self.Q_target_value)
        self.Q_delta = self.Q_value - self.Q_target
        # is_weights are per-sample importance-sampling weights (e.g. from a
        # prioritized replay buffer).
        self.critic_loss = tf.reduce_mean((self.Q_delta)**2 * self.is_weights)

        # Policy loss = max_θ Σ Q(s, μ(s; θ); ϕ)
        self.actor_loss = -tf.reduce_mean(self.Q_value_of_actor * self.is_weights)

        # Minimize loss (actor and critic are optimized separately over
        # their own variable scopes)
        actor_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="main/pi/")
        critic_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="main/q/")
        #rdn_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="rdn/")
        # Learning rates decay once per episode (episode_counter drives them)
        actor_lr = tf.train.exponential_decay(initial_actor_lr, self.episode_counter.var, 1, lr_decay, staircase=True)
        critic_lr = tf.train.exponential_decay(initial_critic_lr, self.episode_counter.var, 1, lr_decay, staircase=True)
        self.train_actor_op = clip_grad(tf.train.AdamOptimizer(learning_rate=actor_lr, epsilon=1e-5), actor_params, self.actor_loss, grad_norm)
        self.train_critic_op = clip_grad(tf.train.AdamOptimizer(learning_rate=critic_lr, epsilon=1e-5), critic_params, self.critic_loss, grad_norm)
        #self.train_rdn_op = clip_grad(tf.train.AdamOptimizer(learning_rate=0.0001), rdn_params, self.rdn_loss, grad_norm)

        # Create session
        self.sess = tf.Session()

        # Set up critic metrics (streaming means, reset per episode via
        # local_variables_initializer in write_episodic_summaries)
        metrics = {}
        metrics["losses/episodic/critic"] = tf.metrics.mean(self.critic_loss)
        #metrics["losses/episodic/rdn"] = tf.metrics.mean(self.rdn_loss)
        for i in range(num_actions):
            metrics["actor.train/episodic/action_{}/taken_actions".format(i)] = tf.metrics.mean(tf.reduce_mean(self.taken_actions[:, i]))
            metrics["actor.train/episodic/action_{}/mean".format(i)] = tf.metrics.mean(tf.reduce_mean(self.actor_mean[:, i]))
        metrics["critic.train/episodic/Q_value"] = tf.metrics.mean(self.Q_value)
        metrics["critic.train/episodic/Q_target"] = tf.metrics.mean(self.Q_target)
        metrics["critic.train/episodic/Q_delta"] = tf.metrics.mean(self.Q_delta)
        self.episodic_critic_summaries, self.update_critic_metrics_op = create_mean_metrics_from_dict(metrics)

        # Set up actor metrics
        metrics = {}
        metrics["losses/episodic/actor"] = tf.metrics.mean(self.actor_loss)
        self.episodic_actor_summaries, self.update_actor_metrics_op = create_mean_metrics_from_dict(metrics)

        # Set up stepwise summaries
        summaries = []
        summaries.append(tf.summary.scalar("train/critic_lr", critic_lr))
        summaries.append(tf.summary.histogram("train/input_states", states))
        summaries.append(tf.summary.histogram("train/input_states_next", states_next))
        #summaries.append(tf.summary.histogram("train/rdn_diff_norm", self.rdn_diff_norm))
        #summaries.append(tf.summary.histogram("train/rdn_diff", self.rdn_diff))
        self.critic_stepwise_summaries_op = tf.summary.merge(summaries)
        summaries = []
        summaries.append(tf.summary.scalar("train/actor_lr", actor_lr))
        self.actor_stepwise_summaries_op = tf.summary.merge(summaries)
        summaries = []
        for i in range(num_actions):
            summaries.append(tf.summary.scalar("actor.predict/action_{}/mean".format(i), self.actor_mean[0, i]))
        self.prediction_summaries = tf.summary.merge(summaries)

        # Run variable initializers
        self.sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])

        # Initialize θ_{targ} <- θ and ϕ_{targ} <- ϕ
        self.sess.run(init_target_params_op)

        # Set up model saver and dirs
        self.output_dir = output_dir
        self.saver = tf.train.Saver()
        self.model_dir = "{}/checkpoints/".format(self.output_dir)
        self.log_dir = "{}/logs/".format(self.output_dir)
        self.video_dir = "{}/videos/".format(self.output_dir)
        self.dirs = [self.model_dir, self.log_dir, self.video_dir]
        for d in self.dirs: os.makedirs(d, exist_ok=True)

    def build_mlp_actor_critic(self, input_states, taken_actions, action_space, hidden_sizes=(300,), activation=tf.nn.leaky_relu, output_activation=tf.sigmoid):
        """Build actor and critic heads; returns (pi, q, q_pi) tensors."""
        # Actor branch
        with tf.variable_scope("pi"):
            # μ(s; θ) = mlp(s)
            pi_normalized = build_mlp(input_states, list(hidden_sizes) + list(action_space.shape), activation, output_activation)
            pi = action_space.low + pi_normalized * (action_space.high - action_space.low) # Scale μ to action space
        # Critic brach
        with tf.variable_scope("q"):
            # Q(s, a; ϕ) = mlp(concat(s, a))
            q = tf.squeeze(build_mlp(tf.concat([input_states, taken_actions], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1)
        # Critic on actor branch (reuse=True shares the critic's weights)
        with tf.variable_scope("q", reuse=True):
            # Q(s, μ(s; θ); ϕ) = mlp(concat(s, μ(s; θ)))
            q_pi = tf.squeeze(build_mlp(tf.concat([input_states, pi], axis=-1), list(hidden_sizes) + [1], activation, None), axis=1)
        return pi, q, q_pi

    def init_logging(self):
        """Create the TensorBoard writer; call before any summary writes."""
        self.train_writer = tf.summary.FileWriter(self.log_dir, self.sess.graph)

    def load_latest_checkpoint(self):
        """Restore the newest checkpoint; True on success.

        NOTE(review): falls through returning None (falsy) when no
        checkpoint exists or restore fails -- callers must treat the
        result as a plain truthy/falsy value.
        """
        model_checkpoint = tf.train.latest_checkpoint(self.model_dir)
        if model_checkpoint:
            try:
                self.saver.restore(self.sess, model_checkpoint)
                print("Model checkpoint restored from {}".format(model_checkpoint))
                return True
            except:
                return False

    def save(self):
        """Write a checkpoint, numbered by the current episode counter."""
        model_checkpoint = os.path.join(self.model_dir, "model.ckpt")
        self.saver.save(self.sess, model_checkpoint, global_step=self.episode_counter.var)
        print("Model checkpoint saved to {}".format(model_checkpoint))

    def train(self, input_states, taken_actions, rewards, input_states_next, terminals, w):
        """One gradient step for critic then actor; returns per-sample TD deltas.

        w: importance-sampling weights matching the batch (all-ones for a
        uniform replay buffer).
        """
        feed_dict = {
            self.input_states: input_states,
            self.input_states_next: input_states_next,
            self.taken_actions: taken_actions,
            self.rewards: rewards,
            self.terminals: terminals,
            self.is_weights: w
        }
        # Train critic
        _, _, critic_summary, deltas = self.sess.run([self.train_critic_op, self.update_critic_metrics_op, self.critic_stepwise_summaries_op, self.Q_delta], feed_dict=feed_dict)
        # Train actor
        actor_summary = self.sess.run([self.train_actor_op, self.update_actor_metrics_op, self.actor_stepwise_summaries_op], feed_dict=feed_dict)[-1]
        # Train rnd pred
        #self.sess.run(self.train_rdn_op, feed_dict=feed_dict)
        # Update target networks (polyak averaging)
        self.sess.run(self.update_target_params_op)
        # Write to summary
        step_idx = self.sess.run(self.train_step_counter.var)
        self.train_writer.add_summary(critic_summary, step_idx)
        self.train_writer.add_summary(actor_summary, step_idx)
        self.sess.run(self.train_step_counter.inc_op) # Inc step counter
        return deltas

    def predict(self, input_states, greedy=False, write_to_summary=False):
        """Return (action, Q_value) for a batch of states.

        Returns μ(s; θ) if greedy, else μ(s; θ) plus exploration noise,
        clipped to the action space bounds.
        """
        action, summary, Q_value = self.sess.run([self.actor_mean, self.prediction_summaries, self.Q_value_of_actor], feed_dict={
            self.input_states: input_states
        })
        if not greedy:
            action = np.clip(action + self.action_noise(), self.action_space.low, self.action_space.high)
        if write_to_summary:
            self.train_writer.add_summary(summary, self.get_predict_step_counter())
            self.sess.run(self.predict_step_counter.inc_op)
        return action, Q_value

    def encode(self, vae_input):
        """Encode observations to VAE latent means.

        NOTE(review): self.vae is never assigned anywhere in this class --
        this method only works if a caller sets ddpg.vae externally; confirm
        against the training script.
        """
        return self.sess.run(self.vae.mean, feed_dict={
            self.vae.input_states: vae_input
        })

    def get_episode_counter(self):
        """Current episode index as a Python int."""
        return self.sess.run(self.episode_counter.var)

    def get_predict_step_counter(self):
        """Current predict-step index as a Python int."""
        return self.sess.run(self.predict_step_counter.var)

    def write_value_to_summary(self, summary_name, value, step):
        """Write a single scalar to TensorBoard outside the graph."""
        summary = tf.Summary()
        summary.value.add(tag=summary_name, simple_value=value)
        self.train_writer.add_summary(summary, step)

    def write_dict_to_summary(self, summary_name, params, step):
        """Write a {key: value} dict as a TensorBoard text table."""
        summary_op = tf.summary.text(summary_name, tf.stack([tf.convert_to_tensor([k, str(v)]) for k, v in params.items()]))
        self.train_writer.add_summary(self.sess.run(summary_op), step)

    def write_episodic_summaries(self, episode_idx):
        """Flush episodic metrics, reset them, advance the episode counter.

        Also resets the exploration-noise process for the next episode.
        """
        self.train_writer.add_summary(self.sess.run(self.episodic_critic_summaries), episode_idx)
        self.train_writer.add_summary(self.sess.run(self.episodic_actor_summaries), episode_idx)
        # local_variables_initializer resets the tf.metrics streaming state.
        self.sess.run([self.episode_counter.inc_op, tf.local_variables_initializer()])
        self.action_noise.reset()
{"/vae/inspect_vae.py": ["/utils.py"], "/ddpg.py": ["/utils.py"], "/vae/collect_data.py": ["/utils.py"], "/train.py": ["/ddpg.py", "/vae/models.py", "/utils.py"]}
73,988
bitsauce/RoadFollowing-ddpg
refs/heads/master
/vae/collect_data.py
"""Collect preprocessed frames from the RoadFollowing environment for VAE training.

Drive the car manually with the arrow keys while frames are recorded;
press Enter to restart an episode. The collected frames are written to
--output as a gzip-compressed pickled numpy array of shape (N, ...).
"""
import argparse
import gzip
import pickle
import sys
import os

# Make the repository root importable so `utils` and `RoadFollowingEnv`
# resolve when this script is run from inside the vae/ directory.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import numpy as np

from RoadFollowingEnv.car_racing import RoadFollowingEnv
from utils import preprocess_frame

parser = argparse.ArgumentParser(description="Script to collect data for VAE training")
parser.add_argument("--output", type=str, required=True)
parser.add_argument("--num_images", type=int, default=10000)
parser.add_argument("-append", action="store_true", help="Append data onto existing dataset")
args = parser.parse_args()

if args.append:
    # Continue an existing dataset: load the previously collected frames.
    with gzip.open(args.output, "rb") as f:
        images = list(pickle.load(f))
else:
    images = []


def make_env(title=None, frame_skip=0):
    """Create a RoadFollowingEnv whose observations are preprocessed frames."""
    env = RoadFollowingEnv(title=title,
                           encode_state_fn=lambda x: preprocess_frame(x.frame),
                           throttle_scale=0.1,
                           max_speed=30.0,
                           frame_skip=frame_skip)
    return env


if __name__ == "__main__":
    from pyglet.window import key

    env = make_env()
    # a[0] = steering, a[1] = throttle — presumably; inferred from the key
    # bindings below, TODO confirm against RoadFollowingEnv's action space.
    a = np.zeros(env.action_space.shape[0])

    def key_press(k, mod):
        global restart
        if k == 0xff0d: restart = True  # Enter key
        if k == key.LEFT: a[0] = -1.0
        if k == key.RIGHT: a[0] = +1.0
        if k == key.UP: a[1] = +1.0
        if k == key.DOWN: a[1] = -1.0

    def key_release(k, mod):
        # Only clear steering if the released key is the one currently applied.
        if k == key.LEFT and a[0] == -1.0: a[0] = 0
        if k == key.RIGHT and a[0] == +1.0: a[0] = 0
        if k == key.UP: a[1] = 0
        if k == key.DOWN: a[1] = 0

    env.render()
    env.viewer.window.on_key_press = key_press
    env.viewer.window.on_key_release = key_release

    # BUG FIX: `args.num_images // 100` is 0 when fewer than 100 images are
    # requested, which made the progress check below raise ZeroDivisionError.
    progress_step = max(1, args.num_images // 100)

    while len(images) < args.num_images:
        env.reset()
        restart = False
        while len(images) < args.num_images:
            if len(images) % progress_step == 0:
                print("{}%".format(int(len(images) / args.num_images * 100)))
            s, r, done, info = env.step(a)
            env.render(mode="human")
            images.append(s)
            if done or restart:
                break
    env.close()

    with gzip.open(args.output, "wb") as f:
        pickle.dump(np.stack(images, axis=0), f)
{"/vae/inspect_vae.py": ["/utils.py"], "/ddpg.py": ["/utils.py"], "/vae/collect_data.py": ["/utils.py"], "/train.py": ["/ddpg.py", "/vae/models.py", "/utils.py"]}
73,989
bitsauce/RoadFollowing-ddpg
refs/heads/master
/vae/models.py
"""Variational autoencoder models (TF1 graph-mode) with conv and MLP variants."""
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
import os


def kl_divergence(mean, logstd_sq, name="kl_divergence"):
    """Per-sample KL(q(z|x) || N(0, I)), summed over the latent dimensions."""
    with tf.variable_scope(name):
        return -0.5 * tf.reduce_sum(1.0 + logstd_sq - tf.square(mean) - tf.exp(logstd_sq), axis=1)


# All three loss functions share the (labels, logits, targets) signature so
# they can be passed interchangeably as `loss_fn` to VAE.__init__; each one
# uses only the arguments it needs.
def bce_loss(labels, logits, targets):
    """Sigmoid cross-entropy on logits; `targets` is unused here."""
    return tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)


def bce_loss_v2(labels, logits, targets, epsilon=1e-10):
    """Binary cross-entropy computed on the sigmoid outputs; epsilon avoids log(0)."""
    with tf.variable_scope("bce"):
        return -(labels * tf.log(epsilon + targets) + (1 - labels) * tf.log(epsilon + 1 - targets))


def mse_loss(labels, logits, targets):
    """Element-wise squared error on the sigmoid outputs; `logits` is unused."""
    return (labels - targets)**2


class VAE():
    """Generic VAE: encoder/decoder builders are injected by subclasses.

    Inputs are expected in [0, 1] (enforced by a tf.Assert on the graph).
    In training mode inputs are randomly flipped left/right and z is sampled;
    in inference mode z is the posterior mean.
    """

    def __init__(self, input_shape, build_encoder_fn, build_decoder_fn,
                 z_dim=512, beta=1.0, learning_rate=1e-4, kl_tolerance=0.5,
                 model_name="vae", models_dir=".", loss_fn=bce_loss,
                 training=True, reuse=tf.AUTO_REUSE, **kwargs):
        # Create vae
        self.input_shape = input_shape
        self.z_dim = z_dim
        self.beta = beta
        self.kl_tolerance = kl_tolerance
        with tf.variable_scope("vae", reuse=reuse):
            # Get and verify input: assert all pixel values lie in [0, 1].
            self.input_states = tf.placeholder(shape=(None, *input_shape), dtype=tf.float32, name="input_state_placeholder")
            verify_input_op = tf.Assert(tf.reduce_all(tf.logical_and(self.input_states >= 0, self.input_states <= 1)),
                                        ["min=", tf.reduce_min(self.input_states), "max=", tf.reduce_max(self.input_states)],
                                        name="verify_input")
            with tf.control_dependencies([verify_input_op]):
                if training:
                    # Cheap data augmentation at train time.
                    self.input_states = tf.image.random_flip_left_right(self.input_states)
                else:
                    # Identity op keeps the assert as a control dependency.
                    self.input_states = tf.multiply(self.input_states, 1, name="input_state_identity")
            # Encode image
            with tf.variable_scope("encoder", reuse=False):
                encoded = build_encoder_fn(self.input_states)
            # Get encoded mean and std (the "logstd_sqare" name is kept as-is:
            # it is baked into existing checkpoints).
            self.mean = tf.layers.dense(encoded, z_dim, activation=None, name="mean")
            self.logstd_sq = tf.layers.dense(encoded, z_dim, activation=None, name="logstd_sqare")
            # Sample normal distribution; exp(0.5*logvar) converts log-variance
            # to standard deviation.
            self.normal = tfp.distributions.Normal(self.mean, tf.exp(0.5 * self.logstd_sq), validate_args=True)
            if training:
                self.sample = tf.squeeze(self.normal.sample(1), axis=0)
            else:
                self.sample = self.mean
            # Decode random sample
            with tf.variable_scope("decoder", reuse=False):
                decoded = build_decoder_fn(self.sample)
            # Reconstruct image
            self.reconstructed_logits = tf.layers.flatten(decoded, name="reconstructed_logits")
            self.reconstructed_states = tf.nn.sigmoid(self.reconstructed_logits, name="reconstructed_states")
            # Reconstruction loss: summed over pixels, averaged over the batch.
            self.flattened_input = tf.layers.flatten(self.input_states, name="flattened_input")
            self.reconstruction_loss = tf.reduce_mean(
                tf.reduce_sum(
                    loss_fn(labels=self.flattened_input, logits=self.reconstructed_logits, targets=self.reconstructed_states),
                    axis=1
                )
            )
            # KL divergence loss; the tf.maximum clamp below looks like the
            # "free bits" trick (minimum KL per sample) — TODO confirm intent.
            self.kl_loss = kl_divergence(self.mean, self.logstd_sq, name="kl_divergence")
            if self.kl_tolerance > 0:
                self.kl_loss = tf.maximum(self.kl_loss, self.kl_tolerance * self.z_dim)
            self.kl_loss = tf.reduce_mean(self.kl_loss)
            # Total loss (beta-VAE weighting of the KL term).
            self.loss = self.reconstruction_loss + self.beta * self.kl_loss
        # Set model dirs
        self.model_name = model_name
        self.models_dir = os.path.join(models_dir, "models", model_name)
        self.log_dir = os.path.join(models_dir, "logs", model_name)
        self.dirs = [self.models_dir, self.log_dir]
        # Epoch variable (persisted in checkpoints via the Saver below).
        self.step_idx = tf.Variable(0, name="step_idx", trainable=False)
        self.inc_step_idx = tf.assign(self.step_idx, self.step_idx + 1)
        # Create optimizer
        self.saver = tf.train.Saver()
        if training:
            # Summary: running means of the two loss components, reset per epoch
            # by tf.local_variables_initializer() in train_one_epoch/evaluate.
            self.mean_kl_loss, self.update_mean_kl_loss = tf.metrics.mean(self.kl_loss)
            self.mean_reconstruction_loss, self.update_mean_reconstruction_loss = tf.metrics.mean(self.reconstruction_loss)
            self.merge_summary = tf.summary.merge([
                tf.summary.scalar("kl_loss", self.mean_kl_loss),
                tf.summary.scalar("reconstruction_loss", self.mean_reconstruction_loss)
            ])
            # Create optimizer
            self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
            self.train_step = self.optimizer.minimize(self.loss)
        # NOTE(review): placement relative to `if training:` is ambiguous in
        # the source layout; directories are assumed to be needed regardless.
        for d in self.dirs:
            os.makedirs(d, exist_ok=True)

    def init_session(self, sess=None, init_logging=True):
        """Attach (or create) a TF session; optionally open summary writers."""
        if sess is None:
            self.sess = tf.Session()
            self.sess.run(tf.global_variables_initializer())
        else:
            self.sess = sess
        if init_logging:
            self.train_writer = tf.summary.FileWriter(os.path.join(self.log_dir, "train"), self.sess.graph)
            self.val_writer = tf.summary.FileWriter(os.path.join(self.log_dir, "val"), self.sess.graph)

    def generate_from_latent(self, z):
        """Decode latent vectors z directly into reconstructed images."""
        return self.sess.run(self.reconstructed_states, feed_dict={self.sample: z})

    def reconstruct(self, input_states):
        """Encode then decode a batch of input images."""
        return self.sess.run(self.reconstructed_states, feed_dict={self.input_states: input_states})

    def encode(self, input_states):
        """Return the posterior mean mu(x) for a batch of input images."""
        return self.sess.run(self.mean, feed_dict={self.input_states: input_states})

    def save(self):
        """Write a checkpoint tagged with the current step index."""
        model_checkpoint = os.path.join(self.models_dir, "model.ckpt")
        self.saver.save(self.sess, model_checkpoint, global_step=self.step_idx)
        print("Model checkpoint saved to {}".format(model_checkpoint))

    def load_latest_checkpoint(self):
        """Restore the newest checkpoint; return True on success, falsy otherwise."""
        # Load checkpoint
        model_checkpoint = tf.train.latest_checkpoint(self.models_dir)
        if model_checkpoint:
            try:
                self.saver.restore(self.sess, model_checkpoint)
                print("Model checkpoint restored from {}".format(model_checkpoint))
                return True
            except Exception as e:
                print(e)
        return False

    def get_step_idx(self):
        """Current value of the persisted step/epoch counter."""
        return tf.train.global_step(self.sess, self.step_idx)

    def train_one_epoch(self, train_images, batch_size):
        """One pass over train_images. NOTE: shuffles train_images in place."""
        np.random.shuffle(train_images)
        # Reset the tf.metrics running means for this epoch.
        self.sess.run(tf.local_variables_initializer())
        for i in range(train_images.shape[0] // batch_size):
            self.sess.run([self.train_step, self.update_mean_kl_loss, self.update_mean_reconstruction_loss],
                          feed_dict={self.input_states: train_images[i*batch_size:(i+1)*batch_size]})
        self.train_writer.add_summary(self.sess.run(self.merge_summary), self.get_step_idx())
        self.sess.run(self.inc_step_idx)

    def evaluate(self, val_images, batch_size):
        """Run metrics over val_images; returns [reconstruction_loss, kl_loss]."""
        self.sess.run(tf.local_variables_initializer())
        for i in range(val_images.shape[0] // batch_size):
            self.sess.run([self.update_mean_kl_loss, self.update_mean_reconstruction_loss],
                          feed_dict={self.input_states: val_images[i*batch_size:(i+1)*batch_size]})
        self.val_writer.add_summary(self.sess.run(self.merge_summary), self.get_step_idx())
        return self.sess.run([self.mean_reconstruction_loss, self.mean_kl_loss])


class ConvVAE(VAE):
    """VAE with a 4-layer conv encoder and a 4-layer transposed-conv decoder."""

    def __init__(self, input_shape, **kwargs):
        def build_encoder(x):
            x = tf.layers.conv2d(x, filters=32, kernel_size=4, strides=2, activation=tf.nn.relu, padding="valid", name="conv1")
            x = tf.layers.conv2d(x, filters=64, kernel_size=4, strides=2, activation=tf.nn.relu, padding="valid", name="conv2")
            x = tf.layers.conv2d(x, filters=128, kernel_size=4, strides=2, activation=tf.nn.relu, padding="valid", name="conv3")
            x = tf.layers.conv2d(x, filters=256, kernel_size=4, strides=2, activation=tf.nn.relu, padding="valid", name="conv4")
            # Remember the pre-flatten shape so the decoder can mirror it.
            self.encoded_shape = x.shape[1:]
            x = tf.layers.flatten(x, name="flatten")
            return x

        def build_decoder(z):
            x = tf.layers.dense(z, np.prod(self.encoded_shape), activation=None, name="dense1")
            x = tf.reshape(x, (-1, *self.encoded_shape))
            x = tf.layers.conv2d_transpose(x, filters=128, kernel_size=4, strides=2, activation=tf.nn.relu, padding="valid", name="deconv1")
            x = tf.layers.conv2d_transpose(x, filters=64, kernel_size=5, strides=2, activation=tf.nn.relu, padding="valid", name="deconv2")
            x = tf.layers.conv2d_transpose(x, filters=32, kernel_size=5, strides=2, activation=tf.nn.relu, padding="valid", name="deconv3")
            x = tf.layers.conv2d_transpose(x, filters=input_shape[-1], kernel_size=4, strides=2, activation=None, padding="valid", name="deconv4")
            # Graph-build-time check that the decoder exactly inverts the encoder.
            assert x.shape[1:] == input_shape, f"{x.shape[1:]} != {input_shape}"
            return x

        super().__init__(input_shape, build_encoder, build_decoder, **kwargs)


class MlpVAE(VAE):
    """VAE with fully-connected encoder/decoder; input is flattened first."""

    def __init__(self, input_shape, encoder_sizes=(512, 256), decoder_sizes=(256, 512), **kwargs):
        def build_mlp(x, hidden_sizes=(32,), activation=tf.tanh, output_activation=None):
            # Hidden layers use `activation`; the final layer uses `output_activation`.
            for h in hidden_sizes[:-1]:
                x = tf.layers.dense(x, units=h, activation=activation)
            return tf.layers.dense(x, units=hidden_sizes[-1], activation=output_activation)

        def build_encoder(x):
            x = tf.layers.flatten(x, name="flattened_input")
            return build_mlp(x, hidden_sizes=encoder_sizes, activation=tf.nn.relu, output_activation=tf.nn.relu)

        def build_decoder(z):
            # Decoder's last layer emits one logit per input pixel.
            return build_mlp(z, hidden_sizes=list(decoder_sizes) + [np.prod(input_shape)], activation=tf.nn.relu, output_activation=None)

        super().__init__(input_shape, build_encoder, build_decoder, **kwargs)
{"/vae/inspect_vae.py": ["/utils.py"], "/ddpg.py": ["/utils.py"], "/vae/collect_data.py": ["/utils.py"], "/train.py": ["/ddpg.py", "/vae/models.py", "/utils.py"]}
73,990
bitsauce/RoadFollowing-ddpg
refs/heads/master
/vae/train_vae.py
"""Train a VAE (conv or MLP) on a gzip-pickled image dataset with early stopping."""
import argparse
import gzip
import os
import pickle
import shutil
import sys
import numpy as np
import tensorflow as tf
from models import ConvVAE, MlpVAE, bce_loss, bce_loss_v2, mse_loss

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Trains a VAE on input data")
    parser.add_argument("--model_name", type=str, default=None)
    parser.add_argument("--dataset", type=str, default="data/data10k.pklz")
    parser.add_argument("--loss_type", type=str, default="bce")
    parser.add_argument("--model_type", type=str, default="mlp")
    parser.add_argument("--beta", type=int, default=1)
    parser.add_argument("--z_dim", type=int, default=64)
    parser.add_argument("--learning_rate", type=float, default=1e-4)
    parser.add_argument("--batch_size", type=int, default=100)
    args = parser.parse_args()

    # Load the dataset and carve off a fixed 10% validation split.
    # Seeding before the shuffle makes the split reproducible across runs.
    with gzip.open(args.dataset, "rb") as f:
        images = pickle.load(f)
    np.random.seed(0)
    np.random.shuffle(images)
    val_split = int(images.shape[0] * 0.1)
    train_images = images[val_split:]
    val_images = images[:val_split]
    # NOTE(review): named (w, h) but these are the first two array axes,
    # conventionally (height, width) — the model only needs them as a shape.
    w, h = images.shape[1:3]
    print("train_images.shape", train_images.shape)
    print("val_images.shape", val_images.shape)
    print("")
    print("Training parameters:")
    for k, v, in vars(args).items():
        print(f" {k}: {v}")
    print("")

    # Derive a descriptive default model name from the hyperparameters.
    if args.model_name is None:
        args.model_name = "{}_{}_zdim{}_beta{}_{}".format(args.loss_type, args.model_type, args.z_dim, args.beta,
                                                          os.path.splitext(os.path.basename(args.dataset))[0])

    # Resolve the loss function and model class from the CLI choices.
    if args.loss_type == "bce":
        loss_fn = bce_loss
    elif args.loss_type == "bce_v2":
        loss_fn = bce_loss_v2
    elif args.loss_type == "mse":
        loss_fn = mse_loss
    else:
        raise Exception("No loss function \"{}\"".format(args.loss_type))

    if args.model_type == "cnn":
        VAEClass = ConvVAE
    elif args.model_type == "mlp":
        VAEClass = MlpVAE
    else:
        raise Exception("No model type \"{}\"".format(args.model_type))

    # Images are single-channel (input_shape ends in 1).
    vae = VAEClass(input_shape=(w, h, 1),
                   z_dim=args.z_dim,
                   beta=args.beta,
                   learning_rate=args.learning_rate,
                   loss_fn=loss_fn,
                   model_name=args.model_name)
    vae.init_session()

    min_val_loss = float("inf")
    counter = 0
    print("Training")
    while True:
        epoch = vae.get_step_idx()
        if (epoch + 1) % 10 == 0:
            print(f"Epoch {epoch + 1}")
        # Save model
        vae.save()
        # Calculate evaluation metrics.
        # NOTE(review): evaluation runs BEFORE this epoch's training step, so
        # val_loss lags the weights by one epoch; only the reconstruction loss
        # (first returned value) drives early stopping, the KL term is ignored.
        val_loss, _ = vae.evaluate(val_images, args.batch_size)
        # Train one epoch
        vae.train_one_epoch(train_images, args.batch_size)
        # Early stopping: stop after 10 consecutive epochs without improvement.
        if val_loss < min_val_loss:
            counter = 0
            min_val_loss = val_loss
        else:
            counter += 1
            if counter >= 10:
                print("No improvement in last 10 epochs, stopping")
                break
{"/vae/inspect_vae.py": ["/utils.py"], "/ddpg.py": ["/utils.py"], "/vae/collect_data.py": ["/utils.py"], "/train.py": ["/ddpg.py", "/vae/models.py", "/utils.py"]}
73,991
bitsauce/RoadFollowing-ddpg
refs/heads/master
/train.py
"""DDPG training loop for the RoadFollowing environment.

States are VAE latent encodings of preprocessed camera frames; a
prioritized replay buffer and Ornstein-Uhlenbeck exploration noise are used.
"""
import os
import random
import shutil
import time
from collections import deque
import cv2
import gym
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from skimage import transform
from stable_baselines.ddpg.noise import OrnsteinUhlenbeckActionNoise
from ddpg import DDPG
from vae.models import ConvVAE, MlpVAE
from replay_buffers import PrioritizedReplayBuffer
from RoadFollowingEnv.car_racing import RoadFollowingEnv
from utils import VideoRecorder, preprocess_frame


def reward1(state):
    """Reward shaping: small velocity bonus, fixed penalty when off-track."""
    # -10 * 0.1 = -1 for driving off-track (no road contacts)
    if state.num_contacts == 0:
        return -10 * 0.1
    # Small positive reward proportional to velocity
    reward = state.velocity * 0.001
    #reward -= 0.01
    return reward


def create_encode_state_fn(model, with_measurements=False, stack=None):
    """Build a state-encoding closure: frame -> VAE latent (+ optional extras).

    with_measurements appends [throttle, steering, velocity/30]; an int
    `stack` concatenates the last `stack` encodings (ring buffer stored on
    the state object itself).
    """
    def encode_state(state):
        frame = preprocess_frame(state.frame)
        encoded_state = model.encode([frame])[0]
        if with_measurements:
            encoded_state = np.append(encoded_state, [state.throttle, state.steering, state.velocity / 30.0])
        if isinstance(stack, int):
            s1 = np.array(encoded_state)
            # Lazily attach a ring buffer of past encodings to the state object.
            if not hasattr(state, "stack"):
                state.stack = [np.zeros_like(encoded_state) for _ in range(stack)]
                state.stack_idx = 0
            state.stack[state.stack_idx % stack] = s1
            state.stack_idx += 1
            concat_state = np.concatenate(state.stack)
            return concat_state
        return np.array(encoded_state)
    return encode_state


def make_env(title=None, frame_skip=0, encode_state_fn=None):
    """Create a seeded RoadFollowingEnv using reward1 and the given encoder."""
    env = RoadFollowingEnv(title=title,
                           encode_state_fn=encode_state_fn,
                           reward_fn=reward1,
                           throttle_scale=0.1,
                           #steer_scale=0.25,
                           max_speed=30,
                           frame_skip=frame_skip)
    env.seed(0)
    return env


def test_agent(test_env, model, record=False):
    """Run one greedy evaluation episode, recording a video of every frame.

    Returns (total_reward, test_env.reward).
    NOTE(review): the `record` flag is currently unused — a video is always
    written; confirm whether recording should be conditional.
    """
    # Init test env
    state, terminal, total_reward = test_env.reset(), False, 0
    rendered_frame = test_env.render(mode="rgb_array")
    # Init video recording
    video_filename = os.path.join(model.video_dir, "episode{}.avi".format(model.get_episode_counter()))
    video_recorder = VideoRecorder(video_filename, frame_size=rendered_frame.shape)
    video_recorder.add_frame(rendered_frame)
    # While non-terminal state
    while not terminal:
        # Take deterministic actions at test time (noise_scale=0)
        action = model.predict([state], greedy=True)[0][0]
        state, reward, terminal, _ = test_env.step(action)
        # Add frame
        rendered_frame = test_env.render(mode="rgb_array")
        if video_recorder:
            video_recorder.add_frame(rendered_frame)
        total_reward += reward
    # Release video
    if video_recorder:
        video_recorder.release()
    return total_reward, test_env.reward


def train(params, model_name, save_interval=10, eval_interval=10, record_eval=True, restart=False):
    """Full training procedure: load VAE, build envs/model, run episode loop."""
    # Load pre-trained variational autoencoder (inference mode only).
    z_dim = 64
    vae = ConvVAE(input_shape=(84, 84, 1),
                  z_dim=z_dim,
                  models_dir="vae",
                  model_name="mse_cnn_zdim64_beta1_data10k",
                  training=False)
    vae.init_session(init_logging=False)
    if not vae.load_latest_checkpoint():
        raise Exception("Failed to load VAE")
    # State encoding fn
    with_measurements = False#True
    stack = None
    encode_state_fn = create_encode_state_fn(vae, with_measurements=with_measurements, stack=stack)
    # Create env
    print("Creating environment")
    env = make_env(model_name, frame_skip=0, encode_state_fn=encode_state_fn)
    test_env = make_env(model_name + " (Test)", encode_state_fn=encode_state_fn)
    # Training parameters (unpacked from the params dict for readability)
    actor_lr = params["actor_lr"]
    critic_lr = params["critic_lr"]
    discount_factor = params["discount_factor"]
    polyak = params["polyak"]
    initial_std = params["initial_std"]
    grad_norm = params["grad_norm"]
    replay_size = params["replay_size"]
    replay_alpha = params["replay_alpha"]
    replay_beta = params["replay_beta"]
    batch_size = params["batch_size"]
    num_episodes = params["num_episodes"]
    train_steps_per_episode = params["train_steps_per_episode"]
    num_exploration_episodes = params["num_exploration_episodes"]
    print("")
    print("Training parameters:")
    for k, v, in params.items():
        print(f" {k}: {v}")
    print("")
    # Environment constants: observation size grows with measurements/stacking.
    input_shape = np.array([z_dim])
    if with_measurements:
        input_shape[0] += 3
    if stack is not None:
        input_shape[0] *= stack
    num_actions = env.action_space.shape[0]
    action_min = env.action_space.low
    action_max = env.action_space.high
    # OU exploration noise; mean [0.0, 0.5] biases the throttle dimension
    # toward moving forward during exploration.
    action_noise = OrnsteinUhlenbeckActionNoise(mean=np.array([0.0, 0.5]), sigma=initial_std)
    # Create model
    print("Creating model")
    model = DDPG(input_shape, env.action_space, action_noise,
                 initial_actor_lr=actor_lr,
                 initial_critic_lr=critic_lr,
                 discount_factor=discount_factor,
                 polyak=polyak,
                 lr_decay=1.0,
                 grad_norm=grad_norm,
                 output_dir=os.path.join("models", model_name))
    # Prompt to load existing model if any; otherwise wipe and restart.
    if not restart:
        if os.path.isdir(model.log_dir) and len(os.listdir(model.log_dir)) > 0:
            answer = input("Model \"{}\" already exists. Do you wish to continue (C) or restart training (R)? ".format(model_name))
            if answer.upper() == "C":
                model.load_latest_checkpoint()
            elif answer.upper() == "R":
                shutil.rmtree(model.output_dir)
                for d in model.dirs:
                    os.makedirs(d)
            else:
                raise Exception("There are already log files for model \"{}\". Please delete it or change model_name and try again".format(model_name))
    else:
        shutil.rmtree(model.output_dir)
        for d in model.dirs:
            os.makedirs(d)
    model.init_logging()
    model.write_dict_to_summary("hyperparameters", params, 0)
    # Create replay buffer
    replay_buffer = PrioritizedReplayBuffer(replay_size, alpha=replay_alpha)
    #replay_buffer = ReplayBuffer(replay_size)
    # Negative episode indices are pure-exploration episodes (random actions).
    episode_counter = -num_exploration_episodes
    # For every episode
    while episode_counter < num_episodes:
        print(f"Episode {episode_counter} (Step {model.get_predict_step_counter()})")
        # Save model periodically
        if episode_counter % save_interval == 0:
            model.save()
        # Run evaluation periodically
        if episode_counter % eval_interval == 0:
            eval_reward, eval_score = test_agent(test_env, model, record=record_eval)
            model.write_value_to_summary("eval/episodic/score", eval_score, episode_counter)
            model.write_value_to_summary("eval/episodic/reward", eval_reward, episode_counter)
        # Reset environment
        state, terminal_state, total_reward, total_q = env.reset(), False, 0, 0
        # While episode not done
        while not terminal_state:
            if episode_counter < 0:
                # Exploration phase: uniform random steering, forward-biased throttle.
                action = np.array([-1.0+np.random.rand()*2.0, 0.7+np.random.rand()*0.3])
                q_value = 0
            else:
                # Sample action given state (with exploration noise)
                action, q_value = model.predict([state], greedy=False, write_to_summary=True)
                action, q_value = action[0], q_value[0]
            total_q += q_value
            # Show the critic's value estimate in the env's on-screen label.
            env.value_label.text = "Q(s, a)={:.2f}".format(model.sess.run(model.Q_value_of_actor, feed_dict={model.input_states: [state]})[0])
            # Perform action
            new_state, reward, terminal_state, _ = env.step(action)
            env.render()
            total_reward += reward
            # Store transition
            replay_buffer.add(state, action, reward, new_state, terminal_state)
            state = new_state
        # Train for one epoch over replay data
        print("Training...")
        n = train_steps_per_episode
        for i in range(n):
            if i % (train_steps_per_episode // 10) == 0:
                print("{}%".format(i / train_steps_per_episode * 100))
            # Sample mini-batch randomly (importance weights w, entry ids eid)
            states, taken_actions, rewards, states_next, terminals, w, eid = replay_buffer.sample(batch_size, beta=replay_beta)
            assert states.shape == (batch_size, *input_shape)
            assert states_next.shape == (batch_size, *input_shape)
            assert taken_actions.shape == (batch_size, num_actions)
            assert rewards.shape == (batch_size,)
            assert terminals.shape == (batch_size,)
            # Optimize network; TD-errors (deltas) feed back into priorities.
            deltas = model.train(states, taken_actions, rewards, states_next, terminals, w)
            replay_buffer.update_priorities(eid, np.abs(deltas) + 1e-6)
        # Write episodic values
        model.write_value_to_summary("train/episodic/score", env.reward, episode_counter)
        model.write_value_to_summary("train/episodic/reward", total_reward, episode_counter)
        model.write_value_to_summary("train/episodic/q_value", total_q, episode_counter)
        model.write_episodic_summaries(episode_counter)
        episode_counter += 1


if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="Trains an agent in a the RoadFollowing environment")
    # Hyper parameters
    parser.add_argument("--actor_lr", type=float, default=1e-4)
    parser.add_argument("--critic_lr", type=float, default=1e-3)
    parser.add_argument("--discount_factor", type=float, default=0.9)
    parser.add_argument("--polyak", type=float, default=0.995)
    parser.add_argument("--initial_std", type=float, default=0.4)
    parser.add_argument("--grad_norm", type=float, default=5e-3)
    parser.add_argument("--replay_size", type=int, default=int(1e4))
    parser.add_argument("--replay_alpha", type=float, default=0.6)
    parser.add_argument("--replay_beta", type=float, default=0.4)
    parser.add_argument("--batch_size", type=int, default=64)
    parser.add_argument("--num_episodes", type=int, default=200)
    parser.add_argument("--num_exploration_episodes", type=int, default=10)
    parser.add_argument("--train_steps_per_episode", type=int, default=500)
    # Training vars
    parser.add_argument("--model_name", type=str, required=True)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--save_interval", type=int, default=10)
    parser.add_argument("--eval_interval", type=int, default=10)
    parser.add_argument("--record_eval", type=bool, default=True)
    parser.add_argument("-restart", action="store_true")
    params = vars(parser.parse_args())
    # Remove non-hyperparameters so `params` holds only what train() unpacks.
    model_name = params["model_name"]; del params["model_name"]
    seed = params["seed"]; del params["seed"]
    save_interval = params["save_interval"]; del params["save_interval"]
    eval_interval = params["eval_interval"]; del params["eval_interval"]
    record_eval = params["record_eval"]; del params["record_eval"]
    restart = params["restart"]; del params["restart"]
    # Reset tf and set seed
    tf.reset_default_graph()
    if isinstance(seed, int):
        tf.random.set_random_seed(seed)
        np.random.seed(seed)
        # NOTE(review): random.seed(0) ignores the --seed value, unlike the
        # tf/numpy seeding above — confirm whether this is intentional.
        random.seed(0)
    # Call main func
    train(params, model_name,
          save_interval=save_interval,
          eval_interval=eval_interval,
          record_eval=record_eval,
          restart=restart)
{"/vae/inspect_vae.py": ["/utils.py"], "/ddpg.py": ["/utils.py"], "/vae/collect_data.py": ["/utils.py"], "/train.py": ["/ddpg.py", "/vae/models.py", "/utils.py"]}
73,995
SanchitMinocha/Experimental-Multi-sensor-Reservoir-Area-Estimation
refs/heads/main
/utility/config.py
# Required credentials for Texas Water Area Calculator App service_account = "...... Add your service account here ......" key_file= 'Path to private json key file'
{"/Multi-sensor-reservoir-area-estimation.py": ["/utility/config.py"]}
73,996
SanchitMinocha/Experimental-Multi-sensor-Reservoir-Area-Estimation
refs/heads/main
/Multi-sensor-reservoir-area-estimation.py
#!/usr/bin/env python # coding: utf-8 import ee # import folium # import geehydro from datetime import datetime as dt # from IPython.display import Image from scipy import stats import pandas as pd import time # import matplotlib.pyplot as plt import numpy as np import geopandas as gpd import plotly.graph_objs as go import plotly import fiona fiona.drvsupport.supported_drivers['KML'] = 'rw' # initialize the connection to the server from utility.config import service_account,key_file credentials = ee.ServiceAccountCredentials(service_account, key_file) ee.Initialize(credentials) ########################### General helpful functions ##################### # Mapdisplay function is taken directly from free course EEwPython for visualisation purpose # def Mapdisplay(center, dicc, Tiles="OpensTreetMap",zoom_start=10): # ''' # :param center: Center of the map (Latitude and Longitude). # :param dicc: Earth Engine Geometries or Tiles dictionary # :param Tiles: Mapbox Bright,Mapbox Control Room,Stamen Terrain,Stamen Toner,stamenwatercolor,cartodbpositron. # :zoom_start: Initial zoom level for the map. # :return: A folium.Map object. 
# ''' # mapViz = folium.Map(location=center,tiles=Tiles, zoom_start=zoom_start) # for k,v in dicc.items(): # if ee.image.Image in [type(x) for x in v.values()]: # folium.TileLayer( # tiles = v["tile_fetcher"].url_format, # attr = 'Google Earth Engine', # overlay =True, # name = k # ).add_to(mapViz) # else: # folium.GeoJson( # data = v, # name = k # ).add_to(mapViz) # mapViz.add_child(folium.LayerControl()) # return mapViz # Coverts a polygon geometry object to earth engine feature def poly2feature(polygon,shp_file_flag): if(polygon.type=='MultiPolygon'): all_cords=[] for poly in polygon.geoms: x,y = poly.exterior.coords.xy all_cords.append(np.dstack((x,y)).tolist()) if(shp_file_flag): g=ee.Geometry.MultiPolygon(all_cords).buffer(1000) #buffer for shape file else: g=ee.Geometry.MultiPolygon(all_cords)#.buffer(2500) in meters # no buffer for kml file because the polygons are already made with buffer else: x,y = polygon.exterior.coords.xy cords = np.dstack((x,y)).tolist() if(shp_file_flag): g=ee.Geometry.Polygon(cords).buffer(1000) #buffer for shape file else: g=ee.Geometry.Polygon(cords)#.buffer(2500) in meters # no buffer for kml file because the polygons are already made with buffer feature = ee.Feature(g) return feature # Function to convert feature collection to dictionary. 
def fc_2_dict(fc):
    """Convert an ee.FeatureCollection's properties into an ee.Dictionary of lists (one list per property)."""
    prop_names = fc.first().propertyNames()
    prop_lists = fc.reduceColumns(
        reducer=ee.Reducer.toList().repeat(prop_names.size()),
        selectors=prop_names).get('list')
    return ee.Dictionary.fromLists(prop_names, prop_lists)

# Prepares a mosaic using image collection of one day
def day_mosaic(date,imcol,satellite):
    """Mosaic all images of `imcol` acquired on `date`, carrying over sun geometry metadata.

    NOTE(review): sun angles are read from `imcol.first()`, not from the images of
    this particular date — presumably acceptable for a small AOI; confirm.
    """
    d = ee.Date(date)
    img=imcol.first()
    im = imcol.filterDate(d, d.advance(1, "day")).mosaic()
    if(satellite=='landsat-08'):
        sun_azimuth=img.get('SUN_AZIMUTH')
        sun_altitude=img.get('SUN_ELEVATION')
    elif(satellite=='sentinel-2'):
        sun_azimuth=img.get('MEAN_SOLAR_AZIMUTH_ANGLE')
        # Sentinel-2 stores zenith, not elevation: altitude = 90 - zenith.
        sun_altitude=ee.Number(90).subtract(img.get('MEAN_SOLAR_ZENITH_ANGLE'))
    else:
        # e.g. Sentinel-1 (radar): no sun geometry needed.
        return im.set("system:time_start", d.millis(),"DATE_ACQUIRED", d.format("YYYY-MM-dd"))
    return im.set("system:time_start", d.millis(),"DATE_ACQUIRED", d.format("YYYY-MM-dd"), "SUN_AZIMUTH",sun_azimuth,"SUN_ALTITUDE",sun_altitude)

# Prepares an image collection of mosaics by date
def mosaicByDate(imcol,satellite):
    """Collapse `imcol` into one mosaic per distinct acquisition date."""
    # imcol: An image collection
    # returns: An image collection
    # Converting collection to list
    imlist = imcol.toList(imcol.size())
    # Extracting unique dates in the collection
    unique_dates = imlist.map(lambda image: ee.Image(image).date().format("YYYY-MM-dd")).distinct()
    mosaic_imlist = unique_dates.map(lambda date: day_mosaic(date,imcol,satellite))
    return ee.ImageCollection(mosaic_imlist)

##################### Methods to compute water area for landsat/Sentinel2 #########################
################## Helpful functions for methods computing water area ################

# Function to rename bands to the common B/G/R/NIR/SWIR1/SWIR2/qa_pixel scheme.
def renameBands_Landsat8(x,product):
    if(product=='SR'):
        bands = ['SR_B2', 'SR_B3', 'SR_B4', 'SR_B5', 'SR_B6', 'SR_B7', 'QA_PIXEL']
    elif(product=='TOA'):
        bands = ['B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'BQA']
    elif(product=='S2_SR'):
        # Sentinel-2 images already transformed to Landsat-like 'L_*' bands.
        bands = ['L_B', 'L_G', 'L_R', 'L_NIR', 'L_SWIR1', 'L_SWIR2', 'qa_pixel']
    new_bands = ['B', 'G', 'R', 'NIR', 'SWIR1', 'SWIR2', 'qa_pixel']
    return x.select(bands).rename(new_bands)

# Function to create cloud mask from the QA band's per-product bit layout.
def createCloudAndShadowBand(x,product):
    qa = x.select('qa_pixel');
    if(product=='SR'):
        cloudbitnumber=2**3+2**8+2**9
        cloudshadowbitnumber=2**4+2**10+2**11
    elif(product=='TOA'):
        cloudbitnumber=2**4+2**5+2**6
        cloudshadowbitnumber=2**7+2**8
    elif(product=='S2_SR'):
        cloudbitnumber=2**10
        cloudshadowbitnumber=2**11 #cirrus bit number, to keep similarity with TOA and SR products,it is named cloud shadow bit
    cloudBitMask = ee.Number(cloudbitnumber).int();
    cloudShadowBitMask = ee.Number(cloudshadowbitnumber).int();
    # A pixel is flagged only when ALL selected bits are set (bitwiseAnd == mask).
    cloud = qa.bitwiseAnd(cloudBitMask).eq(cloudbitnumber);
    cloudShadow = qa.bitwiseAnd(cloudShadowBitMask).eq(cloudshadowbitnumber);
    mask = (ee.Image(0).where(cloud.eq(1), 1)
            .where(cloudShadow.eq(1), 1)
            .rename('cloud_mask'));
    return x.addBands(mask)

########################### Helpful functions for DSWE ################
# INDICES (scaled by 10000 and cast to int16 where noted)

def calc_mndwi(image):
    """Modified NDWI = (G - SWIR1)/(G + SWIR1), scaled x10000."""
    mndwi = ee.Image(0).expression(
        '((g - swir1)/(g + swir1)) * 10000', {
            'g': image.select("G"),
            'swir1': image.select("SWIR1")
        })
    return mndwi.toInt16().rename("MNDWI")

def calc_mbsr(image):
    """Multi-band Spectral Relationship: (G + R) - (NIR + SWIR1)."""
    mbsr = ee.Image(0).expression(
        '(g + r) - (nir + swir1)', {
            'g': image.select("G"),
            'r': image.select("R"),
            'nir': image.select("NIR"),
            'swir1': image.select("SWIR1")
        })
    return mbsr.toInt16().rename("MBSR")

def calc_ndvi(image):
    """NDVI = (NIR - R)/(NIR + R), scaled x10000."""
    ndvi = ee.Image(0).expression(
        '((nir - r)/(nir + r)) * 10000', {
            'nir': image.select("NIR"),
            'r': image.select("R")
        })
    return ndvi.toInt16().rename("NDVI")

def calc_awesh(image):
    """AWEIsh water index with the standard 2.5/1.5/0.25 coefficients."""
    awesh = ee.Image(0).expression(
        'blue + A*g - B*(nir+swir1) - C*swir2', {
            'blue': image.select('B'),
            'g': image.select('G'),
            'nir': image.select('NIR'),
            'swir1': image.select('SWIR1'),
            'swir2': image.select('SWIR2'),
            'A': 2.5,
            'B': 1.5,
            'C': 0.25
        })
    return awesh.toInt16().rename("AWESH")

# wrapper: stack all indices + raw bands needed by the DSWE tests
def calc_indices(image):
    bands = ee.Image([
        calc_mndwi(image),
        calc_mbsr(image),
        calc_ndvi(image),
        calc_awesh(image),
        image.select("B"),
        image.select("NIR"),
        image.select("SWIR1"),
        image.select("SWIR2"),
        image.select("cloud_mask")
    ])
    return bands.set('system:time_start', image.get('system:time_start'))

# DSWE test functions — each returns a 0/1 image for one diagnostic test.
def test1(image):
    return image.select("MNDWI").gt(124) ## wigt - default value 0.0124 * 10000

def test2(image):
    return image.select("MBSR").gt(0) ## mbsrv>mbsrn -> mbsr=mbsrv-mbsrn>0

def test3(image):
    return image.select("AWESH").gt(0) ## awgt - default value 0

def test4(image):
    # Partial surface water test 1: all four conditions must hold.
    x = (image.select("MNDWI").gt(-5000) ## pswt_1_mndwi - default value -0.044 * 10000
         .add(image.select("SWIR1").lt(900)) ## pswt_1_swir1 - default value 900
         .add(image.select("NIR").lt(1500)) ## pswt_1_nir - default value 1500
         .add(image.select("NDVI").lt(7000)) ## pswt_1_ndvi - default value 0.7 * 10000
        )
    return x.eq(4)

def test5(image):
    # Partial surface water test 2: all five conditions must hold.
    x = (image.select("MNDWI").gt(-5000) ## pswt_2_mndwi - default value -0.5 * 10000
         .add(image.select("B").lt(1000)) ## pswt_2_blue - default value 1000
         .add(image.select("NIR").lt(2500)) ## pswt_2_nir - default value 2500
         .add(image.select("SWIR1").lt(3000)) ## pswt_2_swir1 - default value
         .add(image.select("SWIR2").lt(1000))
        )
    return x.eq(5)

def cloudTest(image):
    return image.select('cloud_mask').eq(1)

# wrapper/multiplier function: encode the five test results as decimal digits
# (e.g. tests 1,3 passing -> 00101), -1 where clouded.
def addTests(image):
    x1 = test1(image)
    x2 = test2(image).multiply(10);
    x3 = test3(image).multiply(100);
    x4 = test4(image).multiply(1000);
    x5 = test5(image).multiply(10000);
    cld = cloudTest(image);
    res = x1.add(x2).add(x3).add(x4).add(x5).rename('test') .where(cld.eq(1), -1) .set('system:time_start', image.get('system:time_start'));
    return res

# DSWE CLASSES — map the encoded test digits to interpreted classes 0-4 and 9.
def isDSWE0(image):
    """Class 0: not water."""
    y1 = image.lte(10).add(image.gte(0)).eq(2)
    y2 = image.eq(100).add(image.eq(1000)).eq(1)
    y = y1.add(y2).gt(0) .rename("DSWE0") .set('system:time_start', image.get('system:time_start'))
    return y

def isDSWE1(image):
    """Class 1: water, high confidence."""
    y1 = image.gte(11101).add(image.lte(11111)).eq(2)
    y2 = image.eq(1111).add(image.eq(10111)).add(image.eq(11011)).eq(1)
    y = y1.add(y2).gt(0) .rename("DSWE1") .set('system:time_start', image.get('system:time_start'))
    return y

def isDSWE2(image):
    """Class 2: water, moderate confidence."""
    y1 = image.eq(111).add(image.eq(1011)).add(image.eq(1101)).add(image.eq(1110)).add(image.eq(10011)) .add(image.eq(10101)).add(image.eq(10110)).add(image.eq(11001)).add(image.eq(11010)).add(image.eq(11100)).eq(1)
    y = y1.gt(0) .rename("DSWE2") .set('system:time_start', image.get('system:time_start'))
    return y

def isDSWE3(image):
    """Class 3: potential wetland."""
    y = image.eq(11000) .rename("DSWE3") .set('system:time_start', image.get('system:time_start'))
    return y

def isDSWE4(image):
    """Class 4: low-confidence water / wetland."""
    y1 = image.eq(11).add(image.eq(101)).add(image.eq(110)).add(image.eq(1001)).add(image.eq(1010)) .add(image.eq(1100)).add(image.eq(10000)).add(image.eq(10001)).add(image.eq(10010)).add(image.eq(10100)).eq(1)
    y = y1.gt(0) .rename("DSWE4") .set('system:time_start', image.get('system:time_start'))
    return y

def isDSWE9(image):
    """Class 9: clouded / not evaluated (encoded -1 by addTests)."""
    y = image.eq(-1) .rename("DSWE9") .set('system:time_start', image.get('system:time_start'))
    return y

#################### DSWE Function #########################
def dswe(image,product):
    '''
    DSWE
    ====
    Apply DSWE algorithm to a single image

    Arguments:
    ----------
    image: ee.Image object (must be Landsat-8 SR Collection-2 TR-1 product)
    product: 'SR','TOA' or 'S2_SR'
    '''
    # Reading a DEM
    dem=ee.Image('CGIAR/SRTM90_V4').select('elevation')
    # Add Hill shade using the image's sun geometry (set by day_mosaic)
    sun_altitude=image.get('SUN_ALTITUDE')
    sun_azimuth=image.get('SUN_AZIMUTH')
    image=image.addBands(ee.Terrain.hillshade(dem,sun_azimuth,sun_altitude).rename('hillshade'))
    # Calculating slope
    slope=ee.Terrain.slope(dem).rename('slope')
    # Add cloud mask and rename bands
    img = createCloudAndShadowBand(renameBands_Landsat8(image,product),product)
    # Calculate indices
    indices = calc_indices(img)
    # Perform comparisons of various indices with thresholds and outputs the result of each test in a bit
    tests = addTests(indices)
    # Classify pixels into different classes to create interpreted dswe band
    dswe = ee.Image(-1) .where(isDSWE0(tests), 0) .where(isDSWE1(tests), 1) .where(isDSWE2(tests), 2) .where(isDSWE3(tests), 3) .where(isDSWE4(tests), 4) .where(isDSWE9(tests), 9) .updateMask(img.select('qa_pixel').mask()) .rename("DSWE")
    # Classifying pixels having hill shade less than equal to 110 as not water(0)
    dswe=dswe.where(image.select('hillshade').lte(110),0)
    # Classifying pixels using interpreted DSWE and slope
    # NOTE(review): Python `and` does not translate to a per-pixel ee operation —
    # `dswe.eq(4) and slope.gte(5.71)` evaluates to just `slope.gte(5.71)` because
    # ee.Image is always truthy. `.And()` is presumably intended here; as written,
    # every condition degenerates to the slope test alone. Confirm and fix.
    dswe=dswe.where((dswe.eq(4) and slope.gte(5.71)).Or # 10% slope = 5.71°
                    (dswe.eq(3) and slope.gte(11.31)).Or # 20% slope = 11.31°
                    (dswe.eq(2) and slope.gte(16.7)).Or # 30% slope = 16.7°
                    (dswe.eq(1) and slope.gte(16.7)), 0); # 30% slope = 16.7°
    return dswe

########################### MNDWI Function ####################
def mndwi(image,product):
    """Return an MNDWI band plus the cloud_mask band for `image`."""
    if(product=='SR'):
        img = createCloudAndShadowBand(renameBands_Landsat8(image,product),product)
    elif(product=='S2_SR'):
        img = createCloudAndShadowBand(image,product)
    mndwi = img.normalizedDifference(['G', 'SWIR1']).rename("MNDWI")
    return mndwi.addBands(img.select('cloud_mask'))

########################## NDWI Function ####################
def ndwi(image,product):
    """Return an NDWI band plus the cloud_mask band for `image`."""
    if(product=='SR'):
        img = createCloudAndShadowBand(renameBands_Landsat8(image,product),product)
    elif(product=='S2_SR'):
        img = createCloudAndShadowBand(image,product)
    ndwi = img.normalizedDifference(['G', 'NIR']).rename("NDWI")
    return ndwi.addBands(img.select('cloud_mask'))

################# Landsat-8 function ###################
def landsat_water_area_calculation_in_image(inp_image,method,clip_feature=None,product='SR',image_return=False):
    """Classify water in one Landsat-8 image and return an ee.Feature with area stats (km², 30 m pixels).

    NOTE(review): the pixel-count reductions call `clip_feature.geometry()`
    unconditionally, so despite the default, clip_feature=None would fail there.
    """
    # Clipping image
    if(clip_feature):
        pro_image=inp_image.clip(clip_feature)
    else:
        pro_image=inp_image
    # Calculating method
    if(method=='NDWI'):
        pro_image=ndwi(pro_image,product)
        # Removing land pixels based on ndwi<=-0.05
        # NOTE(review): the comment says -0.05 but the code thresholds at 0 — confirm which is intended.
        water_classified=pro_image.select('NDWI').gt(0)
        cloud_classified=pro_image.select('cloud_mask').eq(1)
    elif(method=='MNDWI'):
        pro_image=mndwi(pro_image,product)
        # Removing land pixels based on mndwi<=0
        water_classified=pro_image.select('MNDWI').gt(0)
        cloud_classified=pro_image.select('cloud_mask').eq(1)
    elif(method=='DSWE'):
        pro_image=dswe(pro_image,product)
        # Removing land pixels based on dswe=0 & 9 (water = classes 1..4)
        water_classified=pro_image.gt(0).add(pro_image.lte(4)).eq(2)
        cloud_classified=pro_image.eq(9)
    # Returning water image if yes
    if(image_return):
        if(method=='DSWE'):
            return pro_image.where(water_classified.eq(1),1).where(water_classified.eq(0),0).updateMask(pro_image.neq(9))
        else:
            return pro_image.select(method).where(water_classified.eq(1),1).where(water_classified.eq(0),0).updateMask( pro_image.select('cloud_mask').neq(1))
    # Total pixels
    total_pixels=ee.Number(pro_image.select(method).reduceRegion(reducer=ee.Reducer.count(),geometry=clip_feature.geometry(), scale=30).get(method))
    # Counting cloud pixels
    cloud_image=pro_image.select(method).updateMask(cloud_classified)
    cloud_pixels=ee.Number(cloud_image.reduceRegion(reducer=ee.Reducer.count(),geometry=clip_feature.geometry(), scale=30).get(method))
    # Counting water pixels and calculating area (30 m x 30 m pixels -> km²)
    water_image=pro_image.select(method).updateMask(water_classified)
    water_pixels=ee.Number(water_image.reduceRegion(reducer=ee.Reducer.count(),geometry=clip_feature.geometry(), scale=30).get(method))
    return ee.Feature(None,{'Satellite':'Landsat-08_'+method, 'Date':inp_image.get('DATE_ACQUIRED'), 'Water Area':water_pixels.multiply(30).multiply(30).divide(1000000), 'Total Area':total_pixels.multiply(30).multiply(30).divide(1000000), 'Cloud Percent Area':cloud_pixels.divide(total_pixels).multiply(100)})

############### Sentinel-1 Functions #################

# Filters speckle noise
def Specklefilter(image):
    vv = image.select('VV') #select the VV polarization band
    vv_smoothed = vv.focal_median(30,'square','meters').rename('VV_Filtered') #Apply a focal median filter
    return image.addBands(vv_smoothed) #Add filtered VV band to original image

# Reservoir Water area calculation for sentinel-1 data
def sentinel1_water_area_calculation_in_image(inp_image,clip_feature=None,image_return=False):
    """Classify water in one Sentinel-1 GRD image (VV < -13 dB) and return area stats."""
    # Speckle filter
    pro_image=Specklefilter(inp_image)
    # clipping image if required
    if(clip_feature):
        pro_image=pro_image.clip(clip_feature)
    pro_image=pro_image.select('VV_Filtered')
    # Removing land pixels based on VV>=-13 dB
    water_classified=pro_image.lt(-13)
    # Returning image if yes
    if(image_return):
        return pro_image.where(water_classified.eq(1),1).where(water_classified.eq(0),0)
    # Counting total pixels, water pixels and calculating area
    # Total pixels
    total_pixels=ee.Number(pro_image.reduceRegion(reducer=ee.Reducer.count(), geometry=clip_feature.geometry(),scale=30).get('VV_Filtered'))
    # Water pixels
    pro_image=pro_image.updateMask(water_classified)
    water_pixels=ee.Number(pro_image.reduceRegion(reducer=ee.Reducer.count(), geometry=clip_feature.geometry(),scale=30).get('VV_Filtered'))
    # Radar sees through clouds, so cloud percentage is always 0.
    return ee.Feature(None,{'Satellite':'Sentinel-1', 'Product': 'S1_GRD', 'Date':inp_image.get('DATE_ACQUIRED'), 'Water Area':water_pixels.multiply(30).multiply(30).divide(1000000), 'Total Area':total_pixels.multiply(30).multiply(30).divide(1000000), 'Cloud Percent Area': ee.Number(0)})

################## Sentinel-2 Functions ###############

# Having a uniform resolution in sentinel image: resample everything to `band`'s CRS at `scale`.
def sentinel2_refineresolution(image,band,scale):
    band_img=image.select(band)
    image=image.resample('bilinear').reproject(**{
        'crs': band_img.projection().crs(),
        'scale': scale
    });
    return image

# Renaming bands
def renameBands_sentinel2(x,product):
    if(product=='S2_SR'):
        bands = ['B2', 'B3', 'B4', 'B8', 'B11', 'B12', 'QA60']
    new_bands = ['B', 'G', 'R', 'NIR', 'SWIR1', 'SWIR2', 'qa_pixel']
    return x.select(bands).rename(new_bands)

# Transforming Sentinel-2 to Landsat-8 via per-band linear regressions.
def sentinel2_to_landsat8(image,product):
    image=renameBands_sentinel2(image,product)
    # Linear transformations (adds 'L_*' bands alongside the originals)
    landsat_b=image.select('B').multiply(0.9570).add(0.0003).rename('L_B')
    landsat_g=image.select('G').multiply(1.0304).add(0.0015).rename('L_G')
    landsat_r=image.select('R').multiply(0.9533).add(0.0041).rename('L_R')
    landsat_nir=image.select('NIR').multiply(1.0157).add(0.0139).rename('L_NIR')
    landsat_swir1=image.select('SWIR1').multiply(0.9522).add(0.0034).rename('L_SWIR1')
    landsat_swir2=image.select('SWIR2').multiply(0.9711).add(0.0004).rename('L_SWIR2')
    return image.addBands([landsat_b,landsat_g,landsat_r,landsat_nir,landsat_swir1,landsat_swir2])

# Water area calculation in sentinel-2 image (mirrors the Landsat-8 version, 60 m pixels).
def sentinel2_water_area_calculation_in_image(inp_image,method,clip_feature=None,product='S2_SR',image_return=False):
    # Clipping image
    if(clip_feature):
        pro_image=inp_image.clip(clip_feature)
    else:
        pro_image=inp_image
    # Sentinel2 to Landsat8
    pro_image = sentinel2_to_landsat8(pro_image,product)
    # Calculating method
    if(method=='NDWI'):
        pro_image=ndwi(pro_image,product)
        # Removing land pixels based on ndwi<=0
        water_classified=pro_image.select('NDWI').gt(0)
        cloud_classified=pro_image.select('cloud_mask').eq(1)
    elif(method=='MNDWI'):
        pro_image=mndwi(pro_image,product)
        # Removing land pixels based on mndwi<=0
        water_classified=pro_image.select('MNDWI').gt(0)
        cloud_classified=pro_image.select('cloud_mask').eq(1)
    elif(method=='DSWE'):
        pro_image=dswe(pro_image,product)
        # Removing land pixels based on dswe=0 & 9
        water_classified=pro_image.gt(0).add(pro_image.lte(4)).eq(2)
        cloud_classified=pro_image.eq(9)
    # Returning water image if yes
    if(image_return):
        if(method=='DSWE'):
            return pro_image.where(water_classified.eq(1),1).where(water_classified.eq(0),0).updateMask(pro_image.neq(9))
        else:
            return pro_image.select(method).where(water_classified.eq(1),1).where(water_classified.eq(0),0).updateMask( pro_image.select('cloud_mask').neq(1))
    # Total pixels
    total_pixels=ee.Number(pro_image.select(method).reduceRegion(reducer=ee.Reducer.count(),geometry=clip_feature.geometry(), scale=60).get(method))
    # Counting cloud pixels
    cloud_image=pro_image.select(method).updateMask(cloud_classified)
    cloud_pixels=ee.Number(cloud_image.reduceRegion(reducer=ee.Reducer.count(),geometry=clip_feature.geometry(), scale=60).get(method))
    # Counting water pixels and calculating area (60 m x 60 m pixels -> km²)
    water_image=pro_image.select(method).updateMask(water_classified)
    water_pixels=ee.Number(water_image.reduceRegion(reducer=ee.Reducer.count(),geometry=clip_feature.geometry(), scale=60).get(method))
    return ee.Feature(None,{'Satellite':'Sentinel-2_'+method, 'Date':inp_image.get('DATE_ACQUIRED'), 'Water Area':water_pixels.multiply(60).multiply(60).divide(1000000), 'Total Area':total_pixels.multiply(60).multiply(60).divide(1000000), 'Cloud Percent Area':cloud_pixels.divide(total_pixels).multiply(100)})

###################################### Main Script ############################
########################## Local computation #################################

def calculate_time_series(reservoir, start_date, end_date, cloud_cover_percent=20):
    """Compute a weekly water-area time series for `reservoir` between the given dates.

    Returns (plotly HTML div string, [lon, lat]) on success, or a plain HTML
    message string when fewer than 4 usable observations were found.
    """
    ############# User Input ##############
    reservoir_name=reservoir
    inp_start_date=start_date
    inp_end_date=end_date
    unique_str=reservoir_name+'_'+inp_start_date+'_'+inp_end_date
    sentinel_1=True
    sentinel_2=True
    landsat_08=True
    products=['SR'] # used for Landsat-08
    methods=['DSWE','NDWI','MNDWI'] # used for Sentinel-2 and Landsat-08
    cloud_percent=cloud_cover_percent #used for Landsat-08 and Sentinel-2

    ############# Reading Reservoir shapefiles and boundary polygons ###########
    print('Reading reservoir data ......')
    # Filepath to Unmonitored Reservoirs polygon KML file
    res_poly_file = "reservoir_data/Unmonitored_texas_reservoirs.kml"
    # Filepath to Major Reservoirs shapefile
    res_shp_file = "reservoir_data/Major_texas_reservoirs.kml"
    # Reading shapefile and verify if reservoir data is there
    from_shp_file=True #flag for reservoir from shp file or kml file
    poly_reservoirs=gpd.read_file(res_shp_file, driver='KML')
    name_column='Name'
    # NOTE(review): `~` on a pandas .any() result works only because .any()
    # returns numpy.bool_; with a plain Python bool, ~True == -2 (truthy).
    # `not (...)` would be the robust spelling.
    if (~(poly_reservoirs['Name'].str.contains(reservoir_name).any())):
        # Reading data from kml file if reservoir data is not in shp file
        poly_reservoirs=gpd.read_file(res_poly_file, driver='KML')
        from_shp_file=False
        name_column='Name'
    # Calculating reservoir centeroid for lat-lon (via equal-area projection, then back)
    poly_reservoirs['Center_point'] = poly_reservoirs['geometry'].to_crs('+proj=cea').centroid.to_crs( poly_reservoirs['geometry'].crs)
    # Extracting reservoir information
    reservoir_data=poly_reservoirs[poly_reservoirs[name_column]==reservoir_name].reset_index(drop=True)
    res_lat=reservoir_data['Center_point'].y[0]
    res_lon=reservoir_data['Center_point'].x[0]
    res_bbox=reservoir_data['geometry'].bounds

    ######################### Earth Engine Computation ####################
    print('Calculating Water Area time series ......')
    clipping_feature=poly2feature(reservoir_data.geometry[0],from_shp_file)
    # setting the Area of Interest (AOI)
    Reservoir_AOI = ee.Geometry.Rectangle(res_bbox.values.reshape(-1).tolist())
    water_area_list=ee.List([])
    # Keep only observations with 0 < cloud cover <= cloud_percent.
    cloud_filter=[ee.Filter.gt('Cloud Percent Area',0),ee.Filter.lte('Cloud Percent Area',cloud_percent)]
    if(landsat_08):
        for product in products:
            if(product=='SR'):
                landsat_collection_id="LANDSAT/LC08/C02/T1_L2"
            elif(product=='TOA'):
                landsat_collection_id="LANDSAT/LC08/C01/T1_TOA"
            # filter area and dates
            landsat_AOI = ee.ImageCollection(landsat_collection_id).filterBounds(Reservoir_AOI).filterDate(inp_start_date,inp_end_date)
            # make a mosaic if needed
            landsat_AOI_mosaic = mosaicByDate(landsat_AOI,'landsat-08')
            for method in methods:
                # water area calculation
                landsat_water_area_collection=landsat_AOI_mosaic.map(lambda image: landsat_water_area_calculation_in_image(image,method,clipping_feature,product))
                # Adding water area collection to final list
                water_area_list=water_area_list.add(landsat_water_area_collection)
                #temp_df=pd.DataFrame(fc_2_dict(landsat_water_area_collection.filter(cloud_filter)).getInfo())
                #result_df=result_df.append(temp_df)
        print('Calculated data for Landsat-08.')
    if(sentinel_1):
        # filter area and dates
        sentinel_1_AOI = ee.ImageCollection('COPERNICUS/S1_GRD').filterBounds(Reservoir_AOI).filterDate(inp_start_date, inp_end_date)
        # make a mosaic if needed
        sentinel_1_AOI_mosaic=mosaicByDate(sentinel_1_AOI,'sentinel-1')
        # water area calculation
        sentinel_1_water_area_collection=sentinel_1_AOI_mosaic.map(lambda image: sentinel1_water_area_calculation_in_image(image, clipping_feature))
        # Adding water area collection to final list
        water_area_list=water_area_list.add(sentinel_1_water_area_collection)
        #temp_df=pd.DataFrame(fc_2_dict(sentinel_1_water_area_collection.filter(cloud_filter)).getInfo())
        #result_df=result_df.append(temp_df)
        print('Calculated data for Sentinel-1.')
    if(sentinel_2):
        # filter area and dates
        sentinel_2_AOI = ee.ImageCollection('COPERNICUS/S2_SR').filterBounds(Reservoir_AOI).filterDate(inp_start_date, inp_end_date)
        #Refining resolution
        sentinel_2_AOI = sentinel_2_AOI.map(lambda image : sentinel2_refineresolution(image, 'B1', 60))
        # make a mosaic if needed
        sentinel_2_AOI_mosaic=mosaicByDate(sentinel_2_AOI,'sentinel-2')
        for method in methods:
            # water area calculation
            sentinel2_water_area_collection=sentinel_2_AOI_mosaic.map(lambda image: sentinel2_water_area_calculation_in_image(image,method,clipping_feature))
            # Adding water area collection to final list
            water_area_list=water_area_list.add(sentinel2_water_area_collection)
            #temp_df=pd.DataFrame(fc_2_dict(sentinel2_water_area_collection.filter(cloud_filter)).getInfo())
            #result_df=result_df.append(temp_df)
        print('Calculated data for Sentinel-2.')
    # Merging/Flattening all water area collections
    resulting_water_area_collection=ee.FeatureCollection.flatten(ee.FeatureCollection(water_area_list))
    # Converting the feature collection to dataframe (getInfo() pulls results client-side)
    print('Exporting Water Area Data to Pandas Dataframe ....')
    result_df=pd.DataFrame(fc_2_dict(resulting_water_area_collection.filter(cloud_filter)).getInfo())
    if(len(result_df)>3):
        result_df['Date']=pd.to_datetime(result_df['Date'])
        result_df['Week']=result_df['Date'].dt.isocalendar().week
        result_df['Year']=result_df['Date'].dt.year
        #result_df=result_df[result_df['Cloud Percent Area']<=cloud_percent][result_df['Water Area']>0]
        # Outlier rejection: keep observations within 1.2 standard deviations.
        result_df['Water Area Z']=stats.zscore(result_df['Water Area'])
        filtered_df=result_df[abs(result_df['Water Area Z'])<=1.2]
        # Aggregate to one point per ISO year/week, with min/max as the band.
        water_ts_df=filtered_df.groupby(['Year','Week']).agg({'Water Area':['mean','min','max','count'],'Date':['first']})
        water_ts_df = water_ts_df.reset_index(level=['Year',"Week"])
        water_ts_df.columns = [f'{i}_{j}' if j != '' else f'{i}' for i,j in water_ts_df.columns]
        water_ts_df=water_ts_df.sort_values(by='Date_first')
        fig = go.Figure([
            go.Scatter(
                name='Water Area',
                x=water_ts_df['Date_first'],
                y=water_ts_df['Water Area_mean'],
                mode='lines',
                line=dict(color='rgb(31, 119, 180)'),
            ),
            go.Scatter(
                name='Upper Bound',
                x=water_ts_df['Date_first'],
                y=water_ts_df['Water Area_max'],
                mode='lines',
                marker=dict(color="#444"),
                line=dict(width=0),
                showlegend=False
            ),
            go.Scatter(
                name='Lower Bound',
                x=water_ts_df['Date_first'],
                y=water_ts_df['Water Area_min'],
                marker=dict(color="#444"),
                line=dict(width=0),
                mode='lines',
                fillcolor='rgba(10, 15, 75, 0.2)',
                fill='tonexty',
                showlegend=False
            )
        ])
        fig.update_layout(
            yaxis_title='Water Area (Km<sup>2</sup>)',
            xaxis_title='Date',
            title='Water Area time series for '+reservoir_name,
            title_font={'color':'red', 'size':20},
            hovermode="x",
            #yaxis_range=(1.5,4.5),
            showlegend=False
        )
        fig.update_xaxes(rangeslider_visible=True)
        #fig.show()
        ######################## Exporting Results from Earth Engine to Drive ###############
        # # Export the Water Area FeatureCollection to a CSV file.
        # print('Exporting Water Area CSV file ......')
        # task = ee.batch.Export.table.toDrive(**{
        #     'collection': resulting_water_area_collection,
        #     'description': unique_str,
        #     'fileFormat': 'CSV'
        # })
        # task.start()
        # while task.active():
        #     print('Polling for task (id: {}).'.format(task.id))
        #     time.sleep(5)
        # print('Exported successfully!')
        return (plotly.offline.plot(fig,include_plotlyjs=False,output_type='div'),[res_lon,res_lat])
    else:
        # NOTE(review): the trailing tag reads `<div>` where `</div>` was presumably
        # intended; left unchanged here because this string is returned to callers.
        return ('<div> There is not enough data to create time series. Please select a longer time period for time series plot. <div>')

#print(calculate_time_series('Cox Lake / Raw Water Lake / Recycle Lake','2018-01-01','2020-12-31',20)) #test case
{"/Multi-sensor-reservoir-area-estimation.py": ["/utility/config.py"]}
74,010
paloomers/ammp
refs/heads/main
/crop_img.py
import math
import numpy as np

# NOTE(review): `import cv2` removed — nothing in this module uses cv2 except the
# commented-out demo below; re-add it (with the demo) if the demo is revived.
# def main():
#     import cv2
#     img = cv2.imread("basketballdude.png")
#     crop_img = crop_around_point(img, 100, 100, 300, 300)
#     cv2.imshow("cropped", crop_img)
#     cv2.waitKey(0)


def _crop_with_padding(to_crop, start_x, end_x, start_y, end_y):
    """Crop ``to_crop[start_y:end_y, start_x:end_x]``, zero-padding any overhang.

    ``to_crop`` is an HxWxC array. Indices may fall outside the image; the
    out-of-bounds region is filled with black (zeros). Returns a copy.
    """
    pad_left = max(0, -start_x)
    pad_right = max(0, end_x - to_crop.shape[1])
    pad_top = max(0, -start_y)
    pad_bottom = max(0, end_y - to_crop.shape[0])
    if pad_left or pad_right or pad_top or pad_bottom:
        # Pad rows and columns only; leave the channel axis untouched.
        padded = np.pad(to_crop, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)))
        # Shift the window into the padded array's coordinates.
        return padded[start_y + pad_top:end_y + pad_top,
                      start_x + pad_left:end_x + pad_left].copy()
    return to_crop[start_y:end_y, start_x:end_x].copy()


def crop_around_point(to_crop, center_x, center_y, output_width, output_height):
    """Return an ``output_height`` x ``output_width`` crop centered on (center_x, center_y).

    Regions that fall outside the image are filled with black.
    """
    start_y = math.floor(center_y - output_height / 2)
    end_y = math.floor(center_y + output_height / 2)
    start_x = math.floor(center_x - output_width / 2)
    end_x = math.floor(center_x + output_width / 2)
    return _crop_with_padding(to_crop, start_x, end_x, start_y, end_y)


# x,y is top left, box_width/height is box shape
def crop_around_bounding_box(to_crop, x, y, box_width, box_height, output_width, output_height):
    """Crop an output-sized window centered on the bounding box's center.

    Previously duplicated the whole pad-and-crop logic of ``crop_around_point``;
    now computes the box center (ceil, matching the original) and delegates.
    """
    center_x = math.ceil(x + box_width / 2)
    center_y = math.ceil(y + box_height / 2)
    return crop_around_point(to_crop, center_x, center_y, output_width, output_height)


# if __name__ == '__main__':
#     main()
{"/main.py": ["/stabilize.py", "/optical.py"], "/optical.py": ["/my_optical.py", "/crop_img.py"], "/stabilize.py": ["/crop_img.py"]}
74,011
paloomers/ammp
refs/heads/main
/main.py
# Entry point: record (or load) a video, then run either optical-flow or
# cascade-based tracking over it and play back the result.

import argparse
import os
from skimage import io
import numpy as np
import cv2
import stabilize
import optical
import cascade
import code
import time

# def parse_args():
#     """ Perform command-line argument parsing. """
#     parser = argparse.ArgumentParser(
#         description="Project 6 Final Project!")
#     parser.add_argument(
#         '--video',
#         required=True,
#         lp='Which video to use')
#     parser.add_argument(
#         '--cascade',
#         required=True,
#         help='Cascade used to find object')
#     return parser.parse_args()
# NOTE(review): if this parser is revived, `lp=` above should presumably be `help=`.


def record_video(cam_number):
    """Record ~3 seconds of webcam video to ./videos/recorded.mp4 and return that path.

    Opens camera `cam_number`, shows a live preview window, and starts writing
    frames once the user presses 's'. Blocks until recording completes or the
    camera stops delivering frames.
    """
    print("Recording Video From Camera")
    recorded_video_name = "./videos/recorded.mp4"
    # video length in seconds
    video_length = 3
    cap = cv2.VideoCapture(cam_number)
    # Changing camera capture resolution from default
    cap.set(cv2.CAP_PROP_FRAME_WIDTH,1280)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT,720)
    # Property ids 3/4/5 are CAP_PROP_FRAME_WIDTH/HEIGHT/FPS.
    cam_width = int(cap.get(3))
    cam_height = int(cap.get(4))
    frame_rate = cap.get(5)
    # Define the codec and create VideoWriter object
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(recorded_video_name,fourcc,frame_rate,(cam_width,cam_height))
    video_started = False
    # Frame budget for the requested duration at the camera's reported FPS.
    max_num_frames = video_length * int(frame_rate)
    num_frames_captured = 0
    while(cap.isOpened()):
        ret, frame = cap.read()
        if ret==True:
            cv2.namedWindow('camera-input', cv2.WINDOW_NORMAL)
            cv2.imshow('camera-input', frame)
            # 's' keypress arms the recorder (only once).
            if (cv2.waitKey(1) & 0xFF == ord('s')) and (not video_started):
                print("Recording Started")
                video_started = True
            if(video_started):
                if (num_frames_captured < max_num_frames):
                    # write the frame
                    out.write(frame)
                    num_frames_captured += 1
                else:
                    break
        else:
            # Camera stopped delivering frames.
            break
    # Release everything if job is finished
    cap.release()
    out.release()
    cv2.destroyAllWindows()
    return recorded_video_name


def main():
    """Record or load the input video, then track with the configured method."""
    # args = parse_args()
    # imagePath = args.video
    casPath = "haarcascade_frontalface_default.xml"
    # casPath = "./gatheringCascade/cascadecopy.xml"
    # INPUT_FILE_NAME = "./videos/v1-airpod.mp4"
    INPUT_FILE_NAME = "0" # Use Integers for Camera (ex. Webcam)
    OUTPUT_FILE_NAME = "output.avi"
    # Scale for size of output video relative to input video
    output_scale = 0.7
    # method = "optical"
    method = "cascade"
    # Check if INPUT_FILE_NAME is int — an integer string means "record from
    # that camera index"; anything else is treated as a file path.
    try:
        camera_number = int(INPUT_FILE_NAME)
        # Record Webcam Video
        # Press 's' to start recording
        INPUT_FILE_NAME = record_video(camera_number)
    except ValueError:
        # Not an integer: keep the path as-is (intentional no-op).
        INPUT_FILE_NAME = INPUT_FILE_NAME
    if (method == "optical"):
        # Plays Input Video
        # optical.play_video(INPUT_FILE_NAME)
        # Processes Video, and Generates Output Video
        optical.process_video(INPUT_FILE_NAME,OUTPUT_FILE_NAME,output_scale)
        # Plays Output Video
        optical.play_video(OUTPUT_FILE_NAME)
    elif (method == "cascade"):
        # Plays Input Video
        cascade.play_video(INPUT_FILE_NAME)
        # Processes Video, and Generates Output Video
        cascade.process_video(INPUT_FILE_NAME,OUTPUT_FILE_NAME,casPath,output_scale)
        # Plays Output Video
        cascade.play_video(OUTPUT_FILE_NAME)
    else:
        print("method not supported")


if __name__ == '__main__':
    main()
{"/main.py": ["/stabilize.py", "/optical.py"], "/optical.py": ["/my_optical.py", "/crop_img.py"], "/stabilize.py": ["/crop_img.py"]}
74,012
paloomers/ammp
refs/heads/main
/my_optical.py
from scipy import signal import numpy as np import cv2 from skimage import filters from matplotlib import pyplot as plt from PIL import Image from pylab import * import random # Pass in old and new frame (both gray) and x and y coordinate of point to optical flow on, odd single integer for window size def my_optical_flow(old_frame,new_frame,row,col,window_size): # Written for 1 point not multiple # Optionally blur images for better estimation k = 3 # gauss kernel size image_1 = cv2.GaussianBlur(old_frame.copy(),(k,k),0) image_2 = cv2.GaussianBlur(new_frame.copy(),(k,k),0) row = int(np.rint(row)) col = int(np.rint(col)) # kernel_x = np.array([[-1., 1.], [-1., 1.]]) # kernel_y = np.array([[-1., -1.], [1., 1.]]) # kernel_t = np.array([[1., 1.], [1., 1.]]) kernel_x = np.array([[-1., 1.], [-1., 1.]]) * 0.25 kernel_y = np.array([[-1., -1.], [1., 1.]]) * 0.25 kernel_t = np.array([[1., 1.], [1., 1.]]) * 0.25 # Optionally normalize pixels # image_1 = image_1 / 255. # image_2 = image_2 / 255. # Calculate I_x, I_y, I_t mode = 'same' fx = signal.convolve2d(image_1, kernel_x, mode=mode) fy = signal.convolve2d(image_1, kernel_y, mode=mode) ft = np.add( signal.convolve2d(image_2, kernel_t, mode=mode), signal.convolve2d(image_1, (-1 * kernel_t), mode=mode) ) # window_size is odd, all the pixels with offset in between [-w, w] are inside the window w = int(window_size/2) # Finding values within window Ix = fx[row-w:row+w+1, col-w:col+w+1].flatten() Iy = fy[row-w:row+w+1, col-w:col+w+1].flatten() It = ft[row-w:row+w+1, col-w:col+w+1].flatten() A_T = np.array((Ix,Iy)) A = np.transpose(A_T) b = np.expand_dims(np.array(It),axis=1) u,v = np.linalg.pinv(A_T @ A) @ (A_T @ b) # Use optical flow comps (u,v) to calc new points + returns 1x2 array return np.float32(np.array([[row+v,col+u]])) def main(): # CODE TO SHOW OPTICAL FLOW DIAGRAMS FOR 2 FRAMES w = 7 # Window size Image1 = Image.open('basketball1.png').convert('L') Image2 = Image.open('basketball2.png').convert('L') Image1 = 
np.array(Image1) Image2 = np.array(Image2) # finding the good features features = cv2.goodFeaturesToTrack(Image1,100,0.01,5) features = np.int0(features) c = "r" # color for plot plt.subplot(1,2,1) # Plot 1 for open cv implementation plt.title("Optical Flow Vectors (OpenCV)") plt.imshow(Image1,cmap = cm.gray) # Parameters for lucas kanade optical flow lk_params = dict( winSize = (w,w), maxLevel = 0, criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)) for f in features: # Compute flow feature = np.float32(np.expand_dims(f,axis=0)) new_p, st, er = cv2.calcOpticalFlowPyrLK(Image1, Image2, feature, None, **lk_params) new_row, new_col = new_p.ravel() u = new_col - f[0,1] # dx, change in col v = new_row - f[0,0] # dy, change in row # Plot Arrow plt.arrow(f[0,0],f[0,1],u,v,head_width =5, head_length =5, color = c) plt.subplot(1,2,2) # Plot 2 for open cv implementation plt.title("Optical Flow Vectors (Our implementation)") plt.imshow(Image1,cmap = cm.gray) for f in features: row = f[0,0] col = f[0,1] new_p = my_optical_flow(Image1,Image2,row,col,w) new_row, new_col = new_p.ravel() u = new_col - col # dx, change in col v = new_row - row # dy, change in row # Plot Arrow plt.arrow(row,col,u,v,head_width =5, head_length =5, color = c) plt.show() if __name__ == '__main__': main()
{"/main.py": ["/stabilize.py", "/optical.py"], "/optical.py": ["/my_optical.py", "/crop_img.py"], "/stabilize.py": ["/crop_img.py"]}
74,013
paloomers/ammp
refs/heads/main
/optical.py
import numpy as np import cv2 import time import imutils # pip install imutils import my_optical import crop_img x_0 = 900 y_0 = 500 # just flips a video for testing right now def process_frame(frame): return cv2.flip(frame,0) # when clicking a point on the screen, set it to be the tracked point # i should add a boolean to make sure this is during being clicked later def select_point(event, x, y, flags, params): global x_0, y_0 if event == cv2.EVENT_LBUTTONDOWN: x_0 = x y_0 = y # cv2.circle(frame, point, 5, (0, 0, 255), 2) cv2.destroyWindow('point_selector') # Plays input video, creates and saves new video def process_video(INPUT, OUTPUT, output_scale): global x_0, y_0 cap = cv2.VideoCapture(INPUT) if(cap.isOpened() == False): print("Error opening video") print("Processing Video and Creating Output") input_width = int(cap.get(3)) input_height = int(cap.get(4)) frame_rate = cap.get(5) output_width = int(input_width * output_scale) output_height = int(input_height * output_scale) # Define the codec and create VideoWriter object fourcc = cv2.VideoWriter_fourcc('M','J','P','G') out = cv2.VideoWriter(OUTPUT,fourcc,frame_rate, (output_width,output_height)) # config for opencv optical flow lk_params = dict(winSize=(15,15), maxLevel=4, criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)) i = 0 ret, prev_frame = cap.read() # show the first frame cv2.namedWindow('point_selector',cv2.WINDOW_NORMAL) cv2.setMouseCallback('point_selector', select_point) cv2.imshow('point_selector', prev_frame) key = cv2.waitKey(10000) #change to your own waiting time 1000 = 1 second old_coords = np.array([[x_0, y_0]], dtype=np.float32) prev_gray = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY) while(cap.isOpened()): # display first frame and ask user to pick a point ret, frame = cap.read() gray_frame = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY) if ret==True: # OPENCV Built-in OF # new_points, status, error = cv2.calcOpticalFlowPyrLK(prev_gray, gray_frame, old_coords, None, 
**lk_params) # Using our implementation window_size = 15 # window size for custom OF new_points = my_optical.my_optical_flow(prev_gray,gray_frame,old_coords[0,0],old_coords[0,1],window_size) x_0, y_0 = new_points.ravel() old_coords = new_points # Processes a new frame for the output video # new_frame = process_frame(frame) new_frame = crop_img.crop_around_point( frame, x_0, y_0,output_width,output_height ) # draw a dot on the point being tracked cv2.circle(frame, (x_0, y_0), 5, (0, 255, 0), -1) # write the new frame out.write(new_frame) prev_frame = frame prev_gray = gray_frame # Display tracking info frame cv2.namedWindow('tracking', cv2.WINDOW_NORMAL) cv2.imshow('tracking', frame) if cv2.waitKey(1) & 0xFF == ord('q'): # May add a separate key for starting real-time video capture print("Exited using 'q'") break else: break # Release everything if job is finished cap.release() out.release() cv2.destroyAllWindows() # Plays Video File def play_video(FILENAME): cap = cv2.VideoCapture(FILENAME) if(cap.isOpened() == False): print("Error opening output video") print("Playing " + FILENAME) frame_rate = cap.get(5) while(cap.isOpened()): ret, frame = cap.read() if ret == False: break # Display frame cv2.namedWindow(FILENAME, cv2.WINDOW_NORMAL) cv2.imshow(FILENAME, frame) if cv2.waitKey(10) & 0xFF == ord('q'): print("Exited using 'q'") break # Release everything if job is finished cap.release() cv2.destroyAllWindows()
{"/main.py": ["/stabilize.py", "/optical.py"], "/optical.py": ["/my_optical.py", "/crop_img.py"], "/stabilize.py": ["/crop_img.py"]}
74,014
paloomers/ammp
refs/heads/main
/gatheringCascade/alter.py
import os import cv2 import numpy as np from PIL import Image, ImageOps def rename(): printnum = 1 for i in os.listdir('/Users/Paloma/Desktop/CV/ammp/gatheringCascade/neg'): print(printnum) print(i) os.rename('/Users/Paloma/Desktop/CV/ammp/gatheringCascade/neg/'+i, str(printnum)+".jpg") print(i) printnum += 1 def alter(): os.chdir(r"/Users/Paloma/Desktop/CV/ammp/gatheringCascade/neg") for i in os.listdir('/Users/Paloma/Desktop/CV/ammp/gatheringCascade/neg'): print(i) im = Image.open(i) im = im.resize((100, 100), Image.ANTIALIAS) im = ImageOps.grayscale(im) im.save(i) def create_pos_n_neg(): for img in os.listdir('/Users/Paloma/Desktop/CV/ammp/gatheringCascade/neg'): print(img) line = 'neg'+'/'+img+'\n' with open('bg.txt','a') as f: f.write(line) #rename() #alter() create_pos_n_neg()
{"/main.py": ["/stabilize.py", "/optical.py"], "/optical.py": ["/my_optical.py", "/crop_img.py"], "/stabilize.py": ["/crop_img.py"]}
74,015
paloomers/ammp
refs/heads/main
/stabilize.py
import numpy as np import cv2 import time import imutils # pip install imutils import crop_img # just flips a video for testing right now def process_frame(frame): return cv2.flip(frame,0) # Plays input video, creates and saves new video def process_video(INPUT, OUTPUT, casPath, output_scale): faceCascade = cv2.CascadeClassifier(casPath) cap = cv2.VideoCapture(INPUT) if(cap.isOpened() == False): print("Error opening video") print("Processing Video and Creating Output") input_width = int(cap.get(3)) input_height = int(cap.get(4)) frame_rate = cap.get(5) output_width = int(input_width * output_scale) output_height = int(input_height * output_scale) # Define the codec and create VideoWriter object fourcc = cv2.VideoWriter_fourcc('M','J','P','G') out = cv2.VideoWriter(OUTPUT,fourcc,frame_rate, (output_width,output_height)) # Ignore this (used to avoid errors if nothing found) x_1,y_1,w_1,h_1 = (100,100,input_width/4,input_height/4) i = 0 while(cap.isOpened()): ret, frame = cap.read() if ret==True: # FACE RECOGNITION CODE gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) faces = faceCascade.detectMultiScale( gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30) ) # TODO: Deal w/ multiple faces found + no face found in current frame #if faces are detected: if(len(faces) != 0): #if this is the first frame or there's only one face, return the first face if(i==0 or len(faces) == 1): x_1,y_1,w_1,h_1 = faces[0] x_0, y_0 = x_1,y_1 # otherwise return the face closest to the previous face else: min_dist = 10000 for (x, y, w, h) in faces: #calculates distance between previous face and current face dist = np.linalg.norm(np.array((x ,y)) - np.array((x_0,y_0))) #if min distance, saves this face if(dist < min_dist): x_1, y_1, w_1, h_1 = x, y, w, h min_dist = dist #saves current face as previous face x_0, y_0 = x_1, y_1 i = 1 # Processes a new frame for the output video # new_frame = process_frame(frame) new_frame = crop_img.crop_around_bounding_box( frame, x_1, y_1, w_1, h_1 
,output_width,output_height ) # Draw a rectangle around the face cv2.rectangle(frame, (x_1, y_1), (x_1+w_1, y_1+h_1), (0, 255, 0), 2) # write the new frame out.write(new_frame) # Display tracking info frame cv2.namedWindow('tracking', cv2.WINDOW_NORMAL) cv2.imshow('tracking', frame) if cv2.waitKey(1) & 0xFF == ord('q'): # May add a separate key for starting real-time video capture print("Exited using 'q'") break else: break # Release everything if job is finished cap.release() out.release() cv2.destroyAllWindows() # Plays Video File def play_video(FILENAME): cap = cv2.VideoCapture(FILENAME) if(cap.isOpened() == False): print("Error opening output video") print("Playing " + FILENAME) frame_rate = cap.get(5) while(cap.isOpened()): ret, frame = cap.read() if ret == False: break # Display frame cv2.namedWindow(FILENAME, cv2.WINDOW_NORMAL) cv2.imshow(FILENAME, frame) if cv2.waitKey(10) & 0xFF == ord('q'): print("Exited using 'q'") break # Release everything if job is finished cap.release() cv2.destroyAllWindows()
{"/main.py": ["/stabilize.py", "/optical.py"], "/optical.py": ["/my_optical.py", "/crop_img.py"], "/stabilize.py": ["/crop_img.py"]}
74,019
NaturalHistoryMuseum/leventis
refs/heads/master
/leventis/components/expand_trait_entities.py
from spacy.tokens import Span class ExpandTraitEntities(object): name = "expand_trait_entities" def _get_token(self, doc, x): try: return doc[x] except IndexError: return None def _expand_start(self, doc, ent_start): # To get the preceding token, we need to get ent_start -1 # Expansion is greedy to end - this ensures it does not expand # into previous new entities try: max_end = max([e.end for e in self.new_ents]) except ValueError: pass else: if ent_start <= max_end: return ent_start if self._preceeding_token_is_expandable(doc, ent_start): ent_start = self._expand_start(doc, ent_start - 1) return ent_start def _expand_end(self, doc, ent_end): if self._succeeding_token_is_expandable(doc, ent_end): ent_end = self._expand_end(doc, ent_end + 1) return ent_end def __call__(self, doc): self.new_ents = [] for ent in doc.ents: if ent.label_ == "TRAIT": ent_start = self._expand_start(doc, ent.start) ent_end = self._expand_end(doc, ent.end) new_ent = Span(doc, ent_start, ent_end, label=ent.label) self.new_ents.append(new_ent) else: self.new_ents.append(ent) doc.ents = self.new_ents return doc def _preceeding_token_is_expandable(self, doc, ent_start): return self._token_is_expandable(doc, ent_start - 1) def _succeeding_token_is_expandable(self, doc, ent_end): return self._token_is_expandable(doc, ent_end) def _token_is_expandable(self, doc, ent_end): token = self._get_token(doc, ent_end) if token: return (token.pos_ in ['NUM', 'SYM', 'ADV', 'ADJ'] or token.dep_ in ['quantmod'] or token.lower_ in ['mm', 'cm', 'inch', 'inches']) and not token.ent_type_ # # @staticmethod # # def _token_is_entity(token): # # return token.ent_type_ # def _is_overlapping(self, token): # print(token) # return True
{"/leventis/parse_page.py": ["/leventis/bhl/page.py", "/leventis/nlp.py"], "/leventis/nlp.py": ["/leventis/components/entity_matcher.py", "/leventis/components/sentenizer.py", "/leventis/components/expand_trait_entities.py", "/leventis/components/normalise_taxon_entities.py", "/leventis/components/abbreviated_names.py", "/leventis/helpers.py"], "/leventis/components/abbreviated_names.py": ["/leventis/helpers.py"], "/leventis/components/entity_matcher.py": ["/leventis/tags.py"], "/leventis/bhl/page.py": ["/leventis/preprocess.py"], "/leventis/features.py": ["/leventis/helpers.py"], "/leventis/cli.py": ["/leventis/parse_page.py"]}
74,020
NaturalHistoryMuseum/leventis
refs/heads/master
/leventis/tags.py
class Tags(object): def __init__(self, tags): self._tags = tags def to_bilou(self): return self._convert_tags_to_biluo_tags(self._tags) def _convert_tags_to_biluo_tags(self, tags): tags_to_convert = set(tags) try: tags_to_convert.remove('O') except KeyError: pass biluo_tags = [] for tag_to_convert in tags_to_convert: biluo_tags = self._convert_tag_to_bilou(tag_to_convert, tags) return biluo_tags def _convert_tag_to_bilou(self, tag_to_convert, tags): biluo_tags = [] for i, tag in enumerate(tags): prev_tag = tags[i-1] if i else None next_tag = None if i+1 >= len(tags) else tags[i+1] if tag == tag_to_convert: # This will expand concurrently matched tags if prev_tag == tag_to_convert and next_tag == tag_to_convert: bilou_tag = f'I-{tag_to_convert}' elif prev_tag == tag_to_convert: bilou_tag = f'L-{tag_to_convert}' elif next_tag == tag_to_convert: bilou_tag = f'B-{tag_to_convert}' else: bilou_tag = tag biluo_tags.append(bilou_tag) return biluo_tags class BiGramTags(Tags): def to_bilou(self): unigram_tags = self._bigram_to_unigram(self._tags) return self._convert_tags_to_biluo_tags(unigram_tags) def _bigram_to_unigram(self, bigram_tags): unigram_tags = [] for i, t in enumerate(bigram_tags): prev_tag = bigram_tags[i-1] if i > 0 else None tags = {t, prev_tag} tags.discard(None) if len(tags) > 1: tags.discard('O') unigram_tags.append(tags.pop()) # The last tag is not a repeated bigram # So should always be included in the unigrams unigram_tags.append(bigram_tags[-1]) return unigram_tags def _convert_tag_to_bilou(self, tag_to_convert, tags): biluo_tags = [] for i, tag in enumerate(tags): next_tag = None if i+1 >= len(tags) else tags[i+1] prev_bilou_tag = biluo_tags[-1] if biluo_tags else None if tag == tag_to_convert: if prev_bilou_tag == f'B-{tag_to_convert}': bilou_tag = f'L-{tag_to_convert}' elif next_tag == tag_to_convert: bilou_tag = f'B-{tag_to_convert}' else: bilou_tag = f'U-{tag_to_convert}' else: bilou_tag = tag biluo_tags.append(bilou_tag) return biluo_tags
{"/leventis/parse_page.py": ["/leventis/bhl/page.py", "/leventis/nlp.py"], "/leventis/nlp.py": ["/leventis/components/entity_matcher.py", "/leventis/components/sentenizer.py", "/leventis/components/expand_trait_entities.py", "/leventis/components/normalise_taxon_entities.py", "/leventis/components/abbreviated_names.py", "/leventis/helpers.py"], "/leventis/components/abbreviated_names.py": ["/leventis/helpers.py"], "/leventis/components/entity_matcher.py": ["/leventis/tags.py"], "/leventis/bhl/page.py": ["/leventis/preprocess.py"], "/leventis/features.py": ["/leventis/helpers.py"], "/leventis/cli.py": ["/leventis/parse_page.py"]}
74,021
NaturalHistoryMuseum/leventis
refs/heads/master
/leventis/parse_page.py
from leventis.bhl.page import BHLPage from leventis.nlp import NLP class ParsePage(object): nlp = NLP('bi-gram-bernoulli-naive-bayes-model.pkl') def __init__(self, page_id): self._data = {} bhl_page = BHLPage(page_id) self.doc = self.nlp(bhl_page.get_normalised_text()) self._parse_page() @property def taxa(self): return set(self._data.keys()) @property def traits(self): return {k: v for k, v in self._data.items() if v} def _parse_page(self): taxon_subject = None for sent in self.doc.sents: taxa_ents = set( [ent for ent in sent.ents if ent.label_ == 'TAXON']) trait_ents = { ent.string for ent in sent.ents if ent.label_ == 'TRAIT'} if taxa_ents: [self._data.setdefault(taxa_ent._.taxon_name, set()) for taxa_ent in taxa_ents] if len(taxa_ents) == 1: taxon_subject = taxa_ents.pop() else: # TODO: Could add traits in if they immediately precede/succede e.g. 15989542 taxon_subject = None if taxon_subject and trait_ents: if trait_ents: self._data[taxon_subject._.taxon_name] |= trait_ents
{"/leventis/parse_page.py": ["/leventis/bhl/page.py", "/leventis/nlp.py"], "/leventis/nlp.py": ["/leventis/components/entity_matcher.py", "/leventis/components/sentenizer.py", "/leventis/components/expand_trait_entities.py", "/leventis/components/normalise_taxon_entities.py", "/leventis/components/abbreviated_names.py", "/leventis/helpers.py"], "/leventis/components/abbreviated_names.py": ["/leventis/helpers.py"], "/leventis/components/entity_matcher.py": ["/leventis/tags.py"], "/leventis/bhl/page.py": ["/leventis/preprocess.py"], "/leventis/features.py": ["/leventis/helpers.py"], "/leventis/cli.py": ["/leventis/parse_page.py"]}
74,022
NaturalHistoryMuseum/leventis
refs/heads/master
/leventis/nlp.py
from pathlib import Path import joblib import spacy from spacy.pipeline import EntityRuler from leventis.components.entity_matcher import BiGramEntityMatcher from leventis.components.sentenizer import Sentenizer from leventis.components.expand_trait_entities import ExpandTraitEntities from leventis.components.normalise_taxon_entities import NormaliseTaxonEntities from leventis.components.abbreviated_names import AbbreviatedNames from leventis.helpers import nlp_add_or_replace_pipe class NLP(object): # Relative data path random data_path = Path(__file__).parent / 'data' model_path = data_path / 'models' def __init__(self, model_name): self.model = joblib.load(self.model_path / model_name) self.trait_patterns_file = self.data_path / 'trait_patterns.jsonl' self._nlp = self._build_nlp_pipeline() def _build_nlp_pipeline(self): nlp = spacy.load("en_core_sci_sm", disable=['ner']) nlp_add_or_replace_pipe( nlp, Sentenizer(), Sentenizer.name, before='parser' ) nlp_add_or_replace_pipe( nlp, BiGramEntityMatcher(self.model), BiGramEntityMatcher.name ) nlp_add_or_replace_pipe( nlp, NormaliseTaxonEntities(), NormaliseTaxonEntities.name, after=BiGramEntityMatcher.name ) nlp_add_or_replace_pipe( nlp, AbbreviatedNames(nlp), AbbreviatedNames.name, after=NormaliseTaxonEntities.name ) nlp_add_or_replace_pipe( nlp, EntityRuler(nlp).from_disk(self.trait_patterns_file), 'trait_ner', after=AbbreviatedNames.name ) nlp_add_or_replace_pipe( nlp, ExpandTraitEntities(), ExpandTraitEntities.name, last=True ) return nlp def __call__(self, text): return self._nlp(text)
{"/leventis/parse_page.py": ["/leventis/bhl/page.py", "/leventis/nlp.py"], "/leventis/nlp.py": ["/leventis/components/entity_matcher.py", "/leventis/components/sentenizer.py", "/leventis/components/expand_trait_entities.py", "/leventis/components/normalise_taxon_entities.py", "/leventis/components/abbreviated_names.py", "/leventis/helpers.py"], "/leventis/components/abbreviated_names.py": ["/leventis/helpers.py"], "/leventis/components/entity_matcher.py": ["/leventis/tags.py"], "/leventis/bhl/page.py": ["/leventis/preprocess.py"], "/leventis/features.py": ["/leventis/helpers.py"], "/leventis/cli.py": ["/leventis/parse_page.py"]}
74,023
NaturalHistoryMuseum/leventis
refs/heads/master
/leventis/helpers.py
import re re_number = re.compile(r'\d+') re_abbreviated_form = re.compile(r'\b[A-Z]\.') # def abbreviate_scientific_name(scientific_name): # # FIX THIS # name_parts = scientific_name.split() # if len(name_parts) > 1: # return '{}. {}'.format( # name_parts[0][:1], # ' '.join(name_parts[1:]) # ) def scientific_name_contains_number(scientific_name): return bool(re_number.search(scientific_name)) def is_abbreviated_form(word): return bool(re_abbreviated_form.search(word)) def nlp_add_or_replace_pipe(nlp, pipe, name, **kwargs): if name in nlp.pipe_names: nlp.replace_pipe(name, pipe) else: nlp.add_pipe(pipe, name, **kwargs)
{"/leventis/parse_page.py": ["/leventis/bhl/page.py", "/leventis/nlp.py"], "/leventis/nlp.py": ["/leventis/components/entity_matcher.py", "/leventis/components/sentenizer.py", "/leventis/components/expand_trait_entities.py", "/leventis/components/normalise_taxon_entities.py", "/leventis/components/abbreviated_names.py", "/leventis/helpers.py"], "/leventis/components/abbreviated_names.py": ["/leventis/helpers.py"], "/leventis/components/entity_matcher.py": ["/leventis/tags.py"], "/leventis/bhl/page.py": ["/leventis/preprocess.py"], "/leventis/features.py": ["/leventis/helpers.py"], "/leventis/cli.py": ["/leventis/parse_page.py"]}
74,024
NaturalHistoryMuseum/leventis
refs/heads/master
/leventis/components/abbreviated_names.py
from spacy.tokens import Span from leventis.helpers import is_abbreviated_form class AbbreviatedNames(object): name = "abbreviated_names" def __init__(self, nlp): Span.set_extension("taxon_name", default=None, force=True) Span.set_extension("taxon_is_abbreviated", default=False, force=True) def __call__(self, doc): for ent_index, ent in enumerate(doc.ents): if ent.label_ == "TAXON": # If this is abbreviated, rewind to a previous non-abbreviated taxon # To locate the family name if is_abbreviated_form(ent.string): ent._.set("taxon_is_abbreviated", True) # Loop back through previous entities, finding the first non-abbreviated # form with same first letter for i in range(ent_index - 1, -1, -1): prev_ent = doc.ents[i] if not is_abbreviated_form(prev_ent.string) and prev_ent.string[0] == ent.string[0]: # Create a new taxonomic name with the parts from the two taxa name_parts = [ prev_ent.string.split()[0], ent.string.split()[1] ] ent._.set("taxon_name", ' '.join(name_parts)) break # If we haven't found a non-abbreviated form, just use the full name if not ent._.get("taxon_name"): ent._.set("taxon_name", ent.string.strip()) else: ent._.set("taxon_name", ent.string.strip()) return doc
{"/leventis/parse_page.py": ["/leventis/bhl/page.py", "/leventis/nlp.py"], "/leventis/nlp.py": ["/leventis/components/entity_matcher.py", "/leventis/components/sentenizer.py", "/leventis/components/expand_trait_entities.py", "/leventis/components/normalise_taxon_entities.py", "/leventis/components/abbreviated_names.py", "/leventis/helpers.py"], "/leventis/components/abbreviated_names.py": ["/leventis/helpers.py"], "/leventis/components/entity_matcher.py": ["/leventis/tags.py"], "/leventis/bhl/page.py": ["/leventis/preprocess.py"], "/leventis/features.py": ["/leventis/helpers.py"], "/leventis/cli.py": ["/leventis/parse_page.py"]}
74,025
NaturalHistoryMuseum/leventis
refs/heads/master
/leventis/preprocess.py
import re new_line_regex = re.compile(r'\n|\r') extra_spaces_regex = re.compile(r'\s+') variety_regex = re.compile(r'(\s)[v|y]a[v|r][,|.]\s?', re.IGNORECASE) paragraph_hypenation_regex = re.compile(r'(\w)-[\s\n]+') def text_preprocessor(text): text = normalise_variety(text) text = replace_paragraph_hyphenation(text) text = remove_new_lines(text) text = remove_extra_spaces(text) return text def normalise_variety(text): # Correct common mispellings of variety (var.) # var, => var. # yar. => var. # yav, => var. return variety_regex.sub(r'\g<1>var. ', text) def replace_paragraph_hyphenation(text): return paragraph_hypenation_regex.sub(r'\g<1>', text) def remove_new_lines(text): return new_line_regex.sub(' ', text) def remove_extra_spaces(text): return extra_spaces_regex.sub(' ', text)
{"/leventis/parse_page.py": ["/leventis/bhl/page.py", "/leventis/nlp.py"], "/leventis/nlp.py": ["/leventis/components/entity_matcher.py", "/leventis/components/sentenizer.py", "/leventis/components/expand_trait_entities.py", "/leventis/components/normalise_taxon_entities.py", "/leventis/components/abbreviated_names.py", "/leventis/helpers.py"], "/leventis/components/abbreviated_names.py": ["/leventis/helpers.py"], "/leventis/components/entity_matcher.py": ["/leventis/tags.py"], "/leventis/bhl/page.py": ["/leventis/preprocess.py"], "/leventis/features.py": ["/leventis/helpers.py"], "/leventis/cli.py": ["/leventis/parse_page.py"]}
74,026
NaturalHistoryMuseum/leventis
refs/heads/master
/leventis/components/entity_matcher.py
from spacy.gold import spans_from_biluo_tags from leventis.tags import Tags, BiGramTags class EntityMatcher(object): name = 'entity_matcher' def __init__(self, model): self.model = model self.features = model._features() def __call__(self, doc): features = self.features.doc_to_features(doc) predicted_tags = self.model.predict(features) biluo_tags = self.predicted_to_bilou(predicted_tags) doc.ents = spans_from_biluo_tags(doc, biluo_tags) return doc def predicted_to_bilou(self, predicted_tags): tags = self.get_tags(predicted_tags) return tags.to_bilou() def get_tags(self, predicted_tags): return Tags(predicted_tags) class BiGramEntityMatcher(EntityMatcher): name = 'bigram_entity_matcher' def get_tags(self, predicted_tags): return BiGramTags(predicted_tags)
{"/leventis/parse_page.py": ["/leventis/bhl/page.py", "/leventis/nlp.py"], "/leventis/nlp.py": ["/leventis/components/entity_matcher.py", "/leventis/components/sentenizer.py", "/leventis/components/expand_trait_entities.py", "/leventis/components/normalise_taxon_entities.py", "/leventis/components/abbreviated_names.py", "/leventis/helpers.py"], "/leventis/components/abbreviated_names.py": ["/leventis/helpers.py"], "/leventis/components/entity_matcher.py": ["/leventis/tags.py"], "/leventis/bhl/page.py": ["/leventis/preprocess.py"], "/leventis/features.py": ["/leventis/helpers.py"], "/leventis/cli.py": ["/leventis/parse_page.py"]}
74,027
NaturalHistoryMuseum/leventis
refs/heads/master
/leventis/bhl/page.py
import requests import requests_cache from PIL import Image from leventis.preprocess import text_preprocessor requests_cache.install_cache('bhl.cache') class BHLPage(object): def __init__(self, page_id): self.page_id = page_id @property def image(self): return self.get_image() @property def text(self): return self.get_text() def get_image(self): image_url = f'https://www.biodiversitylibrary.org/pageimage/{self.page_id}' r = self._get_request(image_url) return Image.open(r.raw) def get_text(self): text_url = f'https://www.biodiversitylibrary.org/pagetext/{self.page_id}' r = self._get_request(text_url) return r.text def get_normalised_text(self): return text_preprocessor(self.text) @staticmethod def _get_request(url): r = requests.get(url, stream=True) r.raise_for_status() return r
{"/leventis/parse_page.py": ["/leventis/bhl/page.py", "/leventis/nlp.py"], "/leventis/nlp.py": ["/leventis/components/entity_matcher.py", "/leventis/components/sentenizer.py", "/leventis/components/expand_trait_entities.py", "/leventis/components/normalise_taxon_entities.py", "/leventis/components/abbreviated_names.py", "/leventis/helpers.py"], "/leventis/components/abbreviated_names.py": ["/leventis/helpers.py"], "/leventis/components/entity_matcher.py": ["/leventis/tags.py"], "/leventis/bhl/page.py": ["/leventis/preprocess.py"], "/leventis/features.py": ["/leventis/helpers.py"], "/leventis/cli.py": ["/leventis/parse_page.py"]}
74,028
NaturalHistoryMuseum/leventis
refs/heads/master
/leventis/components/sentenizer.py
import re # Custom sentenizer, which respects scientific names with var. and cf. class Sentenizer(object): name = 'sentenizer' var_form_regex = re.compile('^var|cf$') def __call__(self, doc): previous_word = doc[0].text length = len(doc) for index, token in enumerate(doc): if (token.text == '.' and self.var_form_regex.match(previous_word) and index != (length - 1)): doc[index+1].sent_start = False previous_word = token.text return doc
{"/leventis/parse_page.py": ["/leventis/bhl/page.py", "/leventis/nlp.py"], "/leventis/nlp.py": ["/leventis/components/entity_matcher.py", "/leventis/components/sentenizer.py", "/leventis/components/expand_trait_entities.py", "/leventis/components/normalise_taxon_entities.py", "/leventis/components/abbreviated_names.py", "/leventis/helpers.py"], "/leventis/components/abbreviated_names.py": ["/leventis/helpers.py"], "/leventis/components/entity_matcher.py": ["/leventis/tags.py"], "/leventis/bhl/page.py": ["/leventis/preprocess.py"], "/leventis/features.py": ["/leventis/helpers.py"], "/leventis/cli.py": ["/leventis/parse_page.py"]}
74,029
NaturalHistoryMuseum/leventis
refs/heads/master
/leventis/components/normalise_taxon_entities.py
from spacy.tokens import Span


class NormaliseTaxonEntities(object):
    """spaCy pipeline component that cleans up TAXON entity spans.

    Filters out false-positive taxon entities (blacklisted words, Dutch name
    particles, "et al.", lowercase latin descriptors) and extends genuine
    ones to cover trailing "sect."/"subsect." or "subsp."/"sp." markers.

    NOTE(review): only entities labelled TAXON are re-added to ``doc.ents``,
    so all non-TAXON entities are dropped by this component — confirm that
    is intended.
    """

    name = "normalise_taxon_entities"
    # Dutch surname particles that indicate a person, not a taxon.
    tussenvoegsels = {'van', 'de', 'den', 'der', 'te', 'ten', 'ter', 'el', 'don'}
    et_al = {'et', 'al'}
    # Section / subsection rank markers.
    sect = {'sect', 'subsect'}
    # Subspecies / species rank markers.
    subspecies = {'subsp', 'sp'}
    first_word_black_list = {'The', 'Its'}
    second_word_black_list = {'by', 'we', 'is', 'and', 'as', 'to', 'census'}

    def __call__(self, doc):
        ents = []
        for ent in doc.ents:
            if ent.label_ == "TAXON":
                word_parts = ent.string.split()
                # Require at least genus + epithet; shorter spans are dropped.
                if len(word_parts) < 2:
                    continue
                if self._is_blacklisted_first_word(word_parts[0]):
                    continue
                if self._is_blacklisted_second_word(word_parts[1]):
                    continue
                # I'm still getting some latin descriptive names marked
                # as taxon - even without the first letter capitalised - see 15995196
                if not self._is_capitalised_first_letter(word_parts[0]):
                    continue
                if self._second_word_is_single_letter(word_parts[1]):
                    continue
                # If this is a tussenvoegsel, continue to the next ent
                # skip adding it to the doc.ents
                if self._is_tussenvoegsel(word_parts[1]) or self._is_et_al(word_parts[1]):
                    continue
                if self._is_sect(word_parts[1]):
                    # Look at the two tokens following the entity; IndexError
                    # means the entity sits at the end of the doc.
                    try:
                        first_sibling = doc[ent.start + 2]
                        second_sibling = doc[ent.start + 3]
                    except IndexError:
                        pass
                    else:
                        # Extend "Genus sect" over the trailing "." plus the
                        # section name, provided neither token is already an entity.
                        if not any([first_sibling.ent_type_, second_sibling.ent_type_]) and first_sibling.text == '.':
                            new_ent = Span(
                                doc, ent.start, ent.end + 2, label=ent.label)
                            ents.append(new_ent)
                            continue
                try:
                    first_sibling = doc[ent.start + 2]
                    second_sibling = doc[ent.start + 3]
                except IndexError:
                    pass
                else:
                    # Extend over "subsp."/"sp." plus its full stop, e.g.
                    # "Genus epithet subsp. name".
                    if not any([first_sibling.ent_type_, second_sibling.ent_type_]) and self._is_subspecies(first_sibling.string) and second_sibling.text == '.':
                        new_ent = Span(
                            doc, ent.start, ent.end + 3, label=ent.label
                        )
                        ents.append(new_ent)
                        continue
                ents.append(ent)
        doc.ents = ents
        return doc

    def _is_tussenvoegsel(self, word):
        return word in self.tussenvoegsels

    def _is_et_al(self, word):
        return word in self.et_al

    def _is_sect(self, word):
        return word in self.sect

    def _is_subspecies(self, word):
        return word in self.subspecies

    def _is_blacklisted_first_word(self, word):
        return word in self.first_word_black_list

    def _is_blacklisted_second_word(self, word):
        return word in self.second_word_black_list

    def _is_capitalised_first_letter(self, word):
        # Assumes word is non-empty (guaranteed by str.split()).
        return word[0].isupper()

    def _second_word_is_single_letter(self, word):
        return len(word) <= 1
{"/leventis/parse_page.py": ["/leventis/bhl/page.py", "/leventis/nlp.py"], "/leventis/nlp.py": ["/leventis/components/entity_matcher.py", "/leventis/components/sentenizer.py", "/leventis/components/expand_trait_entities.py", "/leventis/components/normalise_taxon_entities.py", "/leventis/components/abbreviated_names.py", "/leventis/helpers.py"], "/leventis/components/abbreviated_names.py": ["/leventis/helpers.py"], "/leventis/components/entity_matcher.py": ["/leventis/tags.py"], "/leventis/bhl/page.py": ["/leventis/preprocess.py"], "/leventis/features.py": ["/leventis/helpers.py"], "/leventis/cli.py": ["/leventis/parse_page.py"]}
74,030
NaturalHistoryMuseum/leventis
refs/heads/master
/setup.py
import io
import os
import re

from setuptools import find_packages
from setuptools import setup
from glob import glob

# Read README explicitly as UTF-8: relying on the locale default encoding
# breaks installation on platforms where it is not UTF-8 (e.g. Windows cp1252).
with open('README.rst', encoding='utf-8') as readme_file:
    readme = readme_file.read()

requirements = [
    'requests',
    'requests-cache',
    'scikit-learn==0.21.2',
    'click',
    'pandas',
    'Pillow',
    'scispacy==0.2.2',
    'joblib',
    'nltk'
]

setup_requirements = []
test_requirements = []

setup(
    name="leventis",
    version="0.1.0",
    url="https://github.com/NaturalHistoryMuseum/leventis",
    license='MIT',
    author="Ben Scott",
    author_email="ben@benscott.co.uk",
    classifiers=[
    ],
    description="Leventis NLP",
    long_description=readme,
    packages=find_packages(exclude=('tests',)),
    # Ship the bundled data files and trained models with the package.
    package_data={'leventis': ['data/*', 'data/models/*']},
    install_requires=requirements,
    setup_requires=setup_requirements,
    test_suite='tests',
    tests_require=test_requirements,
    entry_points={
        'console_scripts': [
            'leventis=leventis.cli:main',
        ],
    },
)
{"/leventis/parse_page.py": ["/leventis/bhl/page.py", "/leventis/nlp.py"], "/leventis/nlp.py": ["/leventis/components/entity_matcher.py", "/leventis/components/sentenizer.py", "/leventis/components/expand_trait_entities.py", "/leventis/components/normalise_taxon_entities.py", "/leventis/components/abbreviated_names.py", "/leventis/helpers.py"], "/leventis/components/abbreviated_names.py": ["/leventis/helpers.py"], "/leventis/components/entity_matcher.py": ["/leventis/tags.py"], "/leventis/bhl/page.py": ["/leventis/preprocess.py"], "/leventis/features.py": ["/leventis/helpers.py"], "/leventis/cli.py": ["/leventis/parse_page.py"]}
74,031
NaturalHistoryMuseum/leventis
refs/heads/master
/leventis/__init__.py
"""leventis - Leventis OCR"""

# Package metadata; bump __version__ on release.
__version__ = '0.1.0'
__author__ = 'Ben Scott <ben@benscott.co.uk>'

# No public names are re-exported from the package root.
__all__ = []
{"/leventis/parse_page.py": ["/leventis/bhl/page.py", "/leventis/nlp.py"], "/leventis/nlp.py": ["/leventis/components/entity_matcher.py", "/leventis/components/sentenizer.py", "/leventis/components/expand_trait_entities.py", "/leventis/components/normalise_taxon_entities.py", "/leventis/components/abbreviated_names.py", "/leventis/helpers.py"], "/leventis/components/abbreviated_names.py": ["/leventis/helpers.py"], "/leventis/components/entity_matcher.py": ["/leventis/tags.py"], "/leventis/bhl/page.py": ["/leventis/preprocess.py"], "/leventis/features.py": ["/leventis/helpers.py"], "/leventis/cli.py": ["/leventis/parse_page.py"]}
74,032
NaturalHistoryMuseum/leventis
refs/heads/master
/leventis/features.py
import nltk
from abc import ABC, abstractmethod
from nltk.corpus import wordnet

from leventis.helpers import is_abbreviated_form


class Features(ABC):
    """Template for turning a spaCy doc into a sequence of feature dicts."""

    def doc_to_features(self, doc):
        """Tokenise *doc*, then featurise every resulting token."""
        return self.to_features(self.tokenise_doc(doc))

    def to_features(self, tokens):
        # Default mapping: one feature dict per token position.
        return [self.extract_features(tokens, position) for position in range(len(tokens))]

    @abstractmethod
    def tokenise_doc(self, doc):
        """Reduce *doc* to the token representation the features need."""

    @staticmethod
    @abstractmethod
    def extract_features(tokens, index):
        """Build the feature dict for the token at *index*."""


class WordFeatures(Features):
    """Single-word orthographic and dictionary-membership features."""

    name = 'WordFeatures'

    def tokenise_doc(self, doc):
        # Only the raw token text is needed for these features.
        return [token.text for token in doc]

    @staticmethod
    def extract_features(tokens, index):
        word = tokens[index]
        return {
            'bias': 1.0,
            # 1 if the word is in WordNet (i.e. a known English word).
            'spelling': 1 if wordnet.synsets(word) else 0,
            'word[-4:]': word[-4:],
            'word[-3:]': word[-3:],
            'word[-2:]': word[-2:],
            'is_abbreviated': is_abbreviated_form(word),
            'capitalised_first_letter': word[0].isupper(),
        }


class BiGramFeatures(Features):
    """Features computed over adjacent word pairs."""

    name = 'BiGramFeatures'

    def tokenise_doc(self, doc):
        return list(nltk.bigrams([token.text for token in doc]))

    def to_features(self, bigrams):
        # One feature dict per bigram, not per token position.
        return [self.extract_features(pair) for pair in bigrams]

    @staticmethod
    def extract_features(bigram):
        first, second = bigram
        features = {
            'word-0_upper_first_char': first[0].isupper(),
            'word-1_lower': second.islower(),
            # Treat hyphenated words as alphabetic.
            'word-1_alpha': second.replace('-', '').isalpha(),
            'word-1_lower_first_char': second[0].islower(),
        }
        if is_abbreviated_form(first):
            features['word-0-abbreviated'] = True
        else:
            features['word-0-alpha'] = first.isalpha()
            features['word-0-title'] = len(first) > 2 and first.istitle()
        for index, word in enumerate(bigram):
            features[f'word-{index}'] = word
            features[f'word-{index}_spelling'] = 1 if wordnet.synsets(word) else 0
            # Suffixes of length 3 and 4, only when the word is long enough.
            for size in range(3, 5):
                if len(word) >= size:
                    features[f'word-{index}_suffix-{size}'] = word[-size:]
        return features
{"/leventis/parse_page.py": ["/leventis/bhl/page.py", "/leventis/nlp.py"], "/leventis/nlp.py": ["/leventis/components/entity_matcher.py", "/leventis/components/sentenizer.py", "/leventis/components/expand_trait_entities.py", "/leventis/components/normalise_taxon_entities.py", "/leventis/components/abbreviated_names.py", "/leventis/helpers.py"], "/leventis/components/abbreviated_names.py": ["/leventis/helpers.py"], "/leventis/components/entity_matcher.py": ["/leventis/tags.py"], "/leventis/bhl/page.py": ["/leventis/preprocess.py"], "/leventis/features.py": ["/leventis/helpers.py"], "/leventis/cli.py": ["/leventis/parse_page.py"]}
74,033
NaturalHistoryMuseum/leventis
refs/heads/master
/leventis/cli.py
import click
import logging
import pandas as pd
import mysql.connector as sql
import time
from pathlib import Path
from tqdm import tqdm

from leventis.parse_page import ParsePage

output_path = Path('../output')

logger = logging.getLogger()
logging.basicConfig(filename=output_path / 'trait.log',
                    filemode='w',
                    format='%(levelname)s - %(message)s')


@click.command()
@click.option('--limit', default=None, help='Limit', type=int)
@click.option('--taxon', default=None, help='Filter by taxon')
@click.option('--page', default=None, help='Page ID', type=int)
def main(limit, taxon, page):
    """Extract trait mentions for cited taxa from BHL pages.

    Queries the ``pup`` database for (page, trait, taxon) citations, parses
    each page once, collects the traits found per taxon, and writes the
    results to ``traits.csv``. Missing taxa/traits are logged, not fatal.
    """
    results = {}
    db_connection = sql.connect(database='pup', user='root')
    query = '''
        SELECT bc.page_id, pt.trait_term, pn.pup_name AS taxon
        FROM bhl_citations bc
        INNER JOIN pup_names_sp pn ON pn.pup_name_id = bc.pup_id
        INNER JOIN trait_page tp ON tp.page_id = bc.page_id
        INNER JOIN x_pup_traits pt ON tp.trait_id=pt.trait_id
        WHERE pn.pup_higher_group = 'Dicot'
        AND bc.item_language = 'English';
    '''
    try:
        df = pd.read_sql(query, con=db_connection)
    finally:
        # The connection is only needed for the one query; don't leak it
        # for the (long) duration of the page parsing below.
        db_connection.close()

    if taxon:
        df = df[df['taxon'] == taxon]

    # Collapse to one row per page, with the set of taxa cited on it.
    df = df.groupby('page_id')['taxon'].agg(set).reset_index()

    if limit:
        df = df[:limit]

    if page:
        df = df[df['page_id'] == page]

    with tqdm(list(df.itertuples(index=False)), leave=True) as bar:
        for page_id, taxa in bar:
            bar.set_description(f"Page {page_id}")
            # Parse the page once; reuse for every taxon cited on it.
            parsed_page = ParsePage(page_id)
            for taxon in taxa:
                results.setdefault(taxon, {})
                try:
                    results[taxon][page_id] = parsed_page.traits[taxon]
                except KeyError:
                    # Use the module logger (root) consistently rather than
                    # the logging module functions.
                    if taxon not in parsed_page.taxa:
                        logger.warning(
                            f"Taxon {taxon} not found in {page_id}"
                        )
                    else:
                        logger.warning(
                            f"No traits found for {taxon} in {page_id}"
                        )

    # dict.items() is the idiomatic spelling of zip(keys(), values()).
    results_df = pd.DataFrame(
        list(results.items()),
        columns=['Taxon', 'Traits']
    )
    results_df.to_csv(output_path / 'traits.csv')
    click.echo(click.style('Trait extraction complete', fg='green'))
    click.echo(results_df.head(n=20))


if __name__ == '__main__':
    main()
{"/leventis/parse_page.py": ["/leventis/bhl/page.py", "/leventis/nlp.py"], "/leventis/nlp.py": ["/leventis/components/entity_matcher.py", "/leventis/components/sentenizer.py", "/leventis/components/expand_trait_entities.py", "/leventis/components/normalise_taxon_entities.py", "/leventis/components/abbreviated_names.py", "/leventis/helpers.py"], "/leventis/components/abbreviated_names.py": ["/leventis/helpers.py"], "/leventis/components/entity_matcher.py": ["/leventis/tags.py"], "/leventis/bhl/page.py": ["/leventis/preprocess.py"], "/leventis/features.py": ["/leventis/helpers.py"], "/leventis/cli.py": ["/leventis/parse_page.py"]}
74,041
leonardo-modules/leonardo-system
refs/heads/master
/leonardo_system/pip/__init__.py
from .versions import check_versions
{"/leonardo_system/pip/__init__.py": ["/leonardo_system/pip/versions.py"], "/leonardo_system/package/views.py": ["/leonardo_system/package/forms.py"], "/leonardo_system/__init__.py": ["/leonardo_system/package/patch_pip.py"], "/leonardo_system/management/commands/system_check.py": ["/leonardo_system/pip/__init__.py"], "/leonardo_system/package/urls.py": ["/leonardo_system/package/views.py"], "/leonardo_system/package/forms.py": ["/leonardo_system/package/utils.py"], "/leonardo_system/maintenance/urls.py": ["/leonardo_system/maintenance/views.py"], "/leonardo_system/maintenance/forms.py": ["/leonardo_system/maintenance/tables.py"], "/leonardo_system/maintenance/views.py": ["/leonardo_system/maintenance/forms.py"]}