diff --git "a/PythonDataset/test/monet-task-instances.jsonl.all" "b/PythonDataset/test/monet-task-instances.jsonl.all" new file mode 100644--- /dev/null +++ "b/PythonDataset/test/monet-task-instances.jsonl.all" @@ -0,0 +1 @@ +{"repo": "noaa-oar-arl/monet", "pull_number": 22, "instance_id": "noaa-oar-arl__monet-22", "issue_numbers": "", "base_commit": "122196d3691f4ee40491739adae1f5a4ba764b62", "patch": "diff --git a/docs/conf.py b/docs/conf.py\nnew file mode 100644\n--- /dev/null\n+++ b/docs/conf.py\n@@ -0,0 +1,178 @@\n+# -*- coding: utf-8 -*-\n+#\n+# Configuration file for the Sphinx documentation builder.\n+#\n+# This file does only contain a selection of the most common options. For a\n+# full list see the documentation:\n+# http://www.sphinx-doc.org/en/master/config\n+\n+# -- Path setup --------------------------------------------------------------\n+\n+# If extensions (or modules to document with autodoc) are in another directory,\n+# add these directories to sys.path here. If the directory is relative to the\n+# documentation root, use os.path.abspath to make it absolute, like shown here.\n+#\n+import os\n+import sys\n+sys.path.insert(0, os.path.abspath('../'))\n+\n+# -- Project information -----------------------------------------------------\n+\n+project = u'MONET'\n+copyright = u'2018, Barry Baker'\n+author = u'Barry Baker'\n+\n+# The short X.Y version\n+version = u''\n+# The full version, including alpha/beta/rc tags\n+release = u''\n+\n+# -- General configuration ---------------------------------------------------\n+\n+# If your documentation needs a minimal Sphinx version, state it here.\n+#\n+# needs_sphinx = '1.0'\n+\n+# Add any Sphinx extension module names here, as strings. They can be\n+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n+# ones.\n+extensions = [\n+ 'sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.napoleon',\n+ 'sphinx.ext.extlinks'\n+]\n+#exclude_patterns = ['_build', '**.ipynb_checkpoints']\n+\n+extlinks = {\n+ 'issue': ('https://github.com/noaa-oar-arl/MONET/issues/%s', 'GH'),\n+ 'pull': ('https://github.com/noaa-oar-arl/MONET/pull/%s', 'PR'),\n+}\n+\n+autosummary_generate = True\n+numpydoc_class_members_toctree = True\n+napoleon_google_docstring = False\n+napoleon_use_param = False\n+napoleon_use_ivar = True\n+\n+# Add any paths that contain templates here, relative to this directory.\n+templates_path = ['_templates']\n+\n+# The suffix(es) of source filenames.\n+# You can specify multiple suffix as a list of string:\n+#\n+# source_suffix = ['.rst', '.md']\n+source_suffix = '.rst'\n+\n+# The master toctree document.\n+master_doc = 'index'\n+\n+# The language for content autogenerated by Sphinx. Refer to documentation\n+# for a list of supported languages.\n+#\n+# This is also used if you do content translation via gettext catalogs.\n+# Usually you set \"language\" from the command line for these cases.\n+language = None\n+\n+# List of patterns, relative to source directory, that match files and\n+# directories to ignore when looking for source files.\n+# This pattern also affects html_static_path and html_extra_path .\n+exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']\n+\n+# The name of the Pygments (syntax highlighting) style to use.\n+pygments_style = 'sphinx'\n+\n+# -- Options for HTML output -------------------------------------------------\n+\n+# The theme to use for HTML and HTML Help pages. 
See the documentation for\n+# a list of builtin themes.\n+#\n+html_theme = 'sphinx_rtd_theme'\n+\n+# Theme options are theme-specific and customize the look and feel of a theme\n+# further. For a list of options available for each theme, see the\n+# documentation.\n+#\n+# html_theme_options = {}\n+\n+# Add any paths that contain custom static files (such as style sheets) here,\n+# relative to this directory. They are copied after the builtin static files,\n+# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n+html_static_path = ['_static']\n+\n+# Custom sidebar templates, must be a dictionary that maps document names\n+# to template names.\n+#\n+# The default sidebars (for documents that don't match any pattern) are\n+# defined by theme itself. Builtin themes are using these templates by\n+# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n+# 'searchbox.html']``.\n+#\n+# html_sidebars = {}\n+\n+# -- Options for HTMLHelp output ---------------------------------------------\n+\n+# Output file base name for HTML help builder.\n+htmlhelp_basename = 'MONETdoc'\n+\n+html_theme_options = {\n+ 'logo_only': True,\n+}\n+\n+# Add any paths that contain custom themes here, relative to this directory.\n+#html_theme_path = []\n+\n+# The name for this set of Sphinx documents. If None, it defaults to\n+# \" v documentation\".\n+#html_title = None\n+\n+# A shorter title for the navigation bar. Default is the same as html_title.\n+#html_short_title = None\n+\n+# The name of an image file (relative to this directory) to place at the top\n+# of the sidebar.\n+html_logo = \"_static/noaa.png\"\n+\n+# -- Options for LaTeX output ------------------------------------------------\n+\n+latex_elements = {\n+ # The paper size ('letterpaper' or 'a4paper').\n+ #\n+ # 'papersize': 'letterpaper',\n+\n+ # The font size ('10pt', '11pt' or '12pt').\n+ #\n+ # 'pointsize': '10pt',\n+\n+ # Additional stuff for the LaTeX preamble.\n+ #\n+ # 'preamble': '',\n+\n+ # Latex figure (float) alignment\n+ #\n+ # 'figure_align': 'htbp',\n+}\n+\n+# Grouping the document tree into LaTeX files. List of tuples\n+# (source start file, target name, title,\n+# author, documentclass [howto, manual, or own class]).\n+latex_documents = [\n+ (master_doc, 'MONET.tex', u'MONET Documentation', u'Barry Baker',\n+ 'manual'),\n+]\n+\n+# -- Options for manual page output ------------------------------------------\n+\n+# One entry per manual page. List of tuples\n+# (source start file, name, description, authors, manual section).\n+man_pages = [(master_doc, 'monet', u'MONET Documentation', [author], 1)]\n+\n+# -- Options for Texinfo output ----------------------------------------------\n+\n+# Grouping the document tree into Texinfo files. List of tuples\n+# (source start file, target name, title, author,\n+# dir menu entry, description, category)\n+texinfo_documents = [\n+ (master_doc, 'MONET', u'MONET Documentation', author, 'MONET',\n+ 'One line description of project.', 'Miscellaneous'),\n+]\n+\n+# -- Extension configuration -------------------------------------------------\ndiff --git a/monet/__init__.py b/monet/__init__.py\n--- a/monet/__init__.py\n+++ b/monet/__init__.py\n@@ -1,9 +1,6 @@\n from __future__ import absolute_import, print_function\n-\n from . import models, obs, plots, util, verification\n-from .monet import MONET\n-\n-# from monet.models import camx, cmaq\n+from . 
import monet_accessor\n \n # from .monetmodels, obs, plots, util\n-__all__ = ['models', 'obs', 'plots', 'verification', 'util']\n+__all__ = ['models', 'obs', 'plots', 'verification', 'util', 'monet_accessor']\ndiff --git a/monet/grids.py b/monet/grids.py\nnew file mode 100644\n--- /dev/null\n+++ b/monet/grids.py\n@@ -0,0 +1,210 @@\n+\"\"\" This is a module that will derive the proj4 string and\n+ pyresample.geometry.AreaDefinition for any gridded dataset (satellite,\n+ models, etc....)\n+\n+ \"\"\"\n+import os\n+path = os.path.abspath(__file__)\n+\n+\n+def _geos_16_grid(dset):\n+ from pyresample import geometry\n+ from numpy import asarray\n+ projection = dset.goes_imager_projection\n+ h = projection.perspective_point_height\n+ a = projection.semi_major_axis\n+ b = projection.semi_minor_axis\n+ lon_0 = projection.longitude_of_projection_origin\n+ sweep = projection.sweep_angle_axis\n+ x = dset.x * h\n+ y = dset.y * h\n+ x_ll = x[0] # lower left corner\n+ x_ur = x[-1] # upper right corner\n+ y_ll = y[0] # lower left corner\n+ y_ur = y[-1] # upper right corner\n+ x_h = (x_ur - x_ll) / (len(x) - 1.) / 2. # 1/2 grid size\n+ y_h = (y_ur - y_ll) / (len(y) - 1.) / 2. # 1/2 grid size\n+ area_extent = (x_ll - x_h, y_ll - y_h, x_ur + x_h, y_ur + y_h)\n+\n+ proj_dict = {\n+ 'a': float(a),\n+ 'b': float(b),\n+ 'lon_0': float(lon_0),\n+ 'h': float(h),\n+ 'proj': 'geos',\n+ 'units': 'm',\n+ 'sweep': sweep\n+ }\n+\n+ area = geometry.AreaDefinition('GEOS_ABI', 'ABI', 'GOES_ABI', proj_dict,\n+ len(x), len(y), asarray(area_extent))\n+ return area\n+\n+\n+def _get_sinu_grid_df():\n+ \"\"\"This function reads the MODIS sinusoidal grid tile boundary table\n+ bundled with MONET (data/sn_bound_10deg.txt).\n+ output:\n+ pandas dataframe of tiles with an added 'ihiv' column ('h##v##')\n+ \"\"\"\n+ from pandas import read_csv\n+ f = path[:-8] + 'data/sn_bound_10deg.txt'\n+ td = read_csv(f, skiprows=4, delim_whitespace=True)\n+ td = td.assign(ihiv='h' + td.ih.astype(str).str.zfill(2) + 'v' +\n+ td.iv.astype(str).str.zfill(2))\n+ return td\n+\n+\n+def _sinu_grid_latlon_boundary(h, v):\n+ td = _get_sinu_grid_df()\n+ o = td.loc[(td.ih == int(h)) & (td.iv == int(v))]\n+ latmin = o.lat_min.iloc[0]\n+ lonmin = o.lon_min.iloc[0]\n+ latmax = o.lat_max.iloc[0]\n+ lonmax = o.lon_max.iloc[0]\n+ return lonmin, latmin, lonmax, latmax\n+\n+\n+def _get_sinu_xy(lon, lat):\n+ from pyproj import Proj\n+ sinu = Proj(\n+ '+proj=sinu +lon_0=0 +x_0=0 +y_0=0 +a=6371007.181 +b=6371007.181 +units=m'\n+ )\n+ return sinu(lon, lat)\n+\n+\n+def _get_sinu_latlon(x, y):\n+ from numpy import meshgrid\n+ from pyproj import Proj\n+ xv, yv = meshgrid(x, y)\n+ sinu = Proj(\n+ '+proj=sinu +lon_0=0 +x_0=0 +y_0=0 +a=6371007.181 +b=6371007.181 +units=m'\n+ )\n+ return sinu(xv, yv, inverse=True)\n+\n+\n+def get_sinu_area_extent(lonmin, latmin, lonmax, latmax):\n+ xmin, ymin = _get_sinu_xy(lonmin, latmin)\n+ xmax, ymax = _get_sinu_xy(lonmax, latmax)\n+ return (xmin, ymin, xmax, ymax)\n+\n+\n+def get_modis_latlon_from_swath_hv(h, v, dset):\n+ from numpy import linspace\n+ lonmin, latmin, lonmax, latmax = _sinu_grid_latlon_boundary(h, v)\n+ xmin, ymin = _get_sinu_xy(lonmin, latmin)\n+ xmax, ymax = _get_sinu_xy(lonmax, latmax)\n+ x = linspace(xmin, xmax, len(dset.x))\n+ y = linspace(ymin, ymax, len(dset.y))\n+ lon, lat = _get_sinu_latlon(x, y)\n+ dset.coords['longitude'] = (('x', 'y'), lon)\n+ dset.coords['latitude'] = (('x', 'y'), lat)\n+ dset.attrs['area_extent'] = (x.min(), y.min(), x.max(), y.max())\n+ dset.attrs[\n+ 'proj4_srs'] = '+proj=sinu +lon_0=0 +x_0=0 +y_0=0 +a=6371007.181 ' \\\n+ '+b=6371007.181 +units=m'\n+ return dset\n+\n+\n
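+# Usage sketch (illustrative only -- the file name and tile numbers are\n+# hypothetical): for a MODIS sinusoidal tile dataset with 'x' and 'y'\n+# dimensions, the helpers above chain together roughly as:\n+#\n+# import xarray as xr\n+# ds = xr.open_dataset('MOD04_L2.h18v04.nc')\n+# ds = get_modis_latlon_from_swath_hv(18, 4, ds)\n+# area = get_sinu_area_def(ds) # pyresample AreaDefinition for resampling\n+\n+\n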
+def get_sinu_area_def(dset):\n+ from pyresample import utils\n+ from pyproj import Proj\n+ p = Proj(dset.attrs['proj4_srs'])\n+ proj4_args = p.srs\n+ area_name = 'MODIS Grid Def'\n+ area_id = 'modis'\n+ proj_id = area_id\n+ area_extent = dset.attrs['area_extent']\n+ nx, ny = dset.longitude.shape\n+ return utils.get_area_def(area_id, area_name, proj_id, proj4_args, nx, ny,\n+ area_extent)\n+\n+\n+def get_ioapi_pyresample_area_def(ds, proj4_srs):\n+ from pyresample import geometry, utils\n+ y_size = ds.NROWS\n+ x_size = ds.NCOLS\n+ projection = utils.proj4_str_to_dict(proj4_srs)\n+ proj_id = 'IOAPI_Dataset'\n+ description = 'IOAPI area_def for pyresample'\n+ area_id = 'MONET_Object_Grid'\n+ x_ll, y_ll = ds.XORIG + ds.XCELL * .5, ds.YORIG + ds.YCELL * .5\n+ x_ur, y_ur = ds.XORIG + (ds.NCOLS * ds.XCELL) + .5 * ds.XCELL, ds.YORIG + (\n+ ds.YCELL * ds.NROWS) + .5 * ds.YCELL\n+ area_extent = (x_ll, y_ll, x_ur, y_ur)\n+ area_def = geometry.AreaDefinition(area_id, description, proj_id,\n+ projection, x_size, y_size, area_extent)\n+ return area_def\n+\n+\n+def _ioapi_grid_from_dataset(ds, earth_radius=6370000):\n+ \"\"\"Get the IOAPI projection out of the file into proj4.\"\"\"\n+\n+ pargs = dict()\n+ pargs['lat_1'] = ds.P_ALP\n+ pargs['lat_2'] = ds.P_BET\n+ pargs['lat_0'] = ds.YCENT\n+ pargs['lon_0'] = ds.P_GAM\n+ pargs['center_lon'] = ds.XCENT\n+ pargs['x0'] = ds.XORIG\n+ pargs['y0'] = ds.YORIG\n+ pargs['r'] = earth_radius\n+ proj_id = ds.GDTYP\n+ if proj_id == 2:\n+ # Lambert\n+ p4 = '+proj=lcc +lat_1={lat_1} +lat_2={lat_2} ' \\\n+ '+lat_0={lat_0} +lon_0={lon_0} ' \\\n+ '+x_0=0 +y_0=0 +datum=WGS84 +units=m +a={r} +b={r}'\n+ p4 = p4.format(**pargs)\n+ elif proj_id == 4:\n+ # Polar stereo\n+ p4 = '+proj=stere +lat_ts={lat_1} +lon_0={lon_0} +lat_0=90.0 ' \\\n+ '+x_0=0 +y_0=0 +a={r} +b={r}'\n+ p4 = p4.format(**pargs)\n+ elif proj_id == 3:\n+ # Mercator\n+ p4 = '+proj=merc +lat_ts={lat_1} ' \\\n+ '+lon_0={center_lon} ' \\\n+ '+x_0={x0} +y_0={y0} +a={r} +b={r}'\n+ p4 = p4.format(**pargs)\n+ else:\n+ raise NotImplementedError('IOAPI proj not implemented yet: '\n+ '{}'.format(proj_id))\n+ #area_def = _get_ioapi_pyresample_area_def(ds)\n+ return p4 # , area_def\n+\n+\n+def _hysplit_latlon_grid_from_dataset(ds):\n+ pargs = dict()\n+ pargs['lat_0'] = ds.latitude.mean()\n+ pargs['lon_0'] = ds.longitude.mean()\n+\n+ p4 = '+proj=eqc +lat_ts={lat_0} +lat_0={lat_0} +lon_0={lon_0} ' \\\n+ '+ellps=WGS84 +datum=WGS84 +units=m +no_defs'.format(**pargs)\n+ return p4\n+\n+\n+def get_hysplit_latlon_pyresample_area_def(ds, proj4_srs):\n+ from pyresample import geometry\n+ return geometry.SwathDefinition(\n+ lons=ds.longitude.values, lats=ds.latitude.values)\n+\n+\n+def grid_from_dataset(ds, earth_radius=6370000):\n+ \"\"\"Find out if the dataset contains enough information to derive its grid.\n+\n+ ``ds`` can be an xarray dataset\n+\n+ Returns a proj4 string if successful, ``None`` otherwise\n+ \"\"\"\n+ # maybe it's an IOAPI file\n+ if hasattr(ds, 'IOAPI_VERSION') or hasattr(ds, 'P_ALP'):\n+ # IOAPI_VERSION\n+ return _ioapi_grid_from_dataset(ds, earth_radius=earth_radius)\n+\n+ # Try out Plate Carree\n+\n+ # return _lonlat_grid_from_dataset(ds)\ndiff --git a/monet/models/__init__.py b/monet/models/__init__.py\n--- a/monet/models/__init__.py\n+++ b/monet/models/__init__.py\n@@ -1,9 +1,5 @@\n-from __future__ import absolute_import, print_function\n+from . import cmaq, hysplit, camx\n \n-from . 
import camx, cmaq\n+__all__ = ['cmaq', 'hysplit', 'camx']\n \n-__all__ = ['camx', 'cmaq']\n-\n-__name__ = 'models'\n-\n-#\n+__name__ = 'models'\n\\ No newline at end of file\ndiff --git a/monet/models/camx.py b/monet/models/camx.py\n--- a/monet/models/camx.py\n+++ b/monet/models/camx.py\n@@ -1,179 +1,311 @@\n-from __future__ import division, print_function\n+\"\"\" CAMx File Reader \"\"\"\n+from numpy import array, concatenate\n+from pandas import Series, to_datetime\n+import xarray as xr\n+from ..grids import grid_from_dataset, get_ioapi_pyresample_area_def\n \n-from builtins import object, zip\n \n-import pandas as pd\n-import xarray as xr\n-from dask.diagnostics import ProgressBar\n-from numpy import array\n-from past.utils import old_div\n-\n-# This file is to deal with CAMx code - try to make it general for CAMx 4.7.1 --> 5.1\n-\n-\n-ProgressBar().register()\n-\n-\n-class CAMx(object):\n- def __init__(self):\n- self.objtype = 'CAMX'\n- self.coarse = array(\n- ['NA', 'PSO4', 'PNO3', 'PNH4', 'PH2O', 'PCL', 'PEC', 'FPRM', 'FCRS', 'CPRM', 'CCRS', 'SOA1', 'SOA2', 'SOA3',\n- 'SOA4'])\n- self.fine = array(\n- ['NA', 'PSO4', 'PNO3', 'PNH4', 'PH2O', 'PCL', 'PEC', 'FPRM', 'FCRS', 'SOA1', 'SOA2', 'SOA3',\n- 'SOA4'])\n- self.noy_gas = array(\n- ['NO', 'NO2', 'NO3', 'N2O5', 'HONO', 'HNO3', 'PAN', 'PANX', 'PNA', 'NTR', 'CRON', 'CRN2', 'CRNO',\n- 'CRPX', 'OPAN'])\n- self.poc = array(['SOA1', 'SOA2', 'SOA3', 'SOA4'])\n- self.dset = None\n- self.grid = None # gridcro2d obj\n- self.fname = None\n- self.metcrofnames = None\n- self.aerofnames = None\n- self.dates = None\n- self.keys = None\n- self.indexdates = None\n- self.metindex = None\n- self.latitude = None\n- self.longitude = None\n- self.map = None\n-\n- def get_dates(self):\n- print('Reading CAMx dates...')\n- print(self.dset)\n- tflag1 = array(self.dset['TFLAG'][:, 0], dtype='|S7')\n- tflag2 = array(old_div(self.dset['TFLAG'][:, 1], 10000), dtype='|S6')\n- date = pd.to_datetime([i + j.zfill(2) for i, j in zip(tflag1, tflag2)], format='%Y%j%H')\n- indexdates = pd.Series(date).drop_duplicates(keep='last').index.values\n- self.dset = self.dset.isel(time=indexdates)\n- self.dset['time'] = date[indexdates]\n-\n- def open_camx(self, file):\n- from glob import glob\n- from numpy import sort\n- dropset = ['layer', 'longitude_bounds', 'latitude_bounds',\n- 'x', 'y', 'level', 'lambert_conformal_conic']\n- nameset = {'COL': 'x', 'ROW': 'y', 'TSTEP': 'time', 'LAY': 'z'}\n- if type(file) == str:\n- fname = sort(array(glob(file)))\n- else:\n- fname = sort(array(file))\n- if fname.shape[0] >= 1:\n- if self.dset is None:\n- self.dset = xr.open_mfdataset(\n- fname.tolist(), concat_dim='TSTEP', engine='pnc').drop(dropset).rename(nameset).squeeze()\n- self.load_conus_basemap(res='l')\n- self.get_dates()\n- else:\n- dset = xr.open_mfdataset(fname.tolist(), concat_dim='TSTEP',\n- engine='pnc').drop(dropset).rename(nameset).squeeze()\n- self.dset = xr.merge([self.dset, dset])\n- else:\n- print('Files not found')\n- self.keys = list(self.dset.keys())\n-\n- def check_z(self, varname):\n- if pd.Series(self.dset[varname].dims).isin('z').max():\n- return True\n- else:\n- return False\n-\n- def get_nox(self, lay=None):\n- if self.check_z('NO'):\n- if lay is not None:\n- var = self.dset['NO'][:, 0, :, :].squeeze().copy()\n- var += self.dset['NO2'][:, 0, :, :].squeeze().copy()\n- else:\n- var = self.dset['NO'][:, :, :, :].copy()\n- var += self.dset['NO2'][:, :, :, :].copy()\n- else:\n- var = self.dset['NO'][:, :, :].copy()\n- var += self.dset['NO2'][:, :, :].copy()\n- return 
var\n-\n- def get_pm25(self, lay=None):\n- keys = list(self.dset.keys())\n- allvars = self.fine\n- index = pd.Series(allvars).isin(keys)\n- newkeys = allvars[index]\n- if self.check_z(newkeys[0]):\n- if lay is not None:\n- var = self.dset[newkeys[0]][:, 0, :, :].squeeze()\n- for i in newkeys[1:]:\n- var += self.dset[i][:, 0, :, :].squeeze()\n- else:\n- var = self.dset[newkeys[0]][:, :, :, :].squeeze()\n- for i in newkeys[1:]:\n- var += self.dset[i][:, :, :, :].squeeze()\n- else:\n- var = self.dset[newkeys[0]][:, :, :].copy()\n- for i in newkeys[1:]:\n- var += self.dset[i][:, :, :].squeeze()\n- return var\n-\n- def get_pm10(self, lay=None):\n- keys = list(self.dset.keys())\n- allvars = self.coarse\n- index = pd.Series(allvars).isin(keys)\n- newkeys = allvars[index]\n- if self.check_z(newkeys[0]):\n- if lay is not None:\n- var = self.dset[newkeys[0]][:, 0, :, :].squeeze()\n- for i in newkeys[1:]:\n- var += self.dset[i][:, 0, :, :].squeeze()\n- else:\n- var = self.dset[newkeys[0]][:, :, :, :].squeeze()\n- for i in newkeys[1:]:\n- var += self.dset[i][:, :, :, :].squeeze()\n- else:\n- var = self.dset[newkeys[0]][:, :, :].copy()\n- for i in newkeys[1:]:\n- var += self.dset[i][:, :, :].squeeze()\n- return var\n-\n- def get_var(self, param='O3', lay=None):\n- p = param.upper()\n- print(param)\n- if p == 'PM25':\n- var = self.get_pm25(lay=lay)\n- elif p == 'PM10':\n- var = self.get_pm10(lay=lay)\n- elif p == 'NOX':\n- var = self.get_nox(lay=lay)\n- elif p == 'OC':\n- var = self.get_oc(lay=lay)\n- elif p == 'VOC':\n- if lay is not None:\n- var = self.dset['VOC'][:, 0, :, :].copy().squeeze()\n- else:\n- var = self.dset['VOC'][:, :, :, :].copy().squeeze()\n- else:\n- if self.check_z(param):\n- if lay is None:\n- var = self.dset[param][:, :, :, :].copy()\n- else:\n- var = self.dset[param][:, lay, :, :].copy().squeeze()\n- else:\n- var = self.dset[param]\n- return var\n-\n- def load_conus_basemap(self, res='l'):\n- from mpl_toolkits.basemap import Basemap\n- if self.map is None:\n- lat1 = self.dset.P_ALP\n- lat2 = self.dset.P_BET\n- lon1 = self.dset.P_GAM\n- lon0 = self.dset.XCENT\n- lat0 = self.dset.YCENT\n- m = Basemap(projection='lcc', resolution=res, lat_1=lat1, lat_2=lat2, lat_0=lat0, lon_0=lon0,\n- lon_1=lon1,\n- llcrnrlat=self.dset.latitude[0, 0], urcrnrlat=self.dset.latitude[-1, -1],\n- llcrnrlon=self.dset.longitude[0, 0],\n- urcrnrlon=self.dset.longitude[-1, -1], rsphere=6371200.,\n- area_thresh=50.)\n- self.map = m\n- else:\n- m = self.map\n- return self.map\n+def can_do(index):\n+ if index.max():\n+ return True\n+ else:\n+ return False\n+\n+\n+def open_files(fname, earth_radius=6370000):\n+ \"\"\"Short summary.\n+\n+ Parameters\n+ ----------\n+ fname : type\n+ Description of parameter `fname`.\n+ earth_radius : type\n+ Description of parameter `earth_radius`.\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ \"\"\"\n+ # open the dataset using xarray\n+ dset = xr.open_mfdataset(\n+ fname, engine='pseudonetcdf', backend_kwargs={'format': 'uamiv'})\n+\n+ # get the grid information\n+ grid = grid_from_dataset(dset, earth_radius=earth_radius)\n+ area_def = get_ioapi_pyresample_area_def(dset, grid)\n+ # assign attributes for dataset and all DataArrays\n+ dset = dset.assign_attrs({'proj4_srs': grid})\n+ for i in dset.variables:\n+ dset[i] = dset[i].assign_attrs({'proj4_srs': grid})\n+ for j in dset[i].attrs:\n+ dset[i].attrs[j] = dset[i].attrs[j].strip()\n+ dset[i] = dset[i].assign_attrs({'area': area_def})\n+ dset = dset.assign_attrs(area=area_def)\n+\n+ # add lazy 
diagnostic variables\n+ dset = add_lazy_pm25(dset)\n+ dset = add_lazy_pm10(dset)\n+ dset = add_lazy_pm_course(dset)\n+ dset = add_lazy_noy(dset)\n+ dset = add_lazy_nox(dset)\n+\n+ # get the times\n+ dset = _get_times(dset)\n+\n+ # get the lat lon\n+ dset = _get_latlon(dset)\n+\n+ # get Predefined mapping tables for observations\n+ dset = _predefined_mapping_tables(dset)\n+\n+ # rename dimensions\n+ dset = dset.rename({'COL': 'x', 'ROW': 'y', 'LAY': 'z'})\n+\n+ return dset\n+\n+\n+def _get_times(d):\n+ idims = len(d.TFLAG.dims)\n+ if idims == 2:\n+ tflag1 = Series(d['TFLAG'][:, 0]).astype(str).str.zfill(7)\n+ tflag2 = Series(d['TFLAG'][:, 1]).astype(str).str.zfill(6)\n+ else:\n+ tflag1 = Series(d['TFLAG'][:, 0, 0]).astype(str).str.zfill(7)\n+ tflag2 = Series(d['TFLAG'][:, 0, 1]).astype(str).str.zfill(6)\n+ date = to_datetime(\n+ [i + j for i, j in zip(tflag1, tflag2)], format='%Y%j%H%M%S')\n+ indexdates = Series(date).drop_duplicates(keep='last').index.values\n+ d = d.isel(TSTEP=indexdates)\n+ d['TSTEP'] = date[indexdates]\n+ return d.rename({'TSTEP': 'time'})\n+\n+\n+def _get_latlon(dset):\n+ \"\"\"gets the lat and lons from the pyresample.geometry.AreaDefinition\n+\n+ Parameters\n+ ----------\n+ dset : xarray.Dataset\n+ Dataset with an `area` attribute (pyresample AreaDefinition).\n+\n+ Returns\n+ -------\n+ xarray.Dataset\n+ The dataset with 'longitude' and 'latitude' coordinates attached.\n+\n+ \"\"\"\n+ lon, lat = dset.area.get_lonlats()\n+ dset['longitude'] = xr.DataArray(lon[::-1, :], dims=['ROW', 'COL'])\n+ dset['latitude'] = xr.DataArray(lat[::-1, :], dims=['ROW', 'COL'])\n+ dset = dset.assign_coords(longitude=dset.longitude, latitude=dset.latitude)\n+ return dset\n+\n+\n+def add_lazy_pm25(d):\n+ \"\"\"Adds a lazily evaluated 'PM25' diagnostic summed from the fine-mode\n+ species, unless a precomputed 'PM25_TOT' variable is already present.\n+\n+ Parameters\n+ ----------\n+ d : xarray.Dataset\n+\n+ Returns\n+ -------\n+ xarray.Dataset\n+\n+ \"\"\"\n+ keys = Series([i for i in d.variables])\n+ allvars = Series(fine)\n+ if 'PM25_TOT' in keys:\n+ d['PM25'] = d['PM25_TOT'].chunk()\n+ else:\n+ index = allvars.isin(keys)\n+ newkeys = allvars.loc[index]\n+ d['PM25'] = add_multiple_lazy(d, newkeys)\n+ d['PM25'] = d['PM25'].assign_attrs({'name': 'PM2.5', 'long_name': 'PM2.5'})\n+ return d\n+\n+\n+def add_lazy_pm10(d):\n+ keys = Series([i for i in d.variables])\n+ allvars = Series(concatenate([fine, coarse]))\n+ if 'PM_TOT' in keys:\n+ d['PM10'] = d['PM_TOT'].chunk()\n+ else:\n+ index = allvars.isin(keys)\n+ if can_do(index):\n+ newkeys = allvars.loc[index]\n+ d['PM10'] = add_multiple_lazy(d, newkeys)\n+ d['PM10'] = d['PM10'].assign_attrs({\n+ 'name':\n+ 'PM10',\n+ 'long_name':\n+ 'Particulate Matter < 10 microns'\n+ })\n+ return d\n+\n+\n+def add_lazy_pm_course(d):\n+ keys = Series([i for i in d.variables])\n+ allvars = Series(coarse)\n+ index = allvars.isin(keys)\n+ if can_do(index):\n+ newkeys = allvars.loc[index]\n+ d['PM_COURSE'] = add_multiple_lazy(d, newkeys)\n+ d['PM_COURSE'] = d['PM_COURSE'].assign_attrs({\n+ 'name':\n+ 'PM_COURSE',\n+ 'long_name':\n+ 'Coarse Mode Particulate Matter'\n+ })\n+ return d\n+\n+\n+def add_lazy_clf(d):\n+ keys = Series([i for i in d.variables])\n+ allvars = Series(['ACLI', 'ACLJ', 'ACLK'])\n+ weights = Series([1, 1, .2])\n+ index = allvars.isin(keys)\n+ if can_do(index):\n+ newkeys = allvars.loc[index]\n+ neww = weights.loc[index]\n+ d['CLf'] = add_multiple_lazy(d, newkeys, weights=neww)\n+ d['CLf'] = d['CLf'].assign_attrs({\n+ 'name':\n+ 'CLf',\n+ 'long_name':\n+ 'Fine Mode particulate Cl'\n+ })\n+ return d\n+\n+\n
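+# Illustration (hypothetical two-species subset) of what add_multiple_lazy\n+# below assembles for these diagnostics -- a lazily evaluated, optionally\n+# weighted sum of dask-backed DataArrays:\n+#\n+# var = d['PSO4'].copy() * 1.0\n+# for spc, w in zip(['PNO3', 'PNH4'], [1.0, 1.0]):\n+# var = var + d[spc].chunk() * w\n+# d['PM25'] = var # nothing is computed until .compute() or .values\n+\n+\n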
+def add_lazy_noy(d):\n+ keys = Series([i for i in d.variables])\n+ allvars = Series(noy_gas)\n+ index = allvars.isin(keys)\n+ if can_do(index):\n+ newkeys = allvars.loc[index]\n+ d['NOy'] = add_multiple_lazy(d, newkeys)\n+ d['NOy'] = d['NOy'].assign_attrs({'name': 'NOy', 'long_name': 'NOy'})\n+ return d\n+\n+\n+def add_lazy_nox(d):\n+ keys = Series([i for i in d.variables])\n+ allvars = Series(['NO', 'NOX'])\n+ index = allvars.isin(keys)\n+ if can_do(index):\n+ newkeys = allvars.loc[index]\n+ d['NOx'] = add_multiple_lazy(d, newkeys)\n+ d['NOx'] = d['NOx'].assign_attrs({'name': 'NOx', 'long_name': 'NOx'})\n+ return d\n+\n+\n+def add_multiple_lazy(dset, variables, weights=None):\n+ from numpy import ones\n+ if weights is None:\n+ weights = ones(len(variables))\n+ new = dset[variables[0]].copy() * weights[0]\n+ for i, j in zip(variables[1:], weights[1:]):\n+ new = new + dset[i].chunk() * j\n+ return new\n+\n+\n+def _predefined_mapping_tables(dset):\n+ \"\"\"Predefined mapping tables for different observational pairings used when\n+ combining data.\n+\n+ Returns\n+ -------\n+ dictionary\n+ A dictionary of mapping tables keyed by observation network.\n+\n+ \"\"\"\n+ to_improve = {}\n+ to_nadp = {}\n+ to_aqs = {\n+ 'OZONE': ['O3'],\n+ 'PM2.5': ['PM25'],\n+ 'CO': ['CO'],\n+ 'NOY': [\n+ 'NO', 'NO2', 'NO3', 'N2O5', 'HONO', 'HNO3', 'PAN', 'PANX', 'PNA',\n+ 'NTR', 'CRON', 'CRN2', 'CRNO', 'CRPX', 'OPAN'\n+ ],\n+ 'NOX': ['NO', 'NO2'],\n+ 'SO2': ['SO2'],\n+ 'NO': ['NO'],\n+ 'NO2': ['NO2'],\n+ 'SO4f': ['PSO4'],\n+ 'PM10': ['PM10'],\n+ 'NO3f': ['PNO3'],\n+ 'ECf': ['PEC'],\n+ 'OCf': ['OC'],\n+ 'ETHANE': ['ETHA'],\n+ 'BENZENE': ['BENZENE'],\n+ 'TOLUENE': ['TOL'],\n+ 'ISOPRENE': ['ISOP'],\n+ 'O-XYLENE': ['XYL'],\n+ 'WS': ['WSPD10'],\n+ 'TEMP': ['TEMP2'],\n+ 'WD': ['WDIR10'],\n+ 'NAf': ['NA'],\n+ 'NH4f': ['PNH4']\n+ }\n+ to_airnow = {\n+ 'OZONE': ['O3'],\n+ 'PM2.5': ['PM25'],\n+ 'CO': ['CO'],\n+ 'NOY': [\n+ 'NO', 'NO2', 'NO3', 'N2O5', 'HONO', 'HNO3', 'PAN', 'PANX', 'PNA',\n+ 'NTR', 'CRON', 'CRN2', 'CRNO', 'CRPX', 'OPAN'\n+ ],\n+ 'NOX': ['NO', 'NO2'],\n+ 'SO2': ['SO2'],\n+ 'NO': ['NO'],\n+ 'NO2': ['NO2'],\n+ 'SO4f': ['PSO4'],\n+ 'PM10': ['PM10'],\n+ 'NO3f': ['PNO3'],\n+ 'ECf': ['PEC'],\n+ 'OCf': ['OC'],\n+ 'ETHANE': ['ETHA'],\n+ 'BENZENE': ['BENZENE'],\n+ 'TOLUENE': ['TOL'],\n+ 'ISOPRENE': ['ISOP'],\n+ 'O-XYLENE': ['XYL'],\n+ 'WS': ['WSPD10'],\n+ 'TEMP': ['TEMP2'],\n+ 'WD': ['WDIR10'],\n+ 'NAf': ['NA'],\n+ 'NH4f': ['PNH4']\n+ }\n+ to_crn = {}\n+ to_aeronet = {}\n+ to_cems = {}\n+ mapping_tables = {\n+ 'improve': to_improve,\n+ 'aqs': to_aqs,\n+ 'airnow': to_airnow,\n+ 'crn': to_crn,\n+ 'cems': to_cems,\n+ 'nadp': to_nadp,\n+ 'aeronet': to_aeronet\n+ }\n+ dset = dset.assign_attrs({'mapping_tables': mapping_tables})\n+ return dset\n+\n+\n+# Arrays for different gases and PM groupings\n+coarse = array(['CPRM', 'CCRS'])\n+fine = array([\n+ 'NA', 'PSO4', 'PNO3', 'PNH4', 'PH2O', 'PCL', 'PEC', 'FPRM', 'FCRS', 'SOA1',\n+ 'SOA2', 'SOA3', 'SOA4'\n+])\n+noy_gas = array([\n+ 'NO', 'NO2', 'NO3', 'N2O5', 'HONO', 'HNO3', 'PAN', 'PANX', 'PNA', 'NTR',\n+ 'CRON', 'CRN2', 'CRNO', 'CRPX', 'OPAN'\n+])\n+poc = array(['SOA1', 'SOA2', 'SOA3', 'SOA4'])\ndiff --git a/monet/models/cmaq.py b/monet/models/cmaq.py\n--- a/monet/models/cmaq.py\n+++ b/monet/models/cmaq.py\n@@ -1,447 +1,504 @@\n-from __future__ import absolute_import, division, print_function\n+\"\"\" CMAQ File Reader \"\"\"\n+from numpy import array, concatenate\n+from pandas import Series, to_datetime\n+import xarray as xr\n+from ..grids import grid_from_dataset, get_ioapi_pyresample_area_def\n \n-# This file is to deal with CMAQ code - try to make it general for cmaq 4.7.1 --> 
5.1\n-from builtins import object, zip\n-from gc import collect\n \n-import pandas as pd\n-import xarray as xr\n-from dask.diagnostics import ProgressBar\n-from numpy import array, zeros\n-from past.utils import old_div\n-\n-ProgressBar().register()\n-\n-\n-class CMAQ(object):\n- def __init__(self):\n- self.objtype = 'CMAQ'\n- self.dust_pm25 = array(\n- ['ASO4J', 'ANO3J', 'ACLJ', 'ANH4J', 'ANAJ', 'ACAJ', 'AMGJ', 'AKJ', 'APOCJ', 'APNCOMJ', 'AECJ', 'AFEJ',\n- 'AALJ', 'ASIJ', 'ATIJ', 'AMNJ', 'AOTHRJ'])\n- self.dust_total = array(\n- ['ASO4J', 'ASO4K', 'ANO3J', 'ANO3K', 'ACLJ', 'ACLK', 'ANH4J', 'ANAJ', 'ACAJ', 'AMGJ', 'AKJ', 'APOCJ',\n- 'APNCOMJ', 'AECJ', 'AFEJ', 'AALJ', 'ASIJ', 'ATIJ', 'AMNJ', 'AOTHRJ', 'ASOIL'])\n- self.aitken = array(['ACLI', 'AECI', 'ANAI', 'ANH4I',\n- 'ANO3I', 'AOTHRI', 'APNCOMI', 'APOCI',\n- 'ASO4I', 'AORGAI', 'AORGPAI',\n- 'AORGBI'])\n- self.accumulation = array(\n- ['AALJ', 'AALK1J', 'AALK2J', 'ABNZ1J', 'ABNZ2J', 'ABNZ3J', 'ACAJ', 'ACLJ', 'AECJ', 'AFEJ',\n- 'AISO1J', 'AISO2J', 'AISO3J', 'AKJ', 'AMGJ', 'AMNJ', 'ANAJ', 'ANH4J', 'ANO3J', 'AOLGAJ', 'AOLGBJ',\n- 'AORGCJ', 'AOTHRJ', 'APAH1J', 'APAH2J', 'APAH3J', 'APNCOMJ', 'APOCJ', 'ASIJ', 'ASO4J', 'ASQTJ', 'ATIJ',\n- 'ATOL1J', 'ATOL2J', 'ATOL3J', 'ATRP1J', 'ATRP2J', 'AXYL1J', 'AXYL2J', 'AXYL3J', 'AORGAJ',\n- 'AORGPAJ', 'AORGBJ'])\n- self.coarse = array(['ACLK', 'ACORS', 'ANH4K', 'ANO3K', 'ASEACAT', 'ASO4K', 'ASOIL'])\n- self.noy_gas = array(\n- ['NO', 'NO2', 'NO3', 'N2O5', 'HONO', 'HNO3', 'PAN', 'PANX', 'PNA', 'NTR', 'CRON', 'CRN2', 'CRNO',\n- 'CRPX', 'OPAN'])\n- self.pec = array(['AECI', 'AECJ'])\n- self.pso4 = array(['ASO4I', 'ASO4J'])\n- self.pno3 = array(['ANO3I', 'ANO3J'])\n- self.pnh4 = array(['ANH4I', 'ANH4J'])\n- self.pcl = array(['ACLI', 'ACLJ'])\n- self.poc = array(['AOTHRI', 'APNCOMI', 'APOCI', 'AORGAI', 'AORGPAI', 'AORGBI', 'ATOL1J', 'ATOL2J', 'ATOL3J',\n- 'ATRP1J', 'ATRP2J', 'AXYL1J', 'AXYL2J', 'AXYL3J', 'AORGAJ', 'AORGPAJ', 'AORGBJ', 'AOLGAJ',\n- 'AOLGBJ', 'AORGCJ', 'AOTHRJ', 'APAH1J', 'APAH2J', 'APAH3J', 'APNCOMJ', 'APOCJ', 'ASQTJ',\n- 'AISO1J', 'AISO2J', 'AISO3J', 'AALK1J', 'AALK2J', 'ABNZ1J', 'ABNZ2J', 'ABNZ3J', 'AORGAI',\n- 'AORGAJ', 'AORGPAI', 'AORGPAJ', 'AORGBI', 'AORGBJ'])\n- self.minerals = array(['AALJ', 'ACAJ', 'AFEJ', 'AKJ', 'AMGJ', 'AMNJ', 'ANAJ', 'ATIJ', 'ASIJ'])\n- self.dset = None # CMAQ xarray dataset object\n- self.grid = None # CMAQ xarray dataset gridcro2d obj\n- self.dates = None\n- self.keys = None\n- self.metcrokeys = []\n- self.indexdates = None\n- self.metdates = None\n- self.metindex = None\n- self.latitude = None\n- self.longitude = None\n- self.map = None\n-\n- def get_dates(self):\n- print('Reading CMAQ dates...')\n- idims = len(self.dset.TFLAG.dims)\n- if idims == 2:\n- tflag1 = array(self.dset['TFLAG'][:, 0], dtype='|S7')\n- tflag2 = array(old_div(self.dset['TFLAG'][:, 1], 10000), dtype='|S6')\n- else:\n- tflag1 = array(self.dset['TFLAG'][:, 0, 0], dtype='|S7')\n- tflag2 = array(old_div(self.dset['TFLAG'][:, 0, 1], 10000), dtype='|S6')\n- date = pd.to_datetime([i + j.zfill(2) for i, j in zip(tflag1, tflag2)], format='%Y%j%H')\n- indexdates = pd.Series(date).drop_duplicates(keep='last').index.values\n- self.dset = self.dset.isel(time=indexdates)\n- self.dset['time'] = date[indexdates]\n-\n- def open_cmaq(self, file):\n- from glob import glob\n- from numpy import sort\n- nameset = {'COL': 'x', 'ROW': 'y', 'TSTEP': 'time', 'LAY': 'z'}\n- if type(file) == str:\n- fname = sort(array(glob(file)))\n- else:\n- fname = sort(array(file))\n- if fname.shape[0] >= 1:\n- if self.dset is 
None:\n- self.dset = xr.open_mfdataset(fname.tolist(), concat_dim='TSTEP').rename(nameset).squeeze()\n- else:\n- dset = xr.open_mfdataset(fname.tolist(), concat_dim='TSTEP').rename(nameset).squeeze()\n- self.dset = xr.merge([self.dset, dset])\n- else:\n- print('Files not found')\n- if self.grid is not None:\n- self.dset = self.dset.assign(latitude=self.grid.LAT.squeeze())\n- self.dset = self.dset.assign(longitude=self.grid.LON.squeeze())\n- self.dset = self.dset.set_coords(['latitude', 'longitude'])\n- self.get_dates()\n- self.keys = list(self.dset.keys())\n-\n- def check_z(self, varname):\n- if pd.Series(self.dset[varname].dims).isin(['z']).max():\n- return True\n- else:\n- return False\n-\n- def add_multiple_fields(self, findkeys, lay=None, weights=None):\n- from numpy import ones\n- keys = self.keys\n- newkeys = pd.Series(findkeys).loc[pd.Series(findkeys).isin(keys)].values\n- if weights is None:\n- w = ones(len(newkeys))\n- if self.check_z(newkeys[0]):\n- if lay is not None:\n- var = self.dset[newkeys[0]][:, 0, :, :].squeeze() * w[0]\n- for i, j in zip(newkeys[1:], w[1:]):\n- var += self.dset[i][:, 0, :, :].squeeze() * j\n- else:\n- var = self.dset[newkeys[0]][:, :, :, :].squeeze() * w[0]\n- for i, j in zip(newkeys[1:], w[1:]):\n- var += self.dset[i][:, :, :, :].squeeze() * j\n- else:\n- var = self.dset[newkeys[0]][:, :, :].copy() * w[0]\n- for i, j in zip(newkeys[1:], w[1:]):\n- var += self.dset[i][:, :, :].squeeze() * j\n- return var\n-\n- def get_dust_total(self, lay=None):\n- return self.add_multiple_fields(self.dust_totl, lay=lay)\n-\n- def get_noy(self, lay=None):\n- keys = self.keys\n- if 'NOY' in keys:\n- if self.check_z('NOY'):\n- if lay is not None:\n- var = self.dset['NOY'][:, lay, :, :].squeeze()\n- else:\n- var = self.dset['NOY'][:, :, :, :]\n- else:\n- var = self.dset['NOY'][:]\n- else:\n- var = self.add_multiple_fields(self.noy_gas, lay=lay)\n- return var\n-\n- def get_nox(self, lay=None):\n- var = self.add_multiple_fields(['NO', 'NOX'], lay=lay)\n- return var\n-\n- def get_dust_pm25(self, lay=None):\n- return self.add_multiple_fields(self.dust_pm25, lay=lay)\n-\n- def get_pm25(self, lay=None):\n- from numpy import concatenate\n- keys = self.keys\n- allvars = concatenate([self.aitken, self.accumulation, self.coarse])\n- weights = array(\n- [1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n- 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n- 1., 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2])\n- if 'PM25_TOT' in keys:\n- if self.check_z('PM25_TOT'):\n- if lay is not None:\n- var = self.dset['PM25_TOT'][:, lay, :, :].copy().squeeze()\n- else:\n- var = self.dset['PM25_TOT'][:, :, :, :].copy()\n- else:\n- var = self.dset['PM25_TOT'][:, :, :].copy()\n- else:\n- index = pd.Series(allvars).isin(keys)\n- newkeys = allvars[index]\n- neww = weights[index]\n- var = self.add_multiple_fields(newkeys, lay=lay, weights=neww)\n- var.name = 'PM2.5'\n- var['long_name'] = 'PM2.5'\n- var['var_desc'] = 'Variable PM2.5'\n- return var\n-\n- def get_pm10(self, lay=None):\n- from numpy import concatenate\n- keys = self.keys\n- allvars = concatenate([self.aitken, self.accumulation, self.coarse])\n- var = None\n- if 'PMC_TOT' in keys:\n- if self.check_z('PM10_TOT'):\n- if lay is not None:\n- var = self.dset['PMC_TOT'][:, lay, :, :].copy().squeeze()\n- else:\n- var = self.dset['PMC_TOT'][:, :, :, :].copy()\n- else:\n- var = self.dset['PMC_TOT'][:, :, :].copy()\n- elif 'PM10' in keys:\n- if 
self.check_z('PM10_TOT'):\n- if lay is not None:\n- var = self.dset['PM10'][:, lay, :, :].squeeze()\n- else:\n- var = self.dset['PM10'][:, :, :, :].squeeze()\n- else:\n- var = self.dset['PMC_TOT'][:, :, :].copy()\n- else:\n- index = pd.Series(allvars).isin(keys)\n- newkeys = allvars[index]\n- neww = weights[index]\n- var = self.add_multiple_fields(newkeys, lay=lay, weights=neww)\n- var.name = 'PM2.5'\n- var['long_name'] = 'PM2.5'\n- var['var_desc'] = 'Variable PM2.5'\n- return var\n-\n- def get_clf(self, lay=None):\n- keys = self.keys\n- allvars = array(['ACLI', 'ACLJ', 'ACLK'])\n- weights = array([1, 1, .2])\n- var = None\n- if 'PM25_CL' in keys:\n- var = self.dset['PM25_CL'][:, lay, :, :].squeeze()\n- else:\n- index = pd.Series(allvars).isin(keys)\n- newkeys = allvars[index]\n- neww = weights[index]\n- var = self.add_multiple_fields(newkeys, lay=lay, weights=neww)\n- var.name = 'PM25_CL'\n- var['long_name'] = 'PM25_CL'\n- var['var_desc'] = 'Variable PM25_CL'\n- return var\n-\n- def get_naf(self, lay=None):\n- keys = self.keys\n- allvars = array(['ANAI', 'ANAJ', 'ASEACAT', 'ASOIL', 'ACORS'])\n- weights = array([1, 1, .2 * 837.3 / 1000., .2 * 62.6 / 1000., .2 * 2.3 / 1000.])\n- if 'PM25_NA' in keys:\n- if lay is not None:\n- var = self.dset['PM25_NA'][:, lay, :, :].squeeze()\n- else:\n- var = self.dset['PM25_NA'][:, :, :, :].squeeze()\n- else:\n- index = pd.Series(allvars).isin(keys)\n- newkeys = allvars[index]\n- neww = weights[index]\n- var = self.add_multiple_fields(newkeys, lay=lay, weights=neww)\n- var.name = 'PM25_NA'\n- var['long_name'] = 'PM25_NA'\n- var['var_desc'] = 'Variable PM25_NA'\n- return var\n-\n- def get_kf(self, lay=None):\n- keys = self.keys\n- allvars = array(['AKI', 'AKJ', 'ASEACAT', 'ASOIL', 'ACORS'])\n- weights = array([1, 1, .2 * 31. / 1000., .2 * 24. / 1000., .2 * 17.6 / 1000.])\n- if 'PM25_K' in keys:\n- if lay is not None:\n- var = self.dset['PM25_K'][:, lay, :, :].squeeze()\n- else:\n- var = self.dset['PM25_K'][:, :, :, :].squeeze()\n- else:\n- index = pd.Series(allvars).isin(keys)\n- newkeys = allvars[index]\n- neww = weights[index]\n- var = self.add_multiple_fields(newkeys, lay=lay, weights=neww)\n- var.name = 'PM25_K'\n- var['long_name'] = 'PM25_K'\n- var['var_desc'] = 'Variable PM25_K'\n-\n- return var\n-\n- def get_caf(self, lay=None):\n- keys = self.keys\n- allvars = array(['ACAI', 'ACAJ', 'ASEACAT', 'ASOIL', 'ACORS'])\n- weights = array([1, 1, .2 * 32. 
/ 1000., .2 * 83.8 / 1000., .2 * 56.2 / 1000.])\n- if 'PM25_CA' in keys:\n- if lay is not None:\n- var = self.dset['PM25_CA'][:, lay, :, :].squeeze()\n- else:\n- var = self.dset['PM25_CA'][:, :, :, :].squeeze()\n- else:\n- print(' Computing PM25_NO3...')\n- index = pd.Series(allvars).isin(keys)\n- newkeys = allvars[index]\n- neww = weights[index.values]\n- var = self.add_multiple_fields(newkeys, lay=lay, weights=neww)\n- var.name = 'PM25_CA'\n- var['long_name'] = 'PM25_CA'\n- var['var_desc'] = 'Variable PM25_CA'\n- return var\n-\n- def get_so4f(self, lay=None):\n- keys = self.keys\n- allvars = array(['ASO4I', 'ASO4J', 'ASO4K'])\n- weights = array([1., 1., .2])\n- if 'PM25_SO4' in keys:\n- if lay is not None:\n- var = self.dset['PM25_SO4'][:, lay, :, :].squeeze()\n- else:\n- var = self.dset['PM25_SO4'][:, :, :, :].squeeze()\n- else:\n- index = pd.Series(allvars).isin(keys)\n- newkeys = allvars[index]\n- neww = weights[index.values]\n- var = self.add_multiple_fields(newkeys, lay=lay, weights=neww)\n- var.name = 'PM25_SO4'\n- var['long_name'] = 'PM25_SO4'\n- var['var_desc'] = 'Variable PM25_SO4'\n- return var\n-\n- def get_nh4f(self, lay=None):\n- keys = self.keys\n- allvars = array(['ANH4I', 'ANH4J', 'ANH4K'])\n- weights = array([1., 1., .2])\n- var = None\n- if 'PM25_NH4' in keys:\n- if lay is not None:\n- var = self.dset['PM25_NH4'][:, lay, :, :].squeeze()\n- else:\n- var = self.dset['PM25_NH4'][:, :, :, :].squeeze()\n- else:\n- print(' Computing PM25_NH4...')\n- index = pd.Series(allvars).isin(keys)\n- newkeys = allvars[index]\n- neww = weights[index]\n- var = self.add_multiple_fields(newkeys, lay=lay, weights=neww)\n- var.name = 'PM25_NH4'\n- var['long_name'] = 'PM25_NH4'\n- var['var_desc'] = 'Variable PM25_NH4'\n-\n- return var\n-\n- def get_no3f(self, lay=None):\n- keys = self.keys\n- allvars = array(['ANO3I', 'ANO3J', 'ANO3K'])\n- weights = array([1., 1., .2])\n- var = None\n- if 'PM25_NO3' in keys:\n- if lay is not None:\n- var = self.dset['PM25_NO3'][:, lay, :, :].squeeze()\n- else:\n- var = self.dset['PM25_NO3'][:, :, :, :].squeeze()\n- else:\n- index = pd.Series(allvars).isin(keys)\n- newkeys = allvars[index]\n- neww = weights[index]\n- var = self.add_multiple_fields(newkeys, lay=lay, weights=neww)\n- var.name = 'PM25_NO3'\n- var['long_name'] = 'PM25_NO3'\n- var['var_desc'] = 'Variable PM25_NO3'\n- return var\n-\n- def get_ec(self, lay=None):\n- keys = self.keys\n- allvars = array(['AECI', 'AECJ'])\n- weights = array([1., 1.])\n- var = None\n- if 'PM25_EC' in keys:\n- if lay is not None:\n- var = self.dset['PM25_EC'][:, lay, :, :].squeeze()\n- else:\n- var = self.dset['PM25_EC'][:, :, :, :].squeeze()\n- else:\n- index = pd.Series(allvars).isin(keys)\n- newkeys = allvars[index]\n- neww = weights[index]\n- var = self.add_multiple_fields(newkeys, lay=lay, weights=neww)\n- var.name = 'PM25_EC'\n- var['long_name'] = 'PM25_EC'\n- var['var_desc'] = 'Variable PM25_EC'\n-\n- return var\n-\n- def get_var(self, param='O3', lay=None):\n- p = param.upper()\n- print(param)\n- if p == 'PM25':\n- var = self.get_pm25(lay=lay)\n- elif p == 'PM10':\n- var = self.get_pm10(lay=lay)\n- elif p == 'PM25_DUST':\n- var = self.get_dust_pm25(lay=lay)\n- elif p == 'PM10_DUST':\n- var = self.get_dust_total(lay=lay)\n- elif p == 'NOX':\n- var = self.get_nox(lay=lay)\n- elif p == 'NOY':\n- var = self.get_noy(lay=lay)\n- elif p == 'CLF':\n- var = self.get_clf(lay=lay)\n- elif p == 'CAF':\n- var = self.get_caf(lay=lay)\n- elif p == 'NAF':\n- var = self.get_naf(lay=lay)\n- elif p == 'KF':\n- var = 
self.get_kf(lay=lay)\n- elif (p == 'SO4F') | (p == 'PM2.5_SO4'):\n- var = self.get_so4f(lay=lay)\n- elif p == 'NH4F':\n- var = self.get_nh4f(lay=lay)\n- elif (p == 'NO3F') | (p == 'PM2.5_NO3'):\n- var = self.get_no3f(lay=lay)\n- elif (p == 'PM2.5_EC') | (p == 'ECF'):\n- var = self.get_ec(lay=lay)\n- elif p == 'OC':\n- var = self.get_oc(lay=lay)\n- elif p == 'VOC':\n- var = self.dset['VOC'][:, lay, :, :].squeeze()\n- elif p == 'RH':\n- var = self.get_metcro2d_rh(self, lay=lay)\n- else:\n- if self.check_z(param):\n- if lay is None:\n- var = self.dset[param][:, :, :, :].copy()\n- else:\n- var = self.dset[param][:, lay, :, :].copy().squeeze()\n- else:\n- var = self.dset[param][:, :, :].copy()\n- return var\n-\n- def get_metcro2d_rh(self, lay=None):\n- import atmos\n- data = {'T': self.dset['TEMP2'][:].compute().values, 'rv': self.dset['Q2'][:].compute().values,\n- 'p': self.dset['PRSFC'][:].compute().values}\n- if lay is None:\n- a = atmos.calculate('RH', **data)[:, :, :, :].squeeze()\n- else:\n- atmos.calculate('RH', **data)[:, lay, :, :].squeeze()\n- return a\n-\n- def set_gridcro2d(self, filename):\n- self.grid = xr.open_dataset(filename).rename({'COL': 'x', 'ROW': 'y'}).drop('TFLAG').squeeze()\n- lat = 'LAT'\n- lon = 'LON'\n- self.latitude = self.grid[lat][:][:, :].squeeze().compute()\n- self.longitude = self.grid[lon][:][:, :].squeeze().compute()\n- self.load_conus_basemap(res='l')\n-\n- def load_conus_basemap(self, res='l'):\n- from mpl_toolkits.basemap import Basemap\n- if isinstance(self.map, type(None)):\n- lat1 = self.grid.P_ALP\n- lat2 = self.grid.P_BET\n- lon1 = self.grid.P_GAM\n- lon0 = self.grid.XCENT\n- lat0 = self.grid.YCENT\n- m = Basemap(projection='lcc', resolution=res, lat_1=lat1, lat_2=lat2, lat_0=lat0, lon_0=lon0,\n- lon_1=lon1,\n- llcrnrlat=self.latitude[0, 0], urcrnrlat=self.latitude[-1, -1],\n- llcrnrlon=self.longitude[0, 0],\n- urcrnrlon=self.longitude[-1, -1], rsphere=6371200.,\n- area_thresh=50.)\n- self.map = m\n- else:\n- m = self.map\n- return self.map\n+def can_do(index):\n+ if index.max():\n+ return True\n+ else:\n+ return False\n+\n+\n+def open_files(fname, earth_radius=6370000, convert_to_ppb=True):\n+ \"\"\"Method to open CMAQ IOAPI netcdf files.\n+\n+ Parameters\n+ ----------\n+ fname : string or list\n+ fname is the path to the file or files. 
It will accept hot keys in\n+ strings as well.\n+ earth_radius : float\n+ The earth radius used for the map projection\n+ convert_to_ppb : boolean\n+ If true the units of the gas species will be converted to ppbV\n+\n+ Returns\n+ -------\n+ xarray.DataSet\n+\n+\n+ \"\"\"\n+\n+ # open the dataset using xarray\n+ dset = xr.open_mfdataset(fname)\n+\n+ # set log pressure as coordinate\n+ if 'PRES' in dset.variables:\n+ dset.coords['logp'] = xr.ufuncs.log(dset.PRES.chunk())\n+ # add lazy diagnostic variables\n+ dset = add_lazy_pm25(dset)\n+ dset = add_lazy_pm10(dset)\n+ dset = add_lazy_pm_course(dset)\n+ dset = add_lazy_clf(dset)\n+ dset = add_lazy_naf(dset)\n+ dset = add_lazy_caf(dset)\n+ dset = add_lazy_noy(dset)\n+ dset = add_lazy_nox(dset)\n+ dset = add_lazy_no3f(dset)\n+ dset = add_lazy_nh4f(dset)\n+ dset = add_lazy_so4f(dset)\n+ dset = add_lazy_rh(dset)\n+\n+ # get the grid information\n+ grid = grid_from_dataset(dset, earth_radius=earth_radius)\n+ area_def = get_ioapi_pyresample_area_def(dset, grid)\n+ # assign attributes for dataset and all DataArrays\n+ dset = dset.assign_attrs({'proj4_srs': grid})\n+ for i in dset.variables:\n+ dset[i] = dset[i].assign_attrs({'proj4_srs': grid})\n+ for j in dset[i].attrs:\n+ dset[i].attrs[j] = dset[i].attrs[j].strip()\n+ dset[i] = dset[i].assign_attrs({'area': area_def})\n+ dset = dset.assign_attrs(area=area_def)\n+\n+ # get the times\n+ dset = _get_times(dset)\n+\n+ # get the lat lon\n+ dset = _get_latlon(dset)\n+\n+ # get Predefined mapping tables for observations\n+ dset = _predefined_mapping_tables(dset)\n+\n+ # rename dimensions\n+ dset = dset.rename({'COL': 'x', 'ROW': 'y', 'LAY': 'z'})\n+\n+ # convert all gas species to ppbv\n+ if convert_to_ppb:\n+ for i in dset.variables:\n+ if 'units' in dset[i].attrs:\n+ if 'ppmV' in dset[i].attrs['units']:\n+ dset[i] *= 1000.\n+ dset[i].attrs['units'] = 'ppbV'\n+\n+ # convert 'micrograms to \\mu g'\n+ for i in dset.variables:\n+ if 'units' in dset[i].attrs:\n+ if 'micrograms' in dset[i].attrs['units']:\n+ dset[i].attrs['units'] = '$\\mu g m^{-3}$'\n+\n+ return dset\n+\n+\n+def _get_times(d):\n+ idims = len(d.TFLAG.dims)\n+ if idims == 2:\n+ tflag1 = Series(d['TFLAG'][:, 0]).astype(str).str.zfill(7)\n+ tflag2 = Series(d['TFLAG'][:, 1]).astype(str).str.zfill(6)\n+ else:\n+ tflag1 = Series(d['TFLAG'][:, 0, 0]).astype(str).str.zfill(7)\n+ tflag2 = Series(d['TFLAG'][:, 0, 1]).astype(str).str.zfill(6)\n+ date = to_datetime(\n+ [i + j for i, j in zip(tflag1, tflag2)], format='%Y%j%H%M%S')\n+ indexdates = Series(date).drop_duplicates(keep='last').index.values\n+ d = d.isel(TSTEP=indexdates)\n+ d['TSTEP'] = date[indexdates]\n+ return d.rename({'TSTEP': 'time'})\n+\n+\n+def _get_latlon(dset):\n+ \"\"\"gets the lat and lons from the pyreample.geometry.AreaDefinition\n+\n+ Parameters\n+ ----------\n+ dset : xarray.Dataset\n+ Description of parameter `dset`.\n+\n+ Returns\n+ -------\n+ xarray.Dataset\n+ Description of returned object.\n+\n+ \"\"\"\n+ lon, lat = dset.area.get_lonlats()\n+ dset['longitude'] = xr.DataArray(lon[::-1, :], dims=['ROW', 'COL'])\n+ dset['latitude'] = xr.DataArray(lat[::-1, :], dims=['ROW', 'COL'])\n+ dset = dset.assign_coords(longitude=dset.longitude, latitude=dset.latitude)\n+ return dset\n+\n+\n+def add_lazy_pm25(d):\n+ \"\"\"Short summary.\n+\n+ Parameters\n+ ----------\n+ d : type\n+ Description of parameter `d`.\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ \"\"\"\n+ keys = Series([i for i in d.variables])\n+ allvars = Series(concatenate([aitken, accumulation, 
coarse]))\n+ weights = Series([\n+ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n+ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n+ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n+ 1., 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2\n+ ])\n+ if 'PM25_TOT' in keys:\n+ d['PM25'] = d['PM25_TOT'].chunk()\n+\n+ else:\n+ index = allvars.isin(keys)\n+ if can_do(index):\n+ newkeys = allvars.loc[index]\n+ newweights = weights.loc[index]\n+ d['PM25'] = add_multiple_lazy(d, newkeys, weights=newweights)\n+ d['PM25'] = d['PM25'].assign_attrs({\n+ 'units': '$\\mu g m^{-3}$',\n+ 'name': 'PM2.5',\n+ 'long_name': 'PM2.5'\n+ })\n+ return d\n+\n+\n+def add_lazy_pm10(d):\n+ keys = Series([i for i in d.variables])\n+ allvars = Series(concatenate([aitken, accumulation, coarse]))\n+ if 'PM_TOT' in keys:\n+ d['PM10'] = d['PM_TOT'].chunk()\n+ else:\n+ index = allvars.isin(keys)\n+ if can_do(index):\n+ newkeys = allvars.loc[index]\n+ d['PM10'] = add_multiple_lazy(d, newkeys)\n+ d['PM10'] = d['PM10'].assign_attrs({\n+ 'units':\n+ '$\\mu g m^{-3}$',\n+ 'name':\n+ 'PM10',\n+ 'long_name':\n+ 'Particulate Matter < 10 microns'\n+ })\n+ return d\n+\n+\n+def add_lazy_pm_course(d):\n+ keys = Series([i for i in d.variables])\n+ allvars = Series(coarse)\n+ index = allvars.isin(keys)\n+ if can_do(index):\n+ newkeys = allvars.loc[index]\n+ d['PM_COURSE'] = add_multiple_lazy(d, newkeys)\n+ d['PM_COURSE'] = d['PM_COURSE'].assign_attrs({\n+ 'units':\n+ '$\\mu g m^{-3}$',\n+ 'name':\n+ 'PM_COURSE',\n+ 'long_name':\n+ 'Course Mode Particulate Matter'\n+ })\n+ return d\n+\n+\n+def add_lazy_clf(d):\n+ keys = Series([i for i in d.variables])\n+ allvars = Series(['ACLI', 'ACLJ', 'ACLK'])\n+ weights = Series([1, 1, .2])\n+ index = allvars.isin(keys)\n+ if can_do(index):\n+ newkeys = allvars.loc[index]\n+ neww = weights.loc[index]\n+ d['CLf'] = add_multiple_lazy(d, newkeys, weights=neww)\n+ d['CLf'] = d['CLf'].assign_attrs({\n+ 'units':\n+ '$\\mu g m^{-3}$',\n+ 'name':\n+ 'CLf',\n+ 'long_name':\n+ 'Fine Mode particulate Cl'\n+ })\n+ return d\n+\n+\n+def add_lazy_caf(d):\n+ keys = Series([i for i in d.variables])\n+ allvars = Series(['ACAI', 'ACAJ', 'ASEACAT', 'ASOIL', 'ACORS'])\n+ weights = Series(\n+ [1, 1, .2 * 32. 
/ 1000., .2 * 83.8 / 1000., .2 * 56.2 / 1000.])\n+ index = allvars.isin(keys)\n+ if can_do(index):\n+ newkeys = allvars.loc[index]\n+ neww = weights.loc[index]\n+ d['CAf'] = add_multiple_lazy(d, newkeys, weights=neww)\n+ d['CAf'] = d['CAf'].assign_attrs({\n+ 'units':\n+ '$\\mu g m^{-3}$',\n+ 'name':\n+ 'CAf',\n+ 'long_name':\n+ 'Fine Mode particulate CA'\n+ })\n+ return d\n+\n+\n+def add_lazy_naf(d):\n+ keys = Series([i for i in d.variables])\n+ allvars = Series(['ANAI', 'ANAJ', 'ASEACAT', 'ASOIL', 'ACORS'])\n+ weights = Series(\n+ [1, 1, .2 * 837.3 / 1000., .2 * 62.6 / 1000., .2 * 2.3 / 1000.])\n+ index = allvars.isin(keys)\n+ if can_do(index):\n+ newkeys = allvars.loc[index]\n+ neww = weights.loc[index]\n+ d['NAf'] = add_multiple_lazy(d, newkeys, weights=neww)\n+ d['NAf'] = d['NAf'].assign_attrs({\n+ 'units': '$\\mu g m^{-3}$',\n+ 'name': 'NAf',\n+ 'long_name': 'NAf'\n+ })\n+ return d\n+\n+\n+def add_lazy_so4f(d):\n+ keys = Series([i for i in d.variables])\n+ allvars = Series(['ASO4I', 'ASO4J', 'ASO4K'])\n+ weights = Series([1., 1., .2])\n+ index = allvars.isin(keys)\n+ if can_do(index):\n+ newkeys = allvars.loc[index]\n+ neww = weights.loc[index]\n+ d['SO4f'] = add_multiple_lazy(d, newkeys, weights=neww)\n+ d['SO4f'] = d['SO4f'].assign_attrs({\n+ 'units': '$\\mu g m^{-3}$',\n+ 'name': 'SO4f',\n+ 'long_name': 'SO4f'\n+ })\n+ return d\n+\n+\n+def add_lazy_nh4f(d):\n+ keys = Series([i for i in d.variables])\n+ allvars = Series(['ANH4I', 'ANH4J', 'ANH4K'])\n+ weights = Series([1., 1., .2])\n+ index = allvars.isin(keys)\n+ if can_do(index):\n+ newkeys = allvars.loc[index]\n+ neww = weights.loc[index]\n+ d['NH4f'] = add_multiple_lazy(d, newkeys, weights=neww)\n+ d['NH4f'] = d['NH4f'].assign_attrs({\n+ 'units': '$\\mu g m^{-3}$',\n+ 'name': 'NH4f',\n+ 'long_name': 'NH4f'\n+ })\n+ return d\n+\n+\n+def add_lazy_no3f(d):\n+ keys = Series([i for i in d.variables])\n+ allvars = Series(['ANO3I', 'ANO3J', 'ANO3K'])\n+ weights = Series([1., 1., .2])\n+ index = allvars.isin(keys)\n+ if can_do(index):\n+ newkeys = allvars.loc[index]\n+ neww = weights.loc[index]\n+ d['NO3f'] = add_multiple_lazy(d, newkeys, weights=neww)\n+ d['NO3f'] = d['NO3f'].assign_attrs({\n+ 'units': '$\\mu g m^{-3}$',\n+ 'name': 'NO3f',\n+ 'long_name': 'NO3f'\n+ })\n+ return d\n+\n+\n+def add_lazy_noy(d):\n+ keys = Series([i for i in d.variables])\n+ allvars = Series(noy_gas)\n+ index = allvars.isin(keys)\n+ if can_do(index):\n+ newkeys = allvars.loc[index]\n+ d['NOy'] = add_multiple_lazy(d, newkeys)\n+ d['NOy'] = d['NOy'].assign_attrs({'name': 'NOy', 'long_name': 'NOy'})\n+ return d\n+\n+\n+def add_lazy_rh(d):\n+ # keys = Series([i for i in d.variables])\n+ # allvars = Series(['TEMP', 'Q', 'PRES'])\n+ # index = allvars.isin(keys)\n+ # if can_do(index):\n+ # import atmos\n+ # data = {\n+ # 'T': dset['TEMP'][:].compute().values,\n+ # 'rv': dset['Q'][:].compute().values,\n+ # 'p': dset['PRES'][:].compute().values\n+ # }\n+ # d['NOx'] = add_multiple_lazy(d, newkeys)\n+ # d['NOx'] = d['NOx'].assign_attrs({'name': 'NOx', 'long_name': 'NOx'})\n+ return d\n+\n+\n+def add_lazy_nox(d):\n+ keys = Series([i for i in d.variables])\n+ allvars = Series(['NO', 'NOX'])\n+ index = allvars.isin(keys)\n+ if can_do(index):\n+ newkeys = allvars.loc[index]\n+ d['NOx'] = add_multiple_lazy(d, newkeys)\n+ d['NOx'] = d['NOx'].assign_attrs({'name': 'NOx', 'long_name': 'NOx'})\n+ return d\n+\n+\n+def add_multiple_lazy(dset, variables, weights=None):\n+ from numpy import ones\n+ if weights is None:\n+ weights = ones(len(variables))\n+ new = 
dset[variables[0]].copy() * weights[0]\n+ for i, j in zip(variables[1:], weights[1:]):\n+ new = new + dset[i].chunk() * j\n+ return new\n+\n+\n+def _predefined_mapping_tables(dset):\n+ \"\"\"Predefined mapping tables for different observational pairings used when\n+ combining data.\n+\n+ Returns\n+ -------\n+ dictionary\n+ A dictionary of mapping tables keyed by observation network.\n+\n+ \"\"\"\n+ to_improve = {}\n+ to_nadp = {}\n+ to_aqs = {\n+ 'OZONE': ['O3'],\n+ 'PM2.5': ['PM25'],\n+ 'CO': ['CO'],\n+ 'NOY': ['NOy'],\n+ 'NOX': ['NOx'],\n+ 'SO2': ['SO2'],\n+ 'NO': ['NO'],\n+ 'NO2': ['NO2'],\n+ 'SO4f': ['SO4f'],\n+ 'PM10': ['PM10'],\n+ 'NO3f': ['NO3f'],\n+ 'ECf': ['ECf'],\n+ 'OCf': ['OCf'],\n+ 'ETHANE': ['ETHA'],\n+ 'BENZENE': ['BENZENE'],\n+ 'TOLUENE': ['TOL'],\n+ 'ISOPRENE': ['ISOP'],\n+ 'O-XYLENE': ['XYL'],\n+ 'WS': ['WSPD10'],\n+ 'TEMP': ['TEMP2'],\n+ 'WD': ['WDIR10'],\n+ 'NAf': ['NAf'],\n+ 'MGf': ['AMGJ'],\n+ 'TIf': ['ATIJ'],\n+ 'SIf': ['ASIJ'],\n+ 'Kf': ['Kf'],\n+ 'CAf': ['CAf'],\n+ 'NH4f': ['NH4f'],\n+ 'FEf': ['AFEJ'],\n+ 'ALf': ['AALJ'],\n+ 'MNf': ['AMNJ']\n+ }\n+ to_airnow = {\n+ 'OZONE': ['O3'],\n+ 'PM2.5': ['PM25'],\n+ 'CO': ['CO'],\n+ 'NOY': ['NOy'],\n+ 'NOX': ['NOx'],\n+ 'SO2': ['SO2'],\n+ 'NO': ['NO'],\n+ 'NO2': ['NO2'],\n+ 'SO4f': ['SO4f'],\n+ 'PM10': ['PM10'],\n+ 'NO3f': ['NO3f'],\n+ 'ECf': ['ECf'],\n+ 'OCf': ['OCf'],\n+ 'ETHANE': ['ETHA'],\n+ 'BENZENE': ['BENZENE'],\n+ 'TOLUENE': ['TOL'],\n+ 'ISOPRENE': ['ISOP'],\n+ 'O-XYLENE': ['XYL'],\n+ 'WS': ['WSPD10'],\n+ 'TEMP': ['TEMP2'],\n+ 'WD': ['WDIR10'],\n+ 'NAf': ['NAf'],\n+ 'MGf': ['AMGJ'],\n+ 'TIf': ['ATIJ'],\n+ 'SIf': ['ASIJ'],\n+ 'Kf': ['Kf'],\n+ 'CAf': ['CAf'],\n+ 'NH4f': ['NH4f'],\n+ 'FEf': ['AFEJ'],\n+ 'ALf': ['AALJ'],\n+ 'MNf': ['AMNJ']\n+ }\n+ to_crn = {\n+ 'SUR_TEMP': ['TEMPG'],\n+ 'T_HR_AVG': ['TEMP2'],\n+ 'SOLARAD': ['RGRND'],\n+ 'SOIL_MOISTURE_5': ['SOIM1'],\n+ 'SOIL_MOISTURE_10': ['SOIM2']\n+ }\n+ to_aeronet = {}\n+ to_cems = {}\n+ mapping_tables = {\n+ 'improve': to_improve,\n+ 'aqs': to_aqs,\n+ 'airnow': to_airnow,\n+ 'crn': to_crn,\n+ 'cems': to_cems,\n+ 'nadp': to_nadp,\n+ 'aeronet': to_aeronet\n+ }\n+ dset = dset.assign_attrs({'mapping_tables': mapping_tables})\n+ return dset\n+\n+\n+# Arrays for different gases and PM groupings\n+accumulation = array([\n+ 'AALJ', 'AALK1J', 'AALK2J', 'ABNZ1J', 'ABNZ2J', 'ABNZ3J', 'ACAJ', 'ACLJ',\n+ 'AECJ', 'AFEJ', 'AISO1J', 'AISO2J', 'AISO3J', 'AKJ', 'AMGJ', 'AMNJ',\n+ 'ANAJ', 'ANH4J', 'ANO3J', 'AOLGAJ', 'AOLGBJ', 'AORGCJ', 'AOTHRJ', 'APAH1J',\n+ 'APAH2J', 'APAH3J', 'APNCOMJ', 'APOCJ', 'ASIJ', 'ASO4J', 'ASQTJ', 'ATIJ',\n+ 'ATOL1J', 'ATOL2J', 'ATOL3J', 'ATRP1J', 'ATRP2J', 'AXYL1J', 'AXYL2J',\n+ 'AXYL3J', 'AORGAJ', 'AORGPAJ', 'AORGBJ'\n+])\n+aitken = array([\n+ 'ACLI', 'AECI', 'ANAI', 'ANH4I', 'ANO3I', 'AOTHRI', 'APNCOMI', 'APOCI',\n+ 'ASO4I', 'AORGAI', 'AORGPAI', 'AORGBI'\n+])\n+coarse = array(\n+ ['ACLK', 'ACORS', 'ANH4K', 'ANO3K', 'ASEACAT', 'ASO4K', 'ASOIL'])\n+noy_gas = array([\n+ 'NO', 'NO2', 'NO3', 'N2O5', 'HONO', 'HNO3', 'PAN', 'PANX', 'PNA', 'NTR',\n+ 'CRON', 'CRN2', 'CRNO', 'CRPX', 'OPAN'\n+])\n+pec = array(['AECI', 'AECJ'])\n+pso4 = array(['ASO4I', 'ASO4J'])\n+pno3 = array(['ANO3I', 'ANO3J'])\n+pnh4 = array(['ANH4I', 'ANH4J'])\n+pcl = array(['ACLI', 'ACLJ'])\n+poc = array([\n+ 'AOTHRI', 'APNCOMI', 'APOCI', 'AORGAI', 'AORGPAI', 'AORGBI', 'ATOL1J',\n+ 'ATOL2J', 'ATOL3J', 'ATRP1J', 'ATRP2J', 'AXYL1J', 'AXYL2J', 'AXYL3J',\n+ 'AORGAJ', 'AORGPAJ', 'AORGBJ', 'AOLGAJ', 'AOLGBJ', 'AORGCJ', 'AOTHRJ',\n+ 'APAH1J', 'APAH2J', 'APAH3J', 'APNCOMJ', 'APOCJ', 'ASQTJ', 'AISO1J',\n+ 'AISO2J', 'AISO3J', 'AALK1J', 'AALK2J', 'ABNZ1J', 'ABNZ2J', 'ABNZ3J',\n+ 'AORGAI', 'AORGAJ', 'AORGPAI', 'AORGPAJ', 'AORGBI', 'AORGBJ'\n+])\n+minerals = array(\n+ ['AALJ', 'ACAJ', 'AFEJ', 'AKJ', 'AMGJ', 'AMNJ', 'ANAJ', 'ATIJ', 'ASIJ'])\n
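+# Usage sketch (illustrative; the file path is hypothetical):\n+#\n+# from monet.models import cmaq\n+# ds = cmaq.open_files('aqm.t12z.aconc.ncf') # wildcards also accepted\n+# pm25 = ds['PM25'] # lazy diagnostic built by add_lazy_pm25 above\n+# print(ds.mapping_tables['airnow']['OZONE']) # ['O3']\n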
diff --git a/monet/models/combinetool.py b/monet/models/combinetool.py\nnew file mode 100644\n--- /dev/null\n+++ b/monet/models/combinetool.py\n@@ -0,0 +1,224 @@\n+from __future__ import absolute_import, print_function\n+\n+from pandas import Series\n+\n+# from ..util import interp_util as interpo\n+# from ..obs import epa_util\n+\n+\n+def combine_da_to_df(da, df, col=None, radius_of_influence=12e3, merge=True):\n+ \"\"\"This function will combine an xarray DataArray (with spatial\n+ information) with point observations in `df`.\n+\n+ Parameters\n+ ----------\n+ da : xr.DataArray\n+ The gridded field to pair to the observations.\n+ df : pd.DataFrame\n+ Point observations with 'latitude', 'longitude' and 'time' columns.\n+ col : string\n+ Name of the observation column in `df` to pair against; required.\n+ radius_of_influence : integer or float, default = 12e3\n+ Search radius (meters) used when resampling to the point locations.\n+ merge : bool, default = True\n+ Currently unused in this implementation.\n+\n+ Returns\n+ -------\n+ pandas.DataFrame\n+ \"\"\"\n+ from ..util.interp_util import lonlat_to_swathdefinition\n+ from ..util.resample import resample_dataset\n+ if col is None:\n+ raise ValueError('Must enter column name')\n+ dfn = df.dropna(subset=[col])\n+ dfnn = dfn.drop_duplicates(subset=['latitude', 'longitude'])\n+ # unit = dfnn[col + '_unit'].unique()[0]\n+ target_grid = lonlat_to_swathdefinition(\n+ longitude=dfnn.longitude.values, latitude=dfnn.latitude.values)\n+ da_interped = resample_dataset(\n+ da.compute(), target_grid, radius_of_influence=radius_of_influence)\n+ # add model if da.name is the same as column\n+ df_interped = da_interped.to_dataframe().reset_index()\n+ cols = Series(df_interped.columns)\n+ drop_cols = cols.loc[cols.isin(['x', 'y', 'z'])]\n+ df_interped.drop(drop_cols, axis=1, inplace=True)\n+ if da.name in df.columns:\n+ df_interped.rename(columns={da.name: da.name + '_new'}, inplace=True)\n+ print(df_interped.keys())\n+ final_df = df.merge(\n+ df_interped, on=['latitude', 'longitude', 'time'], how='left')\n+ return final_df\n+\n+\n+def combine_da_to_height_profile(da, dset, radius_of_influence=12e3):\n+ \"\"\"This function will combine an xarray.DataArray with a 2d dataset with\n+ dimensions (time,z)\n+\n+ Parameters\n+ ----------\n+ da : xarray.DataArray\n+ The field to pair to the profile dataset.\n+ dset : xarray.Dataset\n+ The profile dataset with 'longitude' and 'latitude' coordinates.\n+\n+ Returns\n+ -------\n+ xarray.Dataset\n+ returns the xarray.Dataset with the `da` added as an additional\n+ variable.\n+\n+ \"\"\"\n+ # from ..util.interp_util import nearest_point_swathdefinition\n+ lon, lat = dset.longitude, dset.latitude\n+ # target_grid = nearest_point_swathdefinition(longitude=lon, latitude=lat)\n+ da_interped = da.monet.nearest_latlon(\n+ lon=lon, lat=lat, radius_of_influence=radius_of_influence)\n+\n+ # FIXME: interp to height here\n+\n+ dset[da.name] = da_interped\n+\n+ return dset\n+\n+\n+#\n+# def combine_to_df(model=None,\n+# obs=None,\n+# mapping_table=None,\n+# lay=None,\n+# radius=None):\n+# # first get mapping table for obs to model\n+# if radius is None:\n+# try:\n+# radius = model.dset.XCELL\n+# except AttributeError:\n+# radius = 40e3\n+# if mapping_table is None:\n+# mapping_table = get_mapping_table(model, obs)\n+# # get the data inside of the obs dataset (check for tolnet)\n+# if obs.objtype is not 'TOLNET' and obs.objtype is not 'AERONET':\n+# obslist = 
Series(obs.df.variable.unique())\n+# # find all variables to map\n+# comparelist = obslist.loc[obslist.isin(mapping_table.keys())]\n+# dfs = []\n+# for i in comparelist:\n+# print('Pairing: ' + i)\n+# obsdf = obs.df.groupby('variable').get_group(\n+# i) # get observations locations\n+# obsunit = obsdf.units.unique()[0] # get observation unit\n+# # get observation lat and lons\n+# dfn = obsdf.drop_duplicates(subset=['latitude', 'longitude'])\n+# factor = check_units(model, obsunit, variable=mapping_table[i][0])\n+# try:\n+# if lay is None and Series([model.objtype]).isin(\n+# ['CAMX', 'CMAQ']).max():\n+# modelvar = get_model_fields(\n+# model, mapping_table[i], lay=0).compute() * factor\n+# else:\n+# modelvar = get_model_fields(\n+# model, mapping_table[i], lay=lay).compute() * factor\n+# mvar_interped = interpo.interp_latlon(\n+# modelvar,\n+# dfn.latitude.values,\n+# dfn.longitude.values,\n+# radius=radius)\n+# combined_df = merge_obs_and_model(\n+# mvar_interped,\n+# obsdf,\n+# dfn,\n+# model_time=modelvar.time.to_index(),\n+# daily=obs.daily,\n+# obstype=obs.objtype)\n+# dfs.append(combined_df)\n+# except KeyError:\n+# print(i + ' not in dataset and will not be paired')\n+# df = concat(dfs)\n+#\n+# return df\n+#\n+#\n+# def merge_obs_and_model(model,\n+# obs,\n+# dfn,\n+# model_time=None,\n+# daily=False,\n+# obstype=None):\n+# import pandas as pd\n+# e = pd.DataFrame(model, index=dfn.siteid, columns=model_time)\n+# w = e.stack(dropna=False).reset_index().rename(columns={\n+# 'level_1': 'time',\n+# 0: 'model'\n+# })\n+# if daily and pd.Series(['AirNow', 'AQS', 'IMPROVE']).isin([obstype]).max():\n+# w = w.merge(\n+# dfn[['siteid', 'variable', 'gmt_offset', 'pollutant_standard']],\n+# on='siteid',\n+# how='left')\n+# w = epa_util.regulatory_resample(w)\n+# w = w.merge(\n+# obs.drop(['time', 'gmt_offset', 'variable'], axis=1),\n+# on=['siteid', 'time_local', 'pollutant_standard'],\n+# how='left')\n+# elif daily:\n+# w.index = w.time\n+# w = w.resample('D').mean().reset_index().rename(\n+# columns={'level_1': 'time'})\n+# w = w.merge(obs, on=['siteid', 'time'], how='left')\n+# else:\n+# w = w.merge(\n+# obs, on=['siteid', 'time'],\n+# how='left') # assume outputs are hourly\n+# return w\n+#\n+#\n+# def get_model_fields(model, findkeys, lay=None, weights=None):\n+# from numpy import ones\n+# keys = model.dset.keys()\n+# print(findkeys)\n+# newkeys = Series(findkeys).loc[Series(findkeys).isin(keys)]\n+# if len(newkeys) > 1:\n+# mvar = model.select_layer(model.dset[newkeys[0]], lay=lay)\n+# for i in newkeys:\n+# mvar = mvar + model.select_layer(model.dset[newkeys[0]], lay=lay)\n+# else:\n+# mvar = model.get_var(findkeys[0], lay=lay)\n+# return mvar\n+#\n+#\n+# def check_units(model, obsunit, variable=None):\n+# \"\"\"Short summary.\n+#\n+# Parameters\n+# ----------\n+# df : type\n+# Description of parameter `df`.\n+# param : type\n+# Description of parameter `param` (the default is 'O3').\n+# aqs_param : type\n+# Description of parameter `aqs_param` (the default is 'OZONE').\n+#\n+# Returns\n+# -------\n+# type\n+# Description of returned object.\n+#\n+# \"\"\"\n+# if obsunit == 'UG/M3':\n+# fac = 1.\n+# elif obsunit == 'PPB':\n+# fac = 1000.\n+# elif obsunit == 'ppbC':\n+# fac = 1000.\n+# if variable == 'ISOPRENE':\n+# fac *= 5.\n+# elif variable == 'BENZENE':\n+# fac *= 6.\n+# elif variable == 'TOLUENE':\n+# fac *= 7.\n+# elif variable == 'O-XYLENE':\n+# fac *= 8.\n+# else:\n+# fac = 1.\n+# return fac\ndiff --git a/monet/models/hysplit.py b/monet/models/hysplit.py\n--- 
a/monet/models/hysplit.py\n+++ b/monet/models/hysplit.py\n@@ -1,165 +1,529 @@\n-from __future__ import division, print_function\n-\n-from builtins import object, zip\n-\n+\"\"\" HYSPLIT MODEL READER \"\"\"\n+from ..grids import _hysplit_latlon_grid_from_dataset\n+from ..grids import get_hysplit_latlon_pyreample_area_def\n import pandas as pd\n import xarray as xr\n-from dask.diagnostics import ProgressBar\n-from numpy import array\n-from past.utils import old_div\n \n-# This file is to deal with CAMx code - try to make it general for CAMx 4.7.1 --> 5.1\n \n+def open_files(fname):\n+ \"\"\"Open a HYSPLIT cdump (concentration) binary file.\n+\n+ Parameters\n+ ----------\n+ fname : str\n+ Path to the cdump file to read.\n+\n+ Returns\n+ -------\n+ xarray.Dataset\n+ The cdump data with the proj4 string and pyresample area attached\n+ as attributes.\n+\n+ CHANGES for PYTHON 3\n+ For python 3 the numpy char4 are read in as a numpy.bytes_ class and need to\n+ be converted to a python\n+ string by using decode('UTF-8').\n+\n+\n+ \"\"\"\n+\n+ # read the binary file into an xarray.Dataset via the ModelBin reader\n+ binfile = ModelBin(fname, verbose=False, readwrite='r')\n+ dset = binfile.dset\n+\n+ # get the grid information\n+ p4 = _hysplit_latlon_grid_from_dataset(dset)\n+ swath = get_hysplit_latlon_pyreample_area_def(dset, p4)\n+\n+ # now assign this to the dataset and each dataarray\n+ dset = dset.assign_attrs({'proj4_srs': p4})\n+ for i in dset.variables:\n+ dset[i] = dset[i].assign_attrs({'proj4_srs': p4})\n+ for j in dset[i].attrs:\n+ dset[i].attrs[j] = dset[i].attrs[j].strip()\n+ dset[i] = dset[i].assign_attrs({'area': swath})\n+ dset = dset.assign_attrs(area=swath)\n+\n+ # return the dataset\n+ return dset\n+\n+\n+# class HYSPLIT(BaseModel):\n+# def __init__(self):\n+# BaseModel.__init__(self)\n+# self.dset = None\n+#\n+# # use open_files method from the BaseModel class.\n+#\n+# def add_files(self, cdump, verbose=True):\n+# # TO DO dset needs to be made into a regular array.\n+# binfile = ModelBin(cdump, verbose=False, readwrite='r')\n+# dset = binfile.dset\n+# if self.dset is None:\n+# self.dset = dset\n+# else:\n+# #self.dset = xr.merge([self.dset, dset])\n+# self.dset.combine_first(dset)\n+# print(self.dset)\n+#\n+# @staticmethod\n+# def select_layer(variable, layer=None):\n+# if layer is not None:\n+# try:\n+# var = variable.sel(levels=layer)\n+# except ValueError:\n+# print(\n+# 'Dimension \'levels\' not in Dataset. 
Returning Dataset\n+# anyway'\n+# )\n+# var = variable\n+# else:\n+# var = variable\n+# return var\n+#\n+# def get_var(self, param, layer=None):\n+# return self.select_layer(self.dset[param], layer=layer)\n+\n+\n+class ModelBin(object):\n+ \"\"\"represents a binary cdump (concentration) output file from HYSPLIT\n+ methods:\n+ readfile - opens and reads contents of cdump file into an xarray\n+ self.dset\n+\n+ \"\"\"\n+\n+ def __init__(self,\n+ filename,\n+ drange=None,\n+ century=None,\n+ verbose=False,\n+ readwrite='r',\n+ fillra=False):\n+ \"\"\"\n+ drange should be a list of two datetime objects.\n+ The read method will store data from the cdump file for which the\n+ sample start falls between drange[0] and drange[1] and for which\n+ the sample stop is no later than drange[1].\n+\n+ \"\"\"\n+ self.drange = drange\n+ self.filename = filename\n+ self.century = century\n+ self.verbose = verbose\n+ # list of tuples (date1, date2) of averaging periods with zero\n+ # concentrations\n+ self.zeroconcdates = []\n+ # list of tuples of averaging periods with nonzero concentrations\n+ self.nonzeroconcdates = []\n+ if readwrite == 'r':\n+ self.dataflag = self.readfile(\n+ filename, drange, verbose, century, fillra=fillra)\n+\n+ @staticmethod\n+ def define_struct():\n+ \"\"\"Each record in the fortran binary begins and ends with 4 bytes which\n+ specify the length of the record. These bytes are called pad below.\n+ They are not used here, but are thrown out. The following block defines\n+ a numpy dtype object for each record in the binary file. \"\"\"\n+ from numpy import dtype\n+ real4 = '>f'\n+ int4 = '>i'\n+ int2 = '>i2'\n+ char4 = '>a4'\n+\n+ rec1 = dtype([\n+ ('pad1', int4),\n+ ('model_id', char4), # meteorological model id\n+ ('met_year', int4), # meteorological model starting time\n+ ('met_month', int4),\n+ ('met_day', int4),\n+ ('met_hr', int4),\n+ ('met_fhr', int4), # forecast hour\n+ ('start_loc', int4), # number of starting locations\n+ ('conc_pack', int4), # concentration packing flag (0=no, 1=yes)\n+ ('pad2', int4),\n+ ])\n+\n+ # start_loc in rec1 tells how many rec2 records there are.\n+ rec2 = dtype([\n+ ('pad1', int4),\n+ ('r_year', int4), # release starting time\n+ ('r_month', int4),\n+ ('r_day', int4),\n+ ('r_hr', int4),\n+ ('s_lat', real4), # Release location\n+ ('s_lon', real4),\n+ ('s_ht', real4),\n+ ('r_min', int4), # release start time (minutes)\n+ ('pad2', int4),\n+ ])\n+\n+ rec3 = dtype([\n+ ('pad1', int4),\n+ ('nlat', int4),\n+ ('nlon', int4),\n+ ('dlat', real4),\n+ ('dlon', real4),\n+ ('llcrnr_lat', real4),\n+ ('llcrnr_lon', real4),\n+ ('pad2', int4),\n+ ])\n+\n+ rec4a = dtype([\n+ ('pad1', int4),\n+ ('nlev', int4), # number of vertical levels in concentration grid\n+ ])\n+\n+ rec4b = dtype([\n+ ('levht', int4), # height of each level (meters above ground)\n+ ])\n+\n+ rec5a = dtype([\n+ ('pad1', int4),\n+ ('pad2', int4),\n+ ('pollnum', int4), # number of different pollutants\n+ ])\n+\n+ rec5b = dtype([\n+ ('pname', char4), # identification string for each pollutant\n+ ])\n+\n+ rec5c = dtype([\n+ ('pad2', int4),\n+ ])\n+\n+ rec6 = dtype([\n+ ('pad1', int4),\n+ ('oyear', int4), # sample start time.\n+ ('omonth', int4),\n+ ('oday', int4),\n+ ('ohr', int4),\n+ ('omin', int4),\n+ ('oforecast', int4),\n+ ('pad3', int4),\n+ ])\n+\n+ # rec7 has same form as rec6. 
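\n+ # Illustrative sketch (not part of the original patch): with these\n+ # dtypes a header record can be read and unpacked directly, e.g.\n+ # from numpy import fromfile\n+ # with open('cdump', 'rb') as fp: # 'cdump' is a hypothetical file\n+ # hdr = fromfile(fp, dtype=rec1, count=1)\n+ # nstartloc = hdr['start_loc'][0]\n+ # The pad1/pad2 fields absorb the 4-byte fortran record markers, so\n+ # the named fields line up with the record payload.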
#sample stop time.\n+\n+ # record 8 is pollutant type identification string, output level.\n \n-ProgressBar().register()\n+ rec8a = dtype([\n+ ('pad1', int4),\n+ ('poll', char4), # pollutant identification string\n+ ('lev', int4),\n+ ('ne', int4), # number of elements\n+ ])\n \n+ rec8b = dtype([\n+ ('indx', int2), # longitude index\n+ ('jndx', int2), # latitude index\n+ ('conc', real4),\n+ ])\n \n-class HYSPLIT(object):\n- def __init__(self):\n- self.objtype = 'HYSPLIT'\n+ rec8c = dtype([\n+ ('pad2', int4),\n+ ])\n+ recs = (rec1, rec2, rec3, rec4a, rec4b, rec5a, rec5b, rec5c, rec6,\n+ rec8a, rec8b, rec8c)\n+ return recs\n+\n+ def readfile(self, filename, drange, verbose, century, fillra=True):\n+ \"\"\"Data from the file is stored in an xarray, self.dset\n+ returns False if all concentrations are zero else returns True.\n+ INPUTS\n+ filename - name of cdump file to open\n+ drange - [date1, date2] - range of dates to load data for. if []\n+ then loads all data.\n+ date1 and date2 should be datetime ojbects.\n+ verbose - turns on print statements\n+ century - if None will try to guess the century by looking\n+ at the last two digits of the year.\n+ For python 3 the numpy char4 are read in as a numpy.bytes_\n+ class and need to be converted to a python\n+ string by using decode('UTF-8').\n+ fillra : if True will return complete concentration grid array\n+ with zero cocenctrations filled in\n+\n+ \"\"\"\n+ from numpy import fromfile, arange\n+ import datetime\n+ # 8/16/2016 moved species=[] to before while loop. Added print\n+ # statements when verbose.\n self.dset = None\n- self.fname = None\n- self.dates = None\n- self.keys = None\n- self.indexdates = None\n- self.latitude = None\n- self.longitude = None\n- self.map = None\n-\n- def get_dates(self):\n- print('Reading CAMx dates...')\n- print(self.dset)\n- tflag1 = array(self.dset['TFLAG'][:, 0], dtype='|S7')\n- tflag2 = array(old_div(self.dset['TFLAG'][:, 1], 10000), dtype='|S6')\n- date = pd.to_datetime([i + j.zfill(2) for i, j in zip(tflag1, tflag2)], format='%Y%j%H')\n- indexdates = pd.Series(date).drop_duplicates(keep='last').index.values\n- self.dset = self.dset.isel(time=indexdates)\n- self.dset['time'] = date[indexdates]\n-\n- def open_camx(self, file):\n- from glob import glob\n- from numpy import sort\n- dropset = ['layer', 'longitude_bounds', 'latitude_bounds',\n- 'x', 'y', 'level', 'lambert_conformal_conic']\n- nameset = {'COL': 'x', 'ROW': 'y', 'TSTEP': 'time', 'LAY': 'z'}\n- if type(file) == str:\n- fname = sort(array(glob(file)))\n- else:\n- fname = sort(array(file))\n- if fname.shape[0] >= 1:\n- if self.dset is None:\n- self.dset = xr.open_mfdataset(\n- fname.tolist(), concat_dim='TSTEP', engine='pnc').drop(dropset).rename(nameset).squeeze()\n- self.load_conus_basemap(res='l')\n- self.get_dates()\n- else:\n- dset = xr.open_mfdataset(fname.tolist(), concat_dim='TSTEP',\n- engine='pnc').drop(dropset).rename(nameset).squeeze()\n- self.dset = xr.merge([self.dset, dset])\n- else:\n- print('Files not found')\n- self.keys = list(self.dset.keys())\n-\n- def check_z(self, varname):\n- if pd.Series(self.dset[varname].dims).isin('z').max():\n- return True\n- else:\n- return False\n+ atthash = {\n+ } # dictionary which will be turned into the dset attributes.\n+ ahash = {}\n+ fp = open(filename, 'rb')\n \n- def get_nox(self, lay=None):\n- if self.check_z('NO'):\n- if lay is not None:\n- var = self.dset['NO'][:, 0, :, :].squeeze().copy()\n- var += self.dset['NO2'][:, 0, :, :].squeeze().copy()\n- else:\n- var = self.dset['NO'][:, :, :, 
:].copy()\n- var += self.dset['NO2'][:, :, :, :].copy()\n- else:\n- var = self.dset['NO'][:, :, :].copy()\n- var += self.dset['NO2'][:, :, :].copy()\n- return var\n-\n- def get_pm25(self, lay=None):\n- keys = list(self.dset.keys())\n- allvars = self.fine\n- index = pd.Series(allvars).isin(keys)\n- newkeys = allvars[index]\n- if self.check_z(newkeys[0]):\n- if lay is not None:\n- var = self.dset[newkeys[0]][:, 0, :, :].squeeze()\n- for i in newkeys[1:]:\n- var += self.dset[i][:, 0, :, :].squeeze()\n- else:\n- var = self.dset[newkeys[0]][:, :, :, :].squeeze()\n- for i in newkeys[1:]:\n- var += self.dset[i][:, :, :, :].squeeze()\n- else:\n- var = self.dset[newkeys[0]][:, :, :].copy()\n- for i in newkeys[1:]:\n- var += self.dset[i][:, :, :].squeeze()\n- return var\n-\n- def get_pm10(self, lay=None):\n- keys = list(self.dset.keys())\n- allvars = self.coarse\n- index = pd.Series(allvars).isin(keys)\n- newkeys = allvars[index]\n- if self.check_z(newkeys[0]):\n- if lay is not None:\n- var = self.dset[newkeys[0]][:, 0, :, :].squeeze()\n- for i in newkeys[1:]:\n- var += self.dset[i][:, 0, :, :].squeeze()\n- else:\n- var = self.dset[newkeys[0]][:, :, :, :].squeeze()\n- for i in newkeys[1:]:\n- var += self.dset[i][:, :, :, :].squeeze()\n- else:\n- var = self.dset[newkeys[0]][:, :, :].copy()\n- for i in newkeys[1:]:\n- var += self.dset[i][:, :, :].squeeze()\n- return var\n-\n- def get_var(self, param='O3', lay=None):\n- p = param.upper()\n- print(param)\n- if p == 'PM25':\n- var = self.get_pm25(lay=lay)\n- elif p == 'PM10':\n- var = self.get_pm10(lay=lay)\n- elif p == 'NOX':\n- var = self.get_nox(lay=lay)\n- elif p == 'OC':\n- var = self.get_oc(lay=lay)\n- elif p == 'VOC':\n- if lay is not None:\n- var = self.dset['VOC'][:, 0, :, :].copy().squeeze()\n- else:\n- var = self.dset['VOC'][:, :, :, :].copy().squeeze()\n- else:\n- if self.check_z(param):\n- if lay is None:\n- var = self.dset[param][:, :, :, :].copy()\n+ # each record in the fortran binary begins and ends with 4 bytes which\n+ # specify the length of the record.\n+ # These bytes are called pad1 and pad2 below. They are not used here,\n+ # but are thrown out.\n+ # The following block defines a numpy dtype object for each record in\n+ # the binary file.\n+ recs = self.define_struct()\n+ rec1, rec2, rec3, rec4a = recs[0], recs[1], recs[2], recs[3]\n+ rec4b, rec5a, rec5b, rec5c = recs[4], recs[5], recs[6], recs[7]\n+ rec6, rec8a, rec8b, rec8c = recs[8], recs[9], recs[10], recs[11]\n+\n+ # rec7 = rec6\n+ # start_loc in rec1 tell how many rec there are.\n+ tempzeroconcdates = []\n+ # Reads header data. 
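\n+ # Illustrative note (not part of the original patch): under python 3\n+ # the char4 fields come back as numpy.bytes_, e.g.\n+ # b'NAM '.decode('UTF-8') # -> 'NAM '\n+ # which is why model_id below (and the pollutant ids later) are\n+ # decoded before being stored as attributes.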
This consists of records 1-5.\n+ hdata1 = fromfile(fp, dtype=rec1, count=1)\n+ nstartloc = hdata1['start_loc'][0]\n+ # in python 3 np.fromfile reads the record into a list even if it is\n+ # just one number.\n+ # so if the length of this record is greater than one something is\n+ # wrong.\n+ if len(hdata1['start_loc']) != 1:\n+ print(\n+ 'WARNING in ModelBin _readfile - number of starting locations '\n+ 'incorrect')\n+ print(hdata1['start_loc'])\n+ hdata2 = fromfile(fp, dtype=rec2, count=nstartloc)\n+ hdata3 = fromfile(fp, dtype=rec3, count=1)\n+ atthash['Number Start Locations'] = nstartloc\n+\n+ # Description of concentration grid\n+ ahash['Number Lat Points'] = hdata3['nlat'][0]\n+ ahash['Number Lon Points'] = hdata3['nlon'][0]\n+ ahash['Latitude Spacing'] = hdata3['dlat'][0]\n+ ahash['Longitude Spacing'] = hdata3['dlon'][0]\n+ ahash['llcrnr longitude'] = hdata3['llcrnr_lon'][0]\n+ ahash['llrcrnr latitude'] = hdata3['llcrnr_lat'][0]\n+\n+ self.llcrnr_lon = hdata3['llcrnr_lon'][0]\n+ self.llcrnr_lat = hdata3['llcrnr_lat'][0]\n+ self.nlat = hdata3['nlat'][0]\n+ self.nlon = hdata3['nlon'][0]\n+ self.dlat = hdata3['dlat'][0]\n+ self.dlon = hdata3['dlon'][0]\n+\n+ atthash['Meteorological Model ID'] = hdata1['model_id'][0].decode(\n+ 'UTF-8')\n+ self.sourcedate = []\n+ self.slat = []\n+ self.slon = []\n+ self.sht = []\n+ atthash['Starting Locations'] = []\n+ atthash['Source Date'] = []\n+ # Loop through starting locations\n+\n+ for n in range(0, nstartloc):\n+ # create list of starting latitudes, longitudes and heights.\n+ self.slat.append(hdata2['s_lat'][n])\n+ self.slon.append(hdata2['s_lon'][n])\n+ self.sht.append(hdata2['s_ht'][n])\n+ atthash['Starting Locations'].append((hdata2['s_lat'][n],\n+ hdata2['s_lon'][n]))\n+\n+ # try to guess century if century not given\n+ if century is None:\n+ if hdata2['r_year'][0] < 50:\n+ century = 2000\n else:\n- var = self.dset[param][:, lay, :, :].copy().squeeze()\n+ century = 1900\n+ print(\n+ 'WARNING: Guessing Century for HYSPLIT concentration file',\n+ century)\n+ # add sourcedate which is datetime.datetime object\n+ sourcedate = (datetime.datetime(\n+ century + hdata2['r_year'][n], hdata2['r_month'][n],\n+ hdata2['r_day'][n], hdata2['r_hr'][n], hdata2['r_min'][n]))\n+ self.sourcedate.append(sourcedate)\n+ atthash['Source Date'].append(sourcedate)\n+\n+ # read record 4 which gives information about vertical levels.\n+ hdata4a = fromfile(fp, dtype=rec4a, count=1) # gets nmber of levels\n+ hdata4b = fromfile(\n+ fp, dtype=rec4b, count=hdata4a['nlev'][\n+ 0]) # reads levels, count is number of levels.\n+ self.levels = hdata4b['levht']\n+ atthash['Number of Levels'] = hdata4a['nlev'][0]\n+ atthash['Level top heights (m)'] = hdata4b['levht']\n+\n+ # read record 5 which gives information about pollutants / species.\n+ hdata5a = fromfile(fp, dtype=rec5a, count=1)\n+ fromfile(fp, dtype=rec5b, count=hdata5a['pollnum'][0])\n+ fromfile(fp, dtype=rec5c, count=1)\n+ # hdata5b = fromfile(fp, dtype=rec5b, count=hdata5a['pollnum'][0])\n+ # hdata5c = fromfile(fp, dtype=rec5c, count=1)\n+ atthash['Number of Species'] = hdata5a['pollnum'][0]\n+\n+ # Loop to reads records 6-8. Number of loops is equal to number of\n+ # output times.\n+ # Only save data for output times within drange. 
if drange=[] then\n+ # save all.\n+ # Loop to go through each sampling time\n+ ii = 0 # check to make sure don't go above max number of iterations\n+ iii = 0 # checks to see if some nonzero data was saved in xarray\n+ # Safety valve - will not allow more than 1000 loops to be executed.\n+ imax = 1e3\n+ testf = True\n+ while testf:\n+ hdata6 = fromfile(fp, dtype=rec6, count=1)\n+ hdata7 = fromfile(fp, dtype=rec6, count=1)\n+ if len(hdata6\n+ ) == 0: # if no data read then break out of the while loop.\n+ break\n+ if verbose:\n+ print('REC 6 & 7 ***************')\n+ print(hdata6)\n+ print(hdata7)\n+ # pdate1 is the sample start\n+ # pdate2 is the sample stop\n+ pdate1 = datetime.datetime(century + hdata6['oyear'],\n+ hdata6['omonth'], hdata6['oday'],\n+ hdata6['ohr'])\n+ pdate2 = datetime.datetime(century + hdata7['oyear'],\n+ hdata7['omonth'], hdata7['oday'],\n+ hdata7['ohr'])\n+ atthash['Sampling Time'] = pdate2 - pdate1\n+ savedata = True\n+\n+ # if pdate1 is within drange then save the data.\n+ # AND if pdate2 is within drange then save the data.\n+ # if drange[0] > pdate1 then stop looping to look for more data\n+ # this block sets savedata to true if data within specified time\n+ # range or time range not specified\n+ if drange is None:\n+ savedata = True\n+ elif pdate1 >= drange[0] and pdate1 <= drange[1] and pdate2 <= drange[1]:\n+ savedata = True\n+ elif pdate1 > drange[1] or pdate2 > drange[1]:\n+ testf = False\n+ savedata = False\n else:\n- var = self.dset[param]\n- return var\n-\n- def load_conus_basemap(self, res='l'):\n- from mpl_toolkits.basemap import Basemap\n- if self.map is None:\n- lat1 = self.dset.P_ALP\n- lat2 = self.dset.P_BET\n- lon1 = self.dset.P_GAM\n- lon0 = self.dset.XCENT\n- lat0 = self.dset.YCENT\n- m = Basemap(projection='lcc', resolution=res, lat_1=lat1, lat_2=lat2, lat_0=lat0, lon_0=lon0,\n- lon_1=lon1,\n- llcrnrlat=self.dset.latitude[0, 0], urcrnrlat=self.dset.latitude[-1, -1],\n- llcrnrlon=self.dset.longitude[0, 0],\n- urcrnrlon=self.dset.longitude[-1, -1], rsphere=6371200.,\n- area_thresh=50.)\n- self.map = m\n- else:\n- m = self.map\n- return self.map\n+ savedata = False\n+ # END block\n+\n+ if verbose:\n+ print(savedata, 'DATES :', pdate1, pdate2)\n+\n+ # datelist = []\n+ atthash['Species ID'] = []\n+ inc_iii = False\n+ # LOOP to go through each level\n+ for lev in range(hdata4a['nlev'][0]):\n+ # LOOP to go through each pollutant\n+ for pollutant in range(hdata5a['pollnum'][0]):\n+\n+ # record 8a has the number of elements (ne). 
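\n+ # Illustrative sketch (not part of the original patch): the branch\n+ # above reduces to a predicate like\n+ # def keep_sample(pdate1, pdate2, drange):\n+ # if drange is None:\n+ # return True\n+ # return drange[0] <= pdate1 <= drange[1] and pdate2 <= drange[1]\n+ # (keep_sample is a hypothetical helper; the loop additionally stops\n+ # once pdate1 or pdate2 passes drange[1].)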
If number of\n+ # elements greater than 0 than there are concentrations.\n+ hdata8a = fromfile(fp, dtype=rec8a, count=1)\n+ atthash['Species ID'].append(\n+ hdata8a['poll'][0].decode('UTF-8'))\n+ # if number of elements is nonzero then\n+ if hdata8a['ne'] >= 1:\n+ hdata8b = fromfile(\n+ fp, dtype=rec8b,\n+ count=hdata8a['ne'][0]) # get rec8 - indx and jndx\n+ self.nonzeroconcdates.append(\n+ pdate1\n+ ) # add sample start time to list of start times with\n+ # non zero conc\n+ else:\n+ tempzeroconcdates.append(\n+ pdate1\n+ ) # or add sample start time to list of start times\n+ # with zero conc.\n+ # This is just padding.\n+ fromfile(fp, dtype=rec8c, count=1)\n+ # if savedata is set and nonzero concentrations then save\n+ # the data in a pandas dataframe\n+ if savedata and hdata8a['ne'] >= 1:\n+ self.nonzeroconcdates.append(pdate1)\n+ # set to True to indicate that there is data to be\n+ # saved.\n+ inc_iii = True\n+\n+ lev_name = hdata8a['lev'][0]\n+ col_name = hdata8a['poll'][0].decode('UTF-8')\n+ ndata = hdata8b.byteswap().newbyteorder(\n+ ) # otherwise get endian error.\n+ concframe = pd.DataFrame.from_records(ndata)\n+ # add latitude longitude columns\n+ lat = arange(self.llcrnr_lat,\n+ self.llcrnr_lat + self.nlat * self.dlat,\n+ self.dlat)\n+ lon = arange(self.llcrnr_lon,\n+ self.llcrnr_lon + self.nlon * self.dlon,\n+ self.dlon)\n+\n+ def flat(x):\n+ return lat[x - 1]\n+\n+ def flon(x):\n+ return lon[x - 1]\n+\n+ # This block will fill in zero values in the\n+ # concentration grid.\n+ if fillra:\n+ n1 = arange(1, self.nlat + 1)\n+ n2 = arange(1, self.nlon + 1)\n+ concframe['ji'] = zip(concframe['jndx'],\n+ concframe['indx'])\n+ concframe.set_index(['ji'], inplace=True)\n+ newi = [(x, y) for x in n1 for y in n2]\n+ concframe = concframe.reindex(newi)\n+ concframe.reset_index(inplace=True)\n+ concframe[['jndx',\n+ 'indx']] = concframe['ji'].tolist()\n+ concframe.fillna(0, inplace=True)\n+ concframe.drop('ji', axis=1, inplace=True)\n+ # print(len(lat))\n+ # print(len(lon))\n+ # print(len(n1), len(n2), n1[-1], n2[-1])\n+ # print(self.nlat, self.nlon)\n+ # print(self.llcrnr_lat, self.llcrnr_lon)\n+ # print(concframe[-50:-1])\n+\n+ concframe['latitude'] = concframe['jndx'].apply(flat)\n+ concframe['longitude'] = concframe['indx'].apply(flon)\n+ concframe.drop(['jndx', 'indx'], axis=1, inplace=True)\n+ concframe['levels'] = lev_name\n+ concframe['time'] = pdate1\n+ if verbose:\n+ print('pdate1')\n+ concframe.set_index(\n+ ['time', 'levels', 'longitude', 'latitude'],\n+ inplace=True)\n+ concframe.rename(\n+ columns={'conc': col_name}, inplace=True)\n+ dset = xr.Dataset.from_dataframe(concframe)\n+ # if this is the first time through. 
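\n+ # Illustrative sketch (not part of the original patch): the\n+ # fillra block above is the usual sparse-to-dense pandas\n+ # pattern, e.g.\n+ # import pandas as pd\n+ # sparse = pd.DataFrame(\n+ # {'jndx': [1, 3], 'indx': [2, 2], 'conc': [5., 7.]})\n+ # full = pd.MultiIndex.from_tuples(\n+ # [(j, i) for j in (1, 2, 3) for i in (1, 2)],\n+ # names=['jndx', 'indx'])\n+ # dense = (sparse.set_index(['jndx', 'indx'])\n+ # .reindex(full).fillna(0).reset_index())\n+ # (a hypothetical 3x2 grid; the real code builds the full\n+ # index from nlat/nlon and maps indices to latitude/longitude.)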
create dataframe\n+ # for first level and pollutant.\n+ if self.dset is None:\n+ self.dset = dset\n+ else: # create dataframe for level and pollutant and\n+ # then merge with main dataframe.\n+ # self.dset = xr.concat([self.dset, dset],'levels')\n+ self.dset = xr.merge([self.dset, dset])\n+ ii += 1\n+\n+ # END LOOP to go through each pollutant\n+ # END LOOP to go through each level\n+ # safety check - will stop sampling time while loop if goes over\n+ # imax iterations.\n+ if ii > imax:\n+ testf = False\n+ if inc_iii:\n+ iii += 1\n+\n+ atthash['Concentration Grid'] = ahash\n+ atthash['Species ID'] = list(set(atthash['Species ID']))\n+ atthash['Coordinate time description'] = 'Beginning of sampling time'\n+ # END OF Loop to go through each sampling time\n+ self.dset.attrs = atthash\n+ if verbose:\n+ print(self.dset)\n+\n+ if iii == 0:\n+ print(\n+ 'Warning: ModelBin class _readfile method: no data in the date range found'\n+ )\n+ return False\n+ return True\ndiff --git a/monet/monet_accessor.py b/monet/monet_accessor.py\nnew file mode 100644\n--- /dev/null\n+++ b/monet/monet_accessor.py\n@@ -0,0 +1,507 @@\n+\"MONET Accessor\"\n+\n+from __future__ import absolute_import, division, print_function\n+from builtins import object\n+import pandas as pd\n+import xarray as xr\n+import stratify\n+\n+\n+@xr.register_dataarray_accessor('monet')\n+class MONETAccessor(object):\n+ \"\"\"Short summary.\n+\n+ Parameters\n+ ----------\n+ xray_obj : type\n+ Description of parameter `xray_obj`.\n+\n+ Attributes\n+ ----------\n+ obj : type\n+ Description of attribute `obj`.\n+\n+ \"\"\"\n+\n+ def __init__(self, xray_obj):\n+ self.obj = xray_obj\n+\n+ def stratify(self, levels, vertical, axis=1):\n+ \"\"\"Short summary.\n+\n+ Parameters\n+ ----------\n+ levels : type\n+ Description of parameter `levels`.\n+ vertical : type\n+ Description of parameter `vertical`.\n+ axis : type\n+ Description of parameter `axis`.\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ \"\"\"\n+ result = stratify.interpolate(\n+ levels, vertical.chunk(), self.obj.chunk(), axis=axis)\n+ dims = self.obj.dims\n+ out = xr.DataArray(result, dims=dims)\n+ for i in dims:\n+ if i != 'z':\n+ out[i] = self.obj[i]\n+ out.attrs = self.obj.attrs.copy()\n+ if len(self.obj.coords) > 0:\n+ for i in self.obj.coords:\n+ out.coords[i] = self.obj.coords[i]\n+ return out\n+\n+ def interp_constant_lat(self, lat=None, **kwargs):\n+ \"\"\"Interpolates the data array to constant longitude.\n+\n+ Parameters\n+ ----------\n+ lon : float\n+ Latitude on which to interpolate to\n+\n+ Returns\n+ -------\n+ DataArray\n+ DataArray of at constant longitude\n+\n+ \"\"\"\n+ from .util.interp_util import constant_lat_swathdefition\n+ from .util.resample import resample_dataset\n+ from numpy import linspace\n+ try:\n+ if lat is None:\n+ raise RuntimeError\n+ except RuntimeError:\n+ print('Must enter lat value')\n+ longitude = linspace(self.obj.longitude.min(),\n+ self.obj.longitude.max(), len(self.obj.x))\n+ target = constant_lat_swathdefition(longitude=longitude, latitude=lat)\n+ output = resample_dataset(self.obj, target, **kwargs).squeeze()\n+ return output\n+\n+ def interp_constant_lon(self, lon=None, **kwargs):\n+ \"\"\"Interpolates the data array to constant longitude.\n+\n+ Parameters\n+ ----------\n+ lon : float\n+ Latitude on which to interpolate to\n+\n+ Returns\n+ -------\n+ DataArray\n+ DataArray of at constant longitude\n+\n+ \"\"\"\n+ from .util.interp_util import constant_lon_swathdefition\n+ from .util.resample import 
resample_dataset\n+ from numpy import linspace\n+ try:\n+ if lon is None:\n+ raise RuntimeError\n+ except RuntimeError:\n+ print('Must enter lon value')\n+ latitude = linspace(self.obj.latitude.min(), self.obj.latitude.max(),\n+ len(self.obj.y))\n+ target = constant_lon_swathdefition(longitude=lon, latitude=latitude)\n+ output = resample_dataset(self.obj, target, **kwargs).squeeze()\n+ return output\n+\n+ def nearest_latlon(self, lat=None, lon=None, **kwargs):\n+ \"\"\"Short summary.\n+\n+ Parameters\n+ ----------\n+ lat : type\n+ Description of parameter `lat`.\n+ lon : type\n+ Description of parameter `lon`.\n+ **kwargs : type\n+ Description of parameter `**kwargs`.\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ \"\"\"\n+ from .util.interp_util import nearest_point_swathdefinition\n+ from .util.resample import resample_dataset\n+ try:\n+ if lat is None or lon is None:\n+ raise RuntimeError\n+ except RuntimeError:\n+ print('Must provide latitude and longitude')\n+ target = nearest_point_swathdefinition(longitude=lon, latitude=lat)\n+ output = resample_dataset(self.obj, target, **kwargs)\n+ return output\n+\n+ def cartopy(self):\n+ \"\"\"Short summary.\n+\n+ Returns\n+ -------\n+ type\n+ Returns a cartopy.crs.Projection for this dataset\n+\n+ \"\"\"\n+\n+ return self.obj.area.to_cartopy_crs()\n+\n+ def quick_map(self, map_kwarg={}, **kwargs):\n+ \"\"\"Short summary.\n+\n+ Parameters\n+ ----------\n+ map_kwarg : type\n+ Description of parameter `map_kwarg`.\n+ **kwargs : type\n+ Description of parameter `**kwargs`.\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ \"\"\"\n+ from .plots.mapgen import draw_map\n+ from matplotlib.pyplot import tight_layout\n+ # import cartopy.crs as ccrs\n+ # crs = self.obj.monet.cartopy()\n+ ax = draw_map(**map_kwarg)\n+ self.obj.plot(x='longitude', y='latitude', ax=ax, **kwargs)\n+ ax.outline_patch.set_alpha(0)\n+ tight_layout()\n+ return ax\n+\n+ def remap_data(self, dataarray, grid=None, **kwargs):\n+ \"\"\"remaps from another grid to the current grid of self using pyresample.\n+ it assumes that the dimensions are ordered in ROW,COL,CHANNEL per\n+ pyresample docs\n+\n+ Parameters\n+ ----------\n+ grid : pyresample grid (SwathDefinition or AreaDefinition)\n+ Description of parameter `grid`.\n+ da : ndarray or xarray DataArray\n+ Description of parameter `dset`.\n+ radius_of_influence : float or integer\n+ radius of influcence for pyresample in meters.\n+\n+ Returns\n+ -------\n+ xarray.DataArray\n+ resampled object on current grid.\n+\n+ \"\"\"\n+ from .util import resample\n+ # check to see if grid is supplied\n+ target = self.obj.area\n+ if grid is None: # grid is assumed to be in da.area\n+ out = resample.resample_dataset(dataarray.chunk(), target,\n+ **kwargs)\n+ else:\n+ dataarray.attrs['area'] = grid\n+ out = resample.resample_dataset(dataarray.chunk(), target,\n+ **kwargs)\n+ return out\n+\n+ def combine(self, data, col=None, radius_of_influence=None):\n+ \"\"\"Short summary.\n+\n+ Parameters\n+ ----------\n+ data : type\n+ Description of parameter `data`.\n+ col : type\n+ Description of parameter `col`.\n+ radius : type\n+ Description of parameter `radius`.\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ \"\"\"\n+ from .models.combinetool import combine_da_to_df\n+ # point source data\n+ if isinstance(data, pd.DataFrame):\n+ try:\n+ if col is None:\n+ raise RuntimeError\n+ return combine_da_to_df(\n+ self.obj,\n+ data,\n+ col=col,\n+ 
radius_of_influence=radius_of_influence)\n+ except RuntimeError:\n+ print('Must enter col ')\n+ elif isinstance(data, xr.Dataset) or isinstance(data, xr.DataArray):\n+ print('do spatial transform')\n+ else:\n+ print('d must be either a pd.DataFrame or xr.DataArray')\n+\n+\n+@xr.register_dataset_accessor('monet')\n+class MONETAccessorDataset(object):\n+ \"\"\"Short summary.\n+\n+ Parameters\n+ ----------\n+ xray_obj : type\n+ Description of parameter `xray_obj`.\n+\n+ Attributes\n+ ----------\n+ obj : type\n+ Description of attribute `obj`.\n+\n+ \"\"\"\n+\n+ def __init__(self, xray_obj):\n+ self.obj = xray_obj\n+\n+ def remap_data(self, data, grid=None, **kwargs):\n+ \"\"\"Short summary.\n+\n+ Parameters\n+ ----------\n+ data : type\n+ Description of parameter `data`.\n+ grid : type\n+ Description of parameter `grid`.\n+ **kwargs : type\n+ Description of parameter `**kwargs`.\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ \"\"\"\n+ try:\n+ if isinstance(data, xr.DataArray):\n+ self._remap_dataarray(data, grid=grid, **kwargs)\n+ elif isinstance(data, xr.Dataset):\n+ self._remap_dataset(data, grid=None, **kwargs)\n+ else:\n+ raise TypeError\n+ except TypeError:\n+ print('data must be an xarray.DataArray or xarray.Dataset')\n+\n+ def _remap_dataset(self, dset, grid=None, **kwargs):\n+ \"\"\"Resample the entire dset (xarray.Dataset) to the current dataset object.\n+\n+ Parameters\n+ ----------\n+ dset : xarray.Dataset\n+ Description of parameter `dataarray`.\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ \"\"\"\n+ # from .util import resample\n+ # target = self.obj.area\n+ skip_keys = ['latitude', 'longitude', 'time', 'TFLAG']\n+ vars = pd.Series(dset.variables)\n+ loop_vars = vars.loc[~vars.isin(skip_keys)]\n+ # get the first one in the loop and get the resample_cache data\n+ dataarray = dset[loop_vars[0]]\n+\n+ da, resample_cache = self._remap_dataarray(\n+ dataarray, grid=grid, return_neighbor_info=True, **kwargs)\n+ if da.name in self.obj.variables:\n+ da.name = da.name + '_y'\n+ self.obj[da.name] = da\n+ for i in loop_vars[1:]:\n+ dataarray = dset[i]\n+ da, resample_cache = self._remap_dataarray(\n+ dataarray, grid=grid, resample_cache=resample_cache, **kwargs)\n+ if da.name in self.obj.variables:\n+ da.name = da.name + '_y'\n+ self.obj[da.name] = da\n+\n+ def _remap_dataarray(self, dataarray, grid=None, **kwargs):\n+ \"\"\"Resample the DataArray to the dataset object.\n+\n+ Parameters\n+ ----------\n+ dataarray : type\n+ Description of parameter `dataarray`.\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ \"\"\"\n+ from .util import resample\n+ target = self.obj.area\n+ if grid is None: # grid is assumed to be in da.area\n+ out = resample.resample_dataset(dataarray.chunk(), target,\n+ **kwargs)\n+\n+ else:\n+ dataarray.attrs['area'] = grid\n+ out = resample.resample_dataset(dataarray.chunk(), target,\n+ **kwargs)\n+ if out.name in self.obj.variables:\n+ out.name = out.name + '_y'\n+ self.obj[out.name] = out\n+\n+ def nearest_latlon(self, lat=None, lon=None, **kwargs):\n+ vars = pd.Series(self.obj.variables)\n+ skip_keys = ['latitude', 'longitude', 'time', 'TFLAG']\n+ loop_vars = vars.loc[~vars.isin(skip_keys)]\n+ orig = self.obj[loop_vars.iloc[0]].monet.nearest_latlon(\n+ lat=lat, lon=lon, **kwargs)\n+ dset = orig.to_dataset()\n+ dset.attrs = self.obj.attrs.copy()\n+ for i in loop_vars[1:].values:\n+ dset[i] = self.obj[i].monet.nearest_latlon(\n+ lat=lat, lon=lon, **kwargs)\n+ return dset\n+ \"\"\"Short 
summary.\n+\n+ Parameters\n+ ----------\n+ lat : type\n+ Description of parameter `lat`.\n+ lon : type\n+ Description of parameter `lon`.\n+ **kwargs : type\n+ Description of parameter `**kwargs`.\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ \"\"\"\n+\n+ def interp_constant_lat(self, lat=None, **kwargs):\n+ \"\"\"Interpolate all data variables to a constant latitude.\n+\n+ Parameters\n+ ----------\n+ lat : float\n+ Latitude on which to interpolate.\n+ **kwargs : type\n+ Description of parameter `**kwargs`.\n+\n+ Returns\n+ -------\n+ xarray.Dataset\n+ Dataset interpolated to the constant latitude.\n+\n+ \"\"\"\n+ vars = pd.Series(self.obj.variables)\n+ skip_keys = ['latitude', 'longitude', 'time', 'TFLAG']\n+ loop_vars = vars.loc[~vars.isin(skip_keys)]\n+\n+ orig = self.obj[loop_vars.iloc[0]].monet.interp_constant_lat(\n+ lat=lat, **kwargs)\n+\n+ dset = orig.to_dataset()\n+ dset.attrs = self.obj.attrs.copy()\n+ for i in loop_vars[1:].values:\n+ # DataArray helpers are exposed through the .monet accessor\n+ dset[i] = self.obj[i].monet.interp_constant_lat(lat=lat, **kwargs)\n+ return dset\n+\n+ def interp_constant_lon(self, lon=None, **kwargs):\n+ \"\"\"Interpolate all data variables to a constant longitude.\n+\n+ Parameters\n+ ----------\n+ lon : float\n+ Longitude on which to interpolate.\n+ **kwargs : type\n+ Description of parameter `**kwargs`.\n+\n+ Returns\n+ -------\n+ xarray.Dataset\n+ Dataset interpolated to the constant longitude.\n+\n+ \"\"\"\n+ vars = pd.Series(self.obj.variables)\n+ skip_keys = ['latitude', 'longitude', 'time', 'TFLAG']\n+ loop_vars = vars.loc[~vars.isin(skip_keys)]\n+ orig = self.obj[loop_vars[0]].monet.interp_constant_lon(\n+ lon=lon, **kwargs)\n+ dset = orig.to_dataset()\n+ dset.attrs = self.obj.attrs.copy()\n+ for i in loop_vars[1:].values:\n+ # DataArray helpers are exposed through the .monet accessor\n+ dset[i] = self.obj[i].monet.interp_constant_lon(lon=lon, **kwargs)\n+ return dset\n+\n+ def stratify(self, levels, vertical, axis=1):\n+ \"\"\"Interpolate all variables with a 'z' dimension onto the given\n+ vertical levels.\n+\n+ Parameters\n+ ----------\n+ levels : type\n+ Description of parameter `levels`.\n+ vertical : type\n+ Description of parameter `vertical`.\n+ axis : type\n+ Description of parameter `axis`.\n+\n+ Returns\n+ -------\n+ xarray.Dataset\n+ Dataset interpolated onto the given levels.\n+\n+ \"\"\"\n+ loop_vars = [i for i in self.obj.variables if 'z' in self.obj[i].dims]\n+ orig = self.obj[loop_vars[0]].monet.stratify(levels, vertical, axis=axis)\n+ dset = orig.to_dataset()\n+ dset.attrs = self.obj.attrs.copy()\n+ for i in loop_vars[1:]:\n+ # DataArray helpers are exposed through the .monet accessor\n+ dset[i] = self.obj[i].monet.stratify(levels, vertical, axis=axis)\n+ return dset\n+\n+ def cartopy(self):\n+ \"\"\"Returns a cartopy.crs.Projection for this dataset.\"\"\"\n+ return self.obj.area.to_cartopy_crs()\n+\n+ def combine_to_df(self, df, mapping_table=None, radius_of_influence=None):\n+ \"\"\"Combine model variables with a point-observation DataFrame using a\n+ mapping table.\n+\n+ Parameters\n+ ----------\n+ df : type\n+ Description of parameter `df`.\n+ mapping_table : type\n+ Description of parameter `mapping_table`.\n+ radius_of_influence : type\n+ Description of parameter `radius_of_influence`.\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ \"\"\"\n+ from .models.combinetool import combine_da_to_df\n+ try:\n+ if not isinstance(df, pd.DataFrame):\n+ raise TypeError\n+ except TypeError:\n+ print('df must be of type pd.DataFrame')\n+ for i in mapping_table:\n+ df = combine_da_to_df(\n+ self.obj[mapping_table[i]],\n+ df,\n+ col=i,\n+ radius_of_influence=radius_of_influence)\n+ return df\ndiff --git a/monet/obs/__init__.py b/monet/obs/__init__.py\n--- a/monet/obs/__init__.py\n+++ b/monet/obs/__init__.py\n@@ -1,7 +1,21 @@\n from __future__ import absolute_import, print_function\n+from . import aeronet_mod, airnow_mod, aqs_mod, crn_mod, epa_util, improve_mod\n+from . 
import ish_mod, tolnet_mod, cems_mod, nadp_mod, modis_swath\n \n-from . import aeronet, airnow, aqs, crn, epa_util, improve, ish, tolnet\n-\n-__all__ = ['aeronet', 'airnow', 'aqs', 'crn', 'epa_util', 'improve', 'ish', 'tolnet']\n+__all__ = [\n+ 'aeronet_mod', 'airnow_mod', 'aqs_mod', 'crn_mod', 'epa_util',\n+ 'improve_mod', 'ish_mod', 'tolnet_mod', 'cems_mod', 'nadp_mod',\n+ 'modis_swath'\n+]\n \n __name__ = 'obs'\n+\n+airnow = airnow_mod.AirNow()\n+aqs = aqs_mod.AQS()\n+aeronet = aeronet_mod.AERONET()\n+crn = crn_mod.crn()\n+improve = improve_mod.IMPROVE()\n+tolnet = tolnet_mod.TOLNet()\n+cems = cems_mod.CEMS()\n+nadp = nadp_mod.NADP()\n+\ndiff --git a/monet/obs/aeronet_mod.py b/monet/obs/aeronet_mod.py\nnew file mode 100644\n--- /dev/null\n+++ b/monet/obs/aeronet_mod.py\n@@ -0,0 +1,175 @@\n+from __future__ import division, print_function\n+\n+# this is written to retrive airnow data concatenate and add to pandas array\n+# for usage\n+from builtins import object, str\n+from datetime import datetime\n+\n+import pandas as pd\n+from past.utils import old_div\n+\n+\n+def dateparse(x):\n+ return pd.datetime.strptime(x, '%d:%m:%Y %H:%M:%S')\n+\n+\n+class AERONET(object):\n+ def __init__(self):\n+ from numpy import concatenate, arange\n+ self.baseurl = 'https://aeronet.gsfc.nasa.gov/cgi-bin/print_web_data_v3?'\n+ self.dates = [\n+ datetime.strptime('2016-06-06 12:00:00', '%Y-%m-%d %H:%M:%S'),\n+ datetime.strptime('2016-06-10 13:00:00', '%Y-%m-%d %H:%M:%S')\n+ ]\n+ self.datestr = []\n+ self.df = pd.DataFrame()\n+ self.daily = None\n+ self.prod = None\n+ self.inv_type = None\n+ self.objtype = 'AERONET'\n+ self.usecols = concatenate((arange(30), arange(65, 83)))\n+ # [21.1,-131.6686,53.04,-58.775] #[latmin,lonmin,latmax,lonmax]\n+ self.latlonbox = None\n+ self.url = None\n+\n+ def build_url(self):\n+ sy = self.dates.min().strftime('%Y')\n+ sm = self.dates.min().strftime('%m').zfill(2)\n+ sd = self.dates.min().strftime('%d').zfill(2)\n+ sh = self.dates.min().strftime('%H').zfill(2)\n+ ey = self.dates.max().strftime('%Y').zfill(2)\n+ em = self.dates.max().strftime('%m').zfill(2)\n+ ed = self.dates.max().strftime('%d').zfill(2)\n+ eh = self.dates.max().strftime('%H').zfill(2)\n+ if self.prod in [\n+ 'AOD10', 'AOD15', 'AOD20', 'SDA10', 'SDA15', 'SDA20', 'TOT10',\n+ 'TOT15', 'TOT20'\n+ ]:\n+ base_url = 'https://aeronet.gsfc.nasa.gov/cgi-bin/print_web_data_v3?'\n+ inv_type = ''\n+ else:\n+ base_url = 'https://aeronet.gsfc.nasa.gov/cgi-bin/print_web_data_inv_v3?'\n+ if self.inv_type == 'ALM15':\n+ inv_type = '&ALM15=1'\n+ else:\n+ inv_type = '&AML20=1'\n+ date_portion = 'year=' + sy + '&month=' + sm + '&day=' + sd + \\\n+ '&hour=' + sh + '&year2=' + ey + '&month2=' + em + '&day2=' + ed +\\\n+ '&hour2=' + eh\n+ if self.inv_type is not '':\n+ product = '&product=' + self.prod\n+ else:\n+ product = '&' + self.prod + '=1'\n+ time = '&AVG=' + str(self.daily)\n+ if self.latlonbox is None:\n+ latlonbox = ''\n+ else:\n+ lat1 = str(self.latlonbox[0])\n+ lon1 = str(self.latlonbox[1])\n+ lat2 = str(self.latlonbox[2])\n+ lon2 = str(self.latlonbox[3])\n+ latlonbox = '&lat1=' + lat1 + '&lat2=' + \\\n+ lat2 + '&lon1=' + lon1 + '&lon2=' + lon2\n+ self.url = base_url + date_portion + product + inv_type + time +\\\n+ '&if_no_html=1'\n+ #\n+ # self.url = 'https://aeronet.gsfc.nasa.gov/cgi-bin/print_web_data_v3?year=' + sy + '&month=' + sm + '&day=' + sd + \\\n+ # '&hour=' + sh + '&year2=' + ey + '&month2=' + em + '&day2=' + ed + '&hour2=' + eh + '&AOD15=1&AVG=10&if_no_html=1'\n+ #\n+ #\n+ # self.url = 
'https://aeronet.gsfc.nasa.gov/cgi-bin/print_web_data_v3?year=' + sy + '&month=' + sm + '&day=' + sd + '&hour=' + sh + '&year2=' + ey + \\\n+ # '&month2=' + em + '&day2=' + ed + '&hour2=' + eh + '&lat1=' + lat1 + '&lat2=' + lat2 + '&lon1=' + lon1 + '&lon2=' + lon2 + '&AOD15=1&AVG=10&if_no_html=1'\n+\n+ def read_aeronet(self):\n+ print('Reading Aeronet Data...')\n+ # header = self.get_columns()\n+ df = pd.read_csv(\n+ self.url,\n+ engine='python',\n+ header=None,\n+ skiprows=6,\n+ parse_dates={'time': [1, 2]},\n+ date_parser=dateparse,\n+ na_values=-999)\n+ # df.rename(columns={'date_time': 'time'}, inplace=True)\n+ columns = self.get_columns()\n+ df.columns = columns # self.get_columns()\n+ df.index = df.time\n+ df.rename(\n+ columns={\n+ 'site_latitude(degrees)': 'latitude',\n+ 'site_longitude(degrees)': 'longitude',\n+ 'site_elevation(m)': 'elevation',\n+ 'aeronet_site': 'siteid'\n+ },\n+ inplace=True)\n+ df.dropna(subset=['latitude', 'longitude'], inplace=True)\n+ df.dropna(axis=1, how='all', inplace=True)\n+ self.df = df\n+\n+ def get_columns(self):\n+ header = pd.read_csv(\n+ self.url, skiprows=5, header=None, nrows=1).values.flatten()\n+ final = ['time']\n+ for i in header:\n+ if \"Date(\" in i or 'Time(' in i:\n+ pass\n+ else:\n+ final.append(i.lower())\n+ return final\n+\n+ def add_data(self,\n+ dates=None,\n+ product='AOD15',\n+ latlonbox=None,\n+ daily=False,\n+ calc_550=True,\n+ inv_type=None,\n+ freq=None,\n+ detect_dust=False):\n+ self.latlonbox = latlonbox\n+ if dates is None: # get the current day\n+ self.dates = pd.date_range(\n+ start=pd.to_datetime('today'),\n+ end=pd.to_datetime('now'),\n+ freq='H')\n+ else:\n+ self.dates = dates\n+ self.prod = product.upper()\n+ if daily:\n+ self.daily = 20 # daily data\n+ else:\n+ self.daily = 10 # all points\n+ if inv_type is None:\n+ self.inv_type = 'ALM15'\n+ else:\n+ self.inv_type = inv_type\n+ self.build_url()\n+ self.read_aeronet()\n+ if freq is not None:\n+ self.df = self.df.groupby('siteid').resample(\n+ freq).mean().reset_index()\n+ if detect_dust:\n+ self.dust_detect()\n+ if calc_550:\n+ self.calc_550nm()\n+ return self.df.copy()\n+\n+ def calc_550nm(self):\n+ \"\"\"Since AOD at 500nm is not calculated we use the extrapolation of\n+ V. Cesnulyte et al (ACP,2014) for the calculation\n+\n+ aod550 = aod500 * (550/500) ^ -alpha\n+ \"\"\"\n+ self.df['aod_550nm'] = self.df.aod_500nm * (old_div(\n+ 550., 500.))**(-self.df['440-870_angstrom_exponent'])\n+\n+ def dust_detect(self):\n+ \"\"\" [Dubovik et al., 2002]. 
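\n+ Illustrative example (not part of the original patch): a site\n+ reporting aod_1020nm = 0.45 with a 440-870 angstrom exponent of\n+ 0.3 (optically thick, coarse-dominated aerosol) is flagged as\n+ dust, since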
AOD_1020 > 0.3 and AE(440,870) < 0.6\"\"\"\n+ self.df['dust'] = (self.df['aod_1020nm'] >\n+ 0.3) & (self.df['440-870_angstrom_exponent'] < 0.6)\n+\n+ def set_daterange(self, begin='', end=''):\n+ dates = pd.date_range(\n+ start=begin, end=end, freq='H').values.astype('M8[s]').astype('O')\n+ self.dates = dates\ndiff --git a/monet/obs/airnow_mod.py b/monet/obs/airnow_mod.py\nnew file mode 100644\n--- /dev/null\n+++ b/monet/obs/airnow_mod.py\n@@ -0,0 +1,291 @@\n+from __future__ import print_function\n+\n+import inspect\n+import os\n+# this is written to retrive airnow data concatenate and add to pandas array\n+# for usage\n+from builtins import object\n+from datetime import datetime\n+\n+import pandas as pd\n+\n+\n+class AirNow(object):\n+ \"\"\"Short summary.\n+\n+ Attributes\n+ ----------\n+ url : type\n+ Description of attribute `url`.\n+ dates : type\n+ Description of attribute `dates`.\n+ datestr : type\n+ Description of attribute `datestr`.\n+ df : type\n+ Description of attribute `df`.\n+ daily : type\n+ Description of attribute `daily`.\n+ objtype : type\n+ Description of attribute `objtype`.\n+ filelist : type\n+ Description of attribute `filelist`.\n+ monitor_file : type\n+ Description of attribute `monitor_file`.\n+ __class__ : type\n+ Description of attribute `__class__`.\n+ monitor_df : type\n+ Description of attribute `monitor_df`.\n+ savecols : type\n+ Description of attribute `savecols`.\n+ \"\"\"\n+\n+ def __init__(self):\n+ self.datadir = '.'\n+ self.cwd = os.getcwd()\n+ self.url = None\n+ self.dates = [\n+ datetime.strptime('2016-06-06 12:00:00', '%Y-%m-%d %H:%M:%S'),\n+ datetime.strptime('2016-06-06 13:00:00', '%Y-%m-%d %H:%M:%S')\n+ ]\n+ self.datestr = []\n+ self.df = pd.DataFrame()\n+ self.daily = False\n+ self.objtype = 'AirNow'\n+ self.filelist = None\n+ self.monitor_file = inspect.getfile(\n+ self.__class__)[:-13] + 'data/monitoring_site_locations.dat'\n+ self.monitor_df = None\n+ self.savecols = [\n+ 'time', 'siteid', 'site', 'utcoffset', 'variable', 'units', 'obs',\n+ 'time_local', 'latitude', 'longitude', 'cmsa_name', 'msa_code',\n+ 'msa_name', 'state_name', 'epa_region'\n+ ]\n+\n+ def convert_dates_tofnames(self):\n+ \"\"\"Helper function to create file names\n+\n+ Returns\n+ -------\n+\n+\n+ \"\"\"\n+ self.datestr = []\n+ for i in self.dates:\n+ self.datestr.append(i.strftime('%Y%m%d%H.dat'))\n+\n+ def build_urls(self):\n+ \"\"\"Short summary.\n+\n+ Returns\n+ -------\n+ helper function to build urls\n+\n+ \"\"\"\n+\n+ furls = []\n+ fnames = []\n+ print('Building AIRNOW URLs...')\n+ # 2017/20170131/HourlyData_2017012408.dat\n+ url = 'https://s3-us-west-1.amazonaws.com//files.airnowtech.org/airnow/'\n+ for i in self.dates:\n+ f = url + i.strftime('%Y/%Y%m%d/HourlyData_%Y%m%d%H.dat')\n+ fname = i.strftime('HourlyData_%Y%m%d%H.dat')\n+ furls.append(f)\n+ fnames.append(fname)\n+ # https://s3-us-west-1.amazonaws.com//files.airnowtech.org/airnow/2017/20170108/HourlyData_2016121506.dat\n+\n+ # files needed for comparison\n+ self.url = pd.Series(furls, index=None)\n+ self.fnames = pd.Series(fnames, index=None)\n+\n+ def read_csv(self, fn):\n+ \"\"\"Short summary.\n+\n+ Parameters\n+ ----------\n+ fn : string\n+ file name to read\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ \"\"\"\n+ try:\n+ dft = pd.read_csv(\n+ fn,\n+ delimiter='|',\n+ header=None,\n+ error_bad_lines=False,\n+ encoding='ISO-8859-1')\n+ cols = [\n+ 'date', 'time', 'siteid', 'site', 'utcoffset', 'variable',\n+ 'units', 'obs', 'source'\n+ ]\n+ dft.columns = cols\n+ except 
Exception:\n+ cols = [\n+ 'date', 'time', 'siteid', 'site', 'utcoffset', 'variable',\n+ 'units', 'obs', 'source'\n+ ]\n+ dft = pd.DataFrame(columns=cols)\n+ dft['obs'] = dft.obs.astype(float)\n+ dft['siteid'] = dft.siteid.str.zfill(9)\n+ dft['utcoffset'] = dft.utcoffset.astype(int)\n+ return dft\n+\n+ def retrieve(self, url, fname):\n+ \"\"\"Short summary.\n+\n+ Parameters\n+ ----------\n+ url : type\n+ Description of parameter `url`.\n+ fname : type\n+ Description of parameter `fname`.\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ \"\"\"\n+ import requests\n+\n+ if not os.path.isfile(fname):\n+ print('\\n Retrieving: ' + fname)\n+ print(url)\n+ print('\\n')\n+ r = requests.get(url)\n+ open(fname, 'wb').write(r.content)\n+ else:\n+ print('\\n File Exists: ' + fname)\n+\n+ def aggregate_files(self, download=False):\n+ \"\"\"Short summary.\n+\n+ Parameters\n+ ----------\n+ download : type\n+ Description of parameter `download` (the default is False).\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ \"\"\"\n+ import dask\n+ import dask.dataframe as dd\n+\n+ print('Aggregating AIRNOW files...')\n+ self.build_urls()\n+ if download:\n+ for url, fname in zip(self.url, self.fnames):\n+ self.retrieve(url, fname)\n+ dfs = [dask.delayed(self.read_csv)(f) for f in self.fnames]\n+ else:\n+ dfs = [dask.delayed(self.read_csv)(f) for f in self.url]\n+ dff = dd.from_delayed(dfs)\n+ df = dff.compute()\n+ df['time'] = pd.to_datetime(\n+ df.date + ' ' + df.time,\n+ format='%m/%d/%y %H:%M',\n+ exact=True,\n+ box=False)\n+ df.drop(['date'], axis=1, inplace=True)\n+ df['time_local'] = df.time + pd.to_timedelta(df.utcoffset, unit='H')\n+ self.df = df\n+ print(' Adding in Meta-data')\n+ self.get_station_locations()\n+ self.df = self.df[self.savecols]\n+ self.df.drop_duplicates(inplace=True)\n+ self.filter_bad_values()\n+\n+ def add_data(self, dates, download=False):\n+ \"\"\"Short summary.\n+\n+ Parameters\n+ ----------\n+ dates : type\n+ Description of parameter `dates`.\n+ download : type\n+ Description of parameter `download` (the default is False).\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ \"\"\"\n+ self.dates = dates\n+ self.aggregate_files(download=download)\n+ return self.df\n+\n+ def filter_bad_values(self):\n+ \"\"\"Short summary.\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ \"\"\"\n+ from numpy import NaN\n+ self.df.loc[(self.df.obs > 1000) | (self.df.obs < 0), 'obs'] = NaN\n+\n+ def set_daterange(self, **kwargs):\n+ \"\"\"Short summary.\n+\n+ Parameters\n+ ----------\n+ begin : type\n+ Description of parameter `begin` (the default is '').\n+ end : type\n+ Description of parameter `end` (the default is '').\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ \"\"\"\n+ dates = pd.date_range(**kwargs)\n+ self.dates = dates\n+ return dates\n+\n+ def get_station_locations(self):\n+ \"\"\"Short summary.\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ \"\"\"\n+ from .epa_util import read_monitor_file\n+ self.monitor_df = read_monitor_file(airnow=True)\n+ self.df = pd.merge(\n+ self.df, self.monitor_df, on='siteid') # , how='left')\n+\n+ def get_station_locations_remerge(self, df):\n+ \"\"\"Short summary.\n+\n+ Parameters\n+ ----------\n+ df : type\n+ Description of parameter `df`.\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ \"\"\"\n+ df = pd.merge(\n+ df,\n+ self.monitor_df.drop(['Latitude', 'Longitude'], axis=1),\n+ 
on='siteid') # ,\n+ # how='left')\n+ return df\ndiff --git a/monet/obs/aqs_mod.py b/monet/obs/aqs_mod.py\nnew file mode 100644\n--- /dev/null\n+++ b/monet/obs/aqs_mod.py\n@@ -0,0 +1,568 @@\n+from __future__ import print_function\n+\n+import inspect\n+import os\n+# this is a class to deal with aqs data\n+from builtins import object, zip\n+import pandas as pd\n+from dask.diagnostics import ProgressBar\n+\n+from .epa_util import read_monitor_file\n+\n+pbar = ProgressBar()\n+pbar.register()\n+\n+\n+class AQS(object):\n+ \"\"\"Short summary.\n+\n+ Attributes\n+ ----------\n+ baseurl : type\n+ Description of attribute `baseurl`.\n+ objtype : type\n+ Description of attribute `objtype`.\n+ baseurl : type\n+ Description of attribute `baseurl`.\n+ dates : type\n+ Description of attribute `dates`.\n+ renamedhcols : type\n+ Description of attribute `renamedhcols`.\n+ renameddcols : type\n+ Description of attribute `renameddcols`.\n+ savecols : type\n+ Description of attribute `savecols`.\n+ df : type\n+ Description of attribute `df`.\n+ monitor_file : type\n+ Description of attribute `monitor_file`.\n+ __class__ : type\n+ Description of attribute `__class__`.\n+ monitor_df : type\n+ Description of attribute `monitor_df`.\n+ daily : type\n+ Description of attribute `daily`.\n+ d_df : type\n+ Description of attribute `d_df`.\n+\n+ \"\"\"\n+\n+ def __init__(self):\n+ # self.baseurl = 'https://aqs.epa.gov/aqsweb/airdata/'\n+ self.objtype = 'AQS'\n+ self.baseurl = 'https://aqs.epa.gov/aqsweb/airdata/'\n+ self.renamedhcols = [\n+ 'time_local', 'time', 'state_code', 'county_code', 'site_num',\n+ 'parameter_code', 'poc', 'latitude', 'longitude', 'datum',\n+ 'parameter_name', 'obs', 'units', 'mdl', 'uncertainty',\n+ 'qualifier', 'method_type', 'method_code', 'method_name',\n+ 'state_name', 'county_name', 'date_of_last_change'\n+ ]\n+ self.renameddcols = [\n+ 'time_local', 'state_code', 'county_code', 'site_num',\n+ 'parameter_code', 'poc', 'latitude', 'longitude', 'datum',\n+ 'parameter_name', 'sample_duration', 'pollutant_standard', 'units',\n+ 'event_type', 'observation_Count', 'observation_Percent', 'obs',\n+ '1st_max_Value', '1st_max_hour', 'aqi', 'method_code',\n+ 'method_name', 'local_site_name', 'address', 'state_name',\n+ 'county_name', 'city_name', 'msa_name', 'date_of_last_change'\n+ ]\n+ self.savecols = [\n+ 'time_local', 'time', 'siteid', 'latitude', 'longitude', 'obs',\n+ 'units', 'variable'\n+ ]\n+ self.df = pd.DataFrame() # hourly dataframe\n+ self.monitor_file = inspect.getfile(\n+ self.__class__)[:-10] + 'data/monitoring_site_locations.dat'\n+ self.monitor_df = None\n+ self.daily = False\n+ self.d_df = None # daily dataframe\n+\n+ def load_aqs_file(self, url, network):\n+ \"\"\"Short summary.\n+\n+ Parameters\n+ ----------\n+ url : type\n+ Description of parameter `url`.\n+ network : type\n+ Description of parameter `network`.\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ \"\"\"\n+ if 'daily' in url:\n+\n+ def dateparse(x):\n+ return pd.datetime.strptime(x, '%Y-%m-%d')\n+\n+ df = pd.read_csv(\n+ url,\n+ parse_dates={'time_local': [\"Date Local\"]},\n+ date_parser=dateparse,\n+ dtype={\n+ 0: str,\n+ 1: str,\n+ 2: str\n+ },\n+ encoding='ISO-8859-1')\n+ df.columns = self.renameddcols\n+ df['pollutant_standard'] = df.pollutant_standard.astype(str)\n+ self.daily = True\n+ # df.rename(columns={'parameter_name':'variable'})\n+ else:\n+ df = pd.read_csv(\n+ url,\n+ parse_dates={\n+ 'time': ['Date GMT', 'Time GMT'],\n+ 'time_local': [\"Date Local\", \"Time Local\"]\n+ },\n+ 
infer_datetime_format=True)\n+ df.columns = self.renamedhcols\n+\n+ df['siteid'] = df.state_code.astype(str).str.zfill(\n+ 2) + df.county_code.astype(str).str.zfill(3) + df.site_num.astype(\n+ str).str.zfill(4)\n+ # df['siteid'] = df.state_code + df.county_code + df.site_num\n+ df.drop(['state_name', 'county_name'], axis=1, inplace=True)\n+ df.columns = [i.lower() for i in df.columns]\n+ if 'daily' not in url:\n+ df.drop(['datum', 'qualifier'], axis=1, inplace=True)\n+ if 'VOC' in url:\n+ voc = True\n+ else:\n+ voc = False\n+ df = self.get_species(df, voc=voc)\n+ return df\n+\n+ def build_url(self, param, year, daily=False, download=False):\n+ \"\"\"Short summary.\n+\n+ Parameters\n+ ----------\n+ param : type\n+ Description of parameter `param`.\n+ year : type\n+ Description of parameter `year`.\n+ daily : type\n+ Description of parameter `daily` (the default is False).\n+ download : type\n+ Description of parameter `download` (the default is False).\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ \"\"\"\n+ if daily:\n+ beginning = self.baseurl + 'daily_'\n+ fname = 'daily_'\n+ else:\n+ beginning = self.baseurl + 'hourly_'\n+ fname = 'hourly_'\n+ if (param.upper() == 'OZONE') | (param.upper() == 'O3'):\n+ code = '44201_'\n+ elif param.upper() == 'PM2.5':\n+ code = '88101_'\n+ elif param.upper() == 'PM2.5_FRM':\n+ code = '88502_'\n+ elif param.upper() == 'PM10':\n+ code = '81102_'\n+ elif param.upper() == 'SO2':\n+ code = '42401_'\n+ elif param.upper() == 'NO2':\n+ code = '42602_'\n+ elif param.upper() == 'CO':\n+ code = '42101_'\n+ elif param.upper() == 'NONOxNOy'.upper():\n+ code = 'NONOxNOy_'\n+ elif param.upper() == 'VOC':\n+ # https://aqs.epa.gov/aqsweb/airdata/daily_VOCS_2017.zip\n+ code = 'VOCS_'\n+ elif param.upper() == 'SPEC':\n+ code = 'SPEC_'\n+ elif param.upper() == 'PM10SPEC':\n+ code = 'PM10SPEC_'\n+ elif param.upper() == 'WIND':\n+ code = 'WIND_'\n+ elif param.upper() == 'TEMP':\n+ code = 'TEMP_'\n+ elif param.upper() == 'RHDP':\n+ code = 'RH_DP_'\n+ elif (param.upper() == 'WIND') | (param.upper() == 'WS') | (\n+ param.upper() == 'WDIR'):\n+ code = 'WIND_'\n+ url = beginning + code + year + '.zip'\n+ fname = fname + code + year + '.zip'\n+ return url, fname\n+\n+ def build_urls(self, params, dates, daily=False):\n+ \"\"\"Short summary.\n+\n+ Parameters\n+ ----------\n+ params : type\n+ Description of parameter `params`.\n+ dates : list of datetime objects\n+ dates to retrieve data for. 
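\n+\n+ Illustrative example (not part of the original patch):\n+ build_urls(['OZONE'], dates, daily=True) with dates in 2016 returns\n+ (['https://aqs.epa.gov/aqsweb/airdata/daily_44201_2016.zip'],\n+ ['daily_44201_2016.zip'])\n+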
Only the years are taken into account.\n+ daily : type\n+ Description of parameter `daily` (the default is False).\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ \"\"\"\n+ years = pd.DatetimeIndex(dates).year.unique().astype(str)\n+ urls = []\n+ fnames = []\n+ for i in params:\n+ for y in years:\n+ url, fname = self.build_url(i, y, daily=daily)\n+ urls.append(url)\n+ fnames.append(fname)\n+ return urls, fnames\n+\n+ def retrieve(self, url, fname):\n+ \"\"\"Short summary.\n+\n+ Parameters\n+ ----------\n+ url : type\n+ Description of parameter `url`.\n+ fname : type\n+ Description of parameter `fname`.\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ \"\"\"\n+ import requests\n+\n+ if not os.path.isfile(fname):\n+ print('\\n Retrieving: ' + fname)\n+ print(url)\n+ print('\\n')\n+ r = requests.get(url)\n+ open(fname, 'wb').write(r.content)\n+ else:\n+ print('\\n File Exists: ' + fname)\n+\n+ def add_data(self,\n+ dates,\n+ param=None,\n+ daily=False,\n+ network=None,\n+ download=False,\n+ local=False):\n+ \"\"\"Short summary.\n+\n+ Parameters\n+ ----------\n+ dates : list of datetime objects\n+ Description of parameter `dates`.\n+ param : list of strings\n+ Description of parameter `param` (the default is None).\n+ daily : boolean\n+ Description of parameter `daily` (the default is False).\n+ network : type\n+ Description of parameter `network` (the default is None).\n+ download : type\n+ Description of parameter `download` (the default is False).\n+ local : boolean\n+ If True, read previously downloaded files from the working\n+ directory (the default is False).\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ \"\"\"\n+ import dask\n+ import dask.dataframe as dd\n+ if param is None:\n+ params = [\n+ 'SPEC', 'PM10', 'PM2.5', 'PM2.5_FRM', 'CO', 'OZONE', 'SO2',\n+ 'VOC', 'NONOXNOY', 'WIND', 'TEMP', 'RHDP'\n+ ]\n+ else:\n+ params = param\n+ urls, fnames = self.build_urls(params, dates, daily=daily)\n+ if download:\n+ for url, fname in zip(urls, fnames):\n+ self.retrieve(url, fname)\n+ dfs = [\n+ dask.delayed(self.load_aqs_file)(i, network) for i in fnames\n+ ]\n+ elif local:\n+ dfs = [\n+ dask.delayed(self.load_aqs_file)(i, network) for i in fnames\n+ ]\n+ else:\n+ dfs = [dask.delayed(self.load_aqs_file)(i, network) for i in urls]\n+ dff = dd.from_delayed(dfs)\n+ self.df = dff.compute()\n+ self.df = self.change_units(self.df)\n+ if self.monitor_df is None:\n+ self.monitor_df = read_monitor_file()\n+ drop_monitor_cols = True\n+ else:\n+ drop_monitor_cols = False\n+ if daily:\n+ if drop_monitor_cols:\n+ monitor_drop = [\n+ 'msa_name', 'city_name', u'local_site_name', u'address',\n+ u'datum'\n+ ]\n+ self.monitor_df.drop(monitor_drop, axis=1, inplace=True)\n+ # else:\n+ # monitor_drop = [u'datum']\n+ # self.monitor_df.drop(monitor_drop, axis=1, inplace=True)\n+ if network is not None:\n+ monitors = self.monitor_df.loc[self.monitor_df.isin(\n+ [network])].drop_duplicates(subset=['siteid'])\n+ else:\n+ monitors = self.monitor_df.drop_duplicates(subset=['siteid'])\n+ self.df = pd.merge(self.df, monitors, on=['siteid'], how='left')\n+ if daily:\n+ self.df['time'] = self.df.time_local - pd.to_timedelta(\n+ self.df.gmt_offset, unit='H')\n+ # if pd.Series(self.df.columns).isin(['parameter_name']).max():\n+ # self.df.drop('parameter_name', axis=1, inplace=True)\n+ return self.df.copy()
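\n+\n+ # Example (sketch): pull hourly ozone with the class above; only the\n+ # unique years in dates matter when building URLs. The import path\n+ # assumes this module lives at monet/obs/aqs_mod.py.\n+ #\n+ # import pandas as pd\n+ # from monet.obs.aqs_mod import AQS\n+ # aqs = AQS()\n+ # dates = pd.date_range('2017-06-01', '2017-08-31', freq='H')\n+ # df = aqs.add_data(dates, param=['OZONE'], daily=False)\n+\n+ def get_species(self, df, voc=False):\n+ \"\"\"Short summary.\n+\n+ Parameters\n+ ----------\n+ df : type\n+ Description of parameter `df`.\n+ voc : type\n+ Description of parameter `voc` (the default is False).\n+\n+ Returns\n+ -------\n+ type\n+ 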
Description of returned object.\n+\n+ \"\"\"\n+ pc = df.parameter_code.unique()\n+ df['variable'] = ''\n+ if voc:\n+ df['variable'] = df.parameter_name.str.upper()\n+ return df\n+ for i in pc:\n+ con = df.parameter_code == i\n+ if (i == 88101) | (i == 88502):\n+ df.loc[con, 'variable'] = 'PM2.5'\n+ if i == 44201:\n+ df.loc[con, 'variable'] = 'OZONE'\n+ if i == 81102:\n+ df.loc[con, 'variable'] = 'PM10'\n+ if i == 42401:\n+ df.loc[con, 'variable'] = 'SO2'\n+ if i == 42602:\n+ df.loc[con, 'variable'] = 'NO2'\n+ if i == 42101:\n+ df.loc[con, 'variable'] = 'CO'\n+ if i == 62101:\n+ df.loc[con, 'variable'] = 'TEMP'\n+ if i == 88305:\n+ df.loc[con, 'variable'] = 'OC'\n+ if i == 88306:\n+ df.loc[con, 'variable'] = 'NO3f'\n+ if (i == 88307):\n+ df.loc[con, 'variable'] = 'ECf'\n+ if i == 88316:\n+ df.loc[con, 'variable'] = 'ECf_optical'\n+ if i == 88403:\n+ df.loc[con, 'variable'] = 'SO4f'\n+ if i == 88312:\n+ df.loc[con, 'variable'] = 'TCf'\n+ if i == 88104:\n+ df.loc[con, 'variable'] = 'Alf'\n+ if i == 88107:\n+ df.loc[con, 'variable'] = 'Baf'\n+ if i == 88313:\n+ df.loc[con, 'variable'] = 'BCf'\n+ if i == 88109:\n+ df.loc[con, 'variable'] = 'Brf'\n+ if i == 88110:\n+ df.loc[con, 'variable'] = 'Cdf'\n+ if i == 88111:\n+ df.loc[con, 'variable'] = 'Caf'\n+ if i == 88117:\n+ df.loc[con, 'variable'] = 'Cef'\n+ if i == 88118:\n+ df.loc[con, 'variable'] = 'Csf'\n+ if i == 88203:\n+ df.loc[con, 'variable'] = 'Cl-f'\n+ if i == 88115:\n+ df.loc[con, 'variable'] = 'Clf'\n+ if i == 88112:\n+ df.loc[con, 'variable'] = 'Crf'\n+ if i == 88113:\n+ df.loc[con, 'variable'] = 'Cof'\n+ if i == 88114:\n+ df.loc[con, 'variable'] = 'Cuf'\n+ if i == 88121:\n+ df.loc[con, 'variable'] = 'Euf'\n+ if i == 88143:\n+ df.loc[con, 'variable'] = 'Auf'\n+ if i == 88127:\n+ df.loc[con, 'variable'] = 'Hff'\n+ if i == 88131:\n+ df.loc[con, 'variable'] = 'Inf'\n+ if i == 88126:\n+ df.loc[con, 'variable'] = 'Fef'\n+ if i == 88146:\n+ df.loc[con, 'variable'] = 'Laf'\n+ if i == 88128:\n+ df.loc[con, 'variable'] = 'Pbf'\n+ if i == 88140:\n+ df.loc[con, 'variable'] = 'Mgf'\n+ if i == 88132:\n+ df.loc[con, 'variable'] = 'Mnf'\n+ if i == 88142:\n+ df.loc[con, 'variable'] = 'Hgf'\n+ if i == 88134:\n+ df.loc[con, 'variable'] = 'Mof'\n+ if i == 88136:\n+ df.loc[con, 'variable'] = 'Nif'\n+ if i == 88147:\n+ df.loc[con, 'variable'] = 'Nbf'\n+ if i == 88310:\n+ df.loc[con, 'variable'] = 'NO3f'\n+ if i == 88152:\n+ df.loc[con, 'variable'] = 'Pf'\n+ if i == 88303:\n+ df.loc[con, 'variable'] = 'K+f'\n+ if i == 88176:\n+ df.loc[con, 'variable'] = 'Rbf'\n+ if i == 88162:\n+ df.loc[con, 'variable'] = 'Smf'\n+ if i == 88163:\n+ df.loc[con, 'variable'] = 'Scf'\n+ if i == 88154:\n+ df.loc[con, 'variable'] = 'Sef'\n+ if i == 88165:\n+ df.loc[con, 'variable'] = 'Sif'\n+ if i == 88166:\n+ df.loc[con, 'variable'] = 'Agf'\n+ if i == 88302:\n+ df.loc[con, 'variable'] = 'Na+f'\n+ if i == 88184:\n+ df.loc[con, 'variable'] = 'Naf'\n+ if i == 88168:\n+ df.loc[con, 'variable'] = 'Srf'\n+ if i == 88403:\n+ df.loc[con, 'variable'] = 'SO4f'\n+ if i == 88169:\n+ df.loc[con, 'variable'] = 'Sf'\n+ if i == 88170:\n+ df.loc[con, 'variable'] = 'Taf'\n+ if i == 88172:\n+ df.loc[con, 'variable'] = 'Tbf'\n+ if i == 88160:\n+ df.loc[con, 'variable'] = 'Snf'\n+ if i == 88161:\n+ df.loc[con, 'variable'] = 'Tif'\n+ if i == 88312:\n+ df.loc[con, 'variable'] = 'TOT_Cf'\n+ if i == 88310:\n+ df.loc[con, 'variable'] = 'NON-VOLITILE_NO3f'\n+ if i == 88309:\n+ df.loc[con, 'variable'] = 'VOLITILE_NO3f'\n+ if i == 88186:\n+ df.loc[con, 'variable'] = 'Wf'\n+ if i == 88314:\n+ df.loc[con, 
'variable'] = 'C_370nmf'\n+ if i == 88179:\n+ df.loc[con, 'variable'] = 'Uf'\n+ if i == 88164:\n+ df.loc[con, 'variable'] = 'Vf'\n+ if i == 88183:\n+ df.loc[con, 'variable'] = 'Yf'\n+ if i == 88167:\n+ df.loc[con, 'variable'] = 'Znf'\n+ if i == 88185:\n+ df.loc[con, 'variable'] = 'Zrf'\n+ if i == 88102:\n+ df.loc[con, 'variable'] = 'Sbf'\n+ if i == 88103:\n+ df.loc[con, 'variable'] = 'Asf'\n+ if i == 88105:\n+ df.loc[con, 'variable'] = 'Bef'\n+ if i == 88124:\n+ df.loc[con, 'variable'] = 'Gaf'\n+ if i == 88185:\n+ df.loc[con, 'variable'] = 'Irf'\n+ if i == 88180:\n+ df.loc[con, 'variable'] = 'Kf'\n+ if i == 88301:\n+ df.loc[con, 'variable'] = 'NH4+f'\n+ if (i == 88320) | (i == 88355):\n+ df.loc[con, 'variable'] = 'OCf'\n+ if (i == 88357) | (i == 88321):\n+ df.loc[con, 'variable'] = 'ECf'\n+ if i == 42600:\n+ df.loc[con, 'variable'] = 'NOY'\n+ if i == 42601:\n+ df.loc[con, 'variable'] = 'NO'\n+ if i == 42603:\n+ df.loc[con, 'variable'] = 'NOX'\n+ if (i == 61103) | (i == 61101):\n+ df.loc[con, 'variable'] = 'WS'\n+ if (i == 61104) | (i == 61102):\n+ df.loc[con, 'variable'] = 'WD'\n+ if i == 62201:\n+ df.loc[con, 'variable'] = 'RH'\n+ if i == 62103:\n+ df.loc[con, 'variable'] = 'DP'\n+ return df\n+\n+ @staticmethod\n+ def change_units(df):\n+ \"\"\"Short summary.\n+\n+ Parameters\n+ ----------\n+ df : type\n+ Description of parameter `df`.\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ \"\"\"\n+ units = df.units.unique()\n+ for i in units:\n+ con = df.units == i\n+ if i.upper() == 'Parts per billion Carbon'.upper():\n+ df.loc[con, 'units'] = 'ppbC'\n+ if i == 'Parts per billion':\n+ df.loc[con, 'units'] = 'ppb'\n+ if i == 'Parts per million':\n+ df.loc[con, 'units'] = 'ppm'\n+ if i == 'Micrograms/cubic meter (25 C)':\n+ df.loc[con, 'units'] = 'UG/M3'.lower()\n+ if i == 'Degrees Centigrade':\n+ df.loc[con, 'units'] = 'C'\n+ if i == 'Micrograms/cubic meter (LC)':\n+ df.loc[con, 'units'] = 'UG/M3'.lower()\n+ if i == 'Knots':\n+ df.loc[con, 'obs'] *= 0.51444\n+ df.loc[con, 'units'] = 'M/S'.lower()\n+ if i == 'Degrees Fahrenheit':\n+ df.loc[con, 'obs'] = (df.loc[con, 'obs'] + 459.67) * 5. 
/ 9.\n+ df.loc[con, 'units'] = 'K'\n+ if i == 'Percent relative humidity':\n+ df.loc[con, 'units'] = '%'\n+ return df\ndiff --git a/monet/obs/cems_mod.py b/monet/obs/cems_mod.py\nnew file mode 100644\n--- /dev/null\n+++ b/monet/obs/cems_mod.py\n@@ -0,0 +1,537 @@\n+from __future__ import print_function\n+import os\n+import datetime\n+import pandas as pd\n+import numpy as np\n+\"\"\"\n+NAME: cems_mod.py\n+PGRMMER: Alice Crawford ORG: ARL\n+This code written at the NOAA air resources laboratory\n+Python 3\n+#################################################################\n+\"\"\"\n+\n+\n+def getdegrees(degrees, minutes, seconds):\n+ return degrees + minutes / 60.0 + seconds / 3600.00\n+\n+\n+def addmonth(dt):\n+ month = dt.month + 1\n+ year = dt.year\n+ day = dt.day\n+ hour = dt.hour\n+ if month > 12:\n+ year = dt.year + 1\n+ month = month - 12\n+ if day == 31 and month in [4, 6, 9, 11]:\n+ day = 30\n+ if month == 2 and day in [29, 30, 31]:\n+ if year % 4 == 0:\n+ day = 29\n+ else:\n+ day = 28\n+ return datetime.datetime(year, month, day, hour)\n+\n+\n+def get_date_fmt(date, verbose=False):\n+ \"\"\"Determines which format the date is in.\n+ In some files year is first and in others it is last.\n+ Parameters\n+ ----------\n+ date: str\n+ with format either YYYY-mm-DD or mm-DD-YYYY\n+ verbose: boolean\n+ if TRUE print extra information\n+ Returns\n+ --------\n+ fmt: str\n+ string which can be used with datetime object to give format of date\n+ string.\n+ \"\"\"\n+ if verbose:\n+ print('Determining date format')\n+ if verbose:\n+ print(date)\n+ temp = date.split('-')\n+ if len(temp[0]) == 4:\n+ fmt = \"%Y-%m-%d %H\"\n+ else:\n+ fmt = \"%m-%d-%Y %H\"\n+ return fmt
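\n+\n+\n+# Example (sketch): what the helpers above return.\n+# addmonth(datetime.datetime(2017, 1, 31, 5)) -> datetime(2017, 2, 28, 5)\n+# get_date_fmt('2017-01-31 05') -> '%Y-%m-%d %H'\n+# get_date_fmt('01-31-2017 05') -> '%m-%d-%Y %H'\n+\n+\n+class CEMS(object):\n+ \"\"\"\n+ Class for data from continuous emission monitoring systems (CEMS).\n+ Data from power plants can be downloaded from\n+ ftp://newftp.epa.gov/DMDNLoad/emissions/\n+\n+ Attributes\n+ ----------\n+ efile : type string\n+ Description of attribute `efile`.\n+ url : type string\n+ Description of attribute `url`.\n+ info : type string\n+ Information about data.\n+ df : pandas DataFrame\n+ dataframe containing emissions data.\n+ Methods\n+ ----------\n+ __init__(self)\n+ add_data(self, rdate, states=['md'], download=False, verbose=True):\n+ load(self, efile, verbose=True):\n+ retrieve(self, rdate, state, download=True):\n+\n+ match_column(self, varname):\n+ get_var(self, varname, loc=None, daterange=None, unitid=-99, verbose=True):\n+ create_location_dictionary(self):\n+ rename(self, ccc, newname, rcolumn, verbose):\n+ \"\"\"\n+\n+ def __init__(self):\n+ self.efile = None\n+ self.url = \"ftp://newftp.epa.gov/DmDnLoad/emissions/\"\n+ self.lb2kg = 0.453592 # number of kilograms per pound.\n+ self.info = \"Data from continuous emission monitoring systems (CEMS)\\n\"\n+ self.info += self.url + '\\n'\n+ self.df = pd.DataFrame()\n+ self.namehash = {\n+ } # if columns are renamed keeps track of original names.\n+ # Each facility may have more than one unit which is specified by the\n+ # unit id.\n+\n+ def __str__(self):\n+ return self.info\n+\n+ def add_data(self, rdate, states=['md'], download=False, verbose=True):\n+ \"\"\"\n+ gets the ftp url from the retrieve method and then\n+ loads the data from the ftp site using the load method.\n+\n+ Parameters\n+ ----------\n+ rdate : single datetime object or list of datetime objects\n+ The first datetime object indicates the month and year of the\n+ first file to retrieve.\n+ The second datetime object indicates the 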
month and year of the\n+ last file to retrieve.\n+ states : list of strings\n+ list of two letter state identifications.\n+ download : boolean\n+ if download=True then retrieve will download the files and load\n+ will read the downloaded files.\n+ if download=False then retrieve will return the url and load\n+ will read directly from ftp site.\n+ verbose : boolean\n+ if TRUE prints out additional information.\n+ Returns\n+ -------\n+ boolean True\n+\n+ \"\"\"\n+ if isinstance(states, str):\n+ states = [states]\n+ if isinstance(rdate, list):\n+ r1 = rdate[0]\n+ r2 = rdate[1]\n+ rdatelist = [r1]\n+ done = False\n+ iii = 0\n+ while not done:\n+ r3 = addmonth(rdatelist[-1])\n+ if r3 <= r2:\n+ rdatelist.append(r3)\n+ else:\n+ done = True\n+ if iii > 100:\n+ done = True\n+ iii += 1\n+ else:\n+ rdatelist = [rdate]\n+ for rd in rdatelist:\n+ print('getting data')\n+ print(rd)\n+ for st in states:\n+ url = self.retrieve(rd, st, download=download, verbose=verbose)\n+ self.load(url, verbose=verbose)\n+ return True\n+\n+ def match_column(self, varname):\n+ \"\"\"varname is list of strings.\n+ returns column name which contains all the strings.\n+ \"\"\"\n+ columns = list(self.df.columns.values)\n+ cmatch = None\n+ for ccc in columns:\n+ # print('-----' + ccc + '------')\n+ # print( temp[ccc].unique())\n+ match = 0\n+ for vstr in varname:\n+ if vstr.lower() in ccc.lower():\n+ match += 1\n+ if match == len(varname):\n+ cmatch = ccc\n+ return cmatch\n+\n+ def cemspivot(self, varname, daterange=None, unitid=False, verbose=True):\n+ \"\"\"\n+ Parameters\n+ ----------\n+ varname: string\n+ name of column in the cems dataframe\n+ daterange: list of two datetime objects\n+ define a date range\n+ unitid: boolean.\n+ If True and unit id columns exist then these will be kept as\n+ separate columns in the pivot table.\n+ verbose: boolean\n+ if true print out extra information.\n+ Returns: pandas DataFrame object\n+ returns dataframe with rows time. Columns are (orispl_code,\n+ unit_id).\n+ If no unit_id in the file then columns are just orispl_code.\n+ if unitid flag set to False then sums over unit_id's that belong to\n+ an orispl_code. 
Values are from the column specified by the\n+ varname input.\n+ \"\"\"\n+\n+ from .obs_util import timefilter\n+ temp = self.df.copy()\n+ if daterange:\n+ temp = timefilter(temp, daterange)\n+ if 'unit_id' in temp.columns.values and unitid:\n+ if temp['unit_id'].unique().size > 0:\n+ if verbose:\n+ print('UNIT IDs ', temp['unit_id'].unique())\n+ # create pandas frame with index datetime and columns for value for\n+ # each unit_id,orispl\n+ pivot = pd.pivot_table(\n+ temp,\n+ values=varname,\n+ index=['time'],\n+ columns=['orispl_code', 'unit_id'],\n+ aggfunc=np.sum)\n+ else:\n+ if verbose:\n+ print('NO UNIT ID')\n+ # returns data frame where rows are date and columns are the values\n+ # of cmatch for orispl\n+ pivot = pd.pivot_table(\n+ temp,\n+ values=varname,\n+ index=['time'],\n+ columns=['orispl_code'],\n+ aggfunc=np.sum)\n+ return pivot\n+\n+ def get_var(self,\n+ varname,\n+ orisp=None,\n+ daterange=None,\n+ unitid=-99,\n+ verbose=True):\n+ \"\"\"\n+ returns time series with variable indicated by varname.\n+ returns data frame where rows are date and columns are the\n+ values of cmatch for each fac_id.\n+\n+ routine looks for column which contains all strings in varname.\n+ Currently not case sensitive.\n+\n+ orisp is an ORISPL code and unitid is a unit_id.\n+\n+ if a particular unitid is specified then will return values for that\n+ unit.\n+\n+\n+ Parameters\n+ ----------\n+ varname : string or iterable of strings\n+ varname may be string or list of strings.\n+ orisp : type\n+ Description of parameter `orisp` (facility ORISPL code).\n+ daterange : type\n+ Description of parameter `daterange`.\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+ \"\"\"\n+ if unitid == -99:\n+ ui = False\n+ else:\n+ ui = True\n+ temp = self.cemspivot(varname, daterange, unitid=ui)\n+ if not ui:\n+ return temp[orisp]\n+ else:\n+ return temp[orisp, unitid]
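\n+\n+ # Example (sketch): hourly SO2 mass by facility, assuming data were\n+ # loaded with add_data() and headers were mapped by columns_rename();\n+ # the ORISPL code below is hypothetical.\n+ #\n+ # cems = CEMS()\n+ # cems.add_data(datetime.datetime(2017, 1, 1), states=['md'])\n+ # pivot = cems.cemspivot('so2_lbs') # rows: time, columns: orispl_code\n+ # ts = cems.get_var('so2_lbs', orisp=1234) # one facility's time series\n+\n+ def retrieve(self, rdate, state, download=True, verbose=False):\n+ \"\"\"Short summary.\n+\n+ Parameters\n+ ----------\n+ rdate : datetime object\n+ Uses year and month. 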
Day and hour are not used.\n+ state : string\n+ state abbreviation to retrieve data for\n+ download : boolean\n+ set to True to download\n+\n+ Returns\n+ -------\n+ efile string\n+ if download FALSE then returns string with url of ftp\n+ if download TRUE then returns name of downloaded file\n+ \"\"\"\n+ # import requests\n+ # TO DO: requests does not support ftp sites.\n+ efile = 'empty'\n+ ftpsite = self.url\n+ ftpsite += 'hourly/'\n+ ftpsite += 'monthly/'\n+ ftpsite += rdate.strftime(\"%Y\") + '/'\n+ print(ftpsite)\n+ print(rdate)\n+ print(state)\n+ fname = rdate.strftime(\"%Y\") + state + rdate.strftime(\"%m\") + '.zip'\n+ if not download:\n+ efile = ftpsite + fname\n+ if not os.path.isfile(fname):\n+ # print('retrieving ' + ftpsite + fname)\n+ # r = requests.get(ftpsite + fname)\n+ # open(efile, 'wb').write(r.content)\n+ # print('retrieved ' + ftpsite + fname)\n+ efile = ftpsite + fname\n+ print('WARNING: Downloading file not supported at this time')\n+ print('you may download manually using the following address')\n+ print(efile)\n+ else:\n+ print('file exists ' + fname)\n+ efile = fname\n+ self.info += 'File retrieved :' + efile + '\\n'\n+ return efile\n+\n+ def create_location_dictionary(self, verbose=False):\n+ \"\"\"\n+ returns dictionary with the key orispl_code and value (latitude,\n+ longitude) tuple\n+ \"\"\"\n+ if 'latitude' in list(self.df.columns.values):\n+ dftemp = self.df.copy()\n+ pairs = zip(dftemp['orispl_code'],\n+ zip(dftemp['latitude'], dftemp['longitude']))\n+ pairs = list(set(pairs))\n+ lhash = dict(pairs) # key is facility id and value is (lat, lon).\n+ if verbose:\n+ print(lhash)\n+ return lhash\n+ else:\n+ return False\n+\n+ def create_name_dictionary(self, verbose=False):\n+ \"\"\"\n+ returns dictionary with the key orispl_code and value facility name\n+ \"\"\"\n+ if 'facility_name' in list(self.df.columns.values):\n+ dftemp = self.df.copy()\n+ pairs = zip(dftemp['orispl_code'], dftemp['facility_name'])\n+ pairs = list(set(pairs))\n+ lhash = dict(pairs) # key is facility id and value is name.\n+ if verbose:\n+ print(lhash)\n+ return lhash\n+ else:\n+ return False
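\n+\n+ # Example (sketch): columns_rename() below maps raw CEMS headers onto\n+ # the standard names used in this class, e.g.\n+ # ['FACILITY NAME', 'ORISPL CODE', 'SO2_MASS (lbs)', 'OP_DATE', 'OP_HOUR']\n+ # -> ['facility_name', 'orispl_code', 'so2_lbs', 'date', 'hour']\n+\n+ def columns_rename(self, columns, verbose=False):\n+ \"\"\"\n+ Maps columns with one name to a standard name\n+ Parameters:\n+ ----------\n+ columns: list of strings\n+\n+ Returns:\n+ --------\n+ rcolumn: list of strings\n+ \"\"\"\n+ rcolumn = []\n+ for ccc in columns:\n+ if 'facility' in ccc.lower() and 'name' in ccc.lower():\n+ rcolumn = self.rename(ccc, 'facility_name', rcolumn, verbose)\n+ elif 'orispl' in ccc.lower():\n+ rcolumn = self.rename(ccc, 'orispl_code', rcolumn, verbose)\n+ elif 'facility' in ccc.lower() and 'id' in ccc.lower():\n+ rcolumn = self.rename(ccc, 'fac_id', rcolumn, verbose)\n+ elif 'so2' in ccc.lower() and ('lbs' in ccc.lower()\n+ or 'pounds' in ccc.lower()) and (\n+ 'rate' not in ccc.lower()):\n+ rcolumn = self.rename(ccc, 'so2_lbs', rcolumn, verbose)\n+ elif 'nox' in ccc.lower() and ('lbs' in ccc.lower()\n+ or 'pounds' in ccc.lower()) and (\n+ 'rate' not in ccc.lower()):\n+ rcolumn = self.rename(ccc, 'nox_lbs', rcolumn, verbose)\n+ elif 'co2' in ccc.lower() and ('short' in ccc.lower()\n+ and 'tons' in ccc.lower()):\n+ rcolumn = self.rename(ccc, 'co2_short_tons', rcolumn, verbose)\n+ elif 'date' in ccc.lower():\n+ rcolumn = self.rename(ccc, 'date', rcolumn, verbose)\n+ elif 'hour' in ccc.lower():\n+ rcolumn = self.rename(ccc, 'hour', rcolumn, verbose)\n+ elif 'lat' in 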
ccc.lower():\n+ rcolumn = self.rename(ccc, 'latitude', rcolumn, verbose)\n+ elif 'lon' in ccc.lower():\n+ rcolumn = self.rename(ccc, 'longitude', rcolumn, verbose)\n+ elif 'state' in ccc.lower():\n+ rcolumn = self.rename(ccc, 'state_name', rcolumn, verbose)\n+ else:\n+ rcolumn.append(ccc.strip().lower())\n+ return rcolumn\n+\n+ def rename(self, ccc, newname, rcolumn, verbose):\n+ \"\"\"\n+ keeps track of original and new column names in the namehash attribute\n+ Parameters:\n+ ----------\n+ ccc: str\n+ newname: str\n+ rcolumn: list of str\n+ verbose: boolean\n+ Returns\n+ ------\n+ rcolumn: list of str\n+ \"\"\"\n+ # dictionary with key as the newname and value as the original name\n+ self.namehash[newname] = ccc\n+ rcolumn.append(newname)\n+ if verbose:\n+ print(ccc + ' to ' + newname)\n+ return rcolumn\n+\n+ def add_info(self, dftemp):\n+ \"\"\"\n+ -------------Load supplemental data-----------------------\n+ Add location (latitude longitude) and time UTC information to dataframe\n+ dftemp.\n+ cemsinfo.csv contains info on facility id, lat, lon, time offset from\n+ UTC.\n+ allows transformation from local time to UTC.\n+ If not all power stations are found in the cemsinfo.csv file,\n+ then NaN will be written in lat, lon and 'time' column.\n+\n+ Parameters\n+ ----------\n+ dftemp: pandas dataframe\n+\n+ Returns\n+ ----------\n+ dftemp: pandas dataframe\n+ \"\"\"\n+ basedir = os.path.abspath(os.path.dirname(__file__))[:-3]\n+ iname = os.path.join(basedir, 'data', 'cemsinfo.csv')\n+ # iname = os.path.join(basedir, 'data', 'cem_facility_loc.csv')\n+ method = 1\n+ # TO DO: Having trouble with pytest throwing an error when using the\n+ # apply on the dataframe.\n+ # runs ok, but pytest fails. Tried several different methods.\n+ if os.path.isfile(iname):\n+ sinfo = pd.read_csv(iname, sep=',', header=0)\n+ try:\n+ dftemp.drop(['latitude', 'longitude'], axis=1, inplace=True)\n+ except Exception:\n+ pass\n+ dfnew = pd.merge(\n+ dftemp,\n+ sinfo,\n+ how='left',\n+ left_on=['orispl_code'],\n+ right_on=['orispl_code'])\n+ # print('---------z-----------')\n+ # print(dfnew.columns.values)\n+ # remove stations which do not have a time offset.\n+ dfnew.dropna(axis=0, subset=['time_offset'], inplace=True)\n+ if method == 1:\n+ # this runs ok but fails pytest\n+ def i2o(x):\n+ return datetime.timedelta(hours=x['time_offset'])\n+\n+ dfnew['time_offset'] = dfnew.apply(i2o, axis=1)\n+ dfnew['time'] = dfnew['time local'] + dfnew['time_offset']\n+ elif method == 2:\n+ # this runs ok but fails pytest\n+ def utc(x):\n+ return pd.Timestamp(x['time local']) + datetime.timedelta(\n+ hours=x['time_offset'])\n+\n+ dfnew['time'] = dfnew.apply(utc, axis=1)\n+ elif method == 3:\n+ # this runs ok but fails pytest\n+ def utc(x, y):\n+ return x + datetime.timedelta(hours=y)\n+\n+ dfnew['time'] = dfnew.apply(\n+ lambda row: utc(row['time local'], row['time_offset']),\n+ axis=1)\n+ # remove the time_offset column.\n+ dfnew.drop(['time_offset'], axis=1, inplace=True)\n+ mlist = dftemp.columns.values.tolist()\n+ # merge the dataframes back together to include rows with no info\n+ # in the cemsinfo.csv\n+ dftemp = pd.merge(\n+ dftemp, dfnew, how='left', left_on=mlist, right_on=mlist)\n+ return dftemp\n+ # return dfnew
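\n+\n+ # Example (sketch): load() below builds a 'time local' column and then\n+ # calls add_info() to shift it to UTC; e.g. a Maryland plant with\n+ # time_offset = 5 and 'time local' 2017-01-01 00:00 gets\n+ # time = 2017-01-01 05:00 UTC.\n+\n+ def load(self, efile, verbose=True):\n+ \"\"\"\n+ loads information found in efile into a pandas dataframe.\n+ Parameters\n+ ----------\n+ efile: string\n+ name of csv file to open or url of csv file.\n+ verbose: boolean\n+ if TRUE prints out information\n+ \"\"\"\n+\n+ # pandas read_csv can read either from a 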
file or url.\n+ dftemp = pd.read_csv(efile, sep=',', index_col=False, header=0)\n+ columns = list(dftemp.columns.values)\n+ columns = self.columns_rename(columns, verbose)\n+ dftemp.columns = columns\n+ if verbose:\n+ print(columns)\n+ dfmt = get_date_fmt(dftemp['date'][0], verbose=verbose)\n+\n+ # create column with datetime information\n+ # from column with month-day-year and column with hour.\n+ dftime = dftemp.apply(lambda x:\n+ pd.datetime.strptime(\"{0} {1}\".format(x['date'],\n+ x['hour']),\n+ dfmt), axis=1)\n+ dftemp = pd.concat([dftime, dftemp], axis=1)\n+ dftemp.rename(columns={0: 'time local'}, inplace=True)\n+ dftemp.drop(['date', 'hour'], axis=1, inplace=True)\n+\n+ # -------------Load supplemental data-----------------------\n+ # contains info on facility id, lat, lon, time offset from UTC.\n+ # allows transformation from local time to UTC.\n+ dftemp = self.add_info(dftemp)\n+\n+ if 'year' in columns:\n+ dftemp.drop(['year'], axis=1, inplace=True)\n+ if self.df.empty:\n+ self.df = dftemp\n+ if verbose:\n+ print('Initializing pandas dataframe. Loading ' + efile)\n+ else:\n+ self.df = self.df.append(dftemp)\n+ if verbose:\n+ print('Appending to pandas dataframe. Loading ' + efile)\n+ # if verbose: print(dftemp[0:10])\n+ return dftemp\ndiff --git a/monet/obs/crn_mod.py b/monet/obs/crn_mod.py\nnew file mode 100644\n--- /dev/null\n+++ b/monet/obs/crn_mod.py\n@@ -0,0 +1,477 @@\n+\"\"\"\n+Data is taken from the Climate Reference Network. This is used to expand\n+ validation of NOAA ARL models, leveraging in-house datasets.\n+\n+ Data available at https://www.ncdc.noaa.gov/crn/qcdatasets.html\n+\n+ Here we use the hourly data.\n+\n+ Field# Name Units\n+ ---------------------------------------------\n+ 1 WBANNO XXXXX\n+ 2 UTC_DATE YYYYMMDD\n+ 3 UTC_TIME HHmm\n+ 4 LST_DATE YYYYMMDD\n+ 5 LST_TIME HHmm\n+ 6 CRX_VN XXXXXX\n+ 7 LONGITUDE Decimal_degrees\n+ 8 LATITUDE Decimal_degrees\n+ 9 T_CALC Celsius\n+ 10 T_HR_AVG Celsius\n+ 11 T_MAX Celsius\n+ 12 T_MIN Celsius\n+ 13 P_CALC mm\n+ 14 SOLARAD W/m^2\n+ 15 SOLARAD_FLAG X\n+ 16 SOLARAD_MAX W/m^2\n+ 17 SOLARAD_MAX_FLAG X\n+ 18 SOLARAD_MIN W/m^2\n+ 19 SOLARAD_MIN_FLAG X\n+ 20 SUR_TEMP_TYPE X\n+ 21 SUR_TEMP Celsius\n+ 22 SUR_TEMP_FLAG X\n+ 23 SUR_TEMP_MAX Celsius\n+ 24 SUR_TEMP_MAX_FLAG X\n+ 25 SUR_TEMP_MIN Celsius\n+ 26 SUR_TEMP_MIN_FLAG X\n+ 27 RH_HR_AVG %\n+ 28 RH_HR_AVG_FLAG X\n+ 29 SOIL_MOISTURE_5 m^3/m^3\n+ 30 SOIL_MOISTURE_10 m^3/m^3\n+ 31 SOIL_MOISTURE_20 m^3/m^3\n+ 32 SOIL_MOISTURE_50 m^3/m^3\n+ 33 SOIL_MOISTURE_100 m^3/m^3\n+ 34 SOIL_TEMP_5 Celsius\n+ 35 SOIL_TEMP_10 Celsius\n+ 36 SOIL_TEMP_20 Celsius\n+ 37 SOIL_TEMP_50 Celsius\n+ 38 SOIL_TEMP_100 Celsius\n+============================================\n+Daily Data\n+============================================\n+ Field# Name Units\n+---------------------------------------------\n+ 1 WBANNO XXXXX\n+ 2 LST_DATE YYYYMMDD\n+ 3 CRX_VN XXXXXX\n+ 4 LONGITUDE Decimal_degrees\n+ 5 LATITUDE Decimal_degrees\n+ 6 T_DAILY_MAX Celsius\n+ 7 T_DAILY_MIN Celsius\n+ 8 T_DAILY_MEAN Celsius\n+ 9 T_DAILY_AVG Celsius\n+ 10 P_DAILY_CALC mm\n+ 11 SOLARAD_DAILY MJ/m^2\n+ 12 SUR_TEMP_DAILY_TYPE X\n+ 13 SUR_TEMP_DAILY_MAX Celsius\n+ 14 SUR_TEMP_DAILY_MIN Celsius\n+ 15 SUR_TEMP_DAILY_AVG Celsius\n+ 16 RH_DAILY_MAX %\n+ 17 RH_DAILY_MIN %\n+ 18 RH_DAILY_AVG %\n+ 19 SOIL_MOISTURE_5_DAILY m^3/m^3\n+ 20 SOIL_MOISTURE_10_DAILY m^3/m^3\n+ 21 SOIL_MOISTURE_20_DAILY m^3/m^3\n+ 22 SOIL_MOISTURE_50_DAILY m^3/m^3\n+ 23 SOIL_MOISTURE_100_DAILY m^3/m^3\n+ 24 SOIL_TEMP_5_DAILY Celsius\n+ 25 SOIL_TEMP_10_DAILY 
Celsius\n+ 26 SOIL_TEMP_20_DAILY Celsius\n+ 27 SOIL_TEMP_50_DAILY Celsius\n+ 28 SOIL_TEMP_100_DAILY Celsius\n+\n+===============================================\n+ SUB HOURLY\n+ ==============================================\n+ Field# Name Units\n+---------------------------------------------\n+ 1 WBANNO XXXXX\n+ 2 UTC_DATE YYYYMMDD\n+ 3 UTC_TIME HHmm\n+ 4 LST_DATE YYYYMMDD\n+ 5 LST_TIME HHmm\n+ 6 CRX_VN XXXXXX\n+ 7 LONGITUDE Decimal_degrees\n+ 8 LATITUDE Decimal_degrees\n+ 9 AIR_TEMPERATURE Celsius\n+ 10 PRECIPITATION mm\n+ 11 SOLAR_RADIATION W/m^2\n+ 12 SR_FLAG X\n+ 13 SURFACE_TEMPERATURE Celsius\n+ 14 ST_TYPE X\n+ 15 ST_FLAG X\n+ 16 RELATIVE_HUMIDITY %\n+ 17 RH_FLAG X\n+ 18 SOIL_MOISTURE_5 m^3/m^3\n+ 19 SOIL_TEMPERATURE_5 Celsius\n+ 20 WETNESS Ohms\n+ 21 WET_FLAG X\n+ 22 WIND_1_5 m/s\n+ 23 WIND_FLAG X\n+\n+ \"\"\"\n+from __future__ import print_function\n+\n+import inspect\n+import os\n+from builtins import object, zip\n+\n+import pandas as pd\n+from future import standard_library\n+from numpy import array\n+\n+standard_library.install_aliases()\n+\n+\n+class crn(object):\n+ def __init__(self):\n+ self.dates = None\n+ self.daily = False\n+ self.ftp = None\n+ self.df = pd.DataFrame()\n+ self.se_states = array(\n+ ['AL', 'FL', 'GA', 'MS', 'NC', 'SC', 'TN', 'VA', 'WV'],\n+ dtype='|S14')\n+ self.ne_states = array(\n+ [\n+ 'CT', 'DE', 'DC', 'ME', 'MD', 'MA', 'NH', 'NJ', 'NY', 'PA',\n+ 'RI', 'VT'\n+ ],\n+ dtype='|S20')\n+ self.nc_states = array(\n+ ['IL', 'IN', 'IA', 'KY', 'MI', 'MN', 'MO', 'OH', 'WI'],\n+ dtype='|S9')\n+ self.sc_states = array(['AR', 'LA', 'OK', 'TX'], dtype='|S9')\n+ self.r_states = array(\n+ [\n+ 'AZ', 'CO', 'ID', 'KS', 'MT', 'NE', 'NV', 'NM', 'ND', 'SD',\n+ 'UT', 'WY'\n+ ],\n+ dtype='|S12')\n+ self.p_states = array(['CA', 'OR', 'WA'], dtype='|S10')\n+ self.objtype = 'CRN'\n+ self.monitor_file = inspect.getfile(\n+ self.__class__)[:-18] + 'data/stations.tsv'\n+ self.monitor_df = None\n+ self.baseurl = 'https://www1.ncdc.noaa.gov/pub/data/uscrn/products/'\n+ self.hcols = [\n+ 'WBANNO', 'UTC_DATE', 'UTC_TIME', 'LST_DATE', 'LST_TIME', 'CRX_VN',\n+ 'LONGITUDE', 'LATITUDE', 'T_CALC', 'T_AVG', 'T_MAX', 'T_MIN',\n+ 'P_CALC', 'SOLARAD', 'SOLARAD_FLAG', 'SOLARAD_MAX',\n+ 'SOLARAD_MAX_FLAG', 'SOLARAD_MIN', 'SOLARAD_MIN_FLAG',\n+ 'SUR_TEMP_TYPE', 'SUR_TEMP', 'SUR_TEMP_FLAG', 'SUR_TEMP_MAX',\n+ 'SUR_TEMP_MAX_FLAG', 'SUR_TEMP_MIN', 'SUR_TEMP_MIN_FLAG', 'RH_AVG',\n+ 'RH_AVG_FLAG', 'SOIL_MOISTURE_5', 'SOIL_MOISTURE_10',\n+ 'SOIL_MOISTURE_20', 'SOIL_MOISTURE_50', 'SOIL_MOISTURE_100',\n+ 'SOIL_TEMP_5', 'SOIL_TEMP_10', 'SOIL_TEMP_20', 'SOIL_TEMP_50',\n+ 'SOIL_TEMP_100'\n+ ]\n+ self.dcols = [\n+ 'WBANNO', 'LST_DATE', 'CRX_VN', 'LONGITUDE', 'LATITUDE', 'T_MAX',\n+ 'T_MIN', 'T_MEAN', 'T_AVG', 'P_CALC', 'SOLARAD', 'SUR_TEMP_TYPE',\n+ 'SUR_TEMP_MAX', 'SUR_TEMP_MIN', 'SUR_TEMP_AVG',\n+ 'RH_MAX', 'RH_MIN', 'RH_AVG', 'SOIL_MOISTURE_5',\n+ 'SOIL_MOISTURE_10', 'SOIL_MOISTURE_20', 'SOIL_MOISTURE_50',\n+ 'SOIL_MOISTURE_100', 'SOIL_TEMP_5', 'SOIL_TEMP_10', 'SOIL_TEMP_20',\n+ 'SOIL_TEMP_50', 'SOIL_TEMP_100'\n+ ]\n+ self.shcols = [\n+ 'WBANNO', 'UTC_DATE', 'UTC_TIME', 'LST_DATE', 'LST_TIME', 'CRX_VN',\n+ 'LONGITUDE', 'LATITUDE', 'T_MEAN', 'P_CALC', 'SOLARAD',\n+ 'SOLARAD_FLAG', 'SUR_TEMP_AVG', 'SUR_TEMP_TYPE', 'SUR_TEMP_FLAG',\n+ 'RH_AVG', 'RH_FLAG', 'SOIL_MOISTURE_5', 'SOIL_TEMP_5', 'WETNESS',\n+ 'WET_FLAG', 'WIND', 'WIND_FLAG'\n+ ]\n+ self.citation = ('Diamond, H. J., T. R. Karl, M. A. Palecki, C. B. Baker, J. E. Bell, R. D. Leeper, D. R. Easterling, J. H. '\n+ ' Lawrimore, T. P. 
Meyers, M. R. Helfert, G. Goodge, and P. W. Thorne,'\n+ ' 2013: U.S. Climate Reference Network after one decade of operations:'\n+ ' status and assessment. Bull. Amer. Meteor. Soc., 94, 489-498. '\n+ 'doi: 10.1175/BAMS-D-12-00170.1')\n+ self.citation2 = ('Bell, J. E., M. A. Palecki, C. B. Baker, W. G. '\n+ 'Collins, J. H. Lawrimore, R. D. Leeper, M. E. Hall, J. Kochendorfer, '\n+ 'T. P. Meyers, T. Wilson, and H. J. Diamond. 2013: U.S. Climate '\n+ 'Reference Network soil moisture and temperature observations. J. '\n+ 'Hydrometeorol., 14, 977-988. doi: 10.1175/JHM-D-12-0146.1')\n+\n+ def load_file(self, url):\n+ nanvals = [-99999, -9999.0]\n+ if 'CRND0103' in url:\n+ cols = self.dcols\n+ df = pd.read_csv(\n+ url,\n+ delim_whitespace=True,\n+ names=cols,\n+ parse_dates={'time_local': [1]},\n+ infer_datetime_format=True,\n+ na_values=nanvals)\n+ self.daily = True\n+ elif 'CRNS0101' in url:\n+ cols = self.shcols\n+ df = pd.read_csv(\n+ url,\n+ delim_whitespace=True,\n+ names=cols,\n+ parse_dates={\n+ 'time': ['UTC_DATE', 'UTC_TIME'],\n+ 'time_local': ['LST_DATE', 'LST_TIME']\n+ },\n+ infer_datetime_format=True,\n+ na_values=nanvals)\n+ else:\n+ cols = self.hcols\n+ df = pd.read_csv(\n+ url,\n+ delim_whitespace=True,\n+ names=cols,\n+ parse_dates={\n+ 'time': ['UTC_DATE', 'UTC_TIME'],\n+ 'time_local': ['LST_DATE', 'LST_TIME']\n+ },\n+ infer_datetime_format=True,\n+ na_values=nanvals)\n+ return df\n+\n+ def build_url(self,\n+ year,\n+ state,\n+ site,\n+ vector,\n+ daily=False,\n+ sub_hourly=False):\n+ if daily:\n+ beginning = self.baseurl + 'daily01/' + year + '/'\n+ fname = 'CRND0103-'\n+ elif sub_hourly:\n+ beginning = self.baseurl + 'subhourly01/' + year + '/'\n+ fname = 'CRNS0101-05-'\n+ else:\n+ beginning = self.baseurl + 'hourly02/' + year + '/'\n+ fname = 'CRNH0203-'\n+ rest = year + '-' + state + '_' + site + '_' + vector + '.txt'\n+ url = beginning + fname + rest\n+ fname = fname + rest\n+ return url, fname\n+\n+ @staticmethod\n+ def check_url(url):\n+ \"\"\"Short summary.\n+\n+ Parameters\n+ ----------\n+ url : type\n+ Description of parameter `url`.\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ \"\"\"\n+ import requests\n+ if requests.head(url).status_code < 400:\n+ return True\n+ else:\n+ return False\n+\n+ def build_urls(self, monitors, dates, daily=False, sub_hourly=False):\n+ \"\"\"Short summary.\n+\n+ Parameters\n+ ----------\n+ monitors : type\n+ Description of parameter `monitors`.\n+ dates : type\n+ Description of parameter `dates`.\n+ daily : type\n+ Description of parameter `daily` (the default is False).\n+ sub_hourly : type\n+ Description of parameter `sub_hourly` (the default is False).\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ \"\"\"\n+ print('Building and checking urls...')\n+ years = pd.DatetimeIndex(dates).year.unique().astype(str)\n+ urls = []\n+ fnames = []\n+ for i in monitors.index:\n+ for y in years:\n+ state = monitors.iloc[i].STATE\n+ site = monitors.iloc[i].LOCATION.replace(' ', '_')\n+ vector = monitors.iloc[i].VECTOR.replace(' ', '_')\n+ url, fname = self.build_url(\n+ y, state, site, vector, daily=daily, sub_hourly=sub_hourly)\n+ if self.check_url(url):\n+ urls.append(url)\n+ fnames.append(fname)\n+ print(url)\n+ return urls, fnames
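\n+\n+ # Example (sketch): the URL built for one daily file; the station\n+ # fields here are illustrative.\n+ # build_url('2017', 'NC', 'Asheville', '13_S', daily=True) ->\n+ # ('https://www1.ncdc.noaa.gov/pub/data/uscrn/products/daily01/2017/'\n+ # 'CRND0103-2017-NC_Asheville_13_S.txt',\n+ # 'CRND0103-2017-NC_Asheville_13_S.txt')\n+\n+ def retrieve(self, url, fname):\n+ \"\"\"Short summary.\n+\n+ Parameters\n+ ----------\n+ url : type\n+ Description of parameter `url`.\n+ fname : type\n+ Description of parameter `fname`.\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ 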
\"\"\"\n+ \"\"\"rdate - datetime object. Uses year and month. Day and hour are not used.\n+ state - state abbreviation to retrieve data for\n+ Files are by year month and state.\n+ \"\"\"\n+ import wget\n+\n+ if not os.path.isfile(fname):\n+ print('Retrieving: ' + fname)\n+ print(url)\n+ print('\\n')\n+ wget.download(url)\n+ else:\n+ print('File Exists: ' + fname)\n+\n+ def add_data(self,\n+ dates,\n+ daily=False,\n+ sub_hourly=False,\n+ download=False,\n+ latlonbox=None):\n+ \"\"\"Short summary.\n+\n+ Parameters\n+ ----------\n+ dates : type\n+ Description of parameter `dates`.\n+ daily : type\n+ Description of parameter `daily` (the default is False).\n+ sub_hourly : type\n+ Description of parameter `sub_hourly` (the default is False).\n+ download : type\n+ Description of parameter `download` (the default is False).\n+ latlonbox : type\n+ Description of parameter `latlonbox` (the default is None).\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ \"\"\"\n+ import dask\n+ import dask.dataframe as dd\n+ if self.monitor_df is None:\n+ self.get_monitor_df()\n+ if latlonbox is not None: # get them all[latmin,lonmin,latmax,lonmax]\n+ mdf = self.monitor_df\n+ con = (mdf.LATITUDE >=\n+ latlonbox[0]) & (mdf.LATITUDE <= latlonbox[2]) & (\n+ mdf.LONGITUDE >= latlonbox[1]) & (mdf.LONGITUDE <=\n+ latlonbox[3])\n+ monitors = mdf.loc[con].copy()\n+ else:\n+ monitors = self.monitor_df.copy()\n+ urls, fnames = self.build_urls(\n+ monitors, dates, daily=daily, sub_hourly=sub_hourly)\n+ if download:\n+ for url, fname in zip(urls, fnames):\n+ self.retrieve(url, fname)\n+ dfs = [dask.delayed(self.load_file)(i) for i in fnames]\n+ else:\n+ dfs = [dask.delayed(self.load_file)(i) for i in urls]\n+ dff = dd.from_delayed(dfs)\n+ self.df = dff.compute()\n+ self.df = pd.merge(\n+ self.df,\n+ monitors,\n+ how='left',\n+ on=['WBANNO', 'LATITUDE', 'LONGITUDE'])\n+ if ~self.df.columns.isin(['time']).max():\n+ self.df['time'] = self.df.time_local + pd.to_timedelta(\n+ self.df.GMT_OFFSET, unit='H')\n+ id_vars = self.monitor_df.columns.append(\n+ pd.Index(['time', 'time_local']))\n+ keys = self.df.columns[self.df.columns.isin(id_vars)]\n+ self.df = pd.melt(\n+ self.df, id_vars=keys, var_name='variable',\n+ value_name='obs') # this stacks columns to be inline with MONET\n+ self.df.rename(columns={'WBANNO': 'siteid'}, inplace=True)\n+ self.change_units()\n+ self.df.columns = [i.lower() for i in self.df.columns]\n+\n+ def change_units(self):\n+ \"\"\"Short summary.\n+\n+ Parameters\n+ ----------\n+ df : type\n+ Description of parameter `df`.\n+ param : type\n+ Description of parameter `param` (the default is 'O3').\n+ aqs_param : type\n+ Description of parameter `aqs_param` (the default is 'OZONE').\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ \"\"\"\n+ self.df['units'] = ''\n+ for i in self.df.variable.unique():\n+ if self.daily and i is 'SOLARAD':\n+ self.df.loc[self.df.variable == i, 'units'] = 'MJ/m^2'\n+ elif 'T_' in i:\n+ self.df.loc[self.df.variable == i, 'units'] = 'K'\n+ self.df.loc[self.df.variable == i, 'obs'] += 273.15\n+ elif 'FLAG' in i or 'TYPE' in i:\n+ pass\n+ elif 'TEMP' in i:\n+ self.df.loc[self.df.variable == i, 'units'] = 'K'\n+ self.df.loc[self.df.variable == i, 'obs'] += 273.15\n+ elif 'MOISTURE' in i:\n+ self.df.loc[self.df.variable == i, 'units'] = 'm^3/m^3'\n+ elif 'RH' in i:\n+ self.df.loc[self.df.variable == i, 'units'] = '%'\n+ elif 'P_CALC' is i:\n+ self.df.loc[self.df.variable == i, 'units'] = 'mm'\n+\n+ def set_daterange(self, begin='', 
end=''):\n+ \"\"\"Short summary.\n+\n+ Parameters\n+ ----------\n+ begin : type\n+ Description of parameter `begin` (the default is '').\n+ end : type\n+ Description of parameter `end` (the default is '').\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ \"\"\"\n+ dates = pd.date_range(\n+ start=begin, end=end, freq='H').values.astype('M8[s]').astype('O')\n+ self.dates = dates\n+\n+ def get_monitor_df(self):\n+ \"\"\"Short summary.\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ \"\"\"\n+ self.monitor_df = pd.read_csv(self.monitor_file, delimiter='\\t')\ndiff --git a/monet/obs/epa_util.py b/monet/obs/epa_util.py\n--- a/monet/obs/epa_util.py\n+++ b/monet/obs/epa_util.py\n@@ -1,9 +1,47 @@\n from __future__ import print_function\n-\n+from ..util import mystats\n from future import standard_library\n+import pandas as pd\n \n standard_library.install_aliases()\n \n+def convert_epa_unit(df, obscolumn='SO2', unit='UG/M3'):\n+ \"\"\"\n+ converts ppb to ug/m3 for SO2 in aqs and airnow datasets\n+ See 40 CFR Part 50.5, Appendix A-1 to Part 50, Appendix A-2 to Part 50.\n+ to convert from ppb to ug/m3 multiply by 2.6178.\n+\n+ Also will convert from ug/m3 to ppb.\n+\n+ Parameters\n+ ----------\n+ df : pandas dataframe\n+ self.df attribute from aqs or airnow class.\n+ obscolumn : string\n+ name of column with SO2 data in it.\n+ unit : string\n+ either 'UG/M3' or 'PPB' (not case sensitive)\n+ will convert data to this unit.\n+\n+ Returns\n+ -------\n+ df : pandas dataframe\n+ returns dataframe identical to original but with data converted to new\n+ unit.\n+ \"\"\"\n+ factor = 2.6178\n+ ppb = 'ppb'\n+ ugm3 = 'ug/m3'\n+ if unit.lower() == ugm3:\n+ df = df[df['units'] == ppb] # keep rows with units of 'ppb'\n+ df['units'] = unit.upper()\n+ df[obscolumn] = df[obscolumn] * factor\n+ elif unit.lower() == ppb:\n+ df = df[df['units'] == ugm3] # keep rows with units of 'ug/m3'\n+ df['units'] = unit.lower()\n+ df[obscolumn] = df[obscolumn] / factor\n+ return df
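\n+\n+# Example (sketch): convert AQS/AirNow SO2 from ppb to ug/m3. Only rows\n+# whose units match the source unit are kept, so subset to SO2 first.\n+#\n+# so2 = aqs.df.loc[aqs.df.variable == 'SO2']\n+# so2_ugm3 = convert_epa_unit(so2, obscolumn='obs', unit='UG/M3')\n \n def check_cmaq_units(df, param='O3', aqs_param='OZONE'):\n \"\"\"Short summary.\n@@ -23,7 +61,8 @@ def check_cmaq_units(df, param='O3', aqs_param='OZONE'):\n Description of returned object.\n \n \"\"\"\n- aunit = df[df.Species == aqs_param].Units.unique()[0]\n+ aunit = df[df.variable == aqs_param].Units.unique()[0]\n+\n if aunit == 'UG/M3':\n fac = 1.\n elif aunit == 'PPB':\n@@ -61,13 +100,23 @@ def ensure_values_indomain(df, lon, lat):\n Description of returned object.\n \n \"\"\"\n- con = ((df.Latitude.values > lat.min()) & (df.Latitude.values < lat.max()) & (\n- df.Longitude.values > lon.min()) & (df.Longitude.values < lon.max()))\n+ con = ((df.Latitude.values > lat.min()) &\n+ (df.Latitude.values < lat.max()) &\n+ (df.Longitude.values > lon.min()) &\n+ (df.Longitude.values < lon.max()))\n+\n df = df[con].copy()\n return df\n \n \n-def write_table(self, df=None, param='OZONE', fname='table', threasholds=[70, 1e5], site=None, city=None,\n+\n+def write_table(self,\n+ df=None,\n+ param='OZONE',\n+ fname='table',\n+ threasholds=[70, 1e5],\n+ site=None,\n+ city=None,\n region=None,\n state=None,\n append=False,\n@@ -113,32 +162,36 @@ def write_table(self, df=None, param='OZONE', fname='table', threasholds=[70, 1e\n if df is None:\n print('Please provide a DataFrame')\n else:\n- df = df.groupby('Species').get_group(param)\n+ df = df.groupby('variable').get_group(param)\n if not isinstance(site, type(None)):\n try:\n- df = df.groupby('SCS').get_group(site)\n+ df = 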
df.groupby('siteid').get_group(site)\n single = True\n name = site\n except KeyError:\n- print('Site Number not valid. Enter a valid SCS')\n+ print('Site Number not valid. Enter a valid siteid')\n return\n elif not isinstance(city, type(None)):\n try:\n single = True\n- names = df.get_group('MSA_Name').dropna().unique()\n+ names = df.get_group('msa_name').dropna().unique()\n name = [j for j in names if city.upper() in j.upper()]\n- df = df.groupby('Species').get_group(param).groupby('MSA_Name').get_group(name[0])\n+ df = df.groupby('variable').get_group(param).groupby(\n+ 'msa_name').get_group(name[0])\n single = True\n except KeyError:\n print(' City either does not contain montiors for ' + param)\n- print(' or City Name is not valid. Enter a valid City name: df.MSA_Name.unique()')\n+ print(\n+ ' or City Name is not valid. Enter a valid City name: '\n+ 'df.msa_name.unique()')\n return\n elif not isinstance(state, type(None)):\n try:\n single = True\n- names = df.get_group('State_Name').dropna().unique()\n+ names = df.get_group('state_name').dropna().unique()\n name = [j for j in names if state.upper() in j.upper()]\n- df = df.groupby('Species').get_group(param).groupby('State_Name').get_group(name[0])\n+ df = df.groupby('variable').get_group(param).groupby(\n+ 'state_name').get_group(name[0])\n except KeyError:\n print('State not valid. Please enter valid 2 digit state')\n return\n@@ -172,9 +225,17 @@ def write_table(self, df=None, param='OZONE', fname='table', threasholds=[70, 1e\n except KeyError:\n pass\n pd.options.display.float_format = '{:,.3f}'.format\n- stats = ['Region', 'Label', 'N', 'Obs', 'Mod', 'MB', 'NMB', 'RMSE', 'R', 'IOA', 'POD', 'FAR']\n+ stats = [\n+ 'Region', 'Label', 'N', 'Obs', 'Mod', 'MB', 'NMB', 'RMSE', 'R',\n+ 'IOA', 'POD', 'FAR'\n+ ]\n if append:\n- dff = pd.read_csv(fname + '.txt', skiprows=3, index_col=0, sep='\\s+', names=stats)\n+ dff = pd.read_csv(\n+ fname + '.txt',\n+ skiprows=3,\n+ index_col=0,\n+ sep='\\s+',\n+ names=stats)\n dd = pd.concat([dd, dff]).sort_values(by=['Region'])\n \n out = StringIO()\n@@ -182,9 +243,11 @@ def write_table(self, df=None, param='OZONE', fname='table', threasholds=[70, 1e\n out.seek(0)\n with open(fname + '.txt', 'w') as f:\n if single:\n- f.write('This is the statistics table for parameter=' + param + ' for area ' + name + '\\n')\n+ f.write('This is the statistics table for parameter=' + param +\n+ ' for area ' + name + '\\n')\n else:\n- f.write('This is the statistics table for parameter=' + param + '\\n')\n+ f.write('This is the statistics table for parameter=' + param +\n+ '\\n')\n f.write('\\n')\n f.writelines(out.readlines())\n if html:\n@@ -201,7 +264,9 @@ def write_table(self, df=None, param='OZONE', fname='table', threasholds=[70, 1e\n lines = cssstyle.split('\\n')\n with open(fname + '.html', 'r') as f:\n for line in f.readlines():\n- lines.append(line.replace('class=\"dataframe\"', 'class=\"GenericTable hoverTable\"'))\n+ lines.append(\n+ line.replace('class=\"dataframe\"',\n+ 'class=\"GenericTable hoverTable\"'))\n f.close()\n with open(fname + '.html', 'w') as f:\n for line in lines:\n@@ -227,10 +292,14 @@ def get_region(df):\n from numpy import array, concatenate\n from pandas import DataFrame, merge\n se = array(['AL', 'FL', 'GA', 'MS', 'NC', 'SC', 'TN', 'VA', 'WV'])\n- ne = array(['CT', 'DE', 'DC', 'ME', 'MD', 'MA', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT'])\n+ ne = array([\n+ 'CT', 'DE', 'DC', 'ME', 'MD', 'MA', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT'\n+ ])\n nc = array(['IL', 'IN', 'IA', 'KY', 'MI', 'MN', 'MO', 'OH', 
'WI'])\n sc = array(['AR', 'LA', 'OK', 'TX'])\n- r = array(['AZ', 'CO', 'ID', 'KS', 'MT', 'NE', 'NV', 'NM', 'ND', 'SD', 'UT', 'WY'])\n+ r = array([\n+ 'AZ', 'CO', 'ID', 'KS', 'MT', 'NE', 'NV', 'NM', 'ND', 'SD', 'UT', 'WY'\n+ ])\n p = array(['CA', 'OR', 'WA'])\n ner = array(['Northeast' for i in ne])\n ser = array(['Southeast' for i in se])\n@@ -240,11 +309,17 @@ def get_region(df):\n pr = array(['Pacific' for i in p])\n states = concatenate([se, ne, nc, sc, r, p])\n region = concatenate([ser, ner, ncr, scr, rr, pr])\n- dd = DataFrame({'State_Name': states, 'Region': region})\n- return merge(df, dd, how='left', on='State_Name')\n+ dd = DataFrame({'state_name': states, 'region': region})\n+ return merge(df, dd, how='left', on='state_name')\n \n \n-def get_epa_location_df(df, param, site='', city='', region='', epa_region='', state=''):\n+def get_epa_location_df(df,\n+ param,\n+ site='',\n+ city='',\n+ region='',\n+ epa_region='',\n+ state=''):\n \"\"\"Short summary.\n \n Parameters\n@@ -270,34 +345,266 @@ def get_epa_location_df(df, param, site='', city='', region='', epa_region='', s\n Description of returned object.\n \n \"\"\"\n- cityname = True\n- if 'MSA_Name' in df.columns:\n- cityname = True\n- else:\n- cityname = False\n- new = df.groupby('Species').get_group(param)\n+ new = df.groupby('variable').get_group(param)\n if site != '':\n- if site in new.SCS.unique():\n- df2 = new.loc[new.SCS == site]\n- title = df2.SCS.unique().astype('str')[0].zfill(9)\n+ if site in new.siteid.unique():\n+ df2 = new.loc[new.siteid == site]\n+ title = df2.siteid.unique().astype('str')[0].zfill(9)\n elif city != '':\n- names = df.MSA_Name.dropna().unique()\n+ names = df.msa_name.dropna().unique()\n for i in names:\n if i.upper().find(city.upper()) != -1:\n name = i\n print(name)\n- df2 = new[new['MSA_Name'] == name].copy().drop_duplicates()\n+ df2 = new[new['msa_name'] == name].copy().drop_duplicates()\n title = name\n elif state != '':\n- df2 = new[new['State_Name'].str.upper() == state.upper()].copy().drop_duplicates()\n+ df2 = new[new['state_name'].str.upper() ==\n+ state.upper()].copy().drop_duplicates()\n title = 'STATE: ' + state.upper()\n elif region != '':\n- df2 = new[new['Region'].str.upper() == region.upper()].copy().drop_duplicates()\n+ df2 = new[new['region'].str.upper() ==\n+ region.upper()].copy().drop_duplicates()\n title = 'REGION: ' + region.upper()\n elif epa_region != '':\n- df2 = new[new['EPA_region'].str.upper() == epa_region.upper()].copy().drop_duplicates()\n+ df2 = new[new['EPA_region'].str.upper() ==\n+ epa_region.upper()].copy().drop_duplicates()\n title = 'EPA_REGION: ' + epa_region.upper()\n else:\n df2 = new\n title = 'Domain'\n return df2, title\n+\n+def regulatory_resample(df, col='model', pollutant_standard=None):\n+ from pandas import to_timedelta, concat\n+ df['time_local'] = df.time + to_timedelta(df.gmt_offset, unit='H')\n+ if df.variable.unique()[0] == 'CO':\n+ df1 = calc_daily_max(df, rolling_frequency=1)\n+ df1['pollutant_standard'] = 'CO 1-hour 1971'\n+ df2 = calc_daily_max(df, rolling_frequency=8)\n+ df2['pollutant_standard'] = 'CO 8-hour 1971'\n+ dfreturn = concat([df1, df2], ignore_index=True)\n+ elif df.variable.unique()[0] == 'OZONE':\n+ dfreturn = calc_daily_max(df, rolling_frequency=8)\n+ elif df.variable.unique()[0] == 'SO2':\n+ df1 = calc_daily_max(df, rolling_frequency=1)\n+ df1['pollutant_standard'] = 'SO2 1-hour 1971'\n+ df2 = calc_daily_max(df, rolling_frequency=3)\n+ df2['pollutant_standard'] = 'SO2 3-hour 1971'\n+ dfreturn = concat([df1, df2], 
ignore_index=True)\n+ elif df.variable.unique()[0] == 'NO2':\n+ dfreturn = calc_daily_max(df, rolling_frequency=1)\n+ else: # do daily average\n+ dfn = df.drop_duplicates(subset=['siteid'])\n+ df = df.groupby('siteid')[col].resample(\n+ 'D').mean().reset_index().rename(columns={'level_1': 'time_local'})\n+ dfreturn = df.merge(dfn, how='left', on='siteid')\n+ return dfreturn\n+\n+\n+def calc_daily_max(df, param=None, rolling_frequency=8):\n+ from pandas import Index, to_timedelta\n+ if param is None:\n+ temp = df.copy()\n+ else:\n+ temp = df.groupby('variable').get_group(param)\n+ temp.index = temp.time_local\n+ if rolling_frequency > 1:\n+ g = temp.groupby('siteid')['model', 'gmt_offset'].rolling(\n+ rolling_frequency, center=True, win_type='boxcar').mean()\n+ q = g.reset_index(level=0)\n+ k = q.groupby('siteid').resample('D').max().reset_index(\n+ level=1).reset_index(drop='siteid').dropna()\n+ else:\n+ k = temp.groupby('siteid')['model', 'gmt_offset'].resample(\n+ 'D').max().reset_index().rename(columns={\n+ 'level_1': 'time_local'\n+ })\n+ columnstomerge = temp.columns[~temp.columns.isin(k.columns) *\n+ (temp.columns != 'time')].append(\n+ Index(['siteid']))\n+ if param is None:\n+ dff = k.merge(\n+ df[columnstomerge], on='siteid',\n+ how='left').drop_duplicates(subset=['siteid', 'time_local'])\n+ else:\n+ dff = k.merge(\n+ df.groupby('variable').get_group(param)[columnstomerge],\n+ on='siteid',\n+ how='left').drop_duplicates(subset=['siteid', 'time_local'])\n+ dff['time'] = dff.time_local - to_timedelta(dff.gmt_offset, unit='H')\n+ return dff\n+\n+\n+def convert_statenames_to_abv(df):\n+ d = {\n+ 'Alabama': 'AL',\n+ 'Alaska': 'AK',\n+ 'Arizona': 'AZ',\n+ 'Arkansas': 'AR',\n+ 'California': 'CA',\n+ 'Colorado': 'CO',\n+ 'Connecticut': 'CT',\n+ 'Delaware': 'DE',\n+ 'Florida': 'FL',\n+ 'Georgia': 'GA',\n+ 'Hawaii': 'HI',\n+ 'Idaho': 'ID',\n+ 'Illinois': 'IL',\n+ 'Indiana': 'IN',\n+ 'Iowa': 'IA',\n+ 'Kansas': 'KS',\n+ 'Kentucky': 'KY',\n+ 'Louisiana': 'LA',\n+ 'Maine': 'ME',\n+ 'Maryland': 'MD',\n+ 'Massachusetts': 'MA',\n+ 'Michigan': 'MI',\n+ 'Minnesota': 'MN',\n+ 'Mississippi': 'MS',\n+ 'Missouri': 'MO',\n+ 'Montana': 'MT',\n+ 'Nebraska': 'NE',\n+ 'Nevada': 'NV',\n+ 'New Hampshire': 'NH',\n+ 'New Jersey': 'NJ',\n+ 'New Mexico': 'NM',\n+ 'New York': 'NY',\n+ 'North Carolina': 'NC',\n+ 'North Dakota': 'ND',\n+ 'Ohio': 'OH',\n+ 'Oklahoma': 'OK',\n+ 'Oregon': 'OR',\n+ 'Pennsylvania': 'PA',\n+ 'Rhode Island': 'RI',\n+ 'South Carolina': 'SC',\n+ 'South Dakota': 'SD',\n+ 'Tennessee': 'TN',\n+ 'Texas': 'TX',\n+ 'Utah': 'UT',\n+ 'Vermont': 'VT',\n+ 'Virginia': 'VA',\n+ 'Washington': 'WA',\n+ 'West Virginia': 'WV',\n+ 'Wisconsin': 'WI',\n+ 'Wyoming': 'WY'\n+ }\n+ for i in d:\n+ df['state_name'].loc[df.state_name.isin([i])] = d[i]\n+ df['state_name'].loc[df.state_name.isin(['Canada'])] = 'CC'\n+ df['state_name'].loc[df.state_name.isin(['Mexico'])] = 'MM'\n+ return df
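\n+\n+\n+# Example (sketch): read_monitor_file() below returns merged EPA/AirNow\n+# monitor metadata keyed by siteid; restrict it to one network like so:\n+#\n+# monitors = read_monitor_file(network='IMPROVE')\n+\n+\n+def read_monitor_file(network=None, airnow=False, drop_latlon=True):\n+ import pandas as pd\n+ import os\n+ if airnow:\n+ monitor_airnow_url = 'https://s3-us-west-1.amazonaws.com//files.airnowtech.org/airnow/today/monitoring_site_locations.dat'\n+ colsinuse = [\n+ 0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,\n+ 20, 21\n+ ]\n+ airnow = pd.read_csv(\n+ monitor_airnow_url,\n+ delimiter='|',\n+ header=None,\n+ usecols=colsinuse,\n+ dtype={0: str},\n+ encoding=\"ISO-8859-1\")\n+ airnow.columns = [\n+ 'siteid', 'Site_Code', 'Site_Name', 'Status', 'Agency',\n+ 'Agency_Name', 'EPA_region', 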
'latitude', 'longitude', 'Elevation',\n+ 'GMT_Offset', 'Country_Code', 'CMSA_Code', 'CMSA_Name', 'MSA_Code',\n+ 'MSA_Name', 'state_Code', 'state_Name', 'County_Code',\n+ 'County_Name', 'City_Code'\n+ ]\n+ airnow['airnow_flag'] = 'AIRNOW'\n+ airnow.columns = [i.lower() for i in airnow.columns]\n+ return airnow\n+ else:\n+ try:\n+ basedir = os.path.abspath(os.path.dirname(__file__))[:-3]\n+ fname = os.path.join(basedir, 'data',\n+ 'monitoring_site_locations.hdf')\n+ print('Monitor File Path: ' + fname)\n+ sss = pd.read_hdf(fname)\n+ # monitor_drop = ['state_code', u'county_code']\n+ # s.drop(monitor_drop, axis=1, inplace=True)\n+ except Exception:\n+ print('Monitor File Not Found... Reprocessing')\n+ baseurl = 'https://aqs.epa.gov/aqsweb/airdata/'\n+ site_url = baseurl + 'aqs_sites.zip'\n+ # has network info (CSN IMPROVE etc....)\n+ monitor_url = baseurl + 'aqs_monitors.zip'\n+ # Airnow monitor file\n+ monitor_airnow_url = 'https://s3-us-west-1.amazonaws.com//files.airnowtech.org/airnow/today/monitoring_site_locations.dat'\n+ colsinuse = [\n+ 0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,\n+ 19, 20, 21\n+ ]\n+ airnow = pd.read_csv(\n+ monitor_airnow_url,\n+ delimiter='|',\n+ header=None,\n+ usecols=colsinuse,\n+ dtype={0: str},\n+ encoding=\"ISO-8859-1\")\n+ airnow.columns = [\n+ 'siteid', 'Site_Code', 'Site_Name', 'Status', 'Agency',\n+ 'Agency_Name', 'EPA_region', 'latitude', 'longitude',\n+ 'Elevation', 'GMT_Offset', 'Country_Code', 'CMSA_Code',\n+ 'CMSA_Name', 'MSA_Code', 'MSA_Name', 'state_Code',\n+ 'state_Name', 'County_Code', 'County_Name', 'City_Code'\n+ ]\n+ airnow['airnow_flag'] = 'AIRNOW'\n+ airnow.columns = [i.lower() for i in airnow.columns]\n+ # Read EPA Site file\n+ site = pd.read_csv(site_url, encoding='ISO-8859-1')\n+ # read epa monitor file\n+ monitor = pd.read_csv(monitor_url, encoding='ISO-8859-1')\n+ # make siteid column\n+ site['siteid'] = site['State Code'].astype(str).str.zfill(\n+ 2) + site['County Code'].astype(str).str.zfill(\n+ 3) + site['Site Number'].astype(str).str.zfill(4)\n+ monitor['siteid'] = monitor['State Code'].astype(str).str.zfill(\n+ 2) + monitor['County Code'].astype(str).str.zfill(\n+ 3) + monitor['Site Number'].astype(str).str.zfill(4)\n+ site.columns = [i.replace(' ', '_') for i in site.columns]\n+ s = monitor.merge(\n+ site[['siteid', 'Land_Use', 'Location_Setting', 'GMT_Offset']],\n+ on=['siteid'],\n+ how='left')\n+ s.columns = [i.replace(' ', '_').lower() for i in s.columns]\n+ monitor_drop = [\n+ 'state_code', u'county_code', u'site_number',\n+ 'extraction_date', 'parameter_code', 'parameter_name', 'poc',\n+ 'last_sample_date', 'pqao', 'reporting_agency', 'exclusions',\n+ u'monitoring_objective', 'last_method_code', 'last_method',\n+ u'naaqs_primary_monitor', u'qa_primary_monitor'\n+ ]\n+ s.drop(monitor_drop, axis=1, inplace=True)\n+ # drop airnow keys for merge\n+ airnow_drop = [\n+ u'site_Code', u'site_Name', u'status', u'agency',\n+ 'agency_name', 'country_code', u'cmsa_code', 'state_code',\n+ u'county_code', u'city_code', u'latitude', u'longitude',\n+ 'gmt_offset', 'state_name', 'county_name'\n+ ]\n+ airnow_drop = [i.lower() for i in airnow_drop]\n+ airnow.drop(airnow_drop, axis=1, inplace=True)\n+ ss = pd.concat([s, airnow], ignore_index=True, sort=True)\n+ sss = convert_statenames_to_abv(ss).dropna(\n+ subset=['latitude', 'longitude'])\n+ if network is not None:\n+ sss = sss.loc[sss.networks.isin(\n+ [network])].drop_duplicates(subset=['siteid'])\n+ # Getting error that 'latitude' 'longitude' not contained in axis\n+ 
drop_latlon = False\n+ if drop_latlon:\n+ if pd.Series(sss.keys()).isin(['latitude', 'longitude']).max():\n+ return sss.drop(\n+ ['latitude', 'longitude'], axis=1).drop_duplicates()\n+ else:\n+ return sss.drop_duplicates()\ndiff --git a/monet/obs/goes.py b/monet/obs/goes.py\nnew file mode 100644\n--- /dev/null\n+++ b/monet/obs/goes.py\n@@ -0,0 +1,55 @@\n+\"\"\" this will read the GOES-16 data\"\"\"\n+import xarray as xr\n+from ..grids import _geos_16_grid\n+\n+\n+def _get_swath_from_fname(fname):\n+ vert_grid_num = fname.split('.')[-4].split('v')[-1]\n+ hori_grid_num = fname.split('.')[-4].split('v')[0].split('h')[-1]\n+ return hori_grid_num, vert_grid_num\n+\n+\n+def _get_time_from_fname(fname):\n+ import pandas as pd\n+ u = pd.Series([fname.split('.')[-2]])\n+ date = pd.to_datetime(u, format='%Y%j%H%M%S')[0]\n+ return date\n+\n+\n+def _open_single_file(fname):\n+ # open the file\n+ dset = xr.open_dataset(fname)\n+ dset = dset.rename({'t': 'time'})\n+ # get the area def\n+ area = _geos_16_grid(dset)\n+ dset.attrs['area'] = area\n+ # get proj4 string\n+ dset.attrs['proj4_srs'] = area.proj_str\n+ # get longitude and latitudes\n+ lon, lat = area.get_lonlats_dask()\n+ dset.coords['longitude'] = (('y', 'x'), lon)\n+ dset.coords['latitude'] = (('y', 'x'), lat)\n+\n+ for i in dset.variables:\n+ dset[i].attrs['proj4_srs'] = area.proj_str\n+ dset[i].attrs['area'] = area\n+\n+ # expand dimensions for time\n+ dset = dset.expand_dims('time')\n+ return dset\n+\n+\n+def open_files(fname):\n+\n+ if isinstance(fname, str):\n+ # single file\n+ dset = _open_single_file(fname)\n+ else:\n+ # loop over dsets and combine\n+ dsets = []\n+ for i in fname:\n+ dsets.append(_open_single_file(i))\n+\n+ dset = xr.auto_combine(dsets, concat_dim='time')\n+\n+ return dset
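\n+\n+\n+# Example (sketch): open several GOES-16 files and stack them on the time\n+# dimension; the file names here are hypothetical.\n+#\n+# from monet.obs.goes import open_files\n+# dset = open_files(['goes16_aod_t0.nc', 'goes16_aod_t1.nc'])\ndiff --git a/monet/obs/icartt.py b/monet/obs/icartt.py\nnew file mode 100644\n--- /dev/null\n+++ b/monet/obs/icartt.py\n@@ -0,0 +1,29 @@\n+\"\"\" This module opens data from the ICARTT format and reformats it for use in\n+MONET. The module makes use of Barron Henderson's PseudoNetCDF\n+(https://github.com/barronh/pseudonetcdf) and xarray to read the data. 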
\ndiff --git a/monet/obs/icartt.py b/monet/obs/icartt.py\nnew file mode 100644\n--- /dev/null\n+++ b/monet/obs/icartt.py\n@@ -0,0 +1,29 @@\n+""" This module opens data from the ICARTT format and reformats it for use in\n+MONET. The module makes use of Barron Henderson's PseudoNetCDF\n+(https://github.com/barronh/pseudonetcdf) and xarray to read the data. It is\n+not intended to read more than ONE file at a time """\n+\n+import xarray as xr\n+from pandas import Series, Timestamp, to_timedelta\n+\n+possible_lats = [\n+ 'Lat', 'Latitude', 'lat', 'latitude', 'Latitude_Deg', 'Latitude_deg',\n+ 'Lat_deg', 'Lat_degree', 'Lat_Degree'\n+]\n+possible_lons = [\n+ 'Lon', 'Longitude', 'lon', 'longitude', 'Longitude_Deg', 'Longitude_deg',\n+ 'Lon_deg', 'Lon_degree', 'Lon_Degree', 'Long', 'Long_Deg'\n+]\n+\n+# TODO: def add_data(fname, time_label='UTC', lat_label=None, lon_label=None):\n+# dset = xr.open_dataset(fname, engine='pseudonetcdf')\n+# vars = Series(dset.variables)\n+# if lat_label is None and lon_label is None:\n+# if vars.isin(possible_lats).max():\n+# latvar = vars.loc[vars.isin(possible_lats)][0]\n+# lonvar = vars.loc[vars.isin(possible_lons)][0]\n+# dset.coords['longitude'] = dset[lonvar]\n+# dset.coords['latitude'] = dset[latvar]\n+# # get the datetimes\n+# start = dset.SDATE.replace(', ', '-')\n+# time = Timestamp(start) + to_timedelta(dset[time_name])
\ndiff --git a/monet/obs/improve_mod.py b/monet/obs/improve_mod.py\nnew file mode 100644\n--- /dev/null\n+++ b/monet/obs/improve_mod.py\n@@ -0,0 +1,176 @@\n+from __future__ import print_function\n+\n+from builtins import object\n+\n+import pandas as pd\n+from numpy import NaN\n+\n+\n+class IMPROVE(object):\n+ """Short summary.\n+\n+ Attributes\n+ ----------\n+ datestr : type\n+ Description of attribute `datestr`.\n+ df : type\n+ Description of attribute `df`.\n+ daily : type\n+ Description of attribute `daily`.\n+ se_states : type\n+ Description of attribute `se_states`.\n+ ne_states : type\n+ Description of attribute `ne_states`.\n+ nc_states : type\n+ Description of attribute `nc_states`.\n+ sc_states : type\n+ Description of attribute `sc_states`.\n+ r_states : type\n+ Description of attribute `r_states`.\n+ p_states : type\n+ Description of attribute `p_states`.\n+\n+ """\n+\n+ def __init__(self):\n+ self.datestr = []\n+ self.df = None\n+ self.daily = True\n+\n+ def add_data(self, fname, add_meta=False, delimiter='\\t'):\n+ """ This assumes that you have downloaded the data from\n+ http://views.cira.colostate.edu/fed/DataWizard/Default.aspx\n+ (the IMPROVE Aerosol dataset). Any number of sites may be included.\n+ Parameters included are All.\n+ Fields include Dataset, Site, Date, Parameter, POC, Data_value,\n+ Unit, Latitude, Longitude, State, EPA Site Code. Options are\n+ ',' delimited, data only, and the normalized skinny format.\n+\n+ Parameters\n+ ----------\n+ fname : string\n+ path to the downloaded IMPROVE data file.\n+ add_meta : bool\n+ merge the EPA monitor metadata onto the result\n+ (the default is False).\n+ delimiter : string\n+ column delimiter in the downloaded file (the default is '\\t').\n+\n+ Returns\n+ -------\n+ pandas.DataFrame\n+ the IMPROVE data reformatted for use in MONET.\n+\n+ """\n+ from .epa_util import read_monitor_file\n+ f = open(fname, 'r')\n+ lines = f.readlines()\n+ f.close()\n+ skiprows = 0\n+ skip = False\n+ for i, line in enumerate(lines):\n+ if line == 'Data\\n':\n+ skip = True\n+ skiprows = i + 1\n+ break\n+ # if metadata is included\n+ if skip:\n+ df = pd.read_csv(\n+ fname,\n+ delimiter=delimiter,\n+ parse_dates=[2],\n+ infer_datetime_format=True,\n+ dtype={'EPACode': str},\n+ skiprows=skiprows)\n+ else:\n+ df = pd.read_csv(\n+ fname,\n+ delimiter=delimiter,\n+ parse_dates=[2],\n+ infer_datetime_format=True,\n+ dtype={'EPACode': str})\n+ df.rename(columns={'EPACode': 'epaid'}, inplace=True)\n+ df.rename(columns={'Val': 'Obs'}, inplace=True)\n+ df.rename(columns={'State': 'state_name'}, inplace=True)\n+ df.rename(columns={'ParamCode': 'variable'}, inplace=True)\n+ df.rename(columns={'SiteCode': 'siteid'},
inplace=True)\n+ df.rename(columns={'Unit': 'Units'}, inplace=True)\n+ df.rename(columns={'Date': 'time'}, inplace=True)\n+ df.drop('Dataset', axis=1, inplace=True)\n+ df['time'] = pd.to_datetime(df.time, format='%Y%m%d')\n+ df.columns = [i.lower() for i in df.columns]\n+ if pd.Series(df.keys()).isin(['epaid']).max():\n+ df['epaid'] = df.epaid.astype(str).str.zfill(9)\n+ if add_meta:\n+ monitor_df = read_monitor_file(network='IMPROVE')\n+ df = df.merge(\n+ monitor_df, how='left', left_on='epaid', right_on='siteid')\n+ df.drop(['siteid_y', 'state_name_y'], inplace=True, axis=1)\n+ df.rename(\n+ columns={\n+ 'siteid_x': 'siteid',\n+ 'state_name_x': 'state_name'\n+ },\n+ inplace=True)\n+\n+ try:\n+ df.obs.loc[df.obs < df.mdl] = NaN\n+ except Exception:\n+ df.obs.loc[df.obs < -900] = NaN\n+ self.df = df\n+ return df.copy()\n+\n+ def load_hdf(self, fname, dates):\n+ """Short summary.\n+\n+ Parameters\n+ ----------\n+ fname : type\n+ Description of parameter `fname`.\n+ dates : type\n+ Description of parameter `dates`.\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ """\n+ self.df = pd.read_hdf(fname)\n+ self.get_date_range(dates)\n+\n+ def get_date_range(self, dates):\n+ """Short summary.\n+\n+ Parameters\n+ ----------\n+ dates : type\n+ Description of parameter `dates`.\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ """\n+ self.dates = dates\n+ con = (self.df.time >= dates[0]) & (self.df.time <= dates[-1])\n+ self.df = self.df.loc[con]\n+\n+ def set_daterange(self, begin='', end=''):\n+ """Short summary.\n+\n+ Parameters\n+ ----------\n+ begin : type\n+ Description of parameter `begin` (the default is '').\n+ end : type\n+ Description of parameter `end` (the default is '').\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ """\n+ dates = pd.date_range(\n+ start=begin, end=end, freq='H').values.astype('M8[s]').astype('O')\n+ self.dates = dates
\ndiff --git a/monet/obs/ish_mod.py b/monet/obs/ish_mod.py\nnew file mode 100644\n--- /dev/null\n+++ b/monet/obs/ish_mod.py\n@@ -0,0 +1,278 @@\n+"""Python module for reading NOAA ISH files"""\n+from __future__ import division, print_function\n+from builtins import object, zip\n+\n+import dask\n+import dask.dataframe as dd\n+import numpy as np\n+import pandas as pd\n+from dask.diagnostics import ProgressBar\n+from future import standard_library\n+from past.utils import old_div\n+\n+standard_library.install_aliases()\n+\n+ProgressBar().register()\n+\n+\n+class ISH(object):\n+ """\n+ Integrated Surface Hourly (also known as ISD, Integrated Surface Data)\n+ """\n+\n+ def __init__(self):\n+ self.WIDTHS = [\n+ 4, 11, 8, 4, 1, 6, 7, 5, 5, 5, 4, 3, 1, 1, 4, 1, 5, 1, 1, 1, 6, 1,\n+ 1, 1, 5, 1, 5, 1, 5, 1\n+ ]\n+ self.DTYPES = [\n+ ('varlength', 'i2'), ('station_id', 'S11'), ('date', 'i4'),\n+ ('htime', 'i2'), ('source_flag', 'S1'), ('latitude', 'float'),\n+ ('longitude', 'float'), ('code', 'S5'), ('elev', 'i2'),\n+ ('call_letters', 'S5'), ('qc_process', 'S4'), ('wdir', 'i2'),\n+ ('wdir_quality', 'S1'), ('wdir_type', 'S1'), ('ws', 'i2'),\n+ ('ws_quality', 'S1'), ('ceiling', 'i4'), ('ceiling_quality', 'S1'),\n+ ('ceiling_code', 'S1'), ('ceiling_cavok', 'S1'), ('vsb', 'i4'),\n+ ('vsb_quality', 'S1'), ('vsb_variability', 'S1'),\n+ ('vsb_variability_quality', 'S1'), ('t', 'i2'), ('t_quality', 'S1'),\n+ ('dpt', 'i2'), ('dpt_quality', 'S1'), ('p', 'i4'),\n+ ('p_quality', 'S1')\n+ ]\n+ self.NAMES, _ = list(zip(*self.DTYPES))\n+ self.history_file = ('https://www1.ncdc.noaa.gov/pub/data/noaa/'\n+ 'isd-history.csv')\n+ self.history = None\n+ self.daily = False
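\n+ # WIDTHS holds the character width of each fixed-width ISD field and\n+ # DTYPES the matching (name, dtype) pair; delimit() and\n+ # read_data_frame() below rely on the two lists staying in step.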
\n+\n+ def delimit(self, file_object, delimiter=','):\n+ """Iterate over the lines in a file yielding comma delimited versions.\n+\n+ Arguments\n+ ---------\n+ file_object : file or filename\n+\n+ """\n+\n+ try:\n+ file_object = open(file_object)\n+ except TypeError:\n+ pass\n+\n+ for line in file_object:\n+ items = []\n+ index = 0\n+ for w in self.WIDTHS:\n+ items.append(line[index:index + w])\n+ index = index + w\n+ yield ','.join(items)\n+\n+ def _clean_column(self, series, missing=9999, multiplier=1):\n+ series = series.apply(float)\n+ series[series == missing] = np.nan\n+ return old_div(series, multiplier)\n+\n+ def _clean_column_by_name(self, frame, name, *args, **kwargs):\n+ frame[name] = self._clean_column(frame[name], *args, **kwargs)\n+ return frame\n+\n+ def _clean(self, frame):\n+ """Clean up the data frame"""\n+\n+ # index by time\n+ frame['time'] = [\n+ pd.Timestamp('{:08}{:04}'.format(date, htime))\n+ for date, htime in zip(frame['date'], frame['htime'])\n+ ]\n+ # these fields were combined into 'time'\n+ frame.drop(['date', 'htime'], axis=1, inplace=True)\n+ frame.set_index('time', drop=True, inplace=True)\n+ frame = self._clean_column_by_name(frame, 'wdir', missing=999)\n+ frame = self._clean_column_by_name(frame, 'ws', multiplier=10)\n+ frame = self._clean_column_by_name(frame, 'ceiling', missing=99999)\n+ frame = self._clean_column_by_name(frame, 'vsb', missing=999999)\n+ frame = self._clean_column_by_name(frame, 't', multiplier=10)\n+ frame = self._clean_column_by_name(frame, 'dpt', multiplier=10)\n+ frame = self._clean_column_by_name(\n+ frame, 'p', multiplier=10, missing=99999)\n+ return frame\n+\n+ def read_data_frame(self, file_object):\n+ """Create a data frame from an ISH file."""\n+ frame_as_array = np.genfromtxt(\n+ file_object, delimiter=self.WIDTHS, dtype=self.DTYPES)\n+ frame = pd.DataFrame.from_records(frame_as_array)\n+ df = self._clean(frame)\n+ df.drop(['latitude', 'longitude'], axis=1, inplace=True)\n+ index = (df.index >= self.dates.min()) & (df.index <= self.dates.max())\n+\n+ return df.loc[index, :].reset_index()\n+\n+ def read_ish_history(self):\n+ """ read ISH history file """\n+ fname = self.history_file\n+ self.history = pd.read_csv(\n+ fname, parse_dates=['BEGIN', 'END'], infer_datetime_format=True)\n+ self.history.columns = [i.lower() for i in self.history.columns]\n+\n+ index1 = (self.history.end >= self.dates.min()) & (\n+ self.history.begin <= self.dates.max())\n+ self.history = self.history.loc[index1, :].dropna(\n+ subset=['lat', 'lon'])\n+\n+ self.history.loc[:, 'usaf'] = self.history.usaf.astype(\n+ 'str').str.zfill(6)\n+ self.history.loc[:, 'wban'] = self.history.wban.astype(\n+ 'str').str.zfill(5)\n+ self.history['station_id'] = self.history.usaf + self.history.wban\n+ self.history.rename(\n+ columns={\n+ 'lat': 'latitude',\n+ 'lon': 'longitude'\n+ }, inplace=True)
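\n+\n+ # Example (a sketch of the intended call order; dates must be set\n+ # before the history file is read):\n+ # >>> ish = ISH()\n+ # >>> ish.dates = pd.date_range('2018-01-01', '2018-01-02', freq='H')\n+ # >>> ish.read_ish_history() # stations active in the date range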
\n+\n+ def subset_sites(self,\n+ latmin=32.65,\n+ lonmin=-113.3,\n+ latmax=34.5,\n+ lonmax=-110.4):\n+ """ find sites within the designated region"""\n+ latindex = (self.history.latitude >= latmin) & (\n+ self.history.latitude <= latmax)\n+ lonindex = (self.history.longitude >= lonmin) & (\n+ self.history.longitude <= lonmax)\n+ dfloc = self.history.loc[latindex & lonindex, :]\n+ print('SUBSET')\n+ print(dfloc.latitude.unique())\n+ print(dfloc.longitude.unique())\n+ return dfloc\n+\n+ def add_data(self,\n+ dates,\n+ box=None,\n+ country=None,\n+ state=None,\n+ site=None,\n+ resample=True,\n+ window='H'):\n+ """\n+ dates : list of datetime objects\n+ dates to retrieve\n+ box : list of floats\n+ [latmin, lonmin, latmax, lonmax]\n+ country : string\n+ country code from the ISH history file\n+ state : string\n+ two letter state code\n+ site : string\n+ station_id (usaf + wban)\n+ resample : boolean\n+ resample the observations to the given window\n+ window : string\n+ pandas resample frequency (the default is 'H')\n+ """\n+ from numpy import NaN\n+ self.dates = pd.to_datetime(dates)\n+ idate = dates[0]\n+ year = idate.strftime('%Y')\n+ url = 'https://www1.ncdc.noaa.gov/pub/data/noaa/' + year + '/'\n+ if self.history is None:\n+ self.read_ish_history()\n+ self.history['fname'] = url + self.history.usaf + \\\n+ '-' + self.history.wban + '-' + year + '.gz'\n+ dfloc = self.history.copy()\n+ if box is not None:\n+ print('Retrieving Sites in: ' + ' '.join(map(str, box)))\n+ dfloc = self.subset_sites(\n+ latmin=box[0], lonmin=box[1], latmax=box[2], lonmax=box[3])\n+ elif country is not None:\n+ print('Retrieving Country: ' + country)\n+ dfloc = self.history.loc[self.history.ctry == country, :]\n+ elif state is not None:\n+ print('Retrieving State: ' + state)\n+ dfloc = self.history.loc[self.history.state == state, :]\n+ elif site is not None:\n+ print('Retrieving Site: ' + site)\n+ dfloc = self.history.loc[self.history.station_id == site, :]\n+ print(dfloc.fname.unique())\n+ objs = self.get_url_file_objs(dfloc.fname.unique())\n+ print(' Reading ISH into pandas DataFrame...')\n+ dfs = [dask.delayed(self.read_data_frame)(f) for f in objs]\n+ dff = dd.from_delayed(dfs)\n+ self.df = dff.compute()\n+ self.df.loc[self.df.vsb == 99999, 'vsb'] = NaN\n+ if resample:\n+ print(' Resampling to every ' + window)\n+ self.df.index = self.df.time\n+ self.df = self.df.groupby('station_id').resample(\n+ window).mean().reset_index()\n+ # station_id is read back as a byte literal while dfloc stores\n+ # strings, so decode it before merging on station_id.\n+ try:\n+ self.df['station_id'] = self.df['station_id'].str.decode("utf-8")\n+ except AttributeError:\n+ pass\n+ self.df = self.df.merge(\n+ dfloc[['station_id', 'latitude', 'longitude', 'station name']],\n+ on=['station_id'],\n+ how='left')\n+\n+ return self.df.copy()\n+\n+ def get_url_file_objs(self, fname):\n+ """download each url and return the list of local file names"""\n+ import gzip\n+ import shutil\n+ import requests\n+ objs = []\n+ print(' Constructing ISH file objects from urls...')\n+ mmm = 0\n+ jjj = 0\n+ for iii in fname:\n+ try:\n+ r2 = requests.get(iii, stream=True)\n+ temp = iii.split('/')\n+ temp = temp[-1]\n+ fname = 'isd.' + temp.replace('.gz', '')\n+ if r2.status_code != 404:\n+ objs.append(fname)\n+ with open(fname, 'wb') as fid:\n+ # TODO. currently shutil writes the file to the hard\n+ # drive.
try to find a way around this step, so the file does\n+ # not need to be written and then read.\n+ gzip_file = gzip.GzipFile(fileobj=r2.raw)\n+ shutil.copyfileobj(gzip_file, fid)\n+ print('SUCCEEDED REQUEST for ' + iii)\n+ else:\n+ print('404 message ' + iii)\n+ mmm += 1\n+ except Exception:\n+ jjj += 1\n+ print('REQUEST FAILED ' + iii)\n+ pass\n+ if jjj > 100:\n+ print('Over ' + str(jjj) + ' failed. break loop')\n+ break\n+ return objs
\ndiff --git a/monet/obs/modis_swath.py b/monet/obs/modis_swath.py\nnew file mode 100644\n--- /dev/null\n+++ b/monet/obs/modis_swath.py\n@@ -0,0 +1,36 @@\n+# MODIS Swath data\n+""" this will read the modis data"""\n+import xarray as xr\n+from ..grids import get_modis_latlon_from_swath_hv, get_sinu_area_def\n+\n+\n+def _get_swath_from_fname(fname):\n+ vert_grid_num = fname.split('.')[-4].split('v')[-1]\n+ hori_grid_num = fname.split('.')[-4].split('v')[0].split('h')[-1]\n+ return hori_grid_num, vert_grid_num\n+\n+\n+def _get_time_from_fname(fname):\n+ import pandas as pd\n+ u = pd.Series([fname.split('.')[-2]])\n+ date = pd.to_datetime(u, format='%Y%j%H%M%S')[0]\n+ return date\n+\n+\n+def open_single_file(fname):\n+ # first get the h,v from the file name\n+ h, v = _get_swath_from_fname(fname)\n+ # get the timestamp from the file name\n+ timestamp = _get_time_from_fname(fname)\n+ # open the dataset\n+ dset = xr.open_dataset(fname)\n+ # rename x and y dimensions\n+ dset = dset.rename({'XDim:MOD_Grid_BRDF': 'x', 'YDim:MOD_Grid_BRDF': 'y'})\n+ # get lat lon from dset and h, v\n+ dset = get_modis_latlon_from_swath_hv(h, v, dset)\n+ # get the area_def\n+ dset.attrs['area'] = get_sinu_area_def(dset)\n+ # set the time\n+ dset['time'] = timestamp\n+\n+ return dset
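\n+\n+# Example usage (a hedged sketch; the MODIS file name below is illustrative\n+# only -- the parser expects the hXXvYY tile and production timestamp fields):\n+# >>> from monet.obs import modis_swath\n+# >>> dset = modis_swath.open_single_file(\n+# ...     'MCD43.A2018001.h10v05.006.2018010123456.hdf')\n+# >>> dset.attrs['area'] # sinusoidal area definition for the h10/v05 tile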
\ndiff --git a/monet/obs/nadp_mod.py b/monet/obs/nadp_mod.py\nnew file mode 100644\n--- /dev/null\n+++ b/monet/obs/nadp_mod.py\n@@ -0,0 +1,186 @@\n+""" READS NADP DATA """\n+from __future__ import division, print_function\n+\n+import os\n+\n+from builtins import object\n+\n+import pandas as pd\n+from numpy import NaN\n+\n+\n+class NADP(object):\n+ def __init__(self):\n+ self.weekly = True\n+ self.network = None\n+ self.df = pd.DataFrame()\n+ self.objtype = 'NADP'\n+ self.url = None\n+ # local fallback directory for the site metadata files used below\n+ self.__path__ = os.path.abspath(os.path.dirname(__file__))\n+\n+ def build_url(self, network='NTN', siteid=None):\n+ baseurl = 'http://nadp.slh.wisc.edu/datalib/'\n+ if siteid is not None:\n+ siteid = siteid.upper() + '-'\n+ else:\n+ siteid = ''\n+ if network.lower() == 'amnet':\n+ url = 'http://nadp.slh.wisc.edu/datalib/AMNet/AMNet-All.zip'\n+ elif network.lower() == 'amon':\n+ url = 'http://nadp.slh.wisc.edu/dataLib/AMoN/csv/all-ave.csv'\n+ elif network.lower() == 'airmon':\n+ url = 'http://nadp.slh.wisc.edu/datalib/AIRMoN/AIRMoN-ALL.csv'\n+ else:\n+ if self.weekly:\n+ url = (baseurl + network.lower() + '/weekly/' + siteid +\n+ network.upper() + '-All-w.csv')\n+ else:\n+ url = (baseurl + network.lower() + '/annual/' + siteid +\n+ network.upper() + '-All-a.csv')\n+ return url\n+\n+ def network_names(self):\n+ print('Available Networks: AMNET, NTN, MDN, AIRMON, AMON')\n+\n+ def read_ntn(self, url):\n+ print('Reading NADP-NTN Data...')\n+ print(url)\n+ # header = self.get_columns()\n+ df = pd.read_csv(url, infer_datetime_format=True, parse_dates=[2, 3])\n+ df.columns = [i.lower() for i in df.columns]\n+ df.rename(\n+ columns={\n+ 'dateon': 'time',\n+ 'dateoff': 'time_off'\n+ }, inplace=True)\n+ try:\n+ meta = pd.read_csv('https://bit.ly/2sPMvaO')\n+ except Exception:\n+ meta = pd.read_csv(self.__path__ + '/../../data/ntn-sites.csv')\n+ meta.columns = [i.lower() for i in meta.columns]\n+ meta.drop(['startdate', 'stopdate'], axis=1, inplace=True)\n+ dfn = pd.merge(df, meta, on='siteid', how='left')\n+ dfn.dropna(subset=['latitude', 'longitude'], inplace=True)\n+ dfn.loc[(dfn.flagmg == '<') | (dfn.mg < 0), 'mg'] = NaN\n+ dfn.loc[(dfn.flagbr == '<') | (dfn.br < 0), 'br'] = NaN\n+ dfn.loc[(dfn.flagso4 == '<') | (dfn.so4 < 0), 'so4'] = NaN\n+ dfn.loc[(dfn.flagcl == '<') | (dfn.cl < 0), 'cl'] = NaN\n+ dfn.loc[(dfn.flagno3 == '<') | (dfn.no3 < 0), 'no3'] = NaN\n+ dfn.loc[(dfn.flagnh4 == '<') | (dfn.nh4 < 0), 'nh4'] = NaN\n+ dfn.loc[(dfn.flagk == '<') | (dfn.k < 0), 'k'] = NaN\n+ dfn.loc[(dfn.flagna == '<') | (dfn.na < 0), 'na'] = NaN\n+ dfn.loc[(dfn.flagca == '<') | (dfn.ca < 0), 'ca'] = NaN\n+ return dfn\n+\n+ def read_mdn(self, url):\n+ print('Reading NADP-MDN Data...')\n+ # header = self.get_columns()\n+ df = pd.read_csv(url, infer_datetime_format=True, parse_dates=[1, 2])\n+ df.columns = [i.lower() for i in df.columns]\n+ df.rename(\n+ columns={\n+ 'dateon': 'time',\n+ 'dateoff': 'time_off'\n+ }, inplace=True)\n+ try:\n+ meta = pd.read_csv('https://bit.ly/2Lq6kgq')\n+ meta.drop(['startdate', 'stopdate'], axis=1, inplace=True)\n+ except Exception:\n+ meta = pd.read_csv(self.__path__ + '/../../data/mdn-sites.csv')\n+ meta.drop(['startdate', 'stopdate'], axis=1, inplace=True)\n+ meta.columns = [i.lower() for i in meta.columns]\n+ dfn = pd.merge(df, meta, on='siteid', how='left')\n+ dfn.dropna(subset=['latitude', 'longitude'], inplace=True)\n+ dfn.loc[dfn.qr == 'C',\n+ ['rgppt', 'svol', 'subppt', 'hgconc', 'hgdep']] = NaN\n+ return dfn\n+\n+ def read_airmon(self, url):\n+ print('Reading NADP-AIRMoN Data...')\n+ # header = self.get_columns()\n+ df = pd.read_csv(url, infer_datetime_format=True, parse_dates=[2, 3])\n+ df.columns = [i.lower() for i in df.columns]\n+ df.rename(\n+ columns={\n+ 'dateon': 'time',\n+ 'dateoff': 'time_off'\n+ }, inplace=True)\n+ try:\n+ meta = pd.read_csv('https://bit.ly/2xMlgTW')\n+ meta.drop(['startdate', 'stopdate'], axis=1, inplace=True)\n+ except Exception:\n+ meta = pd.read_csv(self.__path__ + '/../../data/airmon-sites.csv')\n+ meta.drop(['startdate', 'stopdate'], axis=1, inplace=True)\n+ meta.columns = [i.lower() for i in meta.columns]\n+ dfn = pd.merge(df, meta, on='siteid', how='left')\n+ dfn.dropna(subset=['latitude', 'longitude'], inplace=True)\n+ dfn.loc[dfn.qrcode == 'C', [\n+ 'subppt', 'pptnws', 'pptbel', 'svol', 'ca', 'mg', 'k', 'na', 'nh4',\n+ 'no3', 'cl', 'so4', 'po4', 'phlab', 'phfield', 'conduclab',\n+ 'conducfield'\n+ ]] = NaN\n+ return dfn\n+\n+ def read_amon(self, url):\n+ print('Reading NADP-AMoN Data...')\n+ # header = self.get_columns()\n+ df = pd.read_csv(url, infer_datetime_format=True, parse_dates=[2, 3])\n+ df.columns = [i.lower() for i in df.columns]\n+ df.rename(\n+ columns={\n+ 'startdate': 'time',\n+ 'enddate': 'time_off'\n+ }, inplace=True)\n+ try:\n+ meta = pd.read_csv('https://bit.ly/2sJmkCg')\n+ meta.drop(['startdate', 'stopdate'], axis=1, inplace=True)\n+ except Exception:\n+ meta = pd.read_csv(self.__path__ + '/../../data/amon-sites.csv')\n+ meta.drop(['startdate', 'stopdate'], axis=1, inplace=True)\n+ meta.columns = [i.lower() for i in meta.columns]\n+ dfn = pd.merge(df, meta, on='siteid', how='left')\n+ dfn.dropna(subset=['latitude', 'longitude'], inplace=True)\n+ dfn.loc[dfn.qr == 'C', ['airvol', 'conc']] = NaN\n+ return dfn
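\n+\n+ # Example (sketch): weekly NTN data merged with site metadata:\n+ # >>> n = NADP()\n+ # >>> dates = n.set_daterange(begin='2018-01-01', end='2018-02-01')\n+ # >>> df = n.add_data(dates, network='NTN')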
\n+\n+ def read_amnet(self, url):\n+ print('Reading NADP-AMNet Data...')\n+ # header = self.get_columns()\n+ df = pd.read_csv(url, infer_datetime_format=True, parse_dates=[2, 3])\n+ df.columns = [i.lower() for i in df.columns]\n+ df.rename(\n+ columns={\n+ 'startdate': 'time',\n+ 'enddate': 'time_off'\n+ }, inplace=True)\n+ try:\n+ meta = pd.read_csv('https://bit.ly/2sJmkCg')\n+ meta.drop(['startdate', 'stopdate'], axis=1, inplace=True)\n+ except Exception:\n+ meta = pd.read_csv(self.__path__ + '/../../data/amnet-sites.csv')\n+ meta.drop(['startdate', 'stopdate'], axis=1, inplace=True)\n+ meta.columns = [i.lower() for i in meta.columns]\n+ dfn = pd.merge(df, meta, on='siteid', how='left')\n+ dfn.dropna(subset=['latitude', 'longitude'], inplace=True)\n+ dfn.loc[dfn.qr == 'C', ['airvol', 'conc']] = NaN\n+ return dfn\n+\n+ def add_data(self, dates, network='NTN', siteid=None, weekly=True):\n+ self.weekly = weekly # build_url relies on the weekly/annual switch\n+ url = self.build_url(network=network, siteid=siteid)\n+ if network.lower() == 'ntn':\n+ df = self.read_ntn(url)\n+ elif network.lower() == 'mdn':\n+ df = self.read_mdn(url)\n+ elif network.lower() == 'amon':\n+ df = self.read_amon(url)\n+ elif network.lower() == 'airmon':\n+ df = self.read_airmon(url)\n+ else:\n+ df = self.read_amnet(url)\n+ self.df = df\n+ self.df = self.df.loc[(self.df.time >= dates.min())\n+ & (self.df.time_off <= dates.max())]\n+\n+ return df\n+\n+ def set_daterange(self, begin='', end=''):\n+ dates = pd.date_range(start=begin, end=end, freq='H')\n+ self.dates = dates\n+ return dates
\ndiff --git a/monet/obs/obs_util.py b/monet/obs/obs_util.py\nnew file mode 100644\n--- /dev/null\n+++ b/monet/obs/obs_util.py\n@@ -0,0 +1,199 @@\n+""" Obs Utilities """\n+\n+from __future__ import print_function\n+import numpy as np\n+import datetime\n+import sys\n+\n+\n+def find_near(df, latlon, distance=100, sid='site_num', drange=None):\n+ """find all values in the df dataframe column sid which are within distance\n+ (km) of lat lon point. output dictionary with key as value in column sid\n+ and value tuple (latitude, longitude)\n+\n+ Parameters\n+ ----------\n+ latlon : tuple or list\n+ (latitude, longitude)\n+ distance : float\n+ kilometers\n+ sid: string\n+ name of column\n+ drange: tuple or list with two datetimes\n+ consider rows with dates between these two dates.\n+\n+ Returns\n+ --------\n+ lhash: dictionary\n+ key is the value in column sid and value is (latitude, longitude)\n+ position.\n+ """\n+ degree2km = 111\n+ if drange:\n+ df = timefilter(df.copy(), drange)\n+ lhash = get_lhash(df, sid)\n+ # iterate over a copy of the keys since entries may be popped\n+ for key in list(lhash.keys()):\n+ xd = (lhash[key][1] - latlon[1]) * degree2km * np.cos(\n+ latlon[0] * np.pi / 180.0)\n+ yd = (lhash[key][0] - latlon[0]) * degree2km\n+ dd = (xd**2 + yd**2)**0.5\n+ if dd > distance:\n+ lhash.pop(key, None)\n+ return lhash\n+\n+\n+def write_datem(df,\n+ obscolumn='obs',\n+ dname='datemfile.txt',\n+ sitename='1',\n+ info=None,\n+ drange=None):\n+ """returns string in datem format (See NOAA ARL).\n+ datem format has the following columns:\n+ Year, Month, Day, Hour, Duration, lat, lon, Concentration (units), site\n+ id, height\n+\n+ Parameters\n+ ----------\n+ obscolumn : string\n+ name of column with values to write in the Concentration column.\n+ dname : string\n+ name of the output file.\n+ sitename : string.\n+ If it is the name of a column in the dataframe then\n+ that column will be used to generate the site name column in the\n+ datem file.
If it is not the name of a column, then the string will\n+ be used as the site name.\n+ info : string\n+ will be written to the second line of the header.\n+ drange : list of two time stamp objects.\n+\n+ Returns\n+ --------\n+ runstring: string\n+ string in datem format.\n+ """\n+ if drange:\n+ df = timefilter(df, drange)\n+\n+ units = df['units'].tolist()\n+ units = list(set(units))\n+ sdate = datetime.datetime(2010, 1, 1, 0)\n+ if len(units) > 1:\n+ print('WARNING, more than one type of unit ', units)\n+ ustr = ''\n+ for uuu in units:\n+ ustr += uuu + ' '\n+ runstring = "Beginning date " + sdate.strftime(\n+ "%Y %m %d %H:%M") + " UTC ---"\n+ runstring += 'Information '\n+ if info:\n+ runstring += info + "\\n"\n+ else:\n+ runstring += "\\n"\n+ runstring += 'Year, Month, Day, Hour:Minute (UTC), Dur(hhmm) , LAT, LON, Concentration (' + \\\n+ ustr + "), sid, height\\n"\n+ lat = df['latitude']\n+ lon = df['longitude']\n+ cval = df[obscolumn]\n+ t1 = df['time']\n+ duration = ' 0100 '\n+ height = '20'\n+ if sitename in df.columns.values:\n+ sval = df[sitename]\n+ else:\n+ sval = [sitename] * len(cval)\n+ for val in zip(t1, lat, lon, cval, sval):\n+ runstring += val[0].strftime('%Y %m %d %H%M') + duration\n+ try:\n+ runstring += str(val[1]) + ' ' + str(val[2]) + ' '\n+ except Exception:\n+ print('WARNING1', val[1])\n+ print(val[2])\n+ print(type(val[1]))\n+ print(type(val[2]))\n+ sys.exit()\n+ if isinstance(val[4], str):\n+ runstring += "{:.3f}".format(\n+ val[3]) + ' ' + val[4] + ' ' + height + "\\n"\n+ else:\n+ runstring += "{:.3f}".format(val[3]) + ' ' + \\\n+ "{0:d}".format(val[4]) + ' ' + height + "\\n"\n+\n+ with open(dname, 'w') as fid:\n+ fid.write(runstring)\n+ return runstring\n+\n+\n+def dropna(df, inplace=True):\n+ """remove columns which have all Nans.\n+ TO DO: is this needed?"""\n+ return df.dropna(axis=1, inplace=inplace)\n+\n+\n+def get_lhash(df, idn):\n+ """returns a dictionary with the key as the input column value and the\n+ value a tuple of (lat, lon). Useful for getting lat lon locations of\n+ different sites in a dataframe.\n+ """\n+ if 'latitude' in list(df.columns.values):\n+ dftemp = df.copy()\n+ pairs = zip(dftemp[idn], zip(dftemp['latitude'], dftemp['longitude']))\n+ pairs = list(set(pairs))\n+ lhash = dict(pairs) # key is the site id and value is (lat, lon)\n+ return lhash\n+\n+\n+def summarize(df, verbose=False):\n+ """prints list of columns. if verbose prints list of unique values in each\n+ column"""\n+ columns = list(df.columns.values)\n+ if verbose:\n+ for ccc in columns:\n+ print(ccc)\n+ print(df[ccc].unique())\n+ print('-------------------------------')\n+ for ccc in columns:\n+ print(ccc)\n+\n+\n+def latlonfilter(df, llcrnr, urcrnr):\n+ """\n+ removes rows from df with latitude longitude outside of the box\n+ described by llcrnr (lower left corner) and urcrnr (upper right corner)\n+ Parameters\n+ ----------\n+ llcrnr : tuple\n+ lower left corner.
(latitude, longitude)\n+ urcrnr : tuple\n+ upper right corner (latitude, longitude)\n+\n+ """\n+ lat1 = llcrnr[0]\n+ lat2 = urcrnr[0]\n+ lon1 = llcrnr[1]\n+ lon2 = urcrnr[1]\n+ df = df[df['latitude'] < lat2]\n+ df = df[df['latitude'] > lat1]\n+ df = df[df['longitude'] > lon1]\n+ df = df[df['longitude'] < lon2]\n+ return df\n+\n+\n+def timefilter(df, daterange, inplace=True):\n+ """removes rows with dates outside of the daterange from df\n+ Parameters\n+ ----------\n+ daterange: tuple\n+ (datetime, datetime)\n+ inplace: boolean\n+ currently unused\n+ """\n+ df = df[df['time'] > daterange[0]]\n+ df = df[df['time'] < daterange[1]]\n+ return df
\ndiff --git a/monet/obs/tolnet_mod.py b/monet/obs/tolnet_mod.py\nnew file mode 100644\n--- /dev/null\n+++ b/monet/obs/tolnet_mod.py\n@@ -0,0 +1,116 @@\n+import os\n+from builtins import object\n+\n+import pandas as pd\n+import xarray as xr\n+\n+\n+class TOLNet(object):\n+ """Short summary.\n+\n+ Attributes\n+ ----------\n+ objtype : type\n+ Description of attribute `objtype`.\n+ cwd : type\n+ Description of attribute `cwd`.\n+ dates : type\n+ Description of attribute `dates`.\n+ dset : type\n+ Description of attribute `dset`.\n+ daily : type\n+ Description of attribute `daily`.\n+\n+ """\n+\n+ def __init__(self):\n+ self.objtype = 'TOLNET'\n+ self.cwd = os.getcwd()\n+ self.dates = pd.date_range(\n+ start='2017-09-25', end='2017-09-26', freq='H')\n+ self.dset = None\n+ self.daily = False\n+\n+ def add_data(self, fname):\n+ """Short summary.\n+\n+ Parameters\n+ ----------\n+ fname : type\n+ Description of parameter `fname`.\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ """\n+ from h5py import File\n+ f = File(fname, 'r')\n+ atts = f['INSTRUMENT_ATTRIBUTES']\n+ data = f['DATA']\n+ self.dset = self.make_xarray_dataset(data, atts)\n+ return self.dset\n+\n+ @staticmethod\n+ def make_xarray_dataset(data, atts):\n+ """Short summary.\n+\n+ Parameters\n+ ----------\n+ data : type\n+ Description of parameter `data`.\n+ atts : type\n+ Description of parameter `atts`.\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ """\n+ from numpy import NaN\n+ # altitude variables\n+ alt = data['ALT'][:].squeeze()\n+ altvars = [\n+ 'AirND', 'AirNDUncert', 'ChRange', 'Press', 'Temp', 'TempUncert',\n+ 'O3NDResol', 'PressUncert'\n+ ]\n+ # time variables\n+ tseries = pd.Series(data["TIME_MID_UT_UNIX"][:].squeeze())\n+ time = pd.Series(pd.to_datetime(tseries, unit='ms'), name='time')\n+ tseries = pd.Series(data["TIME_START_UT_UNIX"][:].squeeze())\n+ # stime = pd.to_datetime(tseries, unit='ms')\n+ tseries = pd.Series(data["TIME_STOP_UT_UNIX"][:].squeeze())\n+ # etime = pd.to_datetime(tseries, unit='ms')\n+ # all other variables\n+ ovars = ['O3MR', 'O3ND', 'O3NDUncert', 'O3MRUncert', 'Precision']\n+ dset = {}\n+ for i in ovars:\n+ val = data[i][:]\n+ val[data[i][:] < -1.]
= NaN\n+ dset[i] = (['z', 't'], val)\n+ for i in altvars:\n+ dset[i] = (['z'], data[i][:].squeeze())\n+\n+ # coords = {'time': time, 'z': alt, 'start_time': stime, 'end_time': etime}\n+ attributes = {}\n+ for i in list(atts.attrs.keys()):\n+ attributes[i] = atts.attrs[i]\n+ dataset = xr.Dataset(data_vars=dset, attrs=attributes)\n+ dataset['time'] = (['t'], time)\n+ dataset['t'] = dataset['time']\n+ dataset = dataset.drop('time').rename({'t': 'time'})\n+ dataset['z'] = alt\n+ # get latlon\n+ a, b = dataset.Location_Latitude.decode('ascii').split()\n+ if b == 'S':\n+ dataset['latitude'] = -1 * float(a)\n+ else:\n+ dataset['latitude'] = float(a)\n+ a, b = dataset.Location_Longitude.decode('ascii').split()\n+ if b == 'W':\n+ dataset['longitude'] = -1 * float(a)\n+ else:\n+ dataset['longitude'] = float(a)\n+ return dataset\ndiff --git a/monet/plots/__init__.py b/monet/plots/__init__.py\n--- a/monet/plots/__init__.py\n+++ b/monet/plots/__init__.py\n@@ -1,9 +1,10 @@\n from __future__ import absolute_import, print_function\n \n from . import colorbars, plots, taylordiagram\n+from .plots import *\n+from .mapgen import *\n+from .colorbars import *\n \n-__all__ = ['colorbars', 'plots', 'taylordiagram']\n-\n-__name__ = 'plots'\n+__all__ = ['colorbars', 'plots', 'taylordiagram', 'mapgen']\n \n # This is the driver for all verify objects\ndiff --git a/monet/plots/colorbars.py b/monet/plots/colorbars.py\n--- a/monet/plots/colorbars.py\n+++ b/monet/plots/colorbars.py\n@@ -1,31 +1,36 @@\n+\"\"\" colorbar helper functions\"\"\"\n from builtins import range\n import matplotlib.colors as mcolors\n import matplotlib.pyplot as plt\n-from matplotlib import cm\n from numpy import arange, linspace, vstack\n \n \n-def colorbar_index(ncolors, cmap, minval=None, maxval=None, dtype='int', basemap=None):\n+def colorbar_index(ncolors,\n+ cmap,\n+ minval=None,\n+ maxval=None,\n+ dtype='int',\n+ basemap=None):\n import matplotlib.cm as cm\n import numpy as np\n cmap = cmap_discretize(cmap, ncolors)\n mappable = cm.ScalarMappable(cmap=cmap)\n mappable.set_array([])\n mappable.set_clim(-0.5, ncolors + 0.5)\n- if type(basemap) is not None:\n+ if basemap is not None:\n colorbar = basemap.colorbar(mappable, format='%1.2g')\n else:\n colorbar = plt.colorbar(mappable, format='%1.2g', fontsize=12)\n colorbar.set_ticks(np.linspace(0, ncolors, ncolors))\n- if (type(minval) is None) & (type(maxval) is not None):\n+ if (minval is None) & (maxval is not None):\n colorbar.set_ticklabels(\n np.around(np.linspace(0, maxval, ncolors).astype(dtype), 2))\n- elif (type(minval) is None) & (type(maxval) is None):\n+ elif (minval is None) & (maxval is None):\n colorbar.set_ticklabels(\n np.around(np.linspace(0, ncolors, ncolors).astype(dtype), 2))\n else:\n- colorbar.set_ticklabels(np.around(np.linspace(\n- minval, maxval, ncolors).astype(dtype), 2))\n+ colorbar.set_ticklabels(\n+ np.around(np.linspace(minval, maxval, ncolors).astype(dtype), 2))\n \n return colorbar, cmap\n \n@@ -57,93 +62,138 @@ def cmap_discretize(cmap, N):\n # Return colormap object.\n return mcolors.LinearSegmentedColormap(cmap.name + \"_%d\" % N, cdict, 1024)\n \n-\n-def o3cmap():\n- # This function returns the colormap and bins for the ozone spatial plots\n- # this is designed to have a vmin =0 and vmax = 140\n- # return cmap,bins\n- colors1 = cm.viridis(linspace(0, 1, 128))\n- colors2 = cm.OrRd(linspace(.2, 1, 128))\n- colors = vstack((colors1, colors2))\n- return mcolors.LinearSegmentedColormap.from_list('o3cmap', colors), arange(0, 140.5, .5)\n-\n-\n-def pm25cmap():\n- 
# This function returns the colormap and bins for the PM spatial plots\n- # this is designed to have a vmin =0 and vmax = 140\n- # return cmap,bins\n- colors1 = cm.viridis(linspace(0, 1, 128))\n- colors2 = cm.OrRd(linspace(.2, 1, 128))\n- colors = vstack((colors1, colors2))\n- cc = mcolors.LinearSegmentedColormap.from_list('pm25cmap', colors), arange(0, 70.2, .2)\n- return cc\n-\n-\n-def wscmap():\n- # This function returns the colormap and bins for the PM spatial plots\n- # this is designed to have a vmin =0 and vmax = 140\n- # return cmap,bins\n- colors1 = cm.viridis(linspace(0, 1, 128))\n- colors2 = cm.OrRd(linspace(.2, 1, 128))\n- colors = vstack((colors1, colors2))\n- return mcolors.LinearSegmentedColormap.from_list('wscmap', colors), arange(0, 40.2, .2)\n-\n-\n-def tempcmap():\n- # This function returns the colormap and bins for the PM spatial plots\n- # this is designed to have a vmin =0 and vmax = 140\n- # return cmap,bins\n- colors1 = cm.viridis(linspace(0, 1, 128))\n- colors2 = cm.OrRd(linspace(.2, 1, 128))\n- colors = vstack((colors1, colors2))\n- return mcolors.LinearSegmentedColormap.from_list('tempcmap', colors), arange(250, 320.5, .5)\n-\n-\n-def sradcmap():\n- # This function returns the colormap and bins for the PM spatial plots\n- # this is designed to have a vmin =0 and vmax = 140\n- # return cmap,bins\n- colors1 = cm.viridis(linspace(0, 1, 128))\n- colors2 = cm.plasma(linspace(.2, 1, 128))\n- colors = vstack((colors1, colors2))\n- return mcolors.LinearSegmentedColormap.from_list('sradcmap', colors), arange(0, 1410., 10)\n-\n-\n-def noxcmap():\n- # This function returns the colormap and bins for the NO2/NO/NOx spatial plots\n- # this is designed to have a vmin =0 and vmax = 140\n- # return cmap,bins\n- colors1 = cm.viridis(linspace(0, 1, 128))\n- colors2 = cm.plasma_r(linspace(.042, .75, 128))\n- colors = vstack((colors1, colors2))\n- return mcolors.LinearSegmentedColormap.from_list('noxcmap', colors), arange(0, 40.2, .2)\n-\n-\n-def rhcmap():\n- # This function returns the colormap and bins for the NO2/NO/NOx spatial plots\n- # this is designed to have a vmin =0 and vmax = 140\n- # return cmap,bins\n- colors1 = cm.viridis(linspace(0, 1, 128))\n- colors2 = cm.plasma_r(linspace(.042, .75, 128))\n- colors = vstack((colors1, colors2))\n- return mcolors.LinearSegmentedColormap.from_list('noxcmap', colors), arange(0, 100.5, .5)\n-\n-\n-def so2cmap():\n- # This function returns the colormap and bins for the NO2/NO/NOx spatial plots\n- # this is designed to have a vmin =0 and vmax = 140\n- # return cmap,bins\n- colors1 = cm.viridis(linspace(0, 1, 128))\n- colors2 = cm.plasma_r(linspace(.042, .75, 128))\n- colors = vstack((colors1, colors2))\n- return mcolors.LinearSegmentedColormap.from_list('noxcmap', colors), arange(0, 14.1, .1)\n-\n-\n-def pm10cmap():\n- # This function returns the colormap and bins for the NO2/NO/NOx spatial plots\n- # this is designed to have a vmin =0 and vmax = 140\n- # return cmap,bins\n- colors1 = cm.viridis(linspace(0, 1, 128))\n- colors2 = cm.plasma_r(linspace(.042, .75, 128))\n- colors = vstack((colors1, colors2))\n- return mcolors.LinearSegmentedColormap.from_list('noxcmap', colors), arange(0, 150.5, .5)\n+# def o3cmap():\n+# import matplotlib.cm as cm\n+# # This function returns the colormap and bins for the ozone spatial plots\n+# # this is designed to have a vmin =0 and vmax = 140\n+# # return cmap,bins\n+# colors1 = cm.viridis(linspace(0, 1, 128))\n+# colors2 = cm.OrRd(linspace(.2, 1, 128))\n+# colors = vstack((colors1, colors2))\n+# return 
mcolors.LinearSegmentedColormap.from_list('o3cmap', colors), arange(\n+# 0, 140.5, .5)\n+#\n+#\n+# def pm25cmap():\n+# from matplotlib.cm import viridis, OrRd\n+# # This function returns the colormap and bins for the PM spatial plots\n+# # this is designed to have a vmin =0 and vmax = 140\n+# # return cmap,bins\n+# colors1 = viridis(linspace(0, 1, 128))\n+# colors2 = OrRd(linspace(.2, 1, 128))\n+# colors = vstack((colors1, colors2))\n+# cc = mcolors.LinearSegmentedColormap.from_list('pm25cmap', colors), arange(\n+# 0, 70.2, .2)\n+# return cc\n+#\n+#\n+# def wscmap():\n+# from matplotlib.cm import viridis, OrRd\n+# # This function returns the colormap and bins for the PM spatial plots\n+# # this is designed to have a vmin =0 and vmax = 140\n+# # return cmap,bins\n+# colors1 = viridis(linspace(0, 1, 128))\n+# colors2 = OrRd(linspace(.2, 1, 128))\n+# colors = vstack((colors1, colors2))\n+# return mcolors.LinearSegmentedColormap.from_list('wscmap', colors), arange(\n+# 0, 40.2, .2)\n+#\n+#\n+# def tempcmap():\n+# from matplotlib.cm import viridis, OrRd\n+# # This function returns the colormap and bins for the PM spatial plots\n+# # this is designed to have a vmin =0 and vmax = 140\n+# # return cmap,bins\n+# colors1 = viridis(linspace(0, 1, 128))\n+# colors2 = OrRd(linspace(.2, 1, 128))\n+# colors = vstack((colors1, colors2))\n+# return mcolors.LinearSegmentedColormap.from_list('tempcmap',\n+# colors), arange(\n+# 250, 320.5, .5)\n+#\n+#\n+# def sradcmap():\n+# from matplotlib.cm import viridis, plasma_r\n+# # This function returns the colormap and bins for the PM spatial plots\n+# # this is designed to have a vmin =0 and vmax = 140\n+# # return cmap,bins\n+# colors1 = viridis(linspace(0, 1, 128))\n+# colors2 = plasma_r(linspace(.2, 1, 128))\n+# colors = vstack((colors1, colors2))\n+# return mcolors.LinearSegmentedColormap.from_list('sradcmap',\n+# colors), arange(\n+# 0, 1410., 10)\n+#\n+#\n+# def noxcmap():\n+# \"\"\"Short summary.\n+#\n+# Returns\n+# -------\n+# type\n+# Description of returned object.\n+#\n+# \"\"\"\n+# from matplotlib.cm import viridis, plasma_r\n+# # This function returns the colormap and bins for the NO2/NO/NOx spatial plots\n+# # this is designed to have a vmin =0 and vmax = 140\n+# # return cmap,bins\n+# colors1 = viridis(linspace(0, 1, 128))\n+# colors2 = plasma_r(linspace(.042, .75, 128))\n+# colors = vstack((colors1, colors2))\n+# return mcolors.LinearSegmentedColormap.from_list('noxcmap',\n+# colors), arange(\n+# 0, 40.2, .2)\n+#\n+#\n+# def rhcmap():\n+# \"\"\"Short summary.\n+#\n+# Returns\n+# -------\n+# type\n+# Description of returned object.\n+#\n+# \"\"\"\n+# from matplotlib.cm import viridis, plasma_r\n+# # This function returns the colormap and bins for the NO2/NO/NOx spatial\n+# # plots\n+# # this is designed to have a vmin =0 and vmax = 140\n+# # return cmap,bins\n+# colors1 = viridis(linspace(0, 1, 128))\n+# colors2 = plasma_r(linspace(.042, .75, 128))\n+# colors = vstack((colors1, colors2))\n+# return mcolors.LinearSegmentedColormap.from_list('noxcmap',\n+# colors), arange(\n+# 0, 100.5, .5)\n+#\n+#\n+# def so2cmap():\n+# \"\"\"Short summary.\n+#\n+# Returns\n+# -------\n+# type\n+# Description of returned object.\n+#\n+# \"\"\"\n+# from matplotlib.cm import viridis, plasma_r\n+# colors1 = viridis(linspace(0, 1, 128))\n+# colors2 = plasma_r(linspace(.042, .75, 128))\n+# colors = vstack((colors1, colors2))\n+# return mcolors.LinearSegmentedColormap.from_list('noxcmap',\n+# colors), arange(\n+# 0, 14.1, .1)\n+#\n+#\n+# def pm10cmap():\n+# import 
matplotlib.cm as cm\n+# # This function returns the colormap and bins for the NO2/NO/NOx spatial plots\n+# # this is designed to have a vmin =0 and vmax = 140\n+# # return cmap,bins\n+# colors1 = cm.viridis(linspace(0, 1, 128))\n+# colors2 = cm.plasma_r(linspace(.042, .75, 128))\n+# colors = vstack((colors1, colors2))\n+# return mcolors.LinearSegmentedColormap.from_list('noxcmap',\n+# colors), arange(\n+# 0, 150.5, .5)
\ndiff --git a/monet/plots/mapgen.py b/monet/plots/mapgen.py\nnew file mode 100644\n--- /dev/null\n+++ b/monet/plots/mapgen.py\n@@ -0,0 +1,80 @@\n+""" map utilities """\n+import cartopy.crs as ccrs\n+import cartopy.feature as cfeature\n+import matplotlib.pyplot as plt\n+\n+\n+def draw_map(ax=None,\n+ crs=None,\n+ natural_earth=False,\n+ coastlines=True,\n+ states=False,\n+ countries=True,\n+ resolution='10m',\n+ extent=None,\n+ **kwargs):\n+ """Short summary.\n+\n+ Parameters\n+ ----------\n+ ax : type\n+ Description of parameter `ax` (the default is None).\n+ crs : cartopy projection\n+ Description of parameter `crs` (the default is None,\n+ which uses ccrs.PlateCarree()).\n+ natural_earth : bool\n+ Description of parameter `natural_earth` (the default is False).\n+ coastlines : bool\n+ Description of parameter `coastlines` (the default is True).\n+ states : bool\n+ Description of parameter `states` (the default is False).\n+ countries : bool\n+ Description of parameter `countries` (the default is True).\n+ resolution : str\n+ Description of parameter `resolution` (the default is '10m').\n+ extent : [lon_min,lon_max,lat_min,lat_max]\n+ Description of parameter `extent` (the default is None).\n+\n+ Returns\n+ -------\n+ type\n+ Description of returned object.\n+\n+ """\n+ con2 = 'subplot_kw' in kwargs and 'projection' not in kwargs['subplot_kw']\n+ if ax is None and crs is None:\n+ if 'subplot_kw' not in kwargs:\n+ kwargs['subplot_kw'] = {'projection': ccrs.PlateCarree()}\n+ elif con2:\n+ kwargs['subplot_kw']['projection'] = ccrs.PlateCarree()\n+ f, ax = plt.subplots(figsize=(10, 5), **kwargs)\n+ elif ax is None and crs is not None:\n+ f, ax = plt.subplots(figsize=(10, 5), subplot_kw={'projection': crs})\n+ else:\n+ # an axes handle was passed in; draw on it as given\n+ f = ax.figure\n+ if natural_earth:\n+ # ax.stock_img()\n+ ax.add_feature(cfeature.OCEAN)\n+ ax.add_feature(cfeature.LAND)\n+ ax.add_feature(cfeature.LAKES)\n+ ax.add_feature(cfeature.RIVERS)\n+\n+ if states:\n+ states_provinces = cfeature.NaturalEarthFeature(\n+ category='cultural',\n+ name='admin_1_states_provinces_lines',\n+ scale=resolution,\n+ facecolor='none')\n+ ax.add_feature(states_provinces, edgecolor='black')\n+\n+ if coastlines:\n+ ax.coastlines(resolution)\n+\n+ if countries:\n+ ax.add_feature(cfeature.BORDERS)\n+\n+ if extent is not None:\n+ ax.set_extent(extent)\n+\n+ return ax
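\n+\n+# Example usage (a sketch; requires cartopy):\n+# >>> from monet.plots.mapgen import draw_map\n+# >>> ax = draw_map(states=True, extent=[-130, -60, 20, 55])\n+# >>> # ax is a cartopy GeoAxes in the PlateCarree projection by default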
\ndiff --git a/monet/plots/plots.py b/monet/plots/plots.py\n--- a/monet/plots/plots.py\n+++ b/monet/plots/plots.py\n@@ -1,10 +1,9 @@\n+"""plotting routines"""\n import matplotlib.pyplot as plt\n import seaborn as sns\n \n-import taylordiagram as td\n-from colorbars import colorbar_index\n-\n-from ..util import mystats\n+from . import taylordiagram as td\n+from .colorbars import colorbar_index\n \n # colors = ['#1e90ff','#045C5C','#00A847','#DB4291','#BB7E5D']\n colors = ['#1e90ff', '#DA70D6', '#228B22', '#FA8072', '#FF1493']\n@@ -14,27 +13,38 @@\n \n \n # CMAQ Spatial Plots\n+def make_spatial_plot(modelvar,\n+ m,\n+ dpi=None,\n+ plotargs={},\n+ ncolors=15,\n+ discrete=False):\n \n-\n-def make_spatial_plot(cmaqvar, m, dpi=None, plotargs={}, ncolors=15, discrete=False):\n # create figure\n f, ax = plt.subplots(1, 1, figsize=(11, 6), frameon=False)\n # determine colorbar\n if 'cmap' not in plotargs:\n plotargs['cmap'] = 'viridis'\n if discrete and 'vmin' in plotargs and 'vmax' in plotargs:\n- c, cmap = colorbar_index(ncolors, plotargs['cmap'], minval=plotargs['vmin'], maxval=plotargs['vmax'], basemap=m)\n+\n+ c, cmap = colorbar_index(\n+ ncolors,\n+ plotargs['cmap'],\n+ minval=plotargs['vmin'],\n+ maxval=plotargs['vmax'],\n+ basemap=m)\n plotargs['cmap'] = cmap\n- m.imshow(cmaqvar, **plotargs)\n+ m.imshow(modelvar, **plotargs)\n vmin, vmax = plotargs['vmin'], plotargs['vmax']\n elif discrete:\n- temp = m.imshow(cmaqvar, **plotargs)\n+ temp = m.imshow(modelvar, **plotargs)\n vmin, vmax = temp.get_clim()\n- c, cmap = colorbar_index(ncolors, plotargs['cmap'], minval=vmin, maxval=vmax, basemap=m)\n+ c, cmap = colorbar_index(\n+ ncolors, plotargs['cmap'], minval=vmin, maxval=vmax, basemap=m)\n plotargs['cmap'] = cmap\n- m.imshow(cmaqvar, vmin=vmin, vmax=vmax, **plotargs)\n+ m.imshow(modelvar, vmin=vmin, vmax=vmax, **plotargs)\n else:\n- temp = m.imshow(cmaqvar, **plotargs)\n+ temp = m.imshow(modelvar, **plotargs)\n c = m.colorbar()\n vmin, vmax = temp.get_clim()\n cmap = plotargs['cmap']\n@@ -44,10 +54,25 @@ def make_spatial_plot(cmaqvar, m, dpi=None, plotargs={}, ncolors=15, discrete=Fa\n m.drawcountries()\n return f, ax, c, cmap, vmin, vmax\n \n+def spatial(modelvar, **kwargs):\n+ if kwargs.get('ax') is None:\n+ f, ax = plt.subplots(1, 1, figsize=(11, 6), frameon=False)\n+ kwargs['ax'] = ax\n+ ax = modelvar.plot(**kwargs)\n+ return ax\n+\n \n-def make_spatial_contours(cmaqvar, gridobj, date, m, dpi=None, savename='', discrete=True, ncolors=None, dtype='int',\n+def make_spatial_contours(modelvar,\n+ gridobj,\n+ date,\n+ m,\n+ dpi=None,\n+ savename='',\n+ discrete=True,\n+ ncolors=None,\n+ dtype='int',\n **kwargs):\n- fig = plt.figure(figsize=(11, 6), frameon=False)\n+ plt.figure(figsize=(11, 6), frameon=False)\n lat = gridobj.variables['LAT'][0, 0, :, :].squeeze()\n lon = gridobj.variables['LON'][0, 0, :, :].squeeze()\n # define map and draw boundaries\n@@ -56,13 +81,17 @@ def make_spatial_contours(cmaqvar, gridobj, date, m, dpi=None, savename='', disc\n m.drawcountries()\n x, y = m(lon, lat)\n plt.axis('off')\n- m.contourf(x, y, cmaqvar, **kwargs)\n+ m.contourf(x, y, modelvar, **kwargs)\n cmap = kwargs['cmap']\n levels = kwargs['levels']\n if discrete:\n- c, cmap = colorbar_index(ncolors, cmap, minval=levels[0], maxval=levels[-1], basemap=m, dtype=dtype)\n- # m.contourf(x, y, cmaqvar, **kwargs,cmap=cmap)\n- # c, cmap = colorbar_index(ncolors, cmap, minval=vmin, maxval=vmax)\n+ c, cmap = colorbar_index(\n+ ncolors,\n+ cmap,\n+ minval=levels[0],\n+ maxval=levels[-1],\n+ basemap=m,\n+ dtype=dtype)\n else:\n c = m.colorbar()\n titstring = date.strftime('%B %d %Y %H')\n@@ -76,13 +105,15 @@ def make_spatial_contours(cmaqvar, gridobj, date, m, dpi=None, savename='', disc\n \n \n def wind_quiver(ws, wdir, gridobj, m, **kwargs):\n- import tools\n+ from . 
import tools\n+\n lat = gridobj.variables['LAT'][0, 0, :, :].squeeze()\n lon = gridobj.variables['LON'][0, 0, :, :].squeeze()\n # define map and draw boundries\n x, y = m(lon, lat)\n u, v = tools.wsdir2uv(ws, wdir)\n- quiv = m.quiver(x[::15, ::15], y[::15, ::15], u[::15, ::15], v[::15, ::15], **kwargs)\n+ quiv = m.quiver(x[::15, ::15], y[::15, ::15], u[::15, ::15], v[::15, ::15],\n+ **kwargs)\n return quiv\n \n \n@@ -93,7 +124,8 @@ def wind_barbs(ws, wdir, gridobj, m, **kwargs):\n # define map and draw boundries\n x, y = m(lon, lat)\n u, v = tools.wsdir2uv(ws, wdir)\n- m.barbs(x[::15, ::15], y[::15, ::15], u[::15, ::15], v[::15, ::15], **kwargs)\n+ m.barbs(x[::15, ::15], y[::15, ::15], u[::15, ::15], v[::15, ::15],\n+ **kwargs)\n \n \n def normval(vmin, vmax, cmap):\n@@ -103,33 +135,47 @@ def normval(vmin, vmax, cmap):\n norm = BoundaryNorm(boundaries=bounds, ncolors=cmap.N)\n return norm\n \n+# def spatial_scatter(df, m, discrete=False, plotargs={}, create_cbar=True):\n+# from .colorbars import cmap_discretize\n+# x, y = m(df.longitude.values, df.Latitude.values)\n+# s = 20\n+# if create_cbar:\n+# if discrete:\n+# cmap = cmap_discretize(cmap, ncolors)\n+# # s = 20\n+# if (type(plotargs(vmin)) == None) | (type(plotargs(vmax)) == None):\n+# plt.scatter(x, y, c=df['Obs'].values, **plotargs)\n+# else:\n+# plt.scatter(x, y, c=df['Obs'].values, **plotargs)\n+# else:\n+# plt.scatter(x, y, c=df['Obs'].values, **plotargs)\n+# else:\n+# plt.scatter(x, y, c=df['Obs'].values, **plotargs)\n+\n+# def spatial_stat_scatter(df,\n+# m,\n+# date,\n+# stat=mystats.MB,\n+# ncolors=15,\n+# fact=1.5,\n+# cmap='RdYlBu_r'):\n+# new = df[df.datetime == date]\n+# x, y = m(new.longitude.values, new.latitude.values)\n+# cmap = cmap_discretize(cmap, ncolors)\n+# colors = new.CMAQ - new.Obs\n+# ss = (new.Obs - new.CMAQ).abs() * fact\n+\n+\n+def spatial_bias_scatter(df,\n+ m,\n+ date,\n+ vmin=None,\n+ vmax=None,\n+ savename='',\n+ ncolors=15,\n+ fact=1.5,\n+ cmap='RdBu_r'):\n \n-def spatial_scatter(df, m, discrete=False, plotargs={}, create_cbar=True):\n- x, y = m(df.Longitude.values, df.Latitude.values)\n- s = 20\n- if create_cbar:\n- if discrete:\n- cmap = cmap_discretize(cmap, ncolors)\n- # s = 20\n- if (type(plotargs(vmin)) == None) | (type(plotargs(vmax)) == None):\n- plt.scatter(x, y, c=df['Obs'].values, **plotargs)\n- else:\n- plt.scatter(x, y, c=df['Obs'].values, **plotargs)\n- else:\n- plt.scatter(x, y, c=df['Obs'].values, **plotargs)\n- else:\n- plt.scatter(x, y, c=df['Obs'].values, **plotargs)\n-\n-\n-def spatial_stat_scatter(df, m, date, stat=mystats.MB, ncolors=15, fact=1.5, cmap='RdYlBu_r'):\n- new = df[df.datetime == date]\n- x, y = m(new.Longitude.values, new.Latitude.values)\n- cmap = cmap_discretize(cmap, ncolors)\n- colors = new.CMAQ - new.Obs\n- ss = (new.Obs - new.CMAQ).abs() * fact\n-\n-\n-def spatial_bias_scatter(df, m, date, vmin=None, vmax=None, savename='', ncolors=15, fact=1.5, cmap='RdBu_r'):\n from scipy.stats import scoreatpercentile as score\n from numpy import around\n # plt.figure(figsize=(11, 6), frameon=False)\n@@ -138,40 +184,60 @@ def spatial_bias_scatter(df, m, date, vmin=None, vmax=None, savename='', ncolors\n diff = (df.CMAQ - df.Obs)\n top = around(score(diff.abs(), per=95))\n new = df[df.datetime == date]\n- x, y = m(new.Longitude.values, new.Latitude.values)\n- c, cmap = colorbar_index(ncolors, cmap, minval=top * -1, maxval=top, basemap=m)\n+ x, y = m(new.longitude.values, new.latitude.values)\n+ c, cmap = colorbar_index(\n+ ncolors, cmap, minval=top * -1, maxval=top, 
basemap=m)\n+\n c.ax.tick_params(labelsize=13)\n # cmap = cmap_discretize(cmap, ncolors)\n colors = new.CMAQ - new.Obs\n ss = (new.CMAQ - new.Obs).abs() / top * 100.\n ss[ss > 300] = 300.\n- plt.scatter(x, y, c=colors, s=ss, vmin=-1. * top, vmax=top, cmap=cmap, edgecolors='k', linewidths=.25, alpha=.7)\n- if savename != '':\n- plt.savefig(savename + date + '.jpg', dpi=75.)\n- plt.close()\n- return f, ax, c\n-\n+ plt.scatter(\n+ x,\n+ y,\n+ c=colors,\n+ s=ss,\n+ vmin=-1. * top,\n+ vmax=top,\n+ cmap=cmap,\n+ edgecolors='k',\n+ linewidths=.25,\n+ alpha=.7)\n\n-def eight_hr_spatial_scatter(df, m, date, savename=''):\n- fig = plt.figure(figsize=(11, 6), frameon=False)\n- m.drawcoastlines(linewidth=.3)\n- m.drawstates()\n- m.drawcountries()\n-\n- plt.axis('off')\n- new = df[df.datetime_local == date]\n- x, y = m(new.Longitude.values, new.Latitude.values)\n- cmap = plt.cm.get_cmap('plasma')\n- norm = normval(-40, 40., cmap)\n- ss = (new.Obs - new.CMAQ).abs() / top * 100.\n- colors = new.Obs - new.CMAQ\n- m.scatter(x, y, s=ss, c=colors, norm=norm, cmap=cmap)\n if savename != '':\n plt.savefig(savename + date + '.jpg', dpi=75.)\n plt.close()\n+ # the figure and axes are created implicitly by plt.scatter above\n+ return plt.gcf(), plt.gca(), c
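\n+\n+# Example (sketch): df holds paired model/obs values with columns CMAQ, Obs,\n+# datetime, latitude and longitude; m is a Basemap instance:\n+# >>> f, ax, c = spatial_bias_scatter(df, m, df.datetime.iloc[0])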
\n \n-\n-def timeseries_param(df, col='Obs', ax=None, sample='H', plotargs={}, fillargs={}, title='', label=None):\n+# def eight_hr_spatial_scatter(df, m, date, savename=''):\n+# fig = plt.figure(figsize=(11, 6), frameon=False)\n+# m.drawcoastlines(linewidth=.3)\n+# m.drawstates()\n+# m.drawcountries()\n+#\n+# plt.axis('off')\n+# new = df[df.datetime_local == date]\n+# x, y = m(new.longitude.values, new.latitude.values)\n+# cmap = plt.cm.get_cmap('plasma')\n+# norm = normval(-40, 40., cmap)\n+# ss = (new.Obs - new.CMAQ).abs() / top * 100.\n+# colors = new.Obs - new.CMAQ\n+# m.scatter(x, y, s=ss, c=colors, norm=norm, cmap=cmap)\n+# if savename != '':\n+# plt.savefig(savename + date + '.jpg', dpi=75.)\n+# plt.close()\n+\n+\n+def timeseries(df,\n+ x='time',\n+ y='obs',\n+ ax=None,\n+ plotargs={},\n+ fillargs={'alpha': .2},\n+ title='',\n+ ylabel=None,\n+ label=None):\n """Short summary.\n \n Parameters\n@@ -199,273 +265,58 @@ def timeseries_param(df, col='Obs', ax=None, sample='H', plotargs={}, fillargs={\n Description of returned object.\n \n """\n- import pandas as pd\n-\n if ax is None:\n f, ax = plt.subplots(figsize=(11, 6), frameon=False)\n \n sns.set_palette(sns.color_palette(colors))\n sns.set_style('ticks')\n- df.index = df.datetime\n- m = df.groupby(pd.Grouper(freq=sample)).mean()\n- e = df.groupby(pd.Grouper(freq=sample)).std()\n- species = df.Species[0]\n- unit = df.Units[0]\n- upper = m[col] + e[col]\n- lower = m[col] - e[col]\n- lower.loc[lower < 0] = 0\n- lower = lower.values\n- if col == 'Obs':\n- plotargs['color'] = 'darkslategrey'\n- if col == 'Obs':\n- fillargs['color'] = 'darkslategrey'\n- if col != 'Obs' and 'color' not in plotargs:\n- plotargs['color'] = None\n-\n- m[col].plot(ax=ax, **plotargs)\n- ax.fill_between(m[col].index, lower, upper, **fillargs)\n- if label is None:\n- ax.set_ylabel(species + ' (' + unit + ')')\n+ df.index = df[x]\n+ m = df.groupby('time').mean() # mean values for each sample time period\n+ e = df.groupby('time').std() # std values for each sample time period\n+ variable = df.variable[0]\n+ if df.columns.isin(['units']).max():\n+ unit = df.units[0]\n else:\n- ax.set_ylabel(label)\n- plt.set_xlabel('')\n- plt.legend()\n- plt.title(title)\n- plt.tight_layout()\n- return ax\n-\n-\n-def timeseries_error_param(df, col='Obs', ax=None, resample=False, freq='H', plotargs={}, fillargs={}, title='',\n- label=None):\n- """Short summary.\n-\n- Parameters\n- ----------\n- df : pandas DataFrame\n- pandas dataframe with a column labeled datetime\n- col : string\n- Description of parameter `col` (the default is 'Obs').\n- ax : matplotlib axis handle\n- pass a matplotlib axis handle. Default none creates a new figure and axes handle.\n- resample : bool\n- Set to true or false to resample the dataframe. (the default is 'H' plotargs)\n- freq : str\n- String for the default frequency to resample. See http://pandas.pydata.org/pandas-docs/stable/timeseries.html for more documentation (the default is 'H' plotargs).\n- fillargs : dictionary\n- (the default is {}).\n- title : type\n- Description of parameter `title` (the default is '').\n- label : type\n- Description of parameter `label` (the default is None).\n-\n- Returns\n- -------\n- type\n- Description of returned object. """\n- import pandas as pd\n-\n- if ax is None:\n- f, ax = plt.subplots(figsize=(11, 6), frameon=False)\n-\n- sns.set_palette(sns.color_palette(colors))\n- sns.set_style('ticks')\n- df.index = df.datetime\n- m = df.groupby(pd.Grouper(freq=sample)).mean()\n- e = df.groupby(pd.Grouper(freq=sample)).std()\n- species = df.Species[0]\n- unit = df.Units[0]\n- upper = m[col] + e[col]\n- lower = m[col] - e[col]\n+ unit = 'None'\n+ upper = m[y] + e[y]\n+ lower = m[y] - e[y]\n lower.loc[lower < 0] = 0\n lower = lower.values\n- if col == 'Obs':\n- plotargs['color'] = 'darkslategrey'\n- if col == 'Obs':\n- fillargs['color'] = 'darkslategrey'\n- if col != 'Obs' and 'color' not in plotargs:\n- plotargs['color'] = None\n-\n- m[col].plot(ax=ax, **plotargs)\n- ax.fill_between(m[col].index, lower, upper, **fillargs)\n- if label is None:\n- ax.set_ylabel(species + ' (' + unit + ')')\n+ if 'alpha' not in fillargs:\n+ fillargs['alpha'] = 0.2\n+ if label is not None:\n+ m.rename(columns={y: label}, inplace=True)\n+ else:\n+ label = y\n+ m[label].plot(ax=ax, **plotargs)\n+ ax.fill_between(m[label].index, lower, upper, **fillargs)\n+ if ylabel is None:\n+ ax.set_ylabel(variable + ' (' + unit + ')')\n else:\n- ax.set_ylabel(label)\n+ ax.set_ylabel(ylabel)\n+ ax.set_xlabel('')\n plt.legend()\n plt.title(title)\n plt.tight_layout()\n return ax\n \n-\n-# def timeseries_error_param(df, title='', fig=None, label=None, footer=True, sample='H'):\n-# """\n-#\n-# :param df:\n-# :param title:\n-# :param fig:\n-# :param label:\n-# :param footer:\n-# :param sample:\n-# """\n-# import matplotlib.dates as mdates\n-# from numpy import sqrt\n-# sns.set_style('ticks')\n-#\n-# df.index = df.datetime\n-# if fig is None:\n-# plt.figure(figsize=(13, 8))\n-#\n-# species = df.Species.unique().astype('|S8')[0]\n-# units = df.Units.unique().astype('|S8')[0]\n-#\n-# mb = (df.CMAQ - df.Obs).resample(sample).mean()\n-# rmse = sqrt((df.CMAQ - df.Obs) ** 2).resample(sample).mean()\n-#\n-# a = plt.plot(mb, label='Mean Bias', color='dodgerblue')\n-# ax = plt.gca().axes\n-# ax2 = ax.twinx()\n-# b = ax2.plot(rmse, label='RMSE', color='tomato')\n-# lns = a + b\n-# labs = [l.get_label() for l in lns]\n-# plt.legend(lns, labs, loc='best')\n-#\n-# ax.set_xlabel('UTC Time (mm/dd HH)')\n-# ax.xaxis.set_major_formatter(mdates.DateFormatter('%m/%d %H'))\n-# plt.title(title)\n-# ylabel = species + ' (' + units + ')'\n-# ax.set_ylabel('MB ' + ylabel, color='dodgerblue')\n-# ax2.set_ylabel('RMSE ' + ylabel, color='tomato')\n-# if footer:\n-# footer_text(df)\n-# plt.tight_layout()\n-# plt.grid(alpha=.5)\n-# else:\n-# ax1 = fig.get_axes()[0]\n-# ax2 = fig.get_axes()[1]\n-# mb = (df.CMAQ - 
df.Obs).resample(sample).mean()\n-# rmse = sqrt((df.CMAQ - df.Obs) ** 2).resample(sample).mean()\n-# ax1.plot(mb, label=label + ' MB')\n-# ax2.plot(rmse, label=label + ' RMSE')\n-# lns = ax1.get_lines()[:] + ax2.get_lines()[1:]\n-# labs = [l.get_label() for l in lns]\n-# plt.legend(lns, labs, loc='best')\n-\n-\n-def timeseries_rmse_param(df, title='', fig=None, label=None, footer=True, sample='H'):\n- \"\"\"Short summary.\n-\n- Parameters\n- ----------\n- df : type\n- Description of parameter `df`.\n- title : type\n- Description of parameter `title` (the default is '').\n- fig : type\n- Description of parameter `fig` (the default is None).\n- label : type\n- Description of parameter `label` (the default is None).\n- footer : type\n- Description of parameter `footer` (the default is True).\n- sample : type\n- Description of parameter `sample` (the default is 'H').\n-\n- Returns\n- -------\n- type\n- Description of returned object.\n-\n- \"\"\"\n- import matplotlib.dates as mdates\n- from numpy import sqrt\n- sns.set_style('ticks')\n- df.index = df.datetime\n- if fig is None:\n- plt.figure(figsize=(13, 8))\n- species = df.Species.unique().astype('|S8')[0]\n- units = df.Units.unique().astype('|S8')[0]\n- rmse = sqrt((df.CMAQ - df.Obs) ** 2).resample(sample).mean()\n- plt.plot(rmse, color='dodgerblue', label=label)\n- ylabel = species + ' (' + units + ')'\n- plt.gca().axes.set_ylabel('RMSE ' + ylabel)\n- if footer:\n- footer_text(df)\n- ax = plt.gca().axes\n- ax.set_xlabel('UTC Time (mm/dd HH)')\n- ax.xaxis.set_major_formatter(mdates.DateFormatter('%m/%d %H'))\n- plt.tight_layout()\n- plt.grid(alpha=.5)\n- else:\n- ax = fig.get_axes()[0]\n- rmse = sqrt((df.CMAQ - df.Obs) ** 2).resample(sample).mean()\n- ax.plot(rmse, label=label)\n- plt.legend(loc='best')\n-\n-\n-def timeseries_mb_param(df, title='', fig=None, label=None, footer=True, sample='H'):\n- \"\"\"Short summary.\n-\n- Parameters\n- ----------\n- df : type\n- Description of parameter `df`.\n- title : type\n- Description of parameter `title` (the default is '').\n- fig : type\n- Description of parameter `fig` (the default is None).\n- label : type\n- Description of parameter `label` (the default is None).\n- footer : type\n- Description of parameter `footer` (the default is True).\n- sample : type\n- Description of parameter `sample` (the default is 'H').\n-\n- Returns\n- -------\n- type\n- Description of returned object.\n-\n- \"\"\"\n- import matplotlib.dates as mdates\n- sns.set_style('ticks')\n- df.index = df.datetime\n- if fig is None:\n- plt.figure(figsize=(13, 8))\n- species = df.Species.unique().astype('|S8')[0]\n- units = df.Units.unique().astype('|S8')[0]\n- mb = (df.CMAQ - df.Obs).resample(sample).mean()\n- plt.plot(mb, color='dodgerblue', label=label)\n- ylabel = species + ' (' + units + ')'\n- plt.gca().axes.set_ylabel('MB ' + ylabel)\n- plt.gca().axes.set_xlabel('UTC Time (mm/dd HH)')\n- plt.gca().axes.xaxis.set_major_formatter(mdates.DateFormatter('%m/%d %H'))\n- if footer:\n- footer_text(df)\n- plt.tight_layout()\n- plt.grid(alpha=.5)\n- else:\n- ax = fig.get_axes()[0]\n- rmse = (df.CMAQ - df.Obs).resample(sample).mean()\n- ax.plot(rmse, label=label)\n- plt.legend(loc='best')\n-\n-\n-def kdeplots_param(df, title=None, fig=None, label=None, footer=True, cumulative=False):\n+def kdeplot(df, title=None, label=None, ax=None, **kwargs):\n \"\"\"Short summary.\n \n Parameters\n ----------\n df : type\n Description of parameter `df`.\n+ col : type\n+ Description of parameter `col` (the default is 'obs').\n title : type\n 
Description of parameter `title` (the default is None).\n- fig : type\n- Description of parameter `fig` (the default is None).\n label : type\n Description of parameter `label` (the default is None).\n- footer : type\n- Description of parameter `footer` (the default is True).\n- cumulative : type\n- Description of parameter `cumulative` (the default is False).\n+ ax : type\n+ Description of parameter `ax` (the default is ax).\n+ **kwargs : type\n+ Description of parameter `**kwargs`.\n \n Returns\n -------\n@@ -473,98 +324,29 @@ def kdeplots_param(df, title=None, fig=None, label=None, footer=True, cumulative\n Description of returned object.\n \n \"\"\"\n- from scipy.stats import scoreatpercentile as score\n sns.set_style('ticks')\n \n- if fig is None:\n-\n- if cumulative:\n- plt.figure(figsize=(13, 8))\n- sns.kdeplot(df.Obs, color='darkslategrey', cumulative=True, label='Obs')\n- sns.kdeplot(df.CMAQ, color='dodgerblue', cumulative=True, label=label)\n- else:\n- maxval1 = score(df.CMAQ.values, per=99.5)\n- maxval2 = score(df.Obs.values, per=99.5)\n- maxval = max([maxval1, maxval2])\n- plt.figure(figsize=(13, 8))\n- sns.kdeplot(df.Obs, color='darkslategrey')\n- sns.kdeplot(df.CMAQ, color='dodgerblue', label=label)\n-\n+ if ax is None:\n+ f, ax = plt.subplots(figsize=(11, 6), frameon=False)\n sns.despine()\n- if not cumulative:\n- plt.xlim([0, maxval])\n- plt.xlabel(df.Species.unique()[0] + ' (' + df.Units.unique()[0] + ')')\n- plt.title(title)\n- plt.gca().axes.set_ylabel('P(' + df.Species.unique()[0] + ')')\n- if footer:\n- footer_text(df)\n- plt.tight_layout()\n- plt.grid(alpha=.5)\n- else:\n- ax = fig.get_axes()[0]\n- sns.kdeplot(df.CMAQ, ax=ax, label=label, cumulative=cumulative)\n-\n-\n-def diffpdfs_param(df, title=None, fig=None, label=None, footer=True):\n- \"\"\"Short summary.\n-\n- Parameters\n- ----------\n- df : type\n- Description of parameter `df`.\n- title : type\n- Description of parameter `title` (the default is None).\n- fig : type\n- Description of parameter `fig` (the default is None).\n- label : type\n- Description of parameter `label` (the default is None).\n- footer : type\n- Description of parameter `footer` (the default is True).\n-\n- Returns\n- -------\n- type\n- Description of returned object.\n-\n- \"\"\"\n- from scipy.stats import scoreatpercentile as score\n- sns.set_style('ticks')\n \n- maxval = score(df.CMAQ.values - df.Obs.values, per=99.9)\n- minval = score(df.CMAQ.values - df.Obs.values, per=.1)\n- if fig is None:\n- plt.figure(figsize=(10, 7))\n- if label == 'None':\n- label = 'CMAQ - Obs'\n- sns.kdeplot(df.CMAQ.values - df.Obs.values, color='darkslategrey', label=label)\n- sns.despine()\n- plt.xlim([minval, maxval])\n- plt.xlabel(df.Species.unique()[0] + ' Difference (' + df.Units.unique()[0] + ')')\n- plt.title(title)\n- plt.gca().axes.set_ylabel('P( Model - Obs )')\n- if footer:\n- footer_text(df)\n- plt.tight_layout()\n- else:\n- ax = fig.get_axes()[0]\n- sns.kdeplot(df.CMAQ.values - df.Obs.values, ax=ax, label=label)\n+ ax = sns.kdeplot(df, ax=ax, label=label, **kwargs)\n+ return ax\n \n \n-def scatter_param(df, title=None, fig=None, label=None, footer=True):\n+def scatter(df, x=None, y=None, title=None, label=None, ax=None, **kwargs):\n \"\"\"Short summary.\n \n Parameters\n ----------\n df : type\n Description of parameter `df`.\n- title : type\n- Description of parameter `title` (the default is None).\n- fig : type\n- Description of parameter `fig` (the default is None).\n- label : type\n- Description of parameter `label` (the default is None).\n- 
footer : type\n- Description of parameter `footer` (the default is True).\n+ x : type\n+ Description of parameter `x` (the default is 'obs').\n+ y : type\n+ Description of parameter `y` (the default is 'model').\n+ **kwargs : type\n+ Description of parameter `**kwargs`.\n \n Returns\n -------\n@@ -572,114 +354,36 @@ def scatter_param(df, title=None, fig=None, label=None, footer=True):\n Description of returned object.\n \n \"\"\"\n- from numpy import max, arange, linspace, isnan\n- from scipy.stats import scoreatpercentile as score\n- from scipy.stats import linregress\n sns.set_style('ticks')\n \n- species, units = df.Species.unique()[0], df.Units.unique()[0]\n- mask = ~isnan(df.Obs.values) & ~isnan(df.CMAQ.values)\n- maxval1 = score(df.CMAQ.values[mask], per=99.5)\n- maxval2 = score(df.Obs.values[mask], per=99.5)\n- maxval = max([maxval1, maxval2])\n- print maxval\n- if fig is None:\n- plt.figure(figsize=(10, 7))\n-\n- plt.scatter(df.Obs, df.CMAQ, c='cornflowerblue', marker='o', edgecolors='w', alpha=.3, label=label)\n- x = arange(0, maxval + 1)\n- if maxval <= 10.:\n- x = linspace(0, maxval, 25)\n- plt.plot(x, x, '--', color='slategrey')\n- tt = linregress(df.Obs.values[mask], df.CMAQ.values[mask])\n- plt.plot(x, tt[0] * x + tt[1], color='tomato')\n-\n- plt.xlim([0, maxval])\n- plt.ylim([0, maxval])\n- plt.xlabel('Obs ' + species + ' (' + units + ')')\n- plt.title(title)\n- plt.gca().axes.set_ylabel('Model ' + species + ' (' + units + ')')\n- if footer:\n- footer_text(df)\n- plt.tight_layout()\n- plt.grid(alpha=.5)\n- else:\n- ax = fig.get_axes()[0]\n- l, = ax.scatter(df.Obs, df.CMAQ, marker='o', edgecolors='w', alpha=.3, label=label)\n- tt = linregress(df.Obs.values, df.CMAQ.values)\n- ax.plot(df.Obs.unique(), tt[0] * df.Obs.unique() + tt[1], color=l.get_color())\n- plt.legend(loc='Best')\n-\n-\n-def diffscatter_param(df, title=None, fig=None, label=None, footer=True):\n- \"\"\"Short summary.\n-\n- Parameters\n- ----------\n- df : type\n- Description of parameter `df`.\n- title : type\n- Description of parameter `title` (the default is None).\n- fig : type\n- Description of parameter `fig` (the default is None).\n- label : type\n- Description of parameter `label` (the default is None).\n- footer : type\n- Description of parameter `footer` (the default is True).\n-\n- Returns\n- -------\n- type\n- Description of returned object.\n-\n- \"\"\"\n- from scipy.stats import scoreatpercentile as score\n- from numpy import isnan\n- sns.set_style('ticks')\n- df = df.dropna()\n- mask = ~isnan(df.Obs.values) & ~isnan(df.CMAQ.values)\n- if fig is None:\n- species, units = df.Species.unique()[0], df.Units.unique()[0]\n- maxval = score(df.Obs.values[mask], per=99.9)\n- minvaly = score(df.CMAQ.values[mask] - df.Obs.values[mask], per=.1)\n- maxvaly = score(df.CMAQ.values[mask] - df.Obs.values[mask], per=99.9)\n- plt.figure(figsize=(10, 7))\n-\n- plt.scatter(df.Obs.values[mask], df.CMAQ.values[mask] - df.Obs.values[mask], c='cornflowerblue', marker='o',\n- edgecolors='w', alpha=.3, label=label)\n- plt.plot((0, maxval), (0, 0), '--', color='darkslategrey')\n-\n- plt.xlim([0, maxval])\n- plt.ylim([minvaly, maxvaly])\n- plt.xlabel('Obs ' + species + ' (' + units + ')')\n- plt.title(title)\n- plt.gca().axes.set_ylabel('Model - Obs ' + species + ' (' + units + ')')\n- if footer:\n- footer_text(df)\n- plt.tight_layout()\n- else:\n- ax = fig.get_axes()[0]\n- mask = ~isnan(df.Obs.values) & ~isnan(df.CMAQ.values)\n- ax.scatter(df.Obs.values[mask], df.CMAQ.values[mask] - df.Obs.values[mask], marker='o', 
edgecolors='w',\n- alpha=.3, label=label)\n- plt.legend(loc='best')\n+ if ax is None:\n+ f, ax = plt.subplots(figsize=(8, 6), frameon=False)\n+ ax = sns.regplot(data=df, x=x, y=y, label=label, **kwargs)\n+ plt.title(title)\n+ return ax\n \n \n-def taylordiagram(df, marker='o', label='CMAQ', addon=False, dia=None):\n+def taylordiagram(df,\n+ marker='o',\n+ col1='obs',\n+ col2='model',\n+ label='CMAQ',\n+ addon=False,\n+ dia=None):\n from numpy import corrcoef\n \n- df = df.drop_duplicates().dropna(subset=['Obs', 'CMAQ'])\n+ df = df.drop_duplicates().dropna(subset=[col1, col2])\n \n if not addon and dia is None:\n f = plt.figure(figsize=(12, 10))\n sns.set_style('ticks')\n- obsstd = df.Obs.std()\n+ obsstd = df[col1].std()\n \n dia = td.TaylorDiagram(obsstd, fig=f, rect=111, label='Obs')\n plt.grid(linewidth=1, alpha=.5)\n-\n- cc = corrcoef(df.Obs.values, df.CMAQ.values)[0, 1]\n- dia.add_sample(df.CMAQ.std(), cc, marker=marker, zorder=9, ls=None, label=label)\n+ cc = corrcoef(df[col1].values, df[col2].values)[0, 1]\n+ dia.add_sample(\n+ df[col2].std(), cc, marker=marker, zorder=9, ls=None, label=label)\n contours = dia.add_contours(colors='0.5')\n plt.clabel(contours, inline=1, fontsize=10)\n plt.grid(alpha=.5)\n@@ -687,12 +391,15 @@ def taylordiagram(df, marker='o', label='CMAQ', addon=False, dia=None):\n plt.tight_layout()\n \n elif not addon and dia is not None:\n- print 'Do you want to add this on? if so please turn the addon keyword to True'\n+ print('Do you want to add this on? If so, please set '\n+ 'the addon keyword to True.')\n elif addon and dia is None:\n- print 'Please pass the previous Taylor Diagram Instance with dia keyword...'\n+ print('Please pass the previous TaylorDiagram instance with the '\n+ 'dia keyword.')\n else:\n- cc = corrcoef(df.Obs.values, df.CMAQ.values)[0, 1]\n- dia.add_sample(df.CMAQ.std(), cc, marker=marker, zorder=9, ls=None, label=label)\n+ cc = corrcoef(df[col1].values, df[col2].values)[0, 1]\n+ dia.add_sample(\n+ df[col2].std(), cc, marker=marker, zorder=9, ls=None, label=label)\n plt.legend(fontsize='small', loc='best')\n plt.tight_layout()\n return dia\ndiff --git a/monet/plots/taylordiagram.py b/monet/plots/taylordiagram.py\n--- a/monet/plots/taylordiagram.py\n+++ b/monet/plots/taylordiagram.py\n@@ -1,24 +1,18 @@\n-#!/usr/bin/env python\n-# Copyright: This document has been placed in the public domain.\n-\n \"\"\"\n Taylor diagram (Taylor, 2001) test implementation.\n http://www-pcmdi.llnl.gov/about/staff/Taylor/CV/Taylor_diagram_primer.htm\n \"\"\"\n-from __future__ import division\n-from __future__ import print_function\n-\n-from builtins import zip\n-from builtins import map\n-from builtins import object\n-from past.utils import old_div\n+from __future__ import division, print_function\n \n-__version__ = \"Time-stamp: <2012-02-17 20:59:35 ycopin>\"\n-__author__ = \"Yannick Copin \"\n+from builtins import map, object, zip\n \n import matplotlib.pyplot as PLT\n import numpy as NP\n import seaborn as sns\n+from past.utils import old_div\n+\n+__version__ = \"Time-stamp: <2012-02-17 20:59:35 ycopin>\"\n+__author__ = \"Yannick Copin \"\n \n colors = ['#DA70D6', '#228B22', '#FA8072', '#FF1493']\n sns.set_palette(sns.color_palette(colors))\n@@ -53,13 +47,16 @@ def __init__(self, refstd, fig=None, rect=111, label='_'):\n # Standard deviation axis extent\n self.smin = 0\n self.smax = 1.5 * self.refstd\n-\n- ghelper = FA.GridHelperCurveLinear(tr,\n- extremes=(0, old_div(NP.pi, 2), # 1st quadrant\n- self.smin, self.smax),\n- grid_locator1=gl1,\n- tick_formatter1=tf1,\n- )\n+ ghelper = FA.GridHelperCurveLinear(\n+ tr,\n+ 
extremes=(\n+ 0,\n+ old_div(NP.pi, 2), # 1st quadrant\n+ self.smin,\n+ self.smax),\n+ grid_locator1=gl1,\n+ tick_formatter1=tf1,\n+ )\n \n if fig is None:\n fig = PLT.figure()\n@@ -91,8 +88,8 @@ def __init__(self, refstd, fig=None, rect=111, label='_'):\n \n # Add reference point and stddev contour\n print(\"Reference std:\", self.refstd)\n- l, = self.ax.plot([0], self.refstd, 'k*',\n- ls='', ms=10, label=label)\n+ l, = self.ax.plot(\n+ [0], self.refstd, 'r*', ls='', ms=14, label=label, zorder=10)\n t = NP.linspace(0, old_div(NP.pi, 2))\n r = NP.zeros_like(t) + self.refstd\n self.ax.plot(t, r, 'k--', label='_')\n@@ -104,9 +101,8 @@ def add_sample(self, stddev, corrcoef, *args, **kwargs):\n \"\"\"Add sample (stddev,corrcoeff) to the Taylor diagram. args\n and kwargs are directly propagated to the Figure.plot\n command.\"\"\"\n-\n- l, = self.ax.plot(NP.arccos(corrcoef), stddev,\n- *args, **kwargs) # (theta,radius)\n+ l, = self.ax.plot(NP.arccos(corrcoef), stddev, *args,\n+ **kwargs) # (theta,radius)\n self.samplePoints.append(l)\n \n return l\n@@ -114,10 +110,12 @@ def add_sample(self, stddev, corrcoef, *args, **kwargs):\n def add_contours(self, levels=5, **kwargs):\n \"\"\"Add constant centered RMS difference contours.\"\"\"\n \n- rs, ts = NP.meshgrid(NP.linspace(self.smin, self.smax),\n- NP.linspace(0, old_div(NP.pi, 2)))\n+ rs, ts = NP.meshgrid(\n+ NP.linspace(self.smin, self.smax), NP.linspace(\n+ 0, old_div(NP.pi, 2)))\n # Compute centered RMS difference\n- rms = NP.sqrt(self.refstd ** 2 + rs ** 2 - 2 * self.refstd * rs * NP.cos(ts))\n+ rms = NP.sqrt(self.refstd**2 + rs**2 -\n+ 2 * self.refstd * rs * NP.cos(ts))\n \n contours = self.ax.contour(ts, rs, rms, levels, **kwargs)\n \ndiff --git a/monet/util/__init__.py b/monet/util/__init__.py\n--- a/monet/util/__init__.py\n+++ b/monet/util/__init__.py\n@@ -1,7 +1,7 @@\n from __future__ import absolute_import, print_function\n \n-from . import mystats # , tools\n+from . 
import mystats, tools, interp_util, resample\n \n-__all__ = ['mystats', 'tools']\n+__all__ = ['mystats', 'tools', 'interp_util', 'resample']\n \n-__name__ = 'util'\n+#__name__ = 'util'\ndiff --git a/monet/util/interp_util.py b/monet/util/interp_util.py\nnew file mode 100644\n--- /dev/null\n+++ b/monet/util/interp_util.py\n@@ -0,0 +1,276 @@\n+\"\"\" Interpolation functions \"\"\"\n+from __future__ import print_function\n+\n+from builtins import str, zip\n+\n+\n+def lonlat_to_swathdefinition(longitude=None, latitude=None):\n+ \"\"\"Creates a pyresample.geometry.SwathDefinition from 1d or 2d arrays of\n+ longitudes and latitudes.\n+\n+ Parameters\n+ ----------\n+ longitude : numpy.array or xarray.DataArray\n+ Array of longitude values\n+ latitude : numpy.array or xarray.DataArray\n+ Array of latitude values\n+\n+ Returns\n+ -------\n+ pyresample.geometry.SwathDefinition\n+\n+ \"\"\"\n+ from pyresample.geometry import SwathDefinition\n+ from numpy import vstack\n+ if len(longitude.shape) < 2:\n+ lons = vstack(longitude)\n+ lats = vstack(latitude)\n+ else:\n+ lons = longitude\n+ lats = latitude\n+ return SwathDefinition(lons=lons, lats=lats)\n+\n+\n+def nearest_point_swathdefinition(longitude=None, latitude=None):\n+ \"\"\"Creates a pyresample.geometry.SwathDefinition for a single point.\n+\n+ Parameters\n+ ----------\n+ longitude : float\n+ longitude.\n+ latitude : float\n+ latitude.\n+\n+ Returns\n+ -------\n+ pyresample.geometry.SwathDefinition\n+\n+\n+ \"\"\"\n+ from pyresample.geometry import SwathDefinition\n+ from numpy import vstack\n+ lons = vstack([longitude])\n+ lats = vstack([latitude])\n+ return SwathDefinition(lons=lons, lats=lats)\n+\n+\n+def constant_lat_swathdefition(longitude=None, latitude=None):\n+ \"\"\"Creates a pyresample.geometry.SwathDefinition with a constant latitude along\n+ the longitude array. Longitude can be a 1d or 2d np.array or xr.DataArray\n+\n+ Parameters\n+ ----------\n+ longitude : numpy.array or xarray.DataArray\n+ Array of longitude values\n+ latitude : float\n+ constant latitude value\n+\n+ Returns\n+ -------\n+ pyresample.geometry.SwathDefinition\n+\n+ \"\"\"\n+ from pyresample import geometry\n+ from xarray import DataArray\n+ from numpy import vstack\n+ if len(longitude.shape) < 2:\n+ lons = vstack(longitude)\n+ else:\n+ lons = longitude\n+ lats = lons * 0. + latitude\n+ if isinstance(lats, DataArray):\n+ lats.name = 'lats'\n+ return geometry.SwathDefinition(lons=lons, lats=lats)\n+\n+\n+def constant_lon_swathdefition(longitude=None, latitude=None):\n+ \"\"\"Creates a pyresample.geometry.SwathDefinition with a constant longitude along\n+ the latitude array. Latitude can be a 1d or 2d np.array or xr.DataArray\n+\n+ Parameters\n+ ----------\n+ longitude : float\n+ constant longitude value\n+ latitude : numpy.array or xarray.DataArray\n+ Array of latitude values\n+\n+ Returns\n+ -------\n+ pyresample.geometry.SwathDefinition\n+\n+ \"\"\"\n+ from pyresample import geometry\n+ from xarray import DataArray\n+ from numpy import vstack\n+ if len(latitude.shape) < 2:\n+ lats = vstack(latitude)\n+ else:\n+ lats = latitude\n+ lons = lats * 0. 
+ longitude\n+ if isinstance(lats, DataArray):\n+ lons.name = 'lons'\n+ return geometry.SwathDefinition(lons=lons, lats=lats)\n+\n+\n+def get_smops_area_def(nx=1440, ny=720):\n+ \"\"\"Creates a global 0.25 degree equirectangular pyresample AreaDefinition\n+ for the SMOPS grid.\n+\n+ Parameters\n+ ----------\n+ nx : int\n+ number of grid cells in x (the default is 1440).\n+ ny : int\n+ number of grid cells in y (the default is 720).\n+\n+ Returns\n+ -------\n+ pyresample.geometry.AreaDefinition\n+\n+ \"\"\"\n+ from pyproj import Proj\n+ from pyresample import utils\n+ p = Proj(\n+ proj='eqc',\n+ lat_ts=0.,\n+ lat_0=0.,\n+ lon_0=0.,\n+ x_0=0.,\n+ y_0=0.,\n+ a=6378137,\n+ b=6378137,\n+ units='m')\n+ proj4_args = p.srs\n+ area_name = 'Global .25 degree SMOPS Grid'\n+ area_id = 'smops'\n+ proj_id = area_id\n+ aa = p([-180, 180], [-90, 90])\n+ area_extent = (aa[0][0], aa[1][0], aa[0][1], aa[1][1])\n+ area_def = utils.get_area_def(area_id, area_name, proj_id, proj4_args, nx,\n+ ny, area_extent)\n+ return area_def\n+\n+\n+def get_gfs_area_def(nx=1440, ny=721):\n+ \"\"\"Creates a global 0.25 degree equirectangular pyresample AreaDefinition\n+ for the GFS grid.\n+\n+ Parameters\n+ ----------\n+ nx : int\n+ number of grid cells in x (the default is 1440).\n+ ny : int\n+ number of grid cells in y (the default is 721).\n+\n+ Returns\n+ -------\n+ pyresample.geometry.AreaDefinition\n+\n+ \"\"\"\n+ from pyresample import utils\n+ from pyproj import Proj\n+ # proj4_args = '+proj=eqc +lat_ts=0 +lat_0=0 +lon_0=0 +x_0=0\n+ # +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m'\n+ p = Proj(\n+ proj='eqc',\n+ lat_ts=0.,\n+ lat_0=0.,\n+ lon_0=0.,\n+ x_0=0.,\n+ y_0=0.,\n+ a=6378137,\n+ b=6378137,\n+ units='m')\n+ proj4_args = p.srs\n+ area_name = 'Global .25 degree GFS Grid'\n+ area_id = 'gfs'\n+ proj_id = area_id\n+ aa = p([0, 360 - .25], [-90, 90.])\n+ area_extent = (aa[0][0], aa[1][0], aa[0][1], aa[1][1])\n+ area_def = utils.get_area_def(area_id, area_name, proj_id, proj4_args, nx,\n+ ny, area_extent)\n+ return area_def\n+\n+\n+def geotiff_meta_to_areadef(meta):\n+ \"\"\"\n+ Transform (Rasterio) geotiff meta dictionary to pyresample area definition\n+ Arguments:\n+ meta (dictionary) : dictionary containing projection and image geometry\n+ information (formed by Rasterio)\n+ Returns:\n+ area_def (pyresample.geometry.AreaDefinition) : Area definition object\n+ \"\"\"\n+ import pyresample\n+ area_id = \"\"\n+ name = \"\"\n+ proj_id = \"Generated from GeoTIFF\"\n+ proj_dict = meta['crs']\n+ proj_dict_with_string_values = dict(\n+ list(\n+ zip([str(key) for key in list(proj_dict.keys())],\n+ [str(value) for value in list(proj_dict.values())])))\n+ x_size = meta['width']\n+ x_res = meta['transform'][0]\n+ y_res = meta['transform'][4] * -1\n+ y_size = meta['height']\n+ x_ll = meta['transform'][2]\n+ y_ur = meta['transform'][5]\n+ y_ll = y_ur - y_size * y_res\n+ x_ur = x_ll + x_size * x_res\n+ area_extent = [x_ll, y_ll, x_ur, y_ur]\n+ print(area_extent, x_size, y_size, x_res, y_res)\n+\n+ area_def = pyresample.geometry.AreaDefinition(\n+ area_id, name, proj_id, proj_dict_with_string_values, x_size, y_size,\n+ area_extent)\n+ # print(area_extent, x_size, y_size)\n+ return area_def\n+\n+\n+def geotiff_meta_to_areadef2(meta):\n+ \"\"\"\n+ Transform (Rasterio) geotiff meta dictionary to pyresample area definition\n+ Arguments:\n+ meta (dictionary) : dictionary containing projection and image geometry\n+ information (formed by Rasterio)\n+ Returns:\n+ area_def (pyresample.geometry.AreaDefinition) : Area definition object\n+ \"\"\"\n+ import pyresample\n+ area_id = \"\"\n+ name = \"\"\n+ proj_id = \"Generated from GeoTIFF\"\n+ proj_dict = 
meta['crs']\n+ proj_dict_with_string_values = dict(\n+ list(\n+ zip([str(key) for key in list(proj_dict.keys())],\n+ [str(value) for value in list(proj_dict.values())])))\n+ x_size = meta['width']\n+ x_res = 50000.\n+ y_res = 50000.\n+ y_size = meta['height']\n+ x_ll = meta['transform'][2]\n+ y_ur = meta['transform'][5]\n+ y_ll = y_ur - y_size * y_res\n+ x_ur = x_ll + x_size * x_res\n+ area_extent = [x_ll, y_ll, x_ur, y_ur]\n+ print(area_extent, x_size, y_size, x_res, y_res)\n+\n+ area_def = pyresample.geometry.AreaDefinition(\n+ area_id, name, proj_id, proj_dict_with_string_values, x_size, y_size,\n+ area_extent)\n+ return area_def\ndiff --git a/monet/util/mystats.py b/monet/util/mystats.py\n--- a/monet/util/mystats.py\n+++ b/monet/util/mystats.py\n@@ -18,27 +18,33 @@ def STDP(obs, mod, axis=None):\n \n def MNB(obs, mod, axis=None):\n \"\"\" Mean Normalized Bias (%)\"\"\"\n- return np.ma.masked_invalid(old_div((mod - obs), obs)).mean(axis=axis) * 100.\n+ return np.ma.masked_invalid(old_div(\n+ (mod - obs), obs)).mean(axis=axis) * 100.\n \n \n def MNE(obs, mod, axis=None):\n \"\"\" Mean Normalized Gross Error (%)\"\"\"\n- return np.ma.masked_invalid(old_div(np.ma.abs(mod - obs), obs)).mean(axis=axis) * 100.\n-\n+ return np.ma.masked_invalid(old_div(np.ma.abs(mod - obs),\n+ obs)).mean(axis=axis) * 100.\n\n def MdnNB(obs, mod, axis=None):\n \"\"\" Median Normalized Bias (%)\"\"\"\n- return np.ma.median(np.ma.masked_invalid(old_div((mod - obs), obs)), axis=axis) * 100.\n-\n+ return np.ma.median(\n+ np.ma.masked_invalid(old_div((mod - obs), obs)), axis=axis) * 100.\n\n def MdnNE(obs, mod, axis=None):\n \"\"\" Median Normalized Gross Error (%)\"\"\"\n- return np.ma.median(np.ma.masked_invalid(old_div(np.ma.abs(mod - obs), obs)), axis=axis) * 100.\n+ return np.ma.median(\n+ np.ma.masked_invalid(old_div(np.ma.abs(mod - obs), obs)),\n+ axis=axis) * 100.\n \n \n-def NMdnE(obs, mod, axis=None):\n+def NMdnGE(obs, mod, axis=None):\n \"\"\" Normalized Median Gross Error (%)\"\"\"\n- return np.ma.masked_invalid(old_div(np.ma.abs(mod - obs).mean(axis=axis), obs.mean(axis=axis))) * 100.\n+ return np.ma.masked_invalid(\n+ old_div(np.ma.abs(mod - obs).mean(axis=axis),\n+ obs.mean(axis=axis))) * 100.\n \n \n def NO(obs, mod, axis=None):\n@@ -114,12 +120,15 @@ def NMB(obs, mod, axis=None):\n \n def NMdnB(obs, mod, axis=None):\n \"\"\" Normalized Median Bias (%)\"\"\"\n- return np.ma.median(mod - obs, axis=axis) / np.ma.median(obs, axis=axis) * 100.\n+ return np.ma.median(\n+ mod - obs, axis=axis) / np.ma.median(\n+ obs, axis=axis) * 100.\n \n \n def FB(obs, mod, axis=None):\n \"\"\" Fractional Bias (%)\"\"\"\n- return ((np.ma.masked_invalid(old_div((mod - obs), (mod + obs)))).mean(axis=axis) * 2.) * 100.\n+ return ((np.ma.masked_invalid(old_div(\n+ (mod - obs), (mod + obs)))).mean(axis=axis) * 2.) 
* 100.\n \n \n def ME(obs, mod, axis=None):\n@@ -145,73 +154,93 @@ def WDMdnE(obs, mod, axis=None):\n \n def NME(obs, mod, axis=None):\n \"\"\" Normalized Mean Error (%)\"\"\"\n- out = (old_div(np.ma.abs(mod - obs).sum(axis=axis), obs.sum(axis=axis))) * 100\n+ out = (old_div(np.ma.abs(mod - obs).sum(axis=axis),\n+ obs.sum(axis=axis))) * 100\n return out\n \n \n def NMdnE(obs, mod, axis=None):\n \"\"\" Normalized Median Error (%)\"\"\"\n- out = np.ma.median(np.ma.abs(mod - obs), axis=axis) / np.ma.median(obs, axis=axis) * 100\n+ out = np.ma.median(\n+ np.ma.abs(mod - obs), axis=axis) / np.ma.median(\n+ obs, axis=axis) * 100\n return out\n \n \n def FE(obs, mod, axis=None):\n \"\"\" Fractional Error (%)\"\"\"\n- return (old_div(np.ma.abs(mod - obs), (mod + obs))).mean(axis=axis) * 2. * 100.\n+ return (old_div(np.ma.abs(mod - obs),\n+ (mod + obs))).mean(axis=axis) * 2. * 100.\n \n \n def USUTPB(obs, mod, axis=None):\n \"\"\" Unpaired Space/Unpaired Time Peak Bias (%)\"\"\"\n- return (old_div((mod.max(axis=axis) - obs.max(axis=axis)), obs.max(axis=axis))) * 100.\n+ return (old_div(\n+ (mod.max(axis=axis) - obs.max(axis=axis)), obs.max(axis=axis))) * 100.\n \n \n def USUTPE(obs, mod, axis=None):\n \"\"\" Unpaired Space/Unpaired Time Peak Error (%)\"\"\"\n- return (old_div(np.ma.abs(mod.max(axis=axis) - obs.max(axis=axis)), obs.max(axis=axis))) * 100.\n+ return (old_div(\n+ np.ma.abs(mod.max(axis=axis) - obs.max(axis=axis)),\n+ obs.max(axis=axis))) * 100.\n \n \n def MNPB(obs, mod, paxis, axis=None):\n \"\"\" Mean Normalized Peak Bias (%)\"\"\"\n- return (old_div((mod.max(axis=paxis) - obs.max(axis=paxis)), obs.max(axis=paxis))).mean(axis=axis) * 100.\n+ return (old_div(\n+ (mod.max(axis=paxis) - obs.max(axis=paxis)),\n+ obs.max(axis=paxis))).mean(axis=axis) * 100.\n \n \n def MdnNPB(obs, mod, paxis, axis=None):\n \"\"\" Median Normalized Peak Bias (%)\"\"\"\n- return np.ma.median(old_div((mod.max(axis=paxis) - obs.max(axis=paxis)), obs.max(axis=paxis)), axis=axis) * 100.\n+ return np.ma.median(\n+ old_div(\n+ (mod.max(axis=paxis) - obs.max(axis=paxis)), obs.max(axis=paxis)),\n+ axis=axis) * 100.\n \n \n def MNPE(obs, mod, paxis, axis=None):\n \"\"\" Mean Normalized Peak Error (%)\"\"\"\n- return (old_div((np.ma.abs(mod.max(axis=paxis) - obs.max(axis=paxis))), obs.max(axis=paxis))).mean(axis=axis) * 100.\n-\n+ return (old_div(\n+ (np.ma.abs(mod.max(axis=paxis) - obs.max(axis=paxis))),\n+ obs.max(axis=paxis))).mean(axis=axis) * 100.\n \n def MdnNPE(obs, mod, paxis, axis=None):\n \"\"\" Median Normalized Peak Bias (%)\"\"\"\n- return np.ma.median(old_div((np.ma.abs(mod.max(axis=paxis) - obs.max(axis=paxis))), obs.max(axis=paxis)),\n- axis=axis) * 100.\n+ return np.ma.median(\n+ old_div(\n+ (np.ma.abs(mod.max(axis=paxis) - obs.max(axis=paxis))),\n+ obs.max(axis=paxis)),\n+ axis=axis) * 100.\n \n \n def NMPB(obs, mod, paxis, axis=None):\n \"\"\" Normalized Mean Peak Bias (%)\"\"\"\n- return (mod.max(axis=paxis) - obs.max(axis=paxis)).mean(axis=axis) / obs.max(axis=paxis).mean(axis=axis) * 100.\n+ return (mod.max(axis=paxis) - obs.max(axis=paxis)\n+ ).mean(axis=axis) / obs.max(axis=paxis).mean(axis=axis) * 100.\n \n \n def NMdnPB(obs, mod, paxis, axis=None):\n \"\"\" Normalized Median Peak Bias (%)\"\"\"\n- return np.ma.median((mod.max(axis=paxis) - obs.max(axis=paxis)), axis=axis) / np.ma.median(obs.max(axis=paxis),\n- axis=axis) * 100.\n+ return np.ma.median(\n+ (mod.max(axis=paxis) - obs.max(axis=paxis)), axis=axis) / np.ma.median(\n+ obs.max(axis=paxis), axis=axis) * 100.\n \n \n def NMPE(obs, 
mod, paxis, axis=None):\n \"\"\" Normalized Mean Peak Error (%)\"\"\"\n- return (np.ma.abs(mod.max(axis=paxis) - obs.max(axis=paxis))).mean(axis=axis) / obs.max(axis=paxis).mean(\n- axis=axis) * 100.\n+ return (np.ma.abs(mod.max(axis=paxis) - obs.max(axis=paxis))\n+ ).mean(axis=axis) / obs.max(axis=paxis).mean(axis=axis) * 100.\n \n \n def NMdnPE(obs, mod, paxis, axis=None):\n \"\"\" Normalized Median Peak Bias (%)\"\"\"\n- return np.ma.median(np.ma.abs(mod.max(axis=paxis) - obs.max(axis=paxis)), axis=axis) / np.ma.median(\n- obs.max(axis=paxis), axis=axis) * 100.\n+ return np.ma.median(\n+ np.ma.abs(mod.max(axis=paxis) - obs.max(axis=paxis)),\n+ axis=axis) / np.ma.median(\n+ obs.max(axis=paxis), axis=axis) * 100.\n \n \n def PSUTMNPB(obs, mod, axis=None):\n@@ -259,19 +288,19 @@ def R2(obs, mod, axis=None):\n from scipy.stats import pearsonr\n if axis is None:\n obsc, modc = matchedcompressed(obs, mod)\n- return pearsonr(obsc, modc)[0] ** 2\n+ return pearsonr(obsc, modc)[0]**2\n else:\n raise ValueError('Not ready yet')\n \n \n def RMSE(obs, mod, axis=None):\n \"\"\" Root Mean Square Error (model unit)\"\"\"\n- return np.ma.sqrt(((mod - obs) ** 2).mean(axis=axis))\n+ return np.ma.sqrt(((mod - obs)**2).mean(axis=axis))\n \n \n def WDRMSE(obs, mod, axis=None):\n \"\"\" Wind Direction Root Mean Square Error (model unit)\"\"\"\n- return np.ma.sqrt(((circlebias(mod - obs)) ** 2).mean(axis=axis))\n+ return np.ma.sqrt(((circlebias(mod - obs))**2).mean(axis=axis))\n \n \n def RMSEs(obs, mod, axis=None):\n@@ -316,13 +345,17 @@ def RMSEu(obs, mod, axis=None):\n \n def d1(obs, mod, axis=None):\n \"\"\" Modified Index of Agreement, d1\"\"\"\n- return 1.0 - old_div((np.ma.abs(obs - mod)).sum(axis=axis), (\n- np.ma.abs(mod - obs.mean(axis=axis)) + np.ma.abs(obs - obs.mean(axis=axis))).sum(axis=axis))\n+ return 1.0 - old_div(\n+ (np.ma.abs(obs - mod)).sum(axis=axis),\n+ (np.ma.abs(mod - obs.mean(axis=axis)) +\n+ np.ma.abs(obs - obs.mean(axis=axis))).sum(axis=axis))\n \n \n def E1(obs, mod, axis=None):\n \"\"\" Modified Coefficient of Efficiency, E1\"\"\"\n- return 1.0 - old_div((np.ma.abs(obs - mod)).sum(axis=axis), (np.ma.abs(obs - obs.mean(axis=axis))).sum(axis=axis))\n+ return 1.0 - old_div(\n+ (np.ma.abs(obs - mod)).sum(axis=axis),\n+ (np.ma.abs(obs - obs.mean(axis=axis))).sum(axis=axis))\n \n \n def IOA(obs, mod, axis=None):\n@@ -330,8 +363,10 @@ def IOA(obs, mod, axis=None):\n obsmean = obs.mean(axis=axis)\n if not axis is None:\n obsmean = np.expand_dims(obsmean, axis=axis)\n- return 1.0 - old_div((np.ma.abs(obs - mod) ** 2).sum(axis=axis), (\n- (np.ma.abs(mod - obsmean) + np.ma.abs(obs - obsmean)) ** 2).sum(axis=axis))\n+ return 1.0 - old_div(\n+ (np.ma.abs(obs - mod)**2).sum(axis=axis),\n+ ((np.ma.abs(mod - obsmean) + np.ma.abs(obs - obsmean)) **\n+ 2).sum(axis=axis))\n \n \n def circlebias(b):\n@@ -351,7 +386,9 @@ def WDIOA(obs, mod, axis=None):\n \n ohat = circlebias(obs - obsmean)\n \n- return 1.0 - old_div((np.ma.abs(b) ** 2).sum(axis=axis), ((np.ma.abs(bhat) + np.ma.abs(ohat)) ** 2).sum(axis=axis))\n+ return 1.0 - old_div(\n+ (np.ma.abs(b)**2).sum(axis=axis),\n+ ((np.ma.abs(bhat) + np.ma.abs(ohat))**2).sum(axis=axis))\n \n \n def AC(obs, mod, axis=None):\n@@ -360,7 +397,8 @@ def AC(obs, mod, axis=None):\n if not axis is None:\n obs_bar = np.expand_dims(obs_bar, axis=axis)\n p1 = ((mod - obs_bar) * (obs - obs_bar)).sum(axis=axis)\n- p2 = (((mod - obs_bar) ** 2).sum(axis=axis) * ((obs - obs_bar) ** 2).sum(axis=axis)) ** 0.5\n+ p2 = (((mod - obs_bar)**2).sum(axis=axis) * (\n+ (obs - 
obs_bar)**2).sum(axis=axis))**0.5\n return old_div(p1, p2)\n \n \n@@ -370,7 +408,8 @@ def WDAC(obs, mod, axis=None):\n if not axis is None:\n obs_bar = np.expand_dims(obs_bar, axis=axis)\n p1 = (circlebias(mod - obs_bar) * circlebias(obs - obs_bar)).sum(axis=axis)\n- p2 = ((circlebias(mod - obs_bar) ** 2).sum(axis=axis) * (circlebias(obs - obs_bar) ** 2).sum(axis=axis)) ** 0.5\n+ p2 = ((circlebias(mod - obs_bar)**2).sum(axis=axis) *\n+ (circlebias(obs - obs_bar)**2).sum(axis=axis))**0.5\n return old_div(p1, p2)\n \n \n@@ -410,8 +449,10 @@ def scores(obs, mod, minval, maxval=1.0e5):\n d['obs'] = obs\n d['mod'] = mod\n df = DataFrame(d)\n- ct = crosstab((df['mod'] > minval) & (df['mod'] < maxval), (df['obs'] > minval) & (df['obs'] < maxval),\n- margins=True)\n+ ct = crosstab(\n+ (df['mod'] > minval) & (df['mod'] < maxval),\n+ (df['obs'] > minval) & (df['obs'] < maxval),\n+ margins=True)\n # print ct\n a = ct[1][1].astype('float')\n b = ct[1][0].astype('float')\ndiff --git a/monet/util/resample.py b/monet/util/resample.py\nnew file mode 100644\n--- /dev/null\n+++ b/monet/util/resample.py\n@@ -0,0 +1,144 @@\n+from pyresample.kd_tree import XArrayResamplerNN\n+from pyresample.bilinear.xarr import XArrayResamplerBilinear\n+import xarray as xr\n+from pyresample.geometry import SwathDefinition, AreaDefinition\n+\n+\n+def _ensure_swathdef_compatibility(defin):\n+ \"\"\"Ensures the SwathDefinition is compatible with XArrayResamplerNN.\n+\n+ Parameters\n+ ----------\n+ defin : pyresample SwathDefinition\n+ a pyresample.geometry.SwathDefinition instance\n+\n+ Returns\n+ -------\n+ pyresample.geometry.SwathDefinition\n+ SwathDefinition with lons and lats as chunked xarray.DataArrays.\n+\n+ \"\"\"\n+ if isinstance(defin.lons, xr.DataArray):\n+ return defin # do nothing\n+ else:\n+ defin.lons = xr.DataArray(defin.lons, dims=['y', 'x']).chunk()\n+ defin.lats = xr.DataArray(defin.lats, dims=['y', 'x']).chunk()\n+ return defin\n+\n+\n+def _check_swath_or_area(defin):\n+ \"\"\"Checks for a SwathDefinition or AreaDefinition. 
If it is an\n+ AreaDefinition do nothing; otherwise ensure compatibility with\n+ XArrayResamplerNN.\n+\n+ Parameters\n+ ----------\n+ defin : pyresample SwathDefinition or AreaDefinition\n+ a pyresample SwathDefinition or AreaDefinition instance.\n+\n+ Returns\n+ -------\n+ pyresample.geometry\n+ SwathDefinition or AreaDefinition\n+\n+ \"\"\"\n+ try:\n+ if isinstance(defin, SwathDefinition):\n+ newswath = _ensure_swathdef_compatibility(defin)\n+ elif isinstance(defin, AreaDefinition):\n+ newswath = defin\n+ else:\n+ raise RuntimeError\n+ except RuntimeError:\n+ print('grid definition must be a pyresample SwathDefinition or '\n+ 'AreaDefinition')\n+ return\n+ return newswath\n+\n+\n+def _reformat_resampled_data(orig, new, target_grid):\n+ \"\"\"Reformats the resampled data array, filling in coords, name and attrs.\n+\n+ Parameters\n+ ----------\n+ orig : xarray.DataArray\n+ original input DataArray.\n+ new : xarray.DataArray\n+ resampled xarray.DataArray\n+ target_grid : pyresample.geometry\n+ target grid is the target SwathDefinition or AreaDefinition\n+\n+ Returns\n+ -------\n+ xarray.DataArray\n+ reformatted xarray.DataArray\n+\n+ \"\"\"\n+ target_lon, target_lat = target_grid.get_lonlats_dask()\n+ new.name = orig.name\n+ new['latitude'] = (('y', 'x'), target_lat)\n+ new['longitude'] = (('y', 'x'), target_lon)\n+ new.attrs['area'] = target_grid\n+ return new\n+\n+\n+def resample_dataset(data,\n+ target_grid,\n+ radius_of_influence=100e3,\n+ resample_cache=None,\n+ return_neighbor_info=False,\n+ neighbours=1,\n+ epsilon=0,\n+ interp='nearest'):\n+ # first get the source grid definition\n+ try:\n+ if 'area' in data.attrs:\n+ source_grid = data.attrs['area']\n+ else:\n+ raise RuntimeError\n+ except RuntimeError:\n+ print('Must include a pyresample.geometry area definition in '\n+ 'data.attrs under the area key')\n+ return\n+\n+ # check for SwathDefinition or AreaDefinition\n+ # if swath ensure it is xarray.DataArray and not numpy for chunking\n+ source_grid = _check_swath_or_area(source_grid)\n+\n+ # set kwargs for XArrayResamplerNN\n+ kwargs = dict(\n+ source_geo_def=source_grid,\n+ target_geo_def=target_grid,\n+ radius_of_influence=radius_of_influence,\n+ neighbours=neighbours,\n+ epsilon=epsilon)\n+ if interp == 'nearest':\n+ resampler = XArrayResamplerNN(**kwargs)\n+ else:\n+ resampler = XArrayResamplerBilinear(**kwargs)\n+\n+ # if resample_cache is None generate the neighbour info, else assume it\n+ # is a dict with keys [valid_input_index, valid_output_index,\n+ # index_array, distance_array]\n+ if resample_cache is None:\n+ valid_input_index, valid_output_index, index_array, distance_array = resampler.get_neighbour_info(\n+ )\n+ else:\n+ valid_input_index = resampler.valid_input_index = resample_cache[\n+ 'valid_input_index']\n+ valid_output_index = resampler.valid_output_index = resample_cache[\n+ 'valid_output_index']\n+ index_array = resampler.index_array = resample_cache['index_array']\n+ distance_array = resampler.distance_array = resample_cache[\n+ 'distance_array']\n+\n+ # now store the resampled data temporarily in temp\n+ temp = resampler.get_sample_from_neighbour_info(data)\n+\n+ # reformat data from temp\n+ out = _reformat_resampled_data(data, temp, target_grid)\n+ if return_neighbor_info:\n+ resample_cache = dict(\n+ valid_input_index=valid_input_index,\n+ valid_output_index=valid_output_index,\n+ index_array=index_array,\n+ distance_array=distance_array)\n+ return out, resample_cache\n+ else:\n+ return out\ndiff --git a/monet/util/tools.py b/monet/util/tools.py\n--- a/monet/util/tools.py\n+++ b/monet/util/tools.py\n@@ -3,7 +3,6 @@\n from builtins import range\n \n import numpy as np\n-from past.utils import 
old_div\n \n __author__ = 'barry'\n \n@@ -74,3 +73,16 @@ def wsdir2uv(ws, wdir):\n u = -ws * sin(wdir * pi / 180.)\n v = -ws * cos(wdir * pi / 180.)\n return u, v\n+\n+def long_to_wide(df):\n+ from pandas import Series, merge\n+ w = df.pivot_table(\n+ values='obs', index=['time', 'siteid'],\n+ columns='variable').reset_index()\n+ cols = Series(df.columns)\n+ g = df.groupby('variable')\n+ for name, group in g:\n+ w[name + '_unit'] = group.units.unique()[0]\n+ #mergeon = hstack((index.values, df.variable.unique()))\n+ return merge(w, df, on=['siteid', 'time'])\n+\ndiff --git a/monet/verification/__init__.py b/monet/verification/__init__.py\n--- a/monet/verification/__init__.py\n+++ b/monet/verification/__init__.py\n@@ -1,7 +1,7 @@\n from __future__ import absolute_import, print_function\n \n-from . import combine, interpolation, verify\n+from . import verify\n \n-__all__ = ['combine', 'interpolation', 'verify']\n+__all__ = ['verify']\n \n __name__ = 'verification'\ndiff --git a/monet/verification/verify.py b/monet/verification/verify.py\n--- a/monet/verification/verify.py\n+++ b/monet/verification/verify.py\n@@ -1,204 +1,436 @@\n-from __future__ import absolute_import, print_function\n+\"\"\"This needs to be modularized for plotting\"\"\"\n+# from __future__ import absolute_import, print_function\n+#\n+# from builtins import object\n+#\n+# import pandas as pd\n+#\n+# from ..plots import plots\n+#\n+#\n+# class VERIFY(object):\n+# def __init__(self, input, obs=None, model=None):\n+# self.dset = input\n+# self.obs = obs\n+# self.model = model\n+# self.default_scatter_args = {'s': 20, 'edgecolors': 'w', 'lw': .25}\n+#\n+# def point(self,\n+# param,\n+# plot_type=None,\n+# label=None,\n+# title=None,\n+# ax=None,\n+# plotargs={},\n+# fillargs={'alpha': .2},\n+# marker='o',\n+# **kwargs):\n+# if isinstance(self.dset, pd.DataFrame):\n+# if self.obs.objtype is 'AQS' or self.obs.objtype is 'AirNow':\n+# df, title = self.subset_epa(self.dset, param, **kwargs)\n+# elif self.obs.objtype is 'CRN' or self.obs.objtype is 'ISH':\n+# df, title = self.subset_crn(self.dset, **kwargs)\n+# else:\n+# df = self.pair\n+# if title is not None:\n+# title = ''\n+# df.index = df.time\n+# print(plot_type)\n+# if plot_type.lower() == 'timeseries':\n+# ax = self._point_plot(\n+# df,\n+# col1='model',\n+# label=label,\n+# title=title,\n+# timeseries=True,\n+# plotargs=plotargs,\n+# fillargs=fillargs,\n+# ax=ax)\n+# plotargs['color'] = 'darkslategrey'\n+# fillargs['color'] = 'darkslategrey'\n+# ax = self._point_plot(\n+# df,\n+# col1='obs',\n+# ax=ax,\n+# title=title,\n+# timeseries=True,\n+# plotargs=plotargs,\n+# fillargs=fillargs)\n+# elif plot_type.lower() == 'scatter':\n+# kwargs['x'] = 'obs'\n+# kwargs['y'] = 'model'\n+# ax = self._point_plot(\n+# df,\n+# col1='obs',\n+# col2='model',\n+# label=label,\n+# title=title,\n+# scatter=True)\n+# elif plot_type.lower == 'box':\n+# ax = self._point_plot(\n+# df,\n+# col1='obs',\n+# col2='model',\n+# label=label,\n+# title=title,\n+# box=True,\n+# plotargs=plotargs)\n+# elif plot_type.lower() == 'pdf':\n+# ax = self._point_plot(\n+# df,\n+# col1='model',\n+# label=label,\n+# title=title,\n+# pdf=True,\n+# plotargs=plotargs,\n+# ax=ax)\n+# plotargs['color'] = 'darkslategrey'\n+# ax = self._point_plot(\n+# df,\n+# col1='obs',\n+# label=self.obs.objtype,\n+# title=title,\n+# pdf=True,\n+# plotargs=plotargs,\n+# ax=ax)\n+# elif plot_type.lower() == 'taylor':\n+# ax = self._point_plot(\n+# df,\n+# col1='model',\n+# label=label,\n+# title=title,\n+# taylor=True,\n+# 
plotargs=plotargs,\n+# fillargs=fillargs,\n+# marker=marker)\n+# # elif\n+# return ax\n+#\n+# def _point_plot(self,\n+# df,\n+# label=None,\n+# title=None,\n+# ax=None,\n+# plotargs={},\n+# fillargs={},\n+# timeseries=False,\n+# scatter=False,\n+# pdf=False,\n+# taylor=False,\n+# box=False,\n+# col1=None,\n+# col2=None,\n+# marker='o',\n+# **kwargs):\n+# import matplotlib.pyplot as plt\n+# if timeseries:\n+# ax = plots.timeseries(\n+# df,\n+# y=col1,\n+# title=title,\n+# label=label,\n+# ax=ax,\n+# plotargs=plotargs,\n+# fillargs=fillargs)\n+# return ax\n+# if scatter:\n+# ax = plots.scatter(\n+# df, x=col1, y=col2, title=title, label=label, ax=ax, **kwargs)\n+# return ax\n+# if pdf:\n+# ax = plots.kdeplot(\n+# df[col1], title=title, label=label, ax=ax, **plotargs)\n+# ax.set_xlabel(df.variable.unique()[0] + ' (' +\n+# df.units.unique()[0] + ')')\n+# return ax\n+# if taylor:\n+# if marker is None:\n+# marker = 'o'\n+# if ax is None:\n+# dia = plots.taylordiagram(\n+# df, label=label, dia=ax, addon=False, marker=marker)\n+# return dia\n+# else:\n+# dia = plots.taylordiagram(\n+# df, label=label, dia=ax, addon=True, marker=marker)\n+# plt.legend()\n+# return dia\n+#\n+# def compare_surface(self, **kwargs):\n+# \"\"\"Short summary.\n+#\n+# Parameters\n+# ----------\n+# **kwargs : type\n+# Description of parameter `**kwargs`.\n+#\n+# Returns\n+# -------\n+# type\n+# Description of returned object.\n+#\n+# \"\"\"\n+# if (self.obs.objtype is 'AQS' or self.obs.objtype is 'AirNow') and (\n+# self.model.objtype is 'CMAQ' or self.model.objtype is 'CAMX'):\n+# self.compare_epa(**kwargs)\n+#\n+# def compare_spatial(self, **kwargs):\n+# \"\"\"Short summary.\n+#\n+# Parameters\n+# ----------\n+# **kwargs : type\n+# Description of parameter `**kwargs`.\n+#\n+# Returns\n+# -------\n+# type\n+# Description of returned object.\n+#\n+# \"\"\"\n+# if (self.obs.objtype is 'AQS' or self.obs.objtype is 'AIRNOW') and (\n+# self.model.objtype is 'CMAQ' or self.model.objtype is 'CAMX'):\n+# self.compare_epa_spatial(**kwargs)\n+#\n+# def compare_epa_spatial(self,\n+# model_param='O3',\n+# param='OZONE',\n+# date=None,\n+# imshow_args={},\n+# scatter_args={\n+# 's': 20,\n+# 'edgecolors': 'w',\n+# 'lw': .25\n+# },\n+# barbs_args={},\n+# barbs=False,\n+# Obs=True,\n+# ncolors=None,\n+# discrete=False,\n+# lay=None):\n+# \"\"\"Short summary.\n+#\n+# Parameters\n+# ----------\n+# model_param : type\n+# Description of parameter `model_param` (the default is 'O3').\n+# param : type\n+# Description of parameter `param` (the default is 'OZONE').\n+# date : type\n+# Description of parameter `date` (the default is None).\n+# imshow_args : type\n+# Description of parameter `imshow_args` (the default is {}).\n+# scatter_args : type\n+# Description of parameter `scatter_args` (the default is {'s': 20).\n+# 'edgecolors': 'w' : type\n+# Description of parameter `'edgecolors': 'w'`.\n+# 'lw': .25} : type\n+# Description of parameter `'lw': .25}`.\n+# barbs_args : type\n+# Description of parameter `barbs_args` (the default is {}).\n+# barbs : type\n+# Description of parameter `barbs` (the default is False).\n+# Obs : type\n+# Description of parameter `Obs` (the default is True).\n+# ncolors : type\n+# Description of parameter `ncolors` (the default is None).\n+# discrete : type\n+# Description of parameter `discrete` (the default is False).\n+# lay : type\n+# Description of parameter `lay` (the default is None).\n+#\n+# Returns\n+# -------\n+# type\n+# Description of returned object.\n+#\n+# :param param: variable Parameter: 
Acceptable variable: 'OZONE' 'PM2.5' 'CO' 'NOY' 'SO2' 'SO2' 'NOX'\n+# :param region: EPA Region: 'Northeast', 'Southeast', 'North_Central', 'South_Central', 'Rockies', 'Pacific'\n+# :param date: If not supplied will plot all time. Put in 'YYYY-MM-DD HH:MM' for single time\n+# :return:\n+# \"\"\"\n+# from numpy import where\n+# if Obs:\n+# try:\n+# g = self.obs.df.groupby('variable')\n+# df2 = g.get_group(param)\n+# except KeyError:\n+# print(param + ' variable not available!!!!')\n+# exit\n+# param = param.upper()\n+# v = self.model.get_var(param=model_param, lay=lay)\n+# m = self.cmaq.map\n+# dts = v.time.to_index()\n+# if isinstance(date, type(None)):\n+# index = where(dts == dts[0])[0][0]\n+# else:\n+# index = where(dts.isin([date]))[0][0]\n+# f, ax, c, cmap, vmin, vmax = plots.make_spatial_plot2(\n+# self.model.dset[index, :, :].squeeze(),\n+# m,\n+# plotargs=imshow_args,\n+# ncolors=ncolors,\n+# discrete=discrete)\n+# plt.tight_layout()\n+# if Obs:\n+# scatter_args['vmin'] = vmin\n+# scatter_args['vmax'] = vmax\n+# scatter_args['cmap'] = cmap\n+# df2 = df2.loc[df2.datetime == dts[index]]\n+# plots.spatial_scatter(df2, m, plotargs=scatter_args)\n+# c.set_label(param + ' (' + g.get_group(param).Units.unique()[0] +\n+# ')')\n+#\n+# @staticmethod\n+# def subset_epa(df,\n+# param,\n+# site=None,\n+# city=None,\n+# state=None,\n+# region=None,\n+# epa_region=None):\n+# from ..obs.epa_util import get_epa_location_df\n+# if site is None and city is None and state is None and region is None and epa_region is None:\n+# df2 = df.copy()\n+# title = ' '\n+# else:\n+# df2, title = get_epa_location_df(\n+# df.copy(),\n+# param,\n+# site=site,\n+# city=city,\n+# state=state,\n+# region=region,\n+# epa_region=epa_region)\n+# return df2, title\n+#\n+# def compare_epa(self,\n+# param='OZONE',\n+# site='',\n+# city='',\n+# state='',\n+# epa_region='',\n+# region='',\n+# timeseries=False,\n+# scatter=False,\n+# pdfs=False,\n+# diffscatter=False,\n+# diffpdfs=False,\n+# timeseries_rmse=False,\n+# timeseries_mb=False,\n+# taylordiagram=False,\n+# ax=None,\n+# label=None,\n+# footer=False,\n+# dia=None,\n+# marker=None):\n+# \"\"\"Short summary.\n+#\n+# Parameters\n+# ----------\n+# param : type\n+# Description of parameter `param` (the default is 'OZONE').\n+# site : type\n+# Description of parameter `site` (the default is '').\n+# city : type\n+# Description of parameter `city` (the default is '').\n+# state : type\n+# Description of parameter `state` (the default is '').\n+# epa_region : type\n+# Description of parameter `epa_region` (the default is '').\n+# region : type\n+# Description of parameter `region` (the default is '').\n+# timeseries : type\n+# Description of parameter `timeseries` (the default is False).\n+# scatter : type\n+# Description of parameter `scatter` (the default is False).\n+# pdfs : type\n+# Description of parameter `pdfs` (the default is False).\n+# diffscatter : type\n+# Description of parameter `diffscatter` (the default is False).\n+# diffpdfs : type\n+# Description of parameter `diffpdfs` (the default is False).\n+# timeseries_rmse : type\n+# Description of parameter `timeseries_rmse` (the default is False).\n+# timeseries_mb : type\n+# Description of parameter `timeseries_mb` (the default is False).\n+# taylordiagram : type\n+# Description of parameter `taylordiagram` (the default is False).\n+# ax : type\n+# Description of parameter `ax` (the default is None).\n+# label : type\n+# Description of parameter `label` (the default is None).\n+# footer : type\n+# Description of 
parameter `footer` (the default is False).\n+# dia : type\n+# Description of parameter `dia` (the default is None).\n+# marker : type\n+# Description of parameter `marker` (the default is None).\n+#\n+# Returns\n+# -------\n+# type\n+# Description of returned object.\n+#\n+# \"\"\"\n+# from numpy import NaN\n+# from ..obs.epa_util import get_epa_location_df\n+# df2, title = get_epa_location_df(\n+# self.dset.copy(),\n+# param,\n+# site=site,\n+# city=city,\n+# state=state,\n+# region=region,\n+# epa_region=epa_region)\n+# df2 = df2.groupby('variable').get_group(param)\n+# if timeseries:\n+# if ax is None:\n+# ax = plots.timeseries_param(\n+# df2,\n+# col='Obs',\n+# title=title,\n+# label=label,\n+# ax=ax,\n+# plotargs={'color': 'darkslategrey'},\n+# fillargs={\n+# 'color': 'darkslategrey',\n+# 'alpha': .2\n+# })\n+# ax = plots.timeseries_param(\n+# df2,\n+# col='model',\n+# title=title,\n+# label=label,\n+# ax=ax,\n+# fillargs={'alpha': .2})\n+# if scatter:\n+# plots.scatter_param(\n+# df2, title=title, label=label, fig=fig, footer=footer)\n+# if pdfs:\n+# plots.kdeplots_param(\n+# df2, title=title, label=label, fig=fig, footer=footer)\n+# if diffscatter:\n+# plots.diffscatter_param(df2, title=title)\n+# if diffpdfs:\n+# plots.diffpdfs_param(\n+# df2, title=title, label=label, fig=fig, footer=footer)\n+# if timeseries_rmse:\n+# plots.timeseries_rmse_param(\n+# df2, title=title, label=label, fig=fig, footer=footer)\n+# if timeseries_mb:\n+# plots.timeseries_mb_param(\n+# df2, title=title, label=label, fig=fig, footer=footer)\n+# if taylordiagram:\n+# if marker is None:\n+# marker = 'o'\n+# if fig is None:\n+# dia = plots.taylordiagram(\n+# df2, label=label, dia=dia, addon=False, marker=marker)\n+# return dia\n+# else:\n+# dia = plots.taylordiagram(\n+# df2, label=label, dia=dia, addon=True, marker=marker)\n+# plt.legend()\n+# return dia\n \n-from builtins import object\n-\n-from ..plots import plots\n-\n-\n-class VERIFY(object):\n- def __init__(self, model=None, obs=None, dset=None):\n- self.model = model\n- self.obs = obs\n- self.dset = dset\n-\n- def compare_surface(self, **kwargs):\n- \"\"\"Short summary.\n-\n- Parameters\n- ----------\n- **kwargs : type\n- Description of parameter `**kwargs`.\n-\n- Returns\n- -------\n- type\n- Description of returned object.\n-\n- \"\"\"\n- if (self.obs.objtype is 'AQS' or self.obs.objtype is 'AirNow') and (\n- self.model.objtype is 'CMAQ' or self.model.objtype is 'CAMX'):\n- self.compare_epa(**kwargs)\n-\n- def compare_spatial(self, **kwargs):\n- \"\"\"Short summary.\n-\n- Parameters\n- ----------\n- **kwargs : type\n- Description of parameter `**kwargs`.\n-\n- Returns\n- -------\n- type\n- Description of returned object.\n-\n- \"\"\"\n- if (obs.objtype is 'AQS' or obs.objtype is 'AIRNOW') and (model.objtype is 'CMAQ' or model.objtype is 'CAMX'):\n- self.compare_epa_spatial(**kwargs)\n-\n- def compare_epa_spatial(self, model_param='O3', param='OZONE', date=None, imshow_args={},\n- scatter_args={'s': 20, 'edgecolors': 'w', 'lw': .25}, barbs_args={}, barbs=False, Obs=True,\n- ncolors=None, discrete=False, lay=None):\n- \"\"\"Short summary.\n-\n- Parameters\n- ----------\n- model_param : type\n- Description of parameter `model_param` (the default is 'O3').\n- param : type\n- Description of parameter `param` (the default is 'OZONE').\n- date : type\n- Description of parameter `date` (the default is None).\n- imshow_args : type\n- Description of parameter `imshow_args` (the default is {}).\n- scatter_args : type\n- Description of parameter `scatter_args` 
(the default is {'s': 20).\n- 'edgecolors': 'w' : type\n- Description of parameter `'edgecolors': 'w'`.\n- 'lw': .25} : type\n- Description of parameter `'lw': .25}`.\n- barbs_args : type\n- Description of parameter `barbs_args` (the default is {}).\n- barbs : type\n- Description of parameter `barbs` (the default is False).\n- Obs : type\n- Description of parameter `Obs` (the default is True).\n- ncolors : type\n- Description of parameter `ncolors` (the default is None).\n- discrete : type\n- Description of parameter `discrete` (the default is False).\n- lay : type\n- Description of parameter `lay` (the default is None).\n-\n- Returns\n- -------\n- type\n- Description of returned object.\n-\n- :param param: Species Parameter: Acceptable Species: 'OZONE' 'PM2.5' 'CO' 'NOY' 'SO2' 'SO2' 'NOX'\n- :param region: EPA Region: 'Northeast', 'Southeast', 'North_Central', 'South_Central', 'Rockies', 'Pacific'\n- :param date: If not supplied will plot all time. Put in 'YYYY-MM-DD HH:MM' for single time\n- :return:\n- \"\"\"\n- if Obs:\n- try:\n- g = df.groupby('Species')\n- df2 = g.get_group(param)\n- except KeyError:\n- print(param + ' Species not available!!!!')\n- exit\n- param = param.upper()\n- v = self.model.get_var(param=model_param, lay=lay)\n- m = self.cmaq.map\n- dts = v.time.to_index()\n- if isinstance(date, type(None)):\n- index = where(dts == dts[0])[0][0]\n- else:\n- index = where(dts.isin([date]))[0][0]\n- f, ax, c, cmap, vmin, vmax = plots.make_spatial_plot2(cmaq[index, :, :].squeeze(), m, plotargs=imshow_args,\n- ncolors=ncolors, discrete=discrete)\n- plt.tight_layout()\n- if Obs:\n- scatter_args['vmin'] = vmin\n- scatter_args['vmax'] = vmax\n- scatter_args['cmap'] = cmap\n- df2 = df2.loc[df2.datetime == dts[index]]\n- plots.spatial_scatter(df2, m, plotargs=scatter_args)\n- c.set_label(param + ' (' + g.get_group(param).Units.unique()[0] + ')')\n-\n- def compare_epa(self, param='OZONE', site='', city='', state='', epa_region='', region='', timeseries=False,\n- scatter=False, pdfs=False, diffscatter=False, diffpdfs=False, timeseries_rmse=False,\n- timeseries_mb=False,\n- taylordiagram=False, ax=None, label=None, footer=False, dia=None, marker=None):\n- \"\"\"Short summary.\n-\n- Parameters\n- ----------\n- param : type\n- Description of parameter `param` (the default is 'OZONE').\n- site : type\n- Description of parameter `site` (the default is '').\n- city : type\n- Description of parameter `city` (the default is '').\n- state : type\n- Description of parameter `state` (the default is '').\n- epa_region : type\n- Description of parameter `epa_region` (the default is '').\n- region : type\n- Description of parameter `region` (the default is '').\n- timeseries : type\n- Description of parameter `timeseries` (the default is False).\n- scatter : type\n- Description of parameter `scatter` (the default is False).\n- pdfs : type\n- Description of parameter `pdfs` (the default is False).\n- diffscatter : type\n- Description of parameter `diffscatter` (the default is False).\n- diffpdfs : type\n- Description of parameter `diffpdfs` (the default is False).\n- timeseries_rmse : type\n- Description of parameter `timeseries_rmse` (the default is False).\n- timeseries_mb : type\n- Description of parameter `timeseries_mb` (the default is False).\n- taylordiagram : type\n- Description of parameter `taylordiagram` (the default is False).\n- ax : type\n- Description of parameter `ax` (the default is None).\n- label : type\n- Description of parameter `label` (the default is None).\n- footer : type\n- 
Description of parameter `footer` (the default is False).\n- dia : type\n- Description of parameter `dia` (the default is None).\n- marker : type\n- Description of parameter `marker` (the default is None).\n-\n- Returns\n- -------\n- type\n- Description of returned object.\n-\n- \"\"\"\n- from numpy import NaN\n- from ..obs.epa_util import get_epa_location_df\n- df2, title = get_epa_location_df(self.dset.copy(), param, site=site, city=city, state=state, region=region,\n- epa_region=epa_region)\n- df2 = df2.groupby('Species').get_group(param)\n- if timeseries:\n- if ax is None:\n- ax = plots.timeseries_param(df2, col='Obs', title=title, label=label, ax=ax,\n- plotargs={'color': 'darkslategrey'},\n- fillargs={'color': 'darkslategrey', 'alpha': .2})\n- ax = plots.timeseries_param(df2, col='model', title=title, label=label, ax=ax, fillargs={'alpha': .2})\n- if scatter:\n- plots.scatter_param(df2, title=title, label=label, fig=fig, footer=footer)\n- if pdfs:\n- plots.kdeplots_param(df2, title=title, label=label, fig=fig, footer=footer)\n- if diffscatter:\n- plots.diffscatter_param(df2, title=title)\n- if diffpdfs:\n- plots.diffpdfs_param(df2, title=title, label=label, fig=fig, footer=footer)\n- if timeseries_rmse:\n- plots.timeseries_rmse_param(df2, title=title, label=label, fig=fig, footer=footer)\n- if timeseries_mb:\n- plots.timeseries_mb_param(df2, title=title, label=label, fig=fig, footer=footer)\n- if taylordiagram:\n- if marker is None:\n- marker = 'o'\n- if fig is None:\n- dia = plots.taylordiagram(df2, label=label, dia=dia, addon=False, marker=marker)\n- return dia\n- else:\n- dia = plots.taylordiagram(df2, label=label, dia=dia, addon=True, marker=marker)\n- plt.legend()\n- return dia\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -11,7 +11,7 @@\n \n setup(\n name='monet',\n- version='1.2',\n+ version='2.0',\n url='https://github.com/noaa-oar-arl/MONET',\n license='MIT',\n author='Barry D. Baker',\n@@ -19,8 +19,16 @@\n maintainer='Barry Baker',\n maintainer_email='barry.baker@noaa.gov',\n packages=find_packages(),\n- keywords=['model','verification','hysplit','cmaq','atmosphere','camx','evaluation'],\n+ keywords=[\n+ 'model', 'verification', 'hysplit', 'cmaq', 'atmosphere', 'camx',\n+ 'evaluation'\n+ ],\n description='The Model and Observation Evaluation Toolkit (MONET)',\n- install_requires=['numpy', 'pandas', 'wget', 'pyresample', 'netcdf4', 'pynio', 'xarray', 'dask', 'matplotlib', 'seaborn', 'pseudonetcdf'],\n- dependency_links=[\"git+ssh://git@github.com/barronh/pseudonetcdf.git@develop\", \"git+ssh://git@github.com/barronh/xarray.git@pnc-backend\"]\n-)\n+ install_requires=[\n+ 'numpy', 'pandas', 'pyresample', 'netcdf4', 'xarray', 'dask',\n+ 'matplotlib', 'seaborn', 'pseudonetcdf', 'cartopy', 'future', 'sphinx',\n+ 'pandoc'\n+ ],\n+ dependency_links=[\n+ \"git+ssh://git@github.com/barronh/pseudonetcdf.git\",\n+ ])\n", "test_patch": "", "problem_statement": "", "hints_text": "", "created_at": "2018-09-04T14:29:21Z"}
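A minimal usage sketch for the new timeseries helper added to the plots module in the patch above. The import path monet.plots.plots.timeseries is an assumption, as is the reliance on the 2018-era pandas behaviour of dropping non-numeric columns in a groupby mean; the time/obs/variable/units columns mirror what the helper reads.

import numpy as np
import pandas as pd
from monet.plots.plots import timeseries  # assumed import path

# three hypothetical sites reporting hourly ozone in long format
times = pd.date_range('2018-01-01', periods=24, freq='H')
df = pd.DataFrame({
    'time': np.tile(times, 3),
    'obs': np.random.rand(72) * 50.,
    'variable': 'OZONE',
    'units': 'ppb',
})

# draws the hourly mean across sites with a mean +/- 1 std fill
ax = timeseries(df, x='time', y='obs', label='AirNow')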
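The TaylorDiagram class and taylordiagram wrapper reformatted in the patch can be exercised directly. A sketch on synthetic data, assuming monet and its plotting dependencies are installed and the class is importable from monet.plots.taylordiagram; every method call mirrors one made in the patch itself.

import numpy as np
import matplotlib.pyplot as plt
from monet.plots import taylordiagram as td  # assumed import path

obs = np.random.rand(100) * 40.
model = obs + np.random.randn(100) * 5.  # noisy "model" tied to obs

f = plt.figure(figsize=(12, 10))
# reference point sits at the observed standard deviation
dia = td.TaylorDiagram(obs.std(), fig=f, rect=111, label='Obs')
cc = np.corrcoef(obs, model)[0, 1]  # correlation sets the angular coordinate
dia.add_sample(model.std(), cc, marker='o', ls='', label='CMAQ')
contours = dia.add_contours(colors='0.5')  # centered RMS difference contours
plt.clabel(contours, inline=1, fontsize=10)
plt.legend(fontsize='small', loc='best')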
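The interp_util and resample modules added in the patch build pyresample geometries and wrap its xarray resamplers. A self-contained sketch of the same swath-to-grid pattern using pyresample's plain kd-tree API; the 5x5 swath, the grid numbers and the 'demo' names are invented, while utils.get_area_def (the same call the patch uses) and kd_tree.resample_nearest are standard pyresample calls of that era.

import numpy as np
from pyresample import kd_tree, utils
from pyresample.geometry import SwathDefinition

# a made-up 5x5 swath over the central US
lons, lats = np.meshgrid(np.linspace(-110., -90., 5), np.linspace(30., 45., 5))
data = np.random.rand(5, 5)
source = SwathDefinition(lons=lons, lats=lats)

# target grid built like get_gfs_area_def above, just regional and coarse
proj4_args = ('+proj=eqc +lat_ts=0 +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 '
              '+a=6378137 +b=6378137 +units=m')
area_extent = (-12.3e6, 3.3e6, -10.0e6, 5.1e6)  # rough metre bounds of the swath
target = utils.get_area_def('demo', 'demo grid', 'demo', proj4_args, 50, 50,
                            area_extent)

# nearest-neighbour swath -> grid with the same radius_of_influence default
# as resample_dataset in the patch
result = kd_tree.resample_nearest(
    source, data, target, radius_of_influence=100e3, fill_value=np.nan)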
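The mystats changes in the patch are reformatting only; each metric remains a masked-array expression over paired obs and model values. A worked example of two of the formulas (copied from MNB and RMSE) on invented numbers, showing how the masked NaN is dropped from the statistic.

import numpy as np

obs = np.ma.masked_invalid([30., 35., np.nan, 40.])
mod = np.ma.masked_invalid([33., 31., 38., 44.])

# Mean Normalized Bias (%): mean of (mod - obs) / obs over unmasked pairs
mnb = np.ma.masked_invalid((mod - obs) / obs).mean() * 100.

# Root Mean Square Error (model unit)
rmse = np.ma.sqrt(((mod - obs) ** 2).mean())

print(mnb, rmse)  # about 2.86 and 3.70 for these values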
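long_to_wide, added to monet/util/tools.py in the patch, pivots the long obs table (one row per time, site and variable) into one column per variable and attaches each variable's unit before merging back. The same steps on a toy frame, using only pandas and invented values.

import pandas as pd

df = pd.DataFrame({
    'time': pd.to_datetime(['2018-01-01 00:00'] * 4),
    'siteid': ['A', 'A', 'B', 'B'],
    'variable': ['OZONE', 'PM2.5', 'OZONE', 'PM2.5'],
    'obs': [40., 12., 38., 15.],
    'units': ['ppb', 'ug/m3', 'ppb', 'ug/m3'],
})

# one column per variable, indexed by time and site
w = df.pivot_table(
    values='obs', index=['time', 'siteid'], columns='variable').reset_index()

# attach each variable's unit as its own column, as long_to_wide does
for name, group in df.groupby('variable'):
    w[name + '_unit'] = group.units.unique()[0]

wide = pd.merge(w, df, on=['siteid', 'time'])
print(wide)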