_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 31 13.1k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q23500 | Env.cache_url | train | def cache_url(self, var=DEFAULT_CACHE_ENV, default=NOTSET, backend=None):
"""Returns a config dictionary, defaulting to | python | {
"resource": ""
} |
q23501 | Env.email_url | train | def email_url(self, var=DEFAULT_EMAIL_ENV, default=NOTSET, backend=None):
"""Returns a config dictionary, defaulting to | python | {
"resource": ""
} |
q23502 | Env.search_url | train | def search_url(self, var=DEFAULT_SEARCH_ENV, default=NOTSET, engine=None):
"""Returns a config dictionary, defaulting to SEARCH_URL.
:rtype: dict
| python | {
"resource": ""
} |
q23503 | Env.parse_value | train | def parse_value(cls, value, cast):
"""Parse and cast provided value
:param value: Stringed value.
:param cast: Type to cast return value as.
:returns: Casted value
"""
if cast is None:
return value
elif cast is bool:
try:
value = int(value) != 0
except ValueError:
value = value.lower() in cls.BOOLEAN_TRUE_STRINGS
elif isinstance(cast, list):
value = list(map(cast[0], [x for x in value.split(',') if x]))
elif isinstance(cast, tuple):
val = value.strip('(').strip(')').split(',')
value = tuple(map(cast[0], [x for x in val if x]))
elif isinstance(cast, dict):
key_cast = cast.get('key', str)
value_cast = cast.get('value', str)
value_cast_by_key = cast.get('cast', dict())
value = dict(map(
lambda kv: (
key_cast(kv[0]),
cls.parse_value(kv[1], value_cast_by_key.get(kv[0], value_cast))
),
[val.split('=') for val in value.split(';') if val]
| python | {
"resource": ""
} |
q23504 | Env.db_url_config | train | def db_url_config(cls, url, engine=None):
"""Pulled from DJ-Database-URL, parse an arbitrary Database URL.
Support currently exists for PostgreSQL, PostGIS, MySQL, Oracle and SQLite.
SQLite connects to file based databases. The same URL format is used, omitting the hostname,
and using the "file" portion as the filename of the database.
This has the effect of four slashes being present for an absolute file path:
>>> from environ import Env
>>> Env.db_url_config('sqlite:////full/path/to/your/file.sqlite')
{'ENGINE': 'django.db.backends.sqlite3', 'HOST': '', 'NAME': '/full/path/to/your/file.sqlite', 'PASSWORD': '', 'PORT': '', 'USER': ''}
>>> Env.db_url_config('postgres://uf07k1i6d8ia0v:wegauwhgeuioweg@ec2-107-21-253-135.compute-1.amazonaws.com:5431/d8r82722r2kuvn')
{'ENGINE': 'django.db.backends.postgresql', 'HOST': 'ec2-107-21-253-135.compute-1.amazonaws.com', 'NAME': 'd8r82722r2kuvn', 'PASSWORD': 'wegauwhgeuioweg', 'PORT': 5431, 'USER': 'uf07k1i6d8ia0v'}
"""
if not isinstance(url, cls.URL_CLASS):
if url == 'sqlite://:memory:':
# this is a special case, because if we pass this URL into
# urlparse, urlparse will choke trying to interpret "memory"
# as a port number
return {
'ENGINE': cls.DB_SCHEMES['sqlite'],
'NAME': ':memory:'
}
# note: no other settings are required for sqlite
url = urlparse(url)
config = {}
# Remove query strings.
path = url.path[1:]
path = unquote_plus(path.split('?', 2)[0])
if url.scheme == 'sqlite':
if path == '':
# if we are using sqlite and we have no path, then assume we
# want an in-memory database (this is the behaviour of sqlalchemy)
path = ':memory:'
if url.netloc:
warnings.warn(
'SQLite URL contains host component %r, it will be ignored' % url.netloc, stacklevel=3)
if url.scheme == 'ldap':
path = '{scheme}://{hostname}'.format(scheme=url.scheme, hostname=url.hostname)
if url.port:
path += ':{port}'.format(port=url.port)
# Update with environment | python | {
"resource": ""
} |
q23505 | Env.cache_url_config | train | def cache_url_config(cls, url, backend=None):
"""Pulled from DJ-Cache-URL, parse an arbitrary Cache URL.
:param url:
:param backend:
:return:
"""
url = urlparse(url) if not isinstance(url, cls.URL_CLASS) else url
location = url.netloc.split(',')
if len(location) == 1:
location = location[0]
config = {
'BACKEND': cls.CACHE_SCHEMES[url.scheme],
'LOCATION': location,
}
# Add the drive to LOCATION
if url.scheme == 'filecache':
config.update({
'LOCATION': url.netloc + url.path,
})
if url.path and url.scheme in ['memcache', 'pymemcache']:
config.update({
'LOCATION': 'unix:' + url.path,
})
elif url.scheme.startswith('redis'):
if url.hostname:
scheme = url.scheme.replace('cache', '')
else:
scheme = 'unix'
| python | {
"resource": ""
} |
q23506 | Path.path | train | def path(self, *paths, **kwargs):
"""Create new Path based on self.root and provided paths.
:param paths: List of sub paths
:param kwargs: required=False
| python | {
"resource": ""
} |
q23507 | warn_deprecated | train | def warn_deprecated(since, message='', name='', alternative='', pending=False,
obj_type='attribute', addendum=''):
"""Display deprecation warning in a standard way.
Parameters
----------
since : str
The release at which this API became deprecated.
message : str, optional
Override the default deprecation message. The format
specifier `%(name)s` may be used for the name of the function,
and `%(alternative)s` may be used in the deprecation message
to insert the name of an alternative to the deprecated
function. `%(obj_type)s` may be used to insert a friendly name
for the type of object being deprecated.
name : str, optional
The name of the deprecated object.
alternative : str, optional
An alternative function that the user may use in place of the
deprecated function. The deprecation warning will tell the user
about this alternative if provided.
pending : bool, optional
If True, uses a PendingDeprecationWarning instead of a
DeprecationWarning.
obj_type : str, optional
The object type being deprecated.
addendum | python | {
"resource": ""
} |
q23508 | generate_grid | train | def generate_grid(horiz_dim, bbox):
r"""Generate a meshgrid based on bounding box and x & y resolution.
Parameters
----------
horiz_dim: integer
Horizontal resolution
bbox: dictionary
Dictionary containing coordinates for corners of study area.
Returns
| python | {
"resource": ""
} |
q23509 | generate_grid_coords | train | def generate_grid_coords(gx, gy):
r"""Calculate x,y coordinates of each grid cell.
Parameters
----------
gx: numeric
x coordinates in meshgrid
gy: numeric
y coordinates in meshgrid
Returns
-------
| python | {
"resource": ""
} |
q23510 | get_xy_range | train | def get_xy_range(bbox):
r"""Return x and y ranges in meters based on bounding box.
bbox: dictionary
dictionary containing coordinates for corners of study area
Returns
-------
x_range: float
Range in meters in x dimension.
y_range: float | python | {
"resource": ""
} |
q23511 | get_xy_steps | train | def get_xy_steps(bbox, h_dim):
r"""Return meshgrid spacing based on bounding box.
bbox: dictionary
Dictionary containing coordinates for corners of study area.
h_dim: integer
Horizontal resolution in meters.
Returns
-------
x_steps, (X, ) ndarray
Number of grids in x dimension.
| python | {
"resource": ""
} |
q23512 | get_boundary_coords | train | def get_boundary_coords(x, y, spatial_pad=0):
r"""Return bounding box based on given x and y coordinates assuming northern hemisphere.
x: numeric
x coordinates.
y: numeric
y coordinates.
spatial_pad: numeric
Number of meters to add to the x | python | {
"resource": ""
} |
q23513 | natural_neighbor_to_grid | train | def natural_neighbor_to_grid(xp, yp, variable, grid_x, grid_y):
r"""Generate a natural neighbor interpolation of the given points to a regular grid.
This assigns values to the given grid using the Liang and Hale [Liang2010]_.
approach.
Parameters
----------
xp: (N, ) ndarray
x-coordinates of observations
yp: (N, ) ndarray
y-coordinates of observations
variable: (N, ) ndarray
observation values associated with (xp, yp) pairs.
IE, variable[i] is a unique observation at (xp[i], yp[i])
grid_x: (M, 2) ndarray
Meshgrid associated with x dimension
grid_y: (M, 2) ndarray
Meshgrid associated with y dimension
Returns
| python | {
"resource": ""
} |
q23514 | natural_neighbor | train | def natural_neighbor(xp, yp, variable, grid_x, grid_y):
"""Wrap natural_neighbor_to_grid | python | {
"resource": ""
} |
q23515 | inverse_distance_to_grid | train | def inverse_distance_to_grid(xp, yp, variable, grid_x, grid_y, r, gamma=None, kappa=None,
min_neighbors=3, kind='cressman'):
r"""Generate an inverse distance interpolation of the given points to a regular grid.
Values are assigned to the given grid using inverse distance weighting based on either
[Cressman1959]_ or [Barnes1964]_. The Barnes implementation used here based on [Koch1983]_.
Parameters
----------
xp: (N, ) ndarray
x-coordinates of observations.
yp: (N, ) ndarray
y-coordinates of observations.
variable: (N, ) ndarray
observation values associated with (xp, yp) pairs.
IE, variable[i] is a unique observation at (xp[i], yp[i]).
grid_x: (M, 2) ndarray
Meshgrid associated with x dimension.
grid_y: (M, 2) ndarray
Meshgrid associated with y dimension.
r: float
Radius from grid center, within which observations
are considered and weighted.
gamma: float
Adjustable smoothing parameter for the barnes interpolation. Default None.
kappa: float
Response parameter for barnes interpolation. Default None.
min_neighbors: int
Minimum number of neighbors needed to perform barnes or cressman interpolation
for a point. Default is 3.
kind: str
| python | {
"resource": ""
} |
q23516 | inverse_distance | train | def inverse_distance(xp, yp, variable, grid_x, grid_y, r, gamma=None, kappa=None,
min_neighbors=3, kind='cressman'):
"""Wrap inverse_distance_to_grid for deprecated inverse_distance function."""
return | python | {
"resource": ""
} |
q23517 | interpolate_to_isosurface | train | def interpolate_to_isosurface(level_var, interp_var, level, **kwargs):
r"""Linear interpolation of a variable to a given vertical level from given values.
This function assumes that highest vertical level (lowest pressure) is zeroth index.
A classic use of this function would be to compute the potential temperature on the
dynamic tropopause (2 PVU surface).
Parameters
----------
level_var: array_like (P, M, N)
Level values in 3D grid on common vertical coordinate (e.g., PV values on
isobaric levels). Assumes height dimension is highest to lowest in atmosphere.
interp_var: array_like (P, M, N)
Variable on 3D grid with same vertical coordinate as level_var to interpolate to
given level (e.g., potential temperature on isobaric levels)
level: int or float
Desired interpolated level (e.g., 2 PVU surface)
Other Parameters
----------------
bottom_up_search : bool, optional
Controls whether to search for levels bottom-up, or top-down. Defaults to
True, which is bottom-up search.
Returns
-------
interp_level: (M, N) ndarray
The interpolated variable (e.g., potential temperature) on the desired level (e.g.,
2 PVU surface)
Notes
-----
This function implements a linear interpolation to estimate values on a given surface.
The prototypical example is interpolation of potential temperature to the dynamic
tropopause (e.g., 2 PVU surface)
"""
# Change when Python 2.7 no longer supported
# Pull out | python | {
"resource": ""
} |
q23518 | interpolate | train | def interpolate(x, y, z, interp_type='linear', hres=50000,
minimum_neighbors=3, gamma=0.25, kappa_star=5.052,
search_radius=None, rbf_func='linear', rbf_smooth=0,
boundary_coords=None):
"""Wrap interpolate_to_grid for deprecated interpolate function."""
return interpolate_to_grid(x, y, z, interp_type=interp_type, hres=hres,
| python | {
"resource": ""
} |
q23519 | interpolate_nans_1d | train | def interpolate_nans_1d(x, y, kind='linear'):
"""Interpolate NaN values in y.
Interpolate NaN values in the y dimension. Works with unsorted x values.
Parameters
----------
x : array-like
1-dimensional array of numeric x-values
y : array-like
1-dimensional array of numeric y-values
kind : string
specifies the kind of interpolation x coordinate - 'linear' or 'log', optional.
Defaults to 'linear'.
Returns
-------
An array of the y coordinate data with NaN values interpolated.
"""
x_sort_args = np.argsort(x)
x = x[x_sort_args]
y | python | {
"resource": ""
} |
q23520 | interpolate_1d | train | def interpolate_1d(x, xp, *args, **kwargs):
r"""Interpolates data with any shape over a specified axis.
Interpolation over a specified axis for arrays of any shape.
Parameters
----------
x : array-like
1-D array of desired interpolated values.
xp : array-like
The x-coordinates of the data points.
args : array-like
The data to be interpolated. Can be multiple arguments, all must be the same shape as
xp.
axis : int, optional
The axis to interpolate over. Defaults to 0.
fill_value: float, optional
Specify handling of interpolation points out of data bounds. If None, will return
ValueError if points are out of bounds. Defaults to nan.
Returns
-------
array-like
Interpolated values for each point with coordinates sorted in ascending order.
Examples
--------
>>> x = np.array([1., 2., 3., 4.])
>>> y = np.array([1., 2., 3., 4.])
>>> x_interp = np.array([2.5, 3.5])
>>> metpy.calc.interp(x_interp, x, y)
array([2.5, 3.5])
Notes
-----
xp and args must be the same shape.
"""
# Pull out keyword args
fill_value = kwargs.pop('fill_value', np.nan)
axis = kwargs.pop('axis', 0)
# Make x an array
x = np.asanyarray(x).reshape(-1)
# Save number of dimensions in xp
ndim = xp.ndim
# Sort input data
sort_args = np.argsort(xp, axis=axis)
sort_x = np.argsort(x)
# indices for sorting
sorter = broadcast_indices(xp, sort_args, ndim, axis)
# sort xp
xp = xp[sorter] | python | {
"resource": ""
} |
q23521 | log_interpolate_1d | train | def log_interpolate_1d(x, xp, *args, **kwargs):
r"""Interpolates data with logarithmic x-scale over a specified axis.
Interpolation on a logarithmic x-scale for interpolation values in pressure coordintates.
Parameters
----------
x : array-like
1-D array of desired interpolated values.
xp : array-like
The x-coordinates of the data points.
args : array-like
The data to be interpolated. Can be multiple arguments, all must be the same shape as
xp.
axis : int, optional
The axis to interpolate over. Defaults to 0.
fill_value: float, optional
Specify handling of interpolation points out of data bounds. If None, will return
ValueError if points are out of bounds. Defaults to nan.
Returns
-------
array-like
Interpolated values for each point with coordinates sorted | python | {
"resource": ""
} |
q23522 | distances_from_cross_section | train | def distances_from_cross_section(cross):
"""Calculate the distances in the x and y directions along a cross-section.
Parameters
----------
cross : `xarray.DataArray`
The input DataArray of a cross-section from which to obtain geometeric distances in
the x and y directions.
Returns
-------
x, y : tuple of `xarray.DataArray`
A tuple of the x and y distances as DataArrays
"""
if (CFConventionHandler.check_axis(cross.metpy.x, 'lon')
and CFConventionHandler.check_axis(cross.metpy.y, 'lat')):
# Use pyproj to obtain x and y distances
from pyproj import Geod
g = Geod(cross.metpy.cartopy_crs.proj4_init)
lon = cross.metpy.x
lat = cross.metpy.y
forward_az, _, distance = g.inv(lon[0].values * np.ones_like(lon),
lat[0].values * np.ones_like(lat),
lon.values,
| python | {
"resource": ""
} |
q23523 | latitude_from_cross_section | train | def latitude_from_cross_section(cross):
"""Calculate the latitude of points in a cross-section.
Parameters
----------
cross : `xarray.DataArray`
The input DataArray of a cross-section from which to obtain latitudes.
Returns
-------
latitude : `xarray.DataArray`
Latitude of points
"""
y = cross.metpy.y
if CFConventionHandler.check_axis(y, 'lat'):
return y
else:
import cartopy.crs as ccrs
latitude = ccrs.Geodetic().transform_points(cross.metpy.cartopy_crs,
| python | {
"resource": ""
} |
q23524 | unit_vectors_from_cross_section | train | def unit_vectors_from_cross_section(cross, index='index'):
r"""Calculate the unit tanget and unit normal vectors from a cross-section.
Given a path described parametrically by :math:`\vec{l}(i) = (x(i), y(i))`, we can find
the unit tangent vector by the formula
.. math:: \vec{T}(i) =
\frac{1}{\sqrt{\left( \frac{dx}{di} \right)^2 + \left( \frac{dy}{di} \right)^2}}
\left( \frac{dx}{di}, \frac{dy}{di} \right)
From this, because this is a two-dimensional path, the normal vector can be obtained by a
simple :math:`\frac{\pi}{2}` rotation.
Parameters
----------
cross : `xarray.DataArray`
The input DataArray of a cross-section from which to obtain latitudes.
index : `str`, optional
A string denoting the index coordinate of the cross section, defaults to 'index' as
set by `metpy.interpolate.cross_section`.
Returns
------- | python | {
"resource": ""
} |
q23525 | cross_section_components | train | def cross_section_components(data_x, data_y, index='index'):
r"""Obtain the tangential and normal components of a cross-section of a vector field.
Parameters
----------
data_x : `xarray.DataArray`
The input DataArray of the x-component (in terms of data projection) of the vector
field.
data_y : `xarray.DataArray`
The input DataArray of the y-component (in terms of data projection) of the vector
field.
Returns
-------
component_tangential, component_normal: tuple of `xarray.DataArray`
The components of the vector field in the tangential and normal directions,
respectively.
See Also
--------
tangential_component, normal_component
Notes
-----
The coordinates of `data_x` and `data_y` must match. | python | {
"resource": ""
} |
q23526 | normal_component | train | def normal_component(data_x, data_y, index='index'):
r"""Obtain the normal component of a cross-section of a vector field.
Parameters
----------
data_x : `xarray.DataArray`
The input DataArray of the x-component (in terms of data projection) of the vector
field.
data_y : `xarray.DataArray`
The input DataArray of the y-component (in terms of data projection) of the vector
field.
Returns
-------
component_normal: `xarray.DataArray`
The component of the vector field in the normal directions.
See Also
--------
cross_section_components, tangential_component
Notes
-----
The coordinates of `data_x` and `data_y` must match.
"""
| python | {
"resource": ""
} |
q23527 | tangential_component | train | def tangential_component(data_x, data_y, index='index'):
r"""Obtain the tangential component of a cross-section of a vector field.
Parameters
----------
data_x : `xarray.DataArray`
The input DataArray of the x-component (in terms of data projection) of the vector
field.
data_y : `xarray.DataArray`
The input DataArray of the y-component (in terms of data projection) of the vector
field.
Returns
-------
component_tangential: `xarray.DataArray`
The component of the vector field in the tangential directions.
See Also
--------
cross_section_components, normal_component
Notes
-----
The coordinates of `data_x` and `data_y` must match.
"""
| python | {
"resource": ""
} |
q23528 | read_colortable | train | def read_colortable(fobj):
r"""Read colortable information from a file.
Reads a colortable, which consists of one color per line of the file, where
a color can be one of: a tuple of 3 floats, a string with a HTML color name,
or a string with a HTML hex color.
Parameters
----------
fobj : a file-like object
A file-like object to read the colors from
Returns
-------
List of tuples
A list of the RGB color values, where each RGB color is a tuple of 3 floats in the
range of [0, 1]. | python | {
"resource": ""
} |
q23529 | convert_gempak_table | train | def convert_gempak_table(infile, outfile):
r"""Convert a GEMPAK color table to one MetPy can read.
Reads lines from a GEMPAK-style color table file, and writes them to another file in
a format that MetPy can parse.
Parameters
----------
infile : file-like object
The file-like object to read from
outfile : file-like object
| python | {
"resource": ""
} |
q23530 | ColortableRegistry.scan_resource | train | def scan_resource(self, pkg, path):
r"""Scan a resource directory for colortable files and add them to the registry.
Parameters
----------
pkg : str
The package containing the resource directory
path : str
The path to the directory with the color tables
"""
for fname in resource_listdir(pkg, path):
if fname.endswith(TABLE_EXT):
table_path = posixpath.join(path, fname)
| python | {
"resource": ""
} |
q23531 | ColortableRegistry.scan_dir | train | def scan_dir(self, path):
r"""Scan a directory on disk for color table files and add them to the registry.
Parameters
----------
path : str
The path to the directory with the color tables
"""
for fname in glob.glob(os.path.join(path, '*' + TABLE_EXT)):
if os.path.isfile(fname):
with open(fname, 'r') as fobj:
try:
| python | {
"resource": ""
} |
q23532 | ColortableRegistry.add_colortable | train | def add_colortable(self, fobj, name):
r"""Add a color table from a file to the registry.
Parameters
----------
fobj : file-like object
The file to read the color table from
name : str
| python | {
"resource": ""
} |
q23533 | cressman_point | train | def cressman_point(sq_dist, values, radius):
r"""Generate a Cressman interpolation value for a point.
The calculated value is based on the given distances and search radius.
Parameters
----------
sq_dist: (N, ) ndarray
Squared distance between observations and grid point
| python | {
"resource": ""
} |
q23534 | barnes_point | train | def barnes_point(sq_dist, values, kappa, gamma=None):
r"""Generate a single pass barnes interpolation value for a point.
The calculated value is based on the given distances, kappa and gamma values.
Parameters
----------
sq_dist: (N, ) ndarray
Squared distance between observations and grid point
values: (N, ) ndarray
Observation values in same order as sq_dist
kappa: float
Response parameter for barnes interpolation.
gamma: float
Adjustable smoothing parameter for the barnes interpolation. Default 1.
Returns
-------
value: | python | {
"resource": ""
} |
q23535 | natural_neighbor_point | train | def natural_neighbor_point(xp, yp, variable, grid_loc, tri, neighbors, triangle_info):
r"""Generate a natural neighbor interpolation of the observations to the given point.
This uses the Liang and Hale approach [Liang2010]_. The interpolation will fail if
the grid point has no natural neighbors.
Parameters
----------
xp: (N, ) ndarray
x-coordinates of observations
yp: (N, ) ndarray
y-coordinates of observations
variable: (N, ) ndarray
observation values associated with (xp, yp) pairs.
IE, variable[i] is a unique observation at (xp[i], yp[i])
grid_loc: (float, float)
Coordinates of the grid point at which to calculate the
interpolation.
tri: object
Delaunay triangulation of the observations.
neighbors: (N, ) ndarray
Simplex codes of the grid point's natural neighbors. The codes
will correspond to codes in the triangulation.
triangle_info: dictionary
Pre-calculated triangle attributes for quick look ups. Requires
items 'cc' (circumcenters) and 'r' (radii) to be associated with
each simplex code key from the delaunay triangulation.
Returns
-------
value: float
Interpolated value for the grid location
"""
edges = geometry.find_local_boundary(tri, neighbors)
edge_vertices = [segment[0] for segment in geometry.order_edges(edges)]
num_vertices = len(edge_vertices)
p1 = edge_vertices[0]
p2 = edge_vertices[1]
c1 = geometry.circumcenter(grid_loc, tri.points[p1], tri.points[p2])
polygon = [c1]
area_list = []
total_area = 0.0
for i in range(num_vertices):
| python | {
"resource": ""
} |
q23536 | natural_neighbor_to_points | train | def natural_neighbor_to_points(points, values, xi):
r"""Generate a natural neighbor interpolation to the given points.
This assigns values to the given interpolation points using the Liang and Hale
[Liang2010]_. approach.
Parameters
----------
points: array_like, shape (n, 2)
Coordinates of the data points.
values: array_like, shape (n,)
Values of the data points.
xi: array_like, shape (M, 2)
Points to interpolate the data onto.
Returns
-------
img: (M,) ndarray
Array representing the interpolated values for each input point in `xi`
See Also
--------
natural_neighbor_to_grid
"""
tri = Delaunay(points)
members, | python | {
"resource": ""
} |
q23537 | inverse_distance_to_points | train | def inverse_distance_to_points(points, values, xi, r, gamma=None, kappa=None, min_neighbors=3,
kind='cressman'):
r"""Generate an inverse distance weighting interpolation to the given points.
Values are assigned to the given interpolation points based on either [Cressman1959]_ or
[Barnes1964]_. The Barnes implementation used here based on [Koch1983]_.
Parameters
----------
points: array_like, shape (n, 2)
Coordinates of the data points.
values: array_like, shape (n,)
Values of the data points.
xi: array_like, shape (M, 2)
Points to interpolate the data onto.
r: float
Radius from grid center, within which observations
are considered and weighted.
gamma: float
Adjustable smoothing parameter for the barnes interpolation. Default None.
kappa: float
Response parameter for barnes interpolation. Default None.
min_neighbors: int
Minimum number of neighbors needed to perform barnes or cressman interpolation
for a point. Default is 3.
kind: str
Specify what inverse distance weighting interpolation to use.
Options: 'cressman' or 'barnes'. Default 'cressman'
Returns
-------
img: (M,) ndarray
Array representing the interpolated values for each input point in `xi`
See Also
--------
inverse_distance_to_grid
| python | {
"resource": ""
} |
q23538 | interpolate_to_points | train | def interpolate_to_points(points, values, xi, interp_type='linear', minimum_neighbors=3,
gamma=0.25, kappa_star=5.052, search_radius=None, rbf_func='linear',
rbf_smooth=0):
r"""Interpolate unstructured point data to the given points.
This function interpolates the given `values` valid at `points` to the points `xi`. This is
modeled after `scipy.interpolate.griddata`, but acts as a generalization of it by including
the following types of interpolation:
- Linear
- Nearest Neighbor
- Cubic
- Radial Basis Function
- Natural Neighbor (2D Only)
- Barnes (2D Only)
- Cressman (2D Only)
Parameters
----------
points: array_like, shape (n, D)
Coordinates of the data points.
values: array_like, shape (n,)
Values of the data points.
xi: array_like, shape (M, D)
Points to interpolate the data onto.
interp_type: str
What type of interpolation to use. Available options include:
1) "linear", "nearest", "cubic", or "rbf" from `scipy.interpolate`.
2) "natural_neighbor", "barnes", or "cressman" from `metpy.interpolate`.
Default "linear".
minimum_neighbors: int
Minimum number of neighbors needed to perform barnes or cressman interpolation for a
point. Default is 3.
gamma: float
Adjustable smoothing parameter for the barnes interpolation. Default 0.25.
kappa_star: float
Response parameter for barnes interpolation, specified nondimensionally
in terms of the Nyquist. Default 5.052
search_radius: float
A search radius to use for the barnes and cressman interpolation schemes.
If search_radius is not specified, it will default to the average spacing of
observations.
rbf_func: str
Specifies which function to use for Rbf interpolation.
Options include: 'multiquadric', 'inverse', 'gaussian', 'linear', 'cubic',
'quintic', and 'thin_plate'. Defualt 'linear'. See `scipy.interpolate.Rbf` for more
information.
rbf_smooth: float
Smoothing value applied to rbf interpolation. Higher values result in more smoothing.
Returns
-------
values_interpolated: (M,) ndarray
Array representing the interpolated values for each input point in `xi`.
Notes
-----
This function primarily acts as a wrapper for the individual interpolation routines. The
individual functions are also available for direct use.
See Also
--------
interpolate_to_grid
"""
# If this is a type that `griddata` handles, hand it along to `griddata`
if interp_type in ['linear', 'nearest', 'cubic']:
return griddata(points, values, xi, method=interp_type)
# If this is natural neighbor, hand it along to `natural_neighbor`
elif interp_type == 'natural_neighbor':
return natural_neighbor_to_points(points, values, xi)
# If this is Barnes/Cressman, determine | python | {
"resource": ""
} |
q23539 | _make_datetime | train | def _make_datetime(s):
r"""Convert 7 bytes from a GINI file to a `datetime` instance."""
s = bytearray(s) # For Python 2 | python | {
"resource": ""
} |
q23540 | _scaled_int | train | def _scaled_int(s):
r"""Convert a 3 byte string to a signed integer value."""
s = bytearray(s) # For Python 2
# Get leftmost bit (sign) as 1 (if 0) or -1 (if 1)
sign = 1 - ((s[0] & 0x80) >> 6)
# Combine remaining bits
int_val = (((s[0] & 0x7f) << 16) | (s[1] << 8) | s[2])
| python | {
"resource": ""
} |
q23541 | _name_lookup | train | def _name_lookup(names):
r"""Create an io helper to convert an integer to a named value."""
mapper = dict(zip(range(len(names)), names))
| python | {
"resource": ""
} |
q23542 | cf_to_proj | train | def cf_to_proj(var):
r"""Convert a Variable with projection information to a Proj.4 Projection instance.
The attributes of this Variable must conform to the Climate and Forecasting (CF)
netCDF conventions.
Parameters
----------
var : Variable
The projection variable with appropriate attributes.
"""
import pyproj
kwargs = {'lat_0': var.latitude_of_projection_origin, 'a': var.earth_radius,
'b': var.earth_radius}
if var.grid_mapping_name == 'lambert_conformal_conic':
kwargs['proj'] = 'lcc'
kwargs['lon_0'] = var.longitude_of_central_meridian
kwargs['lat_1'] = var.standard_parallel
kwargs['lat_2'] = var.standard_parallel
elif var.grid_mapping_name == 'polar_stereographic':
kwargs['proj'] = 'stere'
kwargs['lon_0'] = var.straight_vertical_longitude_from_pole
| python | {
"resource": ""
} |
q23543 | get_perturbation | train | def get_perturbation(ts, axis=-1):
r"""Compute the perturbation from the mean of a time series.
Parameters
----------
ts : array_like
The time series from which you wish to find the perturbation
time series (perturbation from the mean).
Returns
-------
array_like
The perturbation time series.
Other Parameters
----------------
axis : int
The index of the time axis. Default is -1
Notes
-----
The perturbation time series produced by this function | python | {
"resource": ""
} |
q23544 | tke | train | def tke(u, v, w, perturbation=False, axis=-1):
r"""Compute turbulence kinetic energy.
Compute the turbulence kinetic energy (e) from the time series of the
velocity components.
Parameters
----------
u : array_like
The wind component along the x-axis
v : array_like
The wind component along the y-axis
w : array_like
The wind component along the z-axis
perturbation : {False, True}, optional
True if the `u`, `v`, and `w` components of wind speed
supplied to the function are perturbation velocities.
If False, perturbation velocities will be calculated by
removing the mean value from each component.
Returns
-------
array_like
The corresponding turbulence kinetic energy value
Other Parameters
----------------
axis : int
The index of the time axis. Default is -1
See Also
--------
get_perturbation : Used to compute perturbations if `perturbation`
is False.
Notes
-----
Turbulence Kinetic Energy is computed as:
.. math:: | python | {
"resource": ""
} |
q23545 | kinematic_flux | train | def kinematic_flux(vel, b, perturbation=False, axis=-1):
r"""Compute the kinematic flux from two time series.
Compute the kinematic flux from the time series of two variables `vel`
and b. Note that to be a kinematic flux, at least one variable must be
a component of velocity.
Parameters
----------
vel : array_like
A component of velocity
b : array_like
May be a component of velocity or a scalar variable (e.g. Temperature)
perturbation : bool, optional
`True` if the `vel` and `b` variables are perturbations. If `False`, perturbations
will be calculated by removing the mean value from each variable. Defaults to `False`.
Returns
-------
array_like
The corresponding kinematic flux
Other Parameters
----------------
axis : int, optional
The index of the time axis, along which the calculations will be
performed. Defaults to -1
Notes
-----
A kinematic flux is computed as
.. math:: \overline{u^{\prime} s^{\prime}}
where at the prime notation denotes perturbation variables, and at least
one variable is perturbation velocity. For example, the vertical kinematic
momentum flux (two velocity components):
.. math:: \overline{u^{\prime} w^{\prime}}
or the vertical kinematic heat flux (one velocity component, and one
scalar):
.. math:: \overline{w^{\prime} | python | {
"resource": ""
} |
q23546 | friction_velocity | train | def friction_velocity(u, w, v=None, perturbation=False, axis=-1):
r"""Compute the friction velocity from the time series of velocity components.
Compute the friction velocity from the time series of the x, z,
and optionally y, velocity components.
Parameters
----------
u : array_like
The wind component along the x-axis
w : array_like
The wind component along the z-axis
v : array_like, optional
The wind component along the y-axis.
perturbation : {False, True}, optional
True if the `u`, `w`, and `v` components of wind speed
supplied to the function are perturbation velocities.
If False, perturbation velocities will be calculated by
removing the mean value from each component.
Returns
-------
array_like
The corresponding friction velocity
Other Parameters
----------------
axis : int
The index of the time axis. Default is -1
See Also
--------
kinematic_flux : Used to compute the x-component and y-component
vertical kinematic momentum flux(es) used in the
computation of the friction velocity.
Notes
-----
The Friction Velocity is computed as:
.. math:: u_{*} = \sqrt[4]{\left(\overline{u^{\prime}w^{\prime}}\right)^2 +
| python | {
"resource": ""
} |
q23547 | open_as_needed | train | def open_as_needed(filename):
"""Return a file-object given either a filename or an object.
Handles opening with the right class based on the file extension.
"""
| python | {
"resource": ""
} |
q23548 | zlib_decompress_all_frames | train | def zlib_decompress_all_frames(data):
"""Decompress all frames of zlib-compressed bytes.
Repeatedly tries to decompress `data` until all data are decompressed, or decompression
fails. This will skip over bytes that are not compressed with zlib.
Parameters
----------
data : bytearray or bytes
Binary data compressed using zlib.
Returns
-------
bytearray
All decompressed bytes
"""
frames = bytearray()
data = bytes(data)
| python | {
"resource": ""
} |
q23549 | hexdump | train | def hexdump(buf, num_bytes, offset=0, width=32):
"""Perform a hexudmp of the buffer.
Returns the hexdump as a canonically-formatted string.
"""
ind = offset
end = offset + num_bytes
lines = []
while ind < end:
chunk = buf[ind:ind + width]
actual_width = len(chunk)
hexfmt = '{:02X}'
blocksize = 4
blocks = [hexfmt * blocksize for _ in range(actual_width // blocksize)]
# Need to get any partial lines
num_left = actual_width % blocksize # noqa: S001 Fix false alarm
if num_left:
blocks += [hexfmt * num_left + '--' * (blocksize - num_left)]
| python | {
"resource": ""
} |
q23550 | UnitLinker.units | train | def units(self, val):
"""Override the units on the underlying variable."""
if isinstance(val, units.Unit):
| python | {
"resource": ""
} |
q23551 | NamedStruct.unpack_from | train | def unpack_from(self, buff, offset=0):
"""Read bytes from a buffer and return as a namedtuple."""
| python | {
"resource": ""
} |
q23552 | DictStruct.unpack_from | train | def unpack_from(self, buff, offset=0):
"""Unpack the next bytes from a file object."""
| python | {
"resource": ""
} |
q23553 | IOBuffer.set_mark | train | def set_mark(self):
"""Mark the current location and return its id so that the buffer can return later."""
| python | {
"resource": ""
} |
q23554 | IOBuffer.jump_to | train | def jump_to(self, mark, offset=0):
"""Jump to a previously set mark."""
| python | {
"resource": ""
} |
q23555 | IOBuffer.splice | train | def splice(self, mark, newdata):
"""Replace the data after the marked location with the specified data."""
self.jump_to(mark)
| python | {
"resource": ""
} |
q23556 | IOBuffer.read_struct | train | def read_struct(self, struct_class):
"""Parse and return a structure from the current buffer offset."""
struct | python | {
"resource": ""
} |
q23557 | IOBuffer.read_func | train | def read_func(self, func, num_bytes=None):
"""Parse data from the current buffer offset using a function."""
# only advance if func succeeds
| python | {
"resource": ""
} |
q23558 | IOBuffer.read_binary | train | def read_binary(self, num, item_type='B'):
"""Parse the current buffer offset as the specified code."""
if 'B' in item_type:
return self.read(num)
if item_type[0] in ('@', '=', '<', '>', '!'):
order = item_type[0]
| python | {
"resource": ""
} |
q23559 | IOBuffer.read | train | def read(self, num_bytes=None):
"""Read and return the specified bytes from the buffer."""
| python | {
"resource": ""
} |
q23560 | IOBuffer.get_next | train | def get_next(self, num_bytes=None):
"""Get the next bytes in the buffer without modifying the offset."""
if num_bytes is None:
| python | {
"resource": ""
} |
q23561 | IOBuffer.skip | train | def skip(self, num_bytes):
"""Jump the ahead the specified bytes in the buffer."""
if num_bytes is None:
| python | {
"resource": ""
} |
q23562 | draw_polygon_with_info | train | def draw_polygon_with_info(ax, polygon, off_x=0, off_y=0):
"""Draw one of the natural neighbor polygons with some information."""
pts = np.array(polygon)[ConvexHull(polygon).vertices]
for i, pt in enumerate(pts):
ax.plot([pt[0], pts[(i + 1) % len(pts)][0]],
[pt[1], pts[(i | python | {
"resource": ""
} |
q23563 | _check_and_flip | train | def _check_and_flip(arr):
"""Transpose array or list of arrays if they are 2D."""
if hasattr(arr, 'ndim'):
if arr.ndim >= 2:
return arr.T
else:
| python | {
"resource": ""
} |
q23564 | ensure_yx_order | train | def ensure_yx_order(func):
"""Wrap a function to ensure all array arguments are y, x ordered, based on kwarg."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
# Check what order we're given
dim_order = kwargs.pop('dim_order', None)
x_first = _is_x_first_dim(dim_order)
# If x is the first dimension, flip (transpose) every array within the function args.
if x_first:
args = tuple(_check_and_flip(arr) for arr in args)
for k, v in kwargs:
kwargs[k] = _check_and_flip(v)
ret = func(*args, **kwargs)
# If we flipped on the way in, need to flip on the way out so that output array(s)
# match the dimension order of the original input.
if x_first:
return _check_and_flip(ret)
else:
return ret
# Inject a docstring for the dim_order argument into the function's docstring.
dim_order_doc = """
dim_order : str or ``None``, optional
The ordering of dimensions in passed in arrays. Can be one of ``None``, ``'xy'``,
or ``'yx'``. ``'xy'`` indicates that the dimension corresponding to x is the leading
| python | {
"resource": ""
} |
q23565 | vorticity | train | def vorticity(u, v, dx, dy):
r"""Calculate the vertical vorticity of the horizontal wind.
Parameters
----------
u : (M, N) ndarray
x component of the wind
v : (M, N) ndarray
y component of the wind
dx : float or ndarray
The grid spacing(s) in the x-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
dy : float or ndarray
The grid spacing(s) in the y-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
Returns
-------
(M, N) ndarray
vertical vorticity
| python | {
"resource": ""
} |
q23566 | divergence | train | def divergence(u, v, dx, dy):
r"""Calculate the horizontal divergence of the horizontal wind.
Parameters
----------
u : (M, N) ndarray
x component of the wind
v : (M, N) ndarray
y component of the wind
dx : float or ndarray
The grid spacing(s) in the x-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
dy : float or ndarray
The grid spacing(s) in the y-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
Returns
-------
(M, N) ndarray
The horizontal divergence | python | {
"resource": ""
} |
q23567 | total_deformation | train | def total_deformation(u, v, dx, dy):
r"""Calculate the horizontal total deformation of the horizontal wind.
Parameters
----------
u : (M, N) ndarray
x component of the wind
v : (M, N) ndarray
y component of the wind
dx : float or ndarray
The grid spacing(s) in the x-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
dy : float or ndarray
The grid spacing(s) in the y-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
Returns
-------
(M, N) ndarray
Total Deformation
See Also
| python | {
"resource": ""
} |
q23568 | advection | train | def advection(scalar, wind, deltas):
r"""Calculate the advection of a scalar field by the wind.
The order of the dimensions of the arrays must match the order in which
the wind components are given. For example, if the winds are given [u, v],
then the scalar and wind arrays must be indexed as x,y (which puts x as the
rows, not columns).
Parameters
----------
scalar : N-dimensional array
Array (with N-dimensions) with the quantity to be advected.
wind : sequence of arrays
Length M sequence of N-dimensional arrays. Represents the flow,
with a component of the wind in each dimension. For example, for
horizontal advection, this could be a list: [u, v], where u and v
are each a 2-dimensional array.
deltas : sequence of float or ndarray
A (length M) sequence containing the grid spacing(s) in each dimension. If using
arrays, in each array there should be one item less than the size of `scalar` along the
applicable axis.
Returns
-------
N-dimensional array
An N-dimensional array containing the advection at all grid points.
"""
# This allows passing in a list of wind components or an array.
wind = _stack(wind)
# If we have more than one component, we need to reverse the order | python | {
"resource": ""
} |
q23569 | frontogenesis | train | def frontogenesis(thta, u, v, dx, dy, dim_order='yx'):
r"""Calculate the 2D kinematic frontogenesis of a temperature field.
The implementation is a form of the Petterssen Frontogenesis and uses the formula
outlined in [Bluestein1993]_ pg.248-253.
.. math:: F=\frac{1}{2}\left|\nabla \theta\right|[D cos(2\beta)-\delta]
* :math:`F` is 2D kinematic frontogenesis
* :math:`\theta` is potential temperature
* :math:`D` is the total deformation
* :math:`\beta` is the angle between the axis of dilitation and the isentropes
* :math:`\delta` is the divergence
Parameters
----------
thta : (M, N) ndarray
Potential temperature
u : (M, N) ndarray
x component of the wind
v : (M, N) ndarray
y component of the wind
dx : float or ndarray
The grid spacing(s) in the x-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
dy : float or ndarray
The grid spacing(s) in the y-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
Returns
-------
(M, N) ndarray
2D Frontogenesis in [temperature units]/m/s
Notes
-----
If inputs have more than two dimensions, they are assumed to have either leading dimensions
of (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``.
Conversion factor to go from [temperature units]/m/s to [temperature units/100km/3h]
:math:`1.08e4*1.e5`
"""
# Get gradients of potential temperature | python | {
"resource": ""
} |
q23570 | geostrophic_wind | train | def geostrophic_wind(heights, f, dx, dy):
r"""Calculate the geostrophic wind given from the heights or geopotential.
Parameters
----------
heights : (M, N) ndarray
The height field, with either leading dimensions of (x, y) or trailing dimensions
of (y, x), depending on the value of ``dim_order``.
f : array_like
The coriolis parameter. This can be a scalar to be applied
everywhere or an array of values.
dx : float or ndarray
The grid spacing(s) in the x-direction. If an array, there should be one item less than
the size of `heights` along the applicable axis.
dy : float or ndarray
The grid spacing(s) in the y-direction. If an array, there should be one item less than
the size of `heights` along the applicable axis.
Returns
| python | {
"resource": ""
} |
q23571 | ageostrophic_wind | train | def ageostrophic_wind(heights, f, dx, dy, u, v, dim_order='yx'):
r"""Calculate the ageostrophic wind given from the heights or geopotential.
Parameters
----------
heights : (M, N) ndarray
The height field.
f : array_like
The coriolis parameter. This can be a scalar to be applied
everywhere or an array of values.
dx : float or ndarray
The grid spacing(s) in the x-direction. If an array, there should be one item less than
the size of `heights` along the applicable axis.
dy : | python | {
"resource": ""
} |
q23572 | storm_relative_helicity | train | def storm_relative_helicity(u, v, heights, depth, bottom=0 * units.m,
storm_u=0 * units('m/s'), storm_v=0 * units('m/s')):
# Partially adapted from similar SharpPy code
r"""Calculate storm relative helicity.
Calculates storm relatively helicity following [Markowski2010] 230-231.
.. math:: \int\limits_0^d (\bar v - c) \cdot \bar\omega_{h} \,dz
This is applied to the data from a hodograph with the following summation:
.. math:: \sum_{n = 1}^{N-1} [(u_{n+1} - c_{x})(v_{n} - c_{y}) -
(u_{n} - c_{x})(v_{n+1} - c_{y})]
Parameters
----------
u : array-like
u component winds
v : array-like
v component winds
heights : array-like
atmospheric heights, will be converted to AGL
depth : number
depth of the layer
bottom : number
height of layer bottom AGL (default is surface)
storm_u : number
u component of storm motion (default is 0 m/s)
storm_v : number
v component of storm motion (default is 0 m/s)
Returns
-------
`pint.Quantity, pint.Quantity, pint.Quantity`
positive, negative, total storm-relative helicity
"""
_, u, v = get_layer_heights(heights, depth, u, v, with_agl=True, bottom=bottom)
storm_relative_u = u - storm_u
storm_relative_v = v - storm_v
int_layers = (storm_relative_u[1:] * storm_relative_v[:-1]
| python | {
"resource": ""
} |
q23573 | absolute_vorticity | train | def absolute_vorticity(u, v, dx, dy, lats, dim_order='yx'):
"""Calculate the absolute vorticity of the horizontal wind.
Parameters
----------
u : (M, N) ndarray
x component of the wind
v : (M, N) ndarray
y component of the wind
dx : float or ndarray
The grid spacing(s) in the x-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
dy : float or ndarray
The grid spacing(s) in the y-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
lats : (M, N) ndarray
latitudes of the wind data in radians or with appropriate unit information attached
| python | {
"resource": ""
} |
q23574 | potential_vorticity_baroclinic | train | def potential_vorticity_baroclinic(potential_temperature, pressure, u, v, dx, dy, lats):
r"""Calculate the baroclinic potential vorticity.
.. math:: PV = -g \left(\frac{\partial u}{\partial p}\frac{\partial \theta}{\partial y}
- \frac{\partial v}{\partial p}\frac{\partial \theta}{\partial x}
+ \frac{\partial \theta}{\partial p}(\zeta + f) \right)
This formula is based on equation 4.5.93 [Bluestein1993]_.
Parameters
----------
potential_temperature : (P, M, N) ndarray
potential temperature
pressure : (P, M, N) ndarray
vertical pressures
u : (P, M, N) ndarray
x component of the wind
v : (P, M, N) ndarray
y component of the wind
dx : float or ndarray
The grid spacing(s) in the x-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
dy : float or ndarray
The grid spacing(s) in the y-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
lats : (M, N) ndarray
latitudes of the wind data in radians or with appropriate unit information attached
axis : int, optional
The axis corresponding to the vertical dimension in the potential temperature
and pressure arrays, defaults to 0, the first dimension.
Returns
-------
(P, M, N) ndarray
baroclinic potential vorticity
Notes
-----
This function will only work with data that is in (P, Y, X) format. If your data
is in a different order you will need to re-order your data in order to get correct
results from this function.
The same function can be used for isobaric and isentropic PV analysis. Provide winds
for vorticity calculations on the desired isobaric or isentropic surface. At least three
layers of pressure/potential temperature are required in order to calculate the vertical
derivative (one above and below the desired surface). The first two terms will be zero if
isentropic level data is used due to the gradient of theta in both the x and y-directions
| python | {
"resource": ""
} |
q23575 | inertial_advective_wind | train | def inertial_advective_wind(u, v, u_geostrophic, v_geostrophic, dx, dy, lats):
r"""Calculate the inertial advective wind.
.. math:: \frac{\hat k}{f} \times (\vec V \cdot \nabla)\hat V_g
.. math:: \frac{\hat k}{f} \times \left[ \left( u \frac{\partial u_g}{\partial x} + v
\frac{\partial u_g}{\partial y} \right) \hat i + \left( u \frac{\partial v_g}
{\partial x} + v \frac{\partial v_g}{\partial y} \right) \hat j \right]
.. math:: \left[ -\frac{1}{f}\left(u \frac{\partial v_g}{\partial x} + v
\frac{\partial v_g}{\partial y} \right) \right] \hat i + \left[ \frac{1}{f}
\left( u \frac{\partial u_g}{\partial x} + v \frac{\partial u_g}{\partial y}
\right) \right] \hat j
This formula is based on equation 27 of [Rochette2006]_.
Parameters
----------
u : (M, N) ndarray
x component of the advecting wind
v : (M, N) ndarray
y component of the advecting wind
u_geostrophic : (M, N) ndarray
x component of the geostrophic (advected) wind
v_geostrophic : (M, N) ndarray
y component of the geostrophic (advected) wind
dx : float or ndarray
The grid spacing(s) in the x-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
dy : float or ndarray
The grid spacing(s) in the y-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
lats : (M, N) ndarray
| python | {
"resource": ""
} |
q23576 | q_vector | train | def q_vector(u, v, temperature, pressure, dx, dy, static_stability=1):
r"""Calculate Q-vector at a given pressure level using the u, v winds and temperature.
.. math:: \vec{Q} = (Q_1, Q_2)
= - \frac{R}{\sigma p}\left(
\frac{\partial \vec{v}_g}{\partial x} \cdot \nabla_p T,
\frac{\partial \vec{v}_g}{\partial y} \cdot \nabla_p T
\right)
This formula follows equation 5.7.55 from [Bluestein1992]_, and can be used with the
the below form of the quasigeostrophic omega equation to assess vertical motion
([Bluestein1992]_ equation 5.7.54):
.. math:: \left( \nabla_p^2 + \frac{f_0^2}{\sigma} \frac{\partial^2}{\partial p^2}
\right) \omega =
- 2 \nabla_p \cdot \vec{Q} -
\frac{R}{\sigma p} \beta \frac{\partial T}{\partial x}.
Parameters
----------
u : (M, N) ndarray
x component of the wind (geostrophic in QG-theory)
v : (M, N) ndarray
y component of the wind (geostrophic in QG-theory)
temperature : (M, N) ndarray
Array of temperature at pressure level
pressure : `pint.Quantity`
Pressure at level
dx : float or ndarray
The grid spacing(s) in the x-direction. If an array, there should be one item less than
the size of `u` along the applicable axis.
dy : float or ndarray
The grid spacing(s) in the y-direction. If an array, there should be one item less than
| python | {
"resource": ""
} |
q23577 | basic_map | train | def basic_map(proj):
"""Make our basic default map for plotting"""
fig = plt.figure(figsize=(15, 10))
add_metpy_logo(fig, 0, 80, size='large')
view = fig.add_axes([0, 0, 1, 1], projection=proj)
view.set_extent([-120, -70, 20, 50])
view.add_feature(cfeature.STATES.with_scale('50m'))
| python | {
"resource": ""
} |
q23578 | get_points_within_r | train | def get_points_within_r(center_points, target_points, r):
r"""Get all target_points within a specified radius of a center point.
All data must be in same coordinate system, or you will get undetermined results.
Parameters
----------
center_points: (X, Y) ndarray
location from which to grab surrounding points within r
| python | {
"resource": ""
} |
q23579 | get_point_count_within_r | train | def get_point_count_within_r(center_points, target_points, r):
r"""Get count of target points within a specified radius from center points.
All data must be in same coordinate system, or you will get undetermined results.
Parameters
----------
center_points: (X, Y) ndarray
locations from which to grab surrounding points within r
| python | {
"resource": ""
} |
q23580 | triangle_area | train | def triangle_area(pt1, pt2, pt3):
r"""Return the area of a triangle.
Parameters
----------
pt1: (X,Y) ndarray
Starting vertex of a triangle
pt2: (X,Y) ndarray
Second vertex of a triangle
pt3: (X,Y) ndarray
Ending vertex of a triangle
Returns
-------
| python | {
"resource": ""
} |
q23581 | dist_2 | train | def dist_2(x0, y0, x1, y1):
r"""Return the squared distance between two points.
This is faster than calculating distance but should
only be used with comparable ratios.
Parameters
----------
x0: float
| python | {
"resource": ""
} |
q23582 | distance | train | def distance(p0, p1):
r"""Return the distance between two points.
Parameters
----------
p0: (X,Y) ndarray
Starting coordinate
p1: (X,Y) ndarray
Ending coordinate
Returns
-------
d: float
| python | {
"resource": ""
} |
q23583 | circumcircle_radius_2 | train | def circumcircle_radius_2(pt0, pt1, pt2):
r"""Calculate and return the squared radius of a given triangle's circumcircle.
This is faster than calculating radius but should only be used with comparable ratios.
Parameters
----------
pt0: (x, y)
Starting vertex of triangle
pt1: (x, y)
Second vertex of triangle
pt2: (x, y)
Final vertex of a triangle
Returns
-------
r: float
circumcircle radius
See Also
--------
| python | {
"resource": ""
} |
q23584 | circumcenter | train | def circumcenter(pt0, pt1, pt2):
r"""Calculate and return the circumcenter of a circumcircle generated by a given triangle.
All three points must be unique or a division by zero error will be raised.
Parameters
----------
pt0: (x, y)
Starting vertex of triangle
pt1: (x, y)
Second vertex of triangle
pt2: (x, y)
Final vertex of a triangle
Returns
-------
cc: (x, y)
circumcenter coordinates
See Also
--------
| python | {
"resource": ""
} |
q23585 | find_natural_neighbors | train | def find_natural_neighbors(tri, grid_points):
r"""Return the natural neighbor triangles for each given grid cell.
These are determined by the properties of the given delaunay triangulation.
A triangle is a natural neighbor of a grid cell if that triangles circumcenter
is within the circumradius of the grid cell center.
Parameters
----------
tri: Object
A Delaunay Triangulation.
grid_points: (X, Y) ndarray
Locations of grids.
Returns
-------
members: dictionary
List of simplex codes for natural neighbor
triangles in 'tri' for each grid cell.
triangle_info: dictionary
Circumcenter and radius information for each
triangle in 'tri'.
"""
tree = cKDTree(grid_points)
| python | {
"resource": ""
} |
q23586 | find_nn_triangles_point | train | def find_nn_triangles_point(tri, cur_tri, point):
r"""Return the natural neighbors of a triangle containing a point.
This is based on the provided Delaunay Triangulation.
Parameters
----------
tri: Object
A Delaunay Triangulation
cur_tri: int
Simplex code for Delaunay Triangulation lookup of
a given triangle that contains 'position'.
point: (x, y)
Coordinates used to calculate distances to
simplexes in 'tri'.
Returns
-------
nn: (N, ) array
List of simplex codes for natural neighbor
triangles in 'tri'.
"""
nn = []
candidates = set(tri.neighbors[cur_tri])
# find | python | {
"resource": ""
} |
q23587 | find_local_boundary | train | def find_local_boundary(tri, triangles):
r"""Find and return the outside edges of a collection of natural neighbor triangles.
There is no guarantee that this boundary is convex, so ConvexHull is not
sufficient in some situations.
Parameters
----------
tri: Object
A Delaunay Triangulation
triangles: (N, ) array
List of natural neighbor triangles.
Returns
-------
edges: (2, N) ndarray
List of vertex codes that form outer edges of
a group of natural neighbor triangles.
"""
edges = []
for triangle in triangles:
| python | {
"resource": ""
} |
q23588 | area | train | def area(poly):
r"""Find the area of a given polygon using the shoelace algorithm.
Parameters
----------
poly: (2, N) ndarray
2-dimensional coordinates representing an ordered
traversal around the edge a polygon.
Returns
-------
area: float
| python | {
"resource": ""
} |
q23589 | order_edges | train | def order_edges(edges):
r"""Return an ordered traversal of the edges of a two-dimensional polygon.
Parameters
----------
edges: (2, N) ndarray
List of unordered line segments, where each
line segment is represented by two unique
vertex codes.
Returns
-------
ordered_edges: (2, N) ndarray
"""
edge = edges[0]
edges = edges[1:]
ordered_edges = [edge]
num_max = len(edges)
while len(edges) > 0 and num_max > 0:
match = edge[1]
for | python | {
"resource": ""
} |
q23590 | precipitable_water | train | def precipitable_water(dewpt, pressure, bottom=None, top=None):
r"""Calculate precipitable water through the depth of a sounding.
Formula used is:
.. math:: -\frac{1}{\rho_l g} \int\limits_{p_\text{bottom}}^{p_\text{top}} r dp
from [Salby1996]_, p. 28.
Parameters
----------
dewpt : `pint.Quantity`
Atmospheric dewpoint profile
pressure : `pint.Quantity`
Atmospheric pressure profile
bottom: `pint.Quantity`, optional
Bottom of the layer, specified in pressure. Defaults to None (highest pressure).
top: `pint.Quantity`, optional
The top of the layer, specified in pressure. Defaults to None (lowest pressure).
Returns
-------
`pint.Quantity`
The precipitable water in the layer
"""
# Sort pressure and dewpoint to be in decreasing pressure order (increasing height)
sort_inds = np.argsort(pressure)[::-1]
# NOTE(review): fragment truncated in this dump -- the re-indexing of the
# profiles, the layer selection, and the integration of mixing ratio over
# pressure that produce the final value are missing; do not use as-is.
pressure | python | {
"resource": ""
} |
q23591 | mean_pressure_weighted | train | def mean_pressure_weighted(pressure, *args, **kwargs):
r"""Calculate pressure-weighted mean of an arbitrary variable through a layer.
Layer top and bottom specified in height or pressure.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile
*args : `pint.Quantity`
Parameters for which the pressure-weighted mean is to be calculated.
heights : `pint.Quantity`, optional
Heights from sounding. Standard atmosphere heights assumed (if needed)
if no heights are given.
bottom: `pint.Quantity`, optional
The bottom of the layer in either the provided height coordinate
or in pressure. Don't provide in meters AGL unless the provided
height coordinate is meters AGL. Default is the first observation,
assumed to be the surface.
depth: `pint.Quantity`, optional
The depth of the layer in meters or hPa.
Returns
-------
`pint.Quantity`
u_mean: u-component of layer mean wind.
`pint.Quantity`
v_mean: v-component of layer mean wind.
"""
heights = kwargs.pop('heights', None)
bottom = kwargs.pop('bottom', None)
depth = kwargs.pop('depth', None)
# NOTE(review): fragment truncated in this dump -- only the kwargs
# unpacking survives; the layer selection and the pressure-weighted
# integration that fill the returned list are missing here.
ret = [] # Returned variable means | python | {
"resource": ""
} |
q23592 | bunkers_storm_motion | train | def bunkers_storm_motion(pressure, u, v, heights):
r"""Calculate the Bunkers right-mover and left-mover storm motions and sfc-6km mean flow.
Uses the storm motion calculation from [Bunkers2000]_.
Parameters
----------
pressure : array-like
Pressure from sounding
u : array-like
U component of the wind
v : array-like
V component of the wind
heights : array-like
Heights from sounding
Returns
-------
right_mover: `pint.Quantity`
U and v component of Bunkers RM storm motion
left_mover: `pint.Quantity`
U and v component of Bunkers LM storm motion
wind_mean: `pint.Quantity`
U and v component of sfc-6km mean flow
"""
# mean wind from sfc-6km
# NOTE(review): fragment truncated in this dump mid-call -- the remainder
# of the Bunkers (2000) computation (shear vector and the empirical
# deviations producing the RM/LM motions) is missing here.
wind_mean = concatenate(mean_pressure_weighted(pressure, u, v, heights=heights,
| python | {
"resource": ""
} |
q23593 | bulk_shear | train | def bulk_shear(pressure, u, v, heights=None, bottom=None, depth=None):
r"""Calculate bulk shear through a layer.
Layer top and bottom specified in meters or pressure.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile
u : `pint.Quantity`
U-component of wind.
v : `pint.Quantity`
V-component of wind.
height : `pint.Quantity`, optional
Heights from sounding
depth: `pint.Quantity`, optional
The depth of the layer in meters or hPa. Defaults to 100 hPa.
bottom: `pint.Quantity`, optional
The bottom of the layer in height or pressure coordinates.
If using a height, it must be in the same coordinates as the given
heights (i.e., don't use meters AGL unless given heights
| python | {
"resource": ""
} |
# NOTE(review): fragment truncated in this dump inside the docstring -- the
# entire function body is missing here.
def supercell_composite(mucape, effective_storm_helicity, effective_shear):
    r"""Calculate the supercell composite parameter.

    The supercell composite parameter is designed to identify
    environments favorable for the development of supercells,
    and is calculated using the formula developed by
    [Thompson2004]_:

    .. math:: \text{SCP} = \frac{\text{MUCAPE}}{1000 \text{J/kg}} *
       \frac{\text{Effective SRH}}{50 \text{m}^2/\text{s}^2} *
       \frac{\text{Effective Shear}}{20 \text{m/s}}

    The effective_shear term is set to zero below 10 m/s and
    capped at 1 when effective_shear exceeds 20 m/s.

    Parameters
    ----------
    mucape : `pint.Quantity`
        Most-unstable CAPE
    effective_storm_helicity : `pint.Quantity`
        Effective-layer storm-relative helicity
    effective_shear : `pint.Quantity`
        Effective bulk shear

    Returns
    -------
    `pint.Quantity`
        supercell composite

    """
    # NOTE(review): relies on module-level `units` and `atleast_1d` imports
    # (metpy.units) -- confirm against the file header.
    # Cap the shear term at 20 m/s (maximum contribution of 1) ...
    effective_shear = np.clip(atleast_1d(effective_shear), None, 20 * units('m/s'))
    # ... and zero it out entirely below 10 m/s, per Thompson et al. (2004).
    effective_shear[effective_shear < 10 * units('m/s')] = 0 * units('m/s')
    effective_shear = effective_shear / (20 * units('m/s'))

    return ((mucape / (1000 * units('J/kg')))
            * (effective_storm_helicity / (50 * units('m^2/s^2')))
            * effective_shear).to('dimensionless')
q23595 | critical_angle | train | def critical_angle(pressure, u, v, heights, stormu, stormv):
r"""Calculate the critical angle.
The critical angle is the angle between the 10m storm-relative inflow vector
and the 10m-500m shear vector. A critical angle near 90 degrees indicates
that a storm in this environment on the indicated storm motion vector
is likely ingesting purely streamwise vorticity into its updraft, and [Esterheld2008]_
showed that significantly tornadic supercells tend to occur in environments
with critical angles near 90 degrees.
Parameters
----------
pressure : `pint.Quantity`
Pressures from sounding.
u : `pint.Quantity`
U-component of sounding winds.
v : `pint.Quantity`
V-component of sounding winds.
heights : `pint.Quantity`
Heights from sounding.
stormu : `pint.Quantity`
U-component of storm motion.
stormv : `pint.Quantity`
V-component of storm motion.
Returns
-------
`pint.Quantity`
critical angle in degrees
"""
# Convert everything to m/s
u = u.to('m/s')
v = v.to('m/s')
stormu = stormu.to('m/s')
stormv = stormv.to('m/s')
# NOTE(review): np.argsort(pressure[::-1]) yields indices into the REVERSED
# array but is then used to index the un-reversed `pressure` -- verify
# against upstream; np.argsort(pressure)[::-1] is likely what was intended.
sort_inds = np.argsort(pressure[::-1])
pressure = pressure[sort_inds]
# NOTE(review): fragment truncated in this dump -- the re-indexing of
# heights/u/v and the shear/angle computation are missing here.
| python | {
"resource": ""
} |
def broadcast_indices(x, minv, ndim, axis):
    """Calculate index values to properly broadcast index array within data array.

    Builds a tuple of index arrays that, used as ``x[inds]``, selects the
    entries of ``x`` given by ``minv`` along ``axis`` while leaving every
    other dimension intact.

    Parameters
    ----------
    x : ndarray
        Array whose shape defines the broadcasting.
    minv : ndarray
        Integer indices to apply along ``axis``.
    ndim : int
        Number of dimensions of ``x``.
    axis : int
        The dimension that ``minv`` indexes.

    Returns
    -------
    tuple of ndarray
        Index arrays suitable for fancy indexing into ``x``.

    See usage in interp.
    """
    ret = []
    for dim in range(ndim):
        if dim == axis:
            ret.append(minv)
        else:
            # An aranged index for this dimension, shaped so it broadcasts
            # against minv: full slice on its own axis, new axes elsewhere.
            broadcast_slice = [np.newaxis] * ndim
            broadcast_slice[dim] = slice(None)
            dim_inds = np.arange(x.shape[dim])
            ret.append(dim_inds[tuple(broadcast_slice)])
    return tuple(ret)
def register(self, name):
    """Register a callable with the registry under a particular name.

    Parameters
    ----------
    name : str
        The name under which to register a function

    Returns
    -------
    dec : callable
        A decorator that takes a function, registers it under ``name``, and
        returns the function unchanged so it can still be used normally.

    """
    def dec(func):
        # Store in the instance's mapping; assumes self._registry is a dict
        # set up by the enclosing Registry class -- TODO confirm upstream.
        self._registry[name] = func
        return func
    return dec
def wind_speed(u, v):
    r"""Compute the wind speed from u and v-components.

    Parameters
    ----------
    u : array_like
        Wind component in the X (East-West) direction
    v : array_like
        Wind component in the Y (North-South) direction

    Returns
    -------
    wind speed: array_like
        The speed of the wind; elementwise for array input, and any attached
        units are preserved by the arithmetic.

    See Also
    --------
    wind_direction

    """
    # Plain Euclidean magnitude of the (u, v) vector.
    speed = np.sqrt(u * u + v * v)
    return speed
def wind_direction(u, v):
    r"""Compute the wind direction from u and v-components.

    Parameters
    ----------
    u : array_like
        Wind component in the X (East-West) direction
    v : array_like
        Wind component in the Y (North-South) direction

    Returns
    -------
    direction: `pint.Quantity`
        The direction of the wind in interval [0, 360] degrees, specified as the direction from
        which it is blowing, with 360 being North.

    See Also
    --------
    wind_speed

    Notes
    -----
    In the case of calm winds (where `u` and `v` are zero), this function returns a direction
    of 0.

    """
    # NOTE(review): relies on module-level `units` and `atleast_1d` imports
    # (metpy.units) -- confirm against the file header.
    # Meteorological convention: direction the wind blows FROM, measured
    # clockwise from north -- hence the 90 deg offset and negated components.
    wdir = 90. * units.deg - np.arctan2(-v, -u) * units.rad
    origshape = wdir.shape
    wdir = atleast_1d(wdir)
    # arctan2 leaves values in (-270, 90]; shift the non-positive part up so
    # the result lies in (0, 360].
    wdir[wdir <= 0] += 360. * units.deg
    # Calm winds are reported as direction 0 by convention.
    wdir[(u == 0) & (v == 0)] = 0. * units.deg
    return wdir.reshape(origshape).to('degrees')
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.