_id (string, length 2-7) | title (string, length 1-88) | partition (string, 3 classes) | text (string, length 31-13.1k) | language (string, 1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q4900 | add_uniform_time_weights | train | def add_uniform_time_weights(ds):
"""Append uniform time weights to a Dataset.
All DataArrays with a time coordinate require a time weights coordinate.
For Datasets read in without a time bounds coordinate or explicit
time weights built in, aospy adds uniform time weights at each point
in the time coordinate.
Parameters
----------
ds : Dataset
Input data
Returns
-------
Dataset
"""
time = ds[TIME_STR]
unit_interval = | python | {
"resource": ""
} |
q4901 | _assert_has_data_for_time | train | def _assert_has_data_for_time(da, start_date, end_date):
"""Check to make sure data is in Dataset for the given time range.
Parameters
----------
da : DataArray
DataArray with a time variable
start_date : datetime-like object or str
start date
end_date : datetime-like object or str
end date
Raises
------
AssertionError
If the time range is not within the time range of the DataArray
"""
if isinstance(start_date, str) and isinstance(end_date, str):
logging.warning(
'When using strings to specify start and end dates, the check '
'to determine if data exists for the full extent of the desired '
'interval is not implemented. Therefore it is possible that '
'you are doing a calculation for a lesser interval than you '
'specified. If you would like this check to occur, use explicit '
'datetime-like objects for bounds instead.')
return
if RAW_START_DATE_STR in da.coords:
with warnings.catch_warnings(record=True):
da_start = da[RAW_START_DATE_STR].values
da_end = da[RAW_END_DATE_STR].values
else:
times = | python | {
"resource": ""
} |
q4902 | sel_time | train | def sel_time(da, start_date, end_date):
"""Subset a DataArray or Dataset for a given date range.
Ensures that data are present for full extent of requested range.
Appends start and end date of the subset to the DataArray.
Parameters
----------
da : DataArray or Dataset
data to subset
start_date : np.datetime64
start of date interval
end_date : np.datetime64
end of date interval
Returns
-------
da : DataArray or Dataset
subsetted data
Raises
------
AssertionError
| python | {
"resource": ""
} |
q4903 | assert_matching_time_coord | train | def assert_matching_time_coord(arr1, arr2):
"""Check to see if two DataArrays have the same time coordinate.
Parameters
----------
arr1 : DataArray or Dataset
First DataArray or Dataset
arr2 : DataArray or Dataset
Second DataArray or Dataset
Raises
------
ValueError
| python | {
"resource": ""
} |
q4904 | ensure_time_as_index | train | def ensure_time_as_index(ds):
"""Ensures that time is an indexed coordinate on relevant quantites.
Sometimes when the data we load from disk has only one timestep, the
indexing of time-defined quantities in the resulting xarray.Dataset gets
messed up, in that the time bounds array and data variables don't get
indexed by time, even though they should. Therefore, we need this helper
function to (possibly) correct this.
Note that this must be applied before CF-conventions are decoded; otherwise
it casts ``np.datetime64[ns]`` as ``int`` values.
Parameters
----------
ds : Dataset
Dataset with a time coordinate
Returns
-------
Dataset
"""
time_indexed_coords = {TIME_WEIGHTS_STR, TIME_BOUNDS_STR}
| python | {
"resource": ""
} |
q4905 | infer_year | train | def infer_year(date):
"""Given a datetime-like object or string infer the year.
Parameters
----------
date : datetime-like object or str
Input date
Returns
-------
int
Examples
--------
>>> infer_year('2000')
2000
>>> infer_year('2000-01')
2000
>>> infer_year('2000-01-31')
2000
>>> infer_year(datetime.datetime(2000, 1, 1))
2000
>>> infer_year(np.datetime64('2000-01-01'))
2000
>>> infer_year(DatetimeNoLeap(2000, 1, 1))
2000
>>>
"""
if isinstance(date, str):
# Look for a string that begins with four numbers; the first four
# numbers found are the year. | python | {
"resource": ""
} |
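The `infer_year` row above is cut off before its string-parsing branch. As a standalone illustration of the behaviour its docstring and doctests describe (a sketch, not the aospy implementation itself), the year can be pulled out with a regex for strings and read off the object otherwise:

import datetime
import re

import numpy as np

def infer_year_sketch(date):
    """Return the year of a datetime-like object or a date string."""
    if isinstance(date, str):
        # The first four consecutive digits are taken to be the year.
        match = re.match(r'\s*(\d{4})', date)
        if match is None:
            raise ValueError('unable to infer year from {!r}'.format(date))
        return int(match.group(1))
    if isinstance(date, np.datetime64):
        # Years elapsed since the 1970 epoch of the datetime64 dtype.
        return date.astype('datetime64[Y]').astype(int) + 1970
    # datetime.datetime and cftime objects expose a plain .year attribute.
    return date.year

assert infer_year_sketch('2000-01-31') == 2000
assert infer_year_sketch(np.datetime64('2000-01-01')) == 2000
assert infer_year_sketch(datetime.datetime(2000, 1, 1)) == 2000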
q4906 | maybe_convert_to_index_date_type | train | def maybe_convert_to_index_date_type(index, date):
"""Convert a datetime-like object to the index's date type.
Datetime indexing in xarray can be done using either a pandas
DatetimeIndex or a CFTimeIndex. Both support partial-datetime string
indexing regardless of the calendar type of the underlying data;
therefore if a string is passed as a date, we return it unchanged. If a
datetime-like object is provided, it will be converted to the underlying
date type of the index. For a DatetimeIndex that is np.datetime64; for a
CFTimeIndex that is an object of type cftime.datetime specific to the
calendar used.
Parameters
----------
index : pd.Index
Input time index
date : datetime-like object or str
Input datetime
Returns
-------
date of the type appropriate for the time index of the Dataset
"""
if isinstance(date, str):
return date
if isinstance(index, pd.DatetimeIndex):
if isinstance(date, np.datetime64):
return date
else:
return np.datetime64(str(date))
else:
| python | {
"resource": ""
} |
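A short usage sketch of the conversion described above, showing only the pandas DatetimeIndex branch (the CFTimeIndex branch is truncated in the row); strings are left untouched so partial-date indexing keeps working:

import datetime

import numpy as np
import pandas as pd

index = pd.date_range('2000-01-01', periods=3, freq='D')
date = datetime.datetime(2000, 1, 2)

# For a pandas DatetimeIndex a datetime-like object is coerced to
# np.datetime64; a plain string such as '2000-01' would be returned as-is.
converted = np.datetime64(date)
print(index[index == converted])   # DatetimeIndex(['2000-01-02'], ...)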
q4907 | Region._make_mask | train | def _make_mask(self, data, lon_str=LON_STR, lat_str=LAT_STR):
"""Construct the mask that defines a region on a given data's grid."""
mask = False
for west, east, south, north in self.mask_bounds:
if west < east:
mask_lon = (data[lon_str] > west) & (data[lon_str] < east)
else:
mask_lon = (data[lon_str] | python | {
"resource": ""
} |
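The row above loses the wrap-around branch of `Region._make_mask` (regions whose western bound is numerically greater than their eastern bound, e.g. across the dateline) and the latitude part of the mask. A self-contained longitude-only sketch of the same idea, assuming the missing branch is an OR of the two half-intervals, is:

import numpy as np

def lon_mask_sketch(lon, west, east):
    """True where lon lies inside the (west, east) interval.

    Handles regions that wrap around, i.e. west > east.
    """
    lon = np.asarray(lon)
    if west < east:
        return (lon > west) & (lon < east)
    # Wrap-around case: keep points east of `west` OR west of `east`.
    return (lon > west) | (lon < east)

lon = np.arange(0, 360, 30)
print(lon_mask_sketch(lon, 330, 30))   # only lon == 0 is True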
q4908 | Region.mask_var | train | def mask_var(self, data, lon_cyclic=True, lon_str=LON_STR,
lat_str=LAT_STR):
"""Mask the given data outside this region.
Parameters
----------
data : xarray.DataArray
The array to be regionally masked.
lon_cyclic : bool, optional (default True)
Whether or not the longitudes of ``data`` span the whole globe,
meaning that they should be wrapped around as necessary to cover
the Region's full width.
lon_str, lat_str : str, optional
The names of the longitude and latitude dimensions, respectively,
| python | {
"resource": ""
} |
q4909 | Region.ts | train | def ts(self, data, lon_cyclic=True, lon_str=LON_STR, lat_str=LAT_STR,
land_mask_str=LAND_MASK_STR, sfc_area_str=SFC_AREA_STR):
"""Create yearly time-series of region-averaged data.
Parameters
----------
data : xarray.DataArray
The array to create the regional timeseries of
lon_cyclic : { None, True, False }, optional (default True)
Whether or not the longitudes of ``data`` span the whole globe,
meaning that they should be wrapped around as necessary to cover
the Region's full width.
lat_str, lon_str, land_mask_str, sfc_area_str : str, optional
The name of the latitude, longitude, land mask, and surface area
coordinates, respectively, in ``data``. Defaults are the
corresponding values in ``aospy.internal_names``.
Returns
-------
xarray.DataArray
The timeseries of values averaged within the region and within each
year, one value per year.
"""
data_masked = self.mask_var(data, lon_cyclic=lon_cyclic,
| python | {
"resource": ""
} |
q4910 | Region.av | train | def av(self, data, lon_str=LON_STR, lat_str=LAT_STR,
land_mask_str=LAND_MASK_STR, sfc_area_str=SFC_AREA_STR):
"""Time-average of region-averaged data.
Parameters
----------
data : xarray.DataArray
The array to compute the regional time-average of
lat_str, lon_str, land_mask_str, sfc_area_str : str, optional
The name of the latitude, longitude, land mask, and surface area
coordinates, respectively, in ``data``. Defaults are the
corresponding values in ``aospy.internal_names``.
Returns
-------
| python | {
"resource": ""
} |
q4911 | _rename_coords | train | def _rename_coords(ds, attrs):
"""Rename coordinates to aospy's internal names."""
for name_int, names_ext in attrs.items():
# Check if coord is in dataset already.
ds_coord_name = set(names_ext).intersection(set(ds.coords))
if ds_coord_name:
# Rename to the aospy internal name.
try:
ds = ds.rename({list(ds_coord_name)[0]: name_int})
logging.debug("Rename coord from `{0}` to `{1}` for "
| python | {
"resource": ""
} |
q4912 | _bounds_from_array | train | def _bounds_from_array(arr, dim_name, bounds_name):
"""Get the bounds of an array given its center values.
E.g. if lat-lon grid center lat/lon values are known, but not the
bounds of each grid box. The algorithm assumes that the bounds
are simply halfway between each pair of center values.
"""
# TODO: don't assume needed dimension is in axis=0
# TODO: refactor to get rid of | python | {
"resource": ""
} |
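The body of `_bounds_from_array` is truncated; a hedged numpy sketch of the halfway-between-centers rule that its docstring states, with the two outer bounds extrapolated by the same half spacing (that extrapolation is an assumption), could be:

import numpy as np

def bounds_from_centers_sketch(centers):
    """(N, 2) array of cell bounds, assuming bounds sit halfway between centers."""
    centers = np.asarray(centers, dtype=float)
    midpoints = 0.5 * (centers[:-1] + centers[1:])
    # Extrapolate the first lower bound and last upper bound symmetrically.
    first = centers[0] - (midpoints[0] - centers[0])
    last = centers[-1] + (centers[-1] - midpoints[-1])
    edges = np.concatenate([[first], midpoints, [last]])
    return np.column_stack([edges[:-1], edges[1:]])

print(bounds_from_centers_sketch([10., 20., 30.]))
# [[ 5. 15.]
#  [15. 25.]
#  [25. 35.]]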
q4913 | _diff_bounds | train | def _diff_bounds(bounds, coord):
"""Get grid spacing by subtracting upper and lower bounds."""
try:
return bounds[:, 1] - bounds[:, 0]
except IndexError:
| python | {
"resource": ""
} |
q4914 | _grid_sfc_area | train | def _grid_sfc_area(lon, lat, lon_bounds=None, lat_bounds=None):
"""Calculate surface area of each grid cell in a lon-lat grid."""
# Compute the bounds if not given.
if lon_bounds is None:
lon_bounds = _bounds_from_array(
lon, internal_names.LON_STR, internal_names.LON_BOUNDS_STR)
if lat_bounds is None:
lat_bounds = _bounds_from_array(
lat, internal_names.LAT_STR, internal_names.LAT_BOUNDS_STR)
# Compute the surface area.
dlon = _diff_bounds(utils.vertcoord.to_radians(lon_bounds, is_delta=True),
| python | {
"resource": ""
} |
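`_grid_sfc_area` is cut off before the actual area computation. The standard expression for the area of a lon-lat cell on a sphere is A = R^2 * dlon * (sin(lat_north) - sin(lat_south)); a standalone sketch using bound arrays like those built above (the Earth-radius constant here is an assumed value, not taken from aospy) is:

import numpy as np

EARTH_RADIUS = 6.371e6  # metres; assumed value for illustration

def cell_areas_sketch(lon_bounds_deg, lat_bounds_deg):
    """(nlat, nlon) array of cell areas for a regular lon-lat grid.

    Both inputs are (N, 2) arrays of [lower, upper] bounds in degrees.
    """
    dlon = np.deg2rad(lon_bounds_deg[:, 1] - lon_bounds_deg[:, 0])
    sin_lat = np.sin(np.deg2rad(lat_bounds_deg))
    dsinlat = sin_lat[:, 1] - sin_lat[:, 0]
    return EARTH_RADIUS ** 2 * np.outer(dsinlat, dlon)

lon_b = np.array([[0., 90.], [90., 180.], [180., 270.], [270., 360.]])
lat_b = np.array([[-90., 0.], [0., 90.]])
areas = cell_areas_sketch(lon_b, lat_b)
# The eight cells tile the whole sphere, so they must sum to 4*pi*R^2.
assert np.isclose(areas.sum(), 4 * np.pi * EARTH_RADIUS ** 2)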
q4915 | Model._get_grid_files | train | def _get_grid_files(self):
"""Get the files holding grid data for an aospy object."""
grid_file_paths = self.grid_file_paths
datasets = []
if isinstance(grid_file_paths, str):
grid_file_paths = [grid_file_paths]
for path in grid_file_paths:
| python | {
"resource": ""
} |
q4916 | Model._set_mult_grid_attr | train | def _set_mult_grid_attr(self):
"""
Set multiple attrs from grid file given their names in the grid file.
"""
grid_objs = self._get_grid_files()
if self.grid_attrs is None:
self.grid_attrs = {}
# Override GRID_ATTRS with entries in grid_attrs
attrs = internal_names.GRID_ATTRS.copy()
for k, v in self.grid_attrs.items():
if k not in attrs:
raise ValueError(
'Unrecognized internal name, {!r}, specified for a '
'custom grid attribute name. See the full list of '
'valid internal names below:\n\n{}'.format(
k, list(internal_names.GRID_ATTRS.keys())))
attrs[k] = (v, )
for name_int, names_ext in attrs.items():
for name in names_ext:
grid_attr = _get_grid_attr(grid_objs, name)
| python | {
"resource": ""
} |
q4917 | Model.set_grid_data | train | def set_grid_data(self):
"""Populate the attrs that hold grid data."""
if self._grid_data_is_set:
return
self._set_mult_grid_attr()
if not np.any(getattr(self, 'sfc_area', None)):
try:
sfc_area = _grid_sfc_area(self.lon, self.lat, self.lon_bounds,
self.lat_bounds)
except AttributeError:
| python | {
"resource": ""
} |
q4918 | _other_to_lon | train | def _other_to_lon(func):
"""Wrapper for casting Longitude operator arguments to Longitude"""
| python | {
"resource": ""
} |
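`_other_to_lon` is shown without its body; the pattern it names, coercing the other operand to the class before calling the wrapped operator, looks roughly like the following sketch built around a toy LongitudeSketch class (the real aospy Longitude class is richer than this):

import functools

def other_to_lon_sketch(func):
    """Cast the second operand to LongitudeSketch before calling the operator."""
    @functools.wraps(func)
    def wrapper(self, other):
        if not isinstance(other, LongitudeSketch):
            other = LongitudeSketch(other)
        return func(self, other)
    return wrapper

class LongitudeSketch(object):
    def __init__(self, degrees):
        # Normalize into [-180, 180) so comparisons are well defined.
        self.degrees = (float(degrees) + 180.) % 360. - 180.

    @other_to_lon_sketch
    def __eq__(self, other):
        return self.degrees == other.degrees

    @other_to_lon_sketch
    def __lt__(self, other):
        return self.degrees < other.degrees

print(LongitudeSketch(190) == -170)   # True: both normalize to -170
print(LongitudeSketch(-10) < 5)       # True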
q4919 | _get_attr_by_tag | train | def _get_attr_by_tag(obj, tag, attr_name):
"""Get attribute from an object via a string tag.
Parameters
----------
obj : object from which to get the attribute
attr_name : str
Unmodified name of the attribute to be found. The actual attribute
that is returned may be modified by 'tag'.
tag : str
Tag | python | {
"resource": ""
} |
q4920 | _get_all_objs_of_type | train | def _get_all_objs_of_type(type_, parent):
"""Get all attributes of the given type from the given object.
Parameters
----------
type_ : The desired type
parent : The object from which to get the attributes with type matching
'type_'
Returns
-------
| python | {
"resource": ""
} |
q4921 | _prune_invalid_time_reductions | train | def _prune_invalid_time_reductions(spec):
"""Prune time reductions of spec with no time dimension."""
valid_reductions = []
if not spec['var'].def_time and spec['dtype_out_time'] is not None:
for reduction in spec['dtype_out_time']:
if reduction not in _TIME_DEFINED_REDUCTIONS:
valid_reductions.append(reduction)
else:
msg = ("Var {0} has no time dimension "
| python | {
"resource": ""
} |
q4922 | _compute_or_skip_on_error | train | def _compute_or_skip_on_error(calc, compute_kwargs):
"""Execute the Calc, catching and logging exceptions, but don't re-raise.
Prevents one failed calculation from stopping a larger requested set
of calculations.
"""
try:
return calc.compute(**compute_kwargs)
except Exception:
| python | {
"resource": ""
} |
q4923 | _submit_calcs_on_client | train | def _submit_calcs_on_client(calcs, client, func):
"""Submit calculations via dask.bag and a distributed client"""
logging.info('Connected to client: {}'.format(client))
| python | {
"resource": ""
} |
q4924 | _exec_calcs | train | def _exec_calcs(calcs, parallelize=False, client=None, **compute_kwargs):
"""Execute the given calculations.
Parameters
----------
calcs : Sequence of ``aospy.Calc`` objects
parallelize : bool, default False
Whether to submit the calculations in parallel or not
client : distributed.Client or None
The distributed Client used if parallelize is set to True; if None
a distributed LocalCluster is used.
compute_kwargs : dict of keyword arguments passed to ``Calc.compute``
Returns
-------
A list of the values returned by each Calc object that was executed.
"""
| python | {
"resource": ""
} |
q4925 | submit_mult_calcs | train | def submit_mult_calcs(calc_suite_specs, exec_options=None):
"""Generate and execute all specified computations.
Once the calculations are prepped and submitted for execution, any
calculation that triggers any exception or error is skipped, and the rest
of the calculations proceed unaffected. This prevents an error in a single
calculation from crashing a large suite of calculations.
Parameters
----------
calc_suite_specs : dict
The specifications describing the full set of calculations to be
generated and potentially executed. Accepted keys and their values:
library : module or package comprising an aospy object library
The aospy object library for these calculations.
projects : list of aospy.Proj objects
The projects to permute over.
models : 'all', 'default', or list of aospy.Model objects
The models to permute over. If 'all', use all models in the
``models`` attribute of each ``Proj``. If 'default', use all
models in the ``default_models`` attribute of each ``Proj``.
runs : 'all', 'default', or list of aospy.Run objects
The runs to permute over. If 'all', use all runs in the
``runs`` attribute of each ``Model``. If 'default', use all
runs in the ``default_runs`` attribute of each ``Model``.
variables : list of aospy.Var objects
The variables to be calculated.
regions : 'all' or list of aospy.Region objects
The region(s) over which any regional reductions will be performed.
If 'all', use all regions in the ``regions`` attribute of each
``Proj``.
date_ranges : 'default' or a list of tuples
The range of dates (inclusive) over which to perform calculations.
If 'default', use the ``default_start_date`` and
``default_end_date`` attribute of each ``Run``. Else provide a
list of tuples, each containing a pair of start and end dates,
such as ``date_ranges=[(start, end)]`` where ``start`` and
``end`` are each ``datetime.datetime`` objects, partial
datetime strings (e.g. '0001'), ``np.datetime64`` objects, or
``cftime.datetime`` objects.
output_time_intervals : {'ann', season-string, month-integer}
The sub-annual time interval over which to aggregate.
- 'ann' : Annual mean
- season-string : E.g. 'JJA' for June-July-August
- month-integer : 1 for January, 2 for February, etc. Each one is
a separate reduction, e.g. [1, 2] would produce averages (or
other specified time reduction) over all Januaries, and
separately over all Februaries.
output_time_regional_reductions : list of reduction string identifiers
Unlike most other keys, these are not permuted over when creating
the :py:class:`aospy.Calc` objects that execute the calculations;
each :py:class:`aospy.Calc` performs all of the specified
reductions. Accepted string identifiers are:
- Gridpoint-by-gridpoint output:
- 'av' : Gridpoint-by-gridpoint time-average
- 'std' : Gridpoint-by-gridpoint temporal standard deviation
- 'ts' : Gridpoint-by-gridpoint time-series
- Averages over each region specified via `region`:
- 'reg.av', 'reg.std', 'reg.ts' : analogous to 'av', 'std', 'ts'
output_vertical_reductions : {None, 'vert_av', 'vert_int'}, optional
How to reduce the data vertically:
- None : no vertical reduction
- 'vert_av' : mass-weighted vertical average
- 'vert_int' : mass-weighted vertical integral
input_time_intervals : {'annual', 'monthly', 'daily', '#hr'}
A string specifying the time resolution of the input data. In
'#hr' above, the '#' stands for a number, e.g. 3hr or 6hr, for
sub-daily output. These are the suggested specifiers, but others
may be used if they are also used by the DataLoaders for the given
Runs.
input_time_datatypes : {'inst', 'ts', 'av'}
What the time axis of the input data represents:
- 'inst' : Timeseries of instantaneous values
- 'ts' : Timeseries of averages over the period of each time-index
- 'av' : A single value averaged over a date range
input_vertical_datatypes : {False, 'pressure', 'sigma'}, optional
The vertical coordinate system used by the input data:
- False : not defined vertically
- 'pressure' : pressure coordinates
- 'sigma' : hybrid sigma-pressure coordinates
input_time_offsets : {None, dict}, optional
| python | {
"resource": ""
} |
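The docstring above is cut off inside the `input_time_offsets` entry, but the keys it does list are enough to sketch what a calling script passes in. Every name below that refers to a concrete object library (`example_obj_lib`, `my_proj`, `precip`, `temp`) is a hypothetical placeholder, and the import path is assumed:

# All object names below are placeholders for a user's own aospy object
# library; they are not shipped with aospy.
import example_obj_lib as lib

calc_suite_specs = dict(
    library=lib,
    projects=[lib.my_proj],
    models='default',
    runs='default',
    variables=[lib.precip, lib.temp],
    regions='all',
    date_ranges='default',
    output_time_intervals=['ann'],
    output_time_regional_reductions=['av', 'reg.av'],
    output_vertical_reductions=[None],
    input_time_intervals=['monthly'],
    input_time_datatypes=['ts'],
    input_time_offsets=[None],
    input_vertical_datatypes=[False],
)
exec_options = dict(parallelize=True, write_to_tar=True)

# Import path assumed; adjust to wherever submit_mult_calcs lives in your
# installation.
from aospy.automate import submit_mult_calcs
calcs = submit_mult_calcs(calc_suite_specs, exec_options)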
q4926 | CalcSuite._get_requested_spec | train | def _get_requested_spec(self, obj, spec_name):
"""Helper to translate user specifications to needed objects."""
requested = self._specs_in[spec_name]
if isinstance(requested, str):
| python | {
"resource": ""
} |
q4927 | CalcSuite._permute_core_specs | train | def _permute_core_specs(self):
"""Generate all requested combinations of the core objects."""
obj_trees = []
projects = self._get_requested_spec(self._obj_lib, _PROJECTS_STR)
for project in projects:
models = self._get_requested_spec(project, _MODELS_STR)
for model in models:
runs = self._get_requested_spec(model, _RUNS_STR)
for run in runs:
| python | {
"resource": ""
} |
q4928 | CalcSuite._get_regions | train | def _get_regions(self):
"""Get the requested regions."""
if self._specs_in[_REGIONS_STR] == 'all':
return [_get_all_objs_of_type(
| python | {
"resource": ""
} |
q4929 | CalcSuite._get_variables | train | def _get_variables(self):
"""Get the requested variables."""
if self._specs_in[_VARIABLES_STR] == 'all':
return _get_all_objs_of_type(
| python | {
"resource": ""
} |
q4930 | CalcSuite._get_aux_specs | train | def _get_aux_specs(self):
"""Get and pre-process all of the non-core specifications."""
# Drop the "core" specifications, which are handled separately.
specs = self._specs_in.copy()
[specs.pop(core) for core in self._CORE_SPEC_NAMES]
specs[_REGIONS_STR] | python | {
"resource": ""
} |
q4931 | CalcSuite._permute_aux_specs | train | def _permute_aux_specs(self):
"""Generate all permutations of the non-core specifications."""
# Convert to attr names that Calc is expecting.
calc_aux_mapping = self._NAMES_SUITE_TO_CALC.copy()
# Special case: manually add 'library' to mapping
calc_aux_mapping[_OBJ_LIB_STR] = None
[calc_aux_mapping.pop(core) for core in self._CORE_SPEC_NAMES]
| python | {
"resource": ""
} |
q4932 | CalcSuite._combine_core_aux_specs | train | def _combine_core_aux_specs(self):
"""Combine permutations over core and auxilliary Calc specs."""
all_specs = []
for core_dict in self._permute_core_specs():
| python | {
"resource": ""
} |
q4933 | CalcSuite.create_calcs | train | def create_calcs(self):
"""Generate a Calc object for each requested parameter combination."""
specs = self._combine_core_aux_specs()
for spec in specs:
| python | {
"resource": ""
} |
q4934 | data_in_label | train | def data_in_label(intvl_in, dtype_in_time, dtype_in_vert=False):
"""Create string label specifying the input data of a calculation."""
intvl_lbl = intvl_in
time_lbl = dtype_in_time
lbl = '_'.join(['from', intvl_lbl, time_lbl]).replace('__', '_')
| python | {
"resource": ""
} |
q4935 | data_name_gfdl | train | def data_name_gfdl(name, domain, data_type, intvl_type, data_yr,
intvl, data_in_start_yr, data_in_dur):
"""Determine the filename of GFDL model data output."""
# Determine starting year of netCDF file to be accessed.
extra_yrs = (data_yr - data_in_start_yr) % data_in_dur
data_in_yr = data_yr - extra_yrs
# Determine file name. Two cases: time series (ts) or time-averaged (av).
if data_type in ('ts', 'inst'):
if intvl_type == 'annual':
if data_in_dur == 1:
filename = '.'.join([domain, '{:04d}'.format(data_in_yr),
name, 'nc'])
else:
filename = '.'.join([domain, '{:04d}-{:04d}'.format(
data_in_yr, data_in_yr + data_in_dur - 1
), name, 'nc'])
elif intvl_type == 'monthly':
filename = (domain + '.{:04d}'.format(data_in_yr) + '01-' +
| python | {
"resource": ""
} |
q4936 | dmget | train | def dmget(files_list):
"""Call GFDL command 'dmget' to access archived files."""
if isinstance(files_list, str):
files_list = [files_list]
archive_files = []
for f in files_list:
if f.startswith('/archive'):
archive_files.append(f)
| python | {
"resource": ""
} |
q4937 | _replace_pressure | train | def _replace_pressure(arguments, dtype_in_vert):
"""Replace p and dp Vars with appropriate Var objects specific to
the dtype_in_vert."""
arguments_out = []
for arg in arguments:
if isinstance(arg, Var):
| python | {
"resource": ""
} |
q4938 | _add_metadata_as_attrs | train | def _add_metadata_as_attrs(data, units, description, dtype_out_vert):
"""Add metadata attributes to Dataset or DataArray"""
if isinstance(data, xr.DataArray):
return _add_metadata_as_attrs_da(data, units, description,
| python | {
"resource": ""
} |
q4939 | _add_metadata_as_attrs_da | train | def _add_metadata_as_attrs_da(data, units, description, dtype_out_vert):
"""Add metadata attributes to DataArray"""
if dtype_out_vert == 'vert_int':
if units != '':
units = '(vertical integral of {0}): {0} kg m^-2)'.format(units)
else:
| python | {
"resource": ""
} |
q4940 | Calc._dir_out | train | def _dir_out(self):
"""Create string of the data directory to save individual .nc files."""
return | python | {
"resource": ""
} |
q4941 | Calc._dir_tar_out | train | def _dir_tar_out(self):
"""Create string of the data directory to store a tar file."""
return | python | {
"resource": ""
} |
q4942 | Calc._file_name | train | def _file_name(self, dtype_out_time, extension='nc'):
"""Create the name of the aospy file."""
if dtype_out_time is None:
dtype_out_time = ''
out_lbl = utils.io.data_out_label(self.intvl_out, dtype_out_time,
dtype_vert=self.dtype_out_vert)
in_lbl = utils.io.data_in_label(self.intvl_in, self.dtype_in_time,
self.dtype_in_vert)
start_year = utils.times.infer_year(self.start_date)
end_year | python | {
"resource": ""
} |
q4943 | Calc._print_verbose | train | def _print_verbose(*args):
"""Print diagnostic message."""
try:
return '{0} {1} ({2})'.format(args[0], args[1], ctime())
| python | {
"resource": ""
} |
q4944 | Calc._to_desired_dates | train | def _to_desired_dates(self, arr):
"""Restrict the xarray DataArray or Dataset to the desired months."""
times = utils.times.extract_months(
| python | {
"resource": ""
} |
q4945 | Calc._add_grid_attributes | train | def _add_grid_attributes(self, ds):
"""Add model grid attributes to a dataset"""
for name_int, names_ext in self._grid_attrs.items():
ds_coord_name = set(names_ext).intersection(set(ds.coords) |
set(ds.data_vars))
model_attr = getattr(self.model, name_int, None)
if ds_coord_name and (model_attr is not None):
# Force coords to have desired name.
ds = ds.rename({list(ds_coord_name)[0]: name_int})
ds = ds.set_coords(name_int)
if not np.array_equal(ds[name_int], model_attr):
if np.allclose(ds[name_int], model_attr):
msg = ("Values for '{0}' are nearly (but not exactly) "
"the same in the Run {1} and the Model {2}. "
"Therefore replacing Run's values with the "
"model's.".format(name_int, self.run,
self.model))
logging.info(msg)
ds[name_int].values = model_attr.values
else:
msg = ("Model coordinates for '{0}' do not match those"
| python | {
"resource": ""
} |
q4946 | Calc._get_input_data | train | def _get_input_data(self, var, start_date, end_date):
"""Get the data for a single variable over the desired date range."""
logging.info(self._print_verbose("Getting input data:", var))
if isinstance(var, (float, int)):
return var
else:
cond_pfull = ((not hasattr(self, internal_names.PFULL_STR))
and var.def_vert and
self.dtype_in_vert == internal_names.ETA_STR)
data = self.data_loader.recursively_compute_variable(
var, start_date, end_date, self.time_offset, self.model,
**self.data_loader_attrs)
name = data.name
data = self._add_grid_attributes(data.to_dataset(name=data.name))
data = data[name]
if cond_pfull:
try:
self.pfull_coord = data[internal_names.PFULL_STR]
except KeyError:
pass
| python | {
"resource": ""
} |
q4947 | Calc._get_all_data | train | def _get_all_data(self, start_date, end_date):
"""Get the needed data from all of the vars in the calculation."""
return [self._get_input_data(var, start_date, end_date)
| python | {
"resource": ""
} |
q4948 | Calc._compute | train | def _compute(self, data):
"""Perform the calculation."""
local_ts = self._local_ts(*data)
dt = | python | {
"resource": ""
} |
q4949 | Calc._compute_full_ts | train | def _compute_full_ts(self, data):
"""Perform calculation and create yearly timeseries at each point."""
# Get results at each desired timestep and spatial point.
full_ts, dt = self._compute(data)
# Vertically integrate.
vert_types = ('vert_int', 'vert_av')
if self.dtype_out_vert in vert_types and self.var.def_vert:
dp = self._get_input_data(_DP_VARS[self.dtype_in_vert],
self.start_date, self.end_date)
full_ts = | python | {
"resource": ""
} |
q4950 | Calc._full_to_yearly_ts | train | def _full_to_yearly_ts(self, arr, dt):
"""Average the full timeseries within each year."""
time_defined = | python | {
"resource": ""
} |
q4951 | Calc._time_reduce | train | def _time_reduce(self, arr, reduction):
"""Perform the specified time reduction on a local time-series."""
if self.dtype_in_time == 'av' or not self.def_time:
return arr
reductions = {
'ts': lambda xarr: xarr,
| python | {
"resource": ""
} |
q4952 | Calc.region_calcs | train | def region_calcs(self, arr, func):
"""Perform a calculation for all regions."""
# Get pressure values for data output on hybrid vertical coordinates.
bool_pfull = (self.def_vert and self.dtype_in_vert ==
internal_names.ETA_STR and self.dtype_out_vert is False)
if bool_pfull:
pfull_data = self._get_input_data(_P_VARS[self.dtype_in_vert],
self.start_date,
self.end_date)
pfull = self._full_to_yearly_ts(
pfull_data, arr[internal_names.TIME_WEIGHTS_STR]
).rename('pressure')
# Loop over the regions, performing the calculation.
reg_dat = {}
for reg in self.region:
# Just pass along the data if averaged already.
if 'av' in self.dtype_in_time:
data_out = reg.ts(arr)
# Otherwise perform the calculation.
else:
| python | {
"resource": ""
} |
q4953 | Calc._apply_all_time_reductions | train | def _apply_all_time_reductions(self, data):
"""Apply all requested time reductions to the data."""
logging.info(self._print_verbose("Applying desired time-"
"reduction methods."))
reduc_specs = [r.split('.') for r in self.dtype_out_time]
reduced = {}
for reduc, specs in zip(self.dtype_out_time, reduc_specs):
func = specs[-1]
if 'reg' in specs:
| python | {
"resource": ""
} |
q4954 | Calc.compute | train | def compute(self, write_to_tar=True):
"""Perform all desired calculations on the data and save externally."""
data = self._get_all_data(self.start_date, self.end_date)
logging.info('Computing timeseries for {0} -- '
'{1}.'.format(self.start_date, self.end_date))
full, full_dt = self._compute_full_ts(data)
full_out = self._full_to_yearly_ts(full, full_dt)
reduced = self._apply_all_time_reductions(full_out)
logging.info("Writing desired gridded outputs to disk.")
for | python | {
"resource": ""
} |
q4955 | Calc._save_files | train | def _save_files(self, data, dtype_out_time):
"""Save the data to netcdf files in direc_out."""
path = self.path_out[dtype_out_time]
if not os.path.isdir(self.dir_out):
os.makedirs(self.dir_out)
if 'reg' in dtype_out_time:
try:
reg_data = xr.open_dataset(path)
except (EOFError, RuntimeError, IOError):
reg_data = xr.Dataset()
reg_data.update(data)
| python | {
"resource": ""
} |
q4956 | Calc._write_to_tar | train | def _write_to_tar(self, dtype_out_time):
"""Add the data to the tar file in tar_out_direc."""
# When submitted in parallel and the directory does not exist yet,
# multiple processes may try to create it; this raises an OSError
# for every process that attempted the creation after the first one
# succeeded.
try:
os.makedirs(self.dir_tar_out)
except OSError:
pass
# tarfile 'append' mode won't overwrite the old file, which we want.
# So open in 'read' mode, extract the file, and then delete it.
# But 'read' mode throws OSError if file doesn't exist: make it first.
utils.io.dmget([self.path_tar_out])
with tarfile.open(self.path_tar_out, 'a') as tar:
pass
with tarfile.open(self.path_tar_out, 'r') as tar:
old_data_path = os.path.join(self.dir_tar_out,
self.file_name[dtype_out_time])
try:
tar.extract(self.file_name[dtype_out_time],
path=old_data_path)
except KeyError:
pass
else:
# The os module treats files on archive as non-empty
# directories, so can't use os.remove or os.rmdir.
shutil.rmtree(old_data_path)
retcode = subprocess.call([
"tar", "--delete", "--file={}".format(self.path_tar_out),
self.file_name[dtype_out_time]
])
if | python | {
"resource": ""
} |
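Only part of `_write_to_tar`'s extract / `tar --delete` / append workaround is visible above. A generic, standalone sketch of replacing a single member inside an existing tar archive with the same trick (not the GFDL-archive-specific version, and it relies on GNU `tar` being on the PATH for the delete step) is:

import os
import subprocess
import tarfile
import tempfile

def replace_in_tar_sketch(tar_path, member_name, new_file_path):
    """Replace (or add) `member_name` inside the tar archive at `tar_path`."""
    # Make sure the archive exists so the 'r' mode open below cannot fail.
    with tarfile.open(tar_path, 'a'):
        pass
    with tarfile.open(tar_path, 'r') as tar:
        existing = tar.getnames()
    if member_name in existing:
        # tarfile's append mode cannot overwrite an existing member, so
        # delete the old one first with the external GNU tar utility.
        subprocess.check_call(
            ['tar', '--delete', '--file={}'.format(tar_path), member_name])
    with tarfile.open(tar_path, 'a') as tar:
        tar.add(new_file_path, arcname=member_name)

# Tiny self-contained demo in a temporary directory.
with tempfile.TemporaryDirectory() as tmp:
    src = os.path.join(tmp, 'data.nc')
    with open(src, 'w') as fobj:
        fobj.write('new contents')
    replace_in_tar_sketch(os.path.join(tmp, 'data.tar'), 'data.nc', src)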
q4957 | Calc._update_data_out | train | def _update_data_out(self, data, dtype):
"""Append the data of the given dtype_out to the data_out attr."""
try:
| python | {
"resource": ""
} |
q4958 | Calc.save | train | def save(self, data, dtype_out_time, dtype_out_vert=False,
save_files=True, write_to_tar=False):
"""Save aospy data to data_out attr and to an external file."""
self._update_data_out(data, dtype_out_time)
if save_files:
self._save_files(data, dtype_out_time)
if write_to_tar | python | {
"resource": ""
} |
q4959 | Calc._load_from_disk | train | def _load_from_disk(self, dtype_out_time, dtype_out_vert=False,
region=False):
"""Load aospy data saved as netcdf files on the file system."""
ds = xr.open_dataset(self.path_out[dtype_out_time])
if region:
arr = ds[region.name]
# Use region-specific pressure values if available.
if (self.dtype_in_vert == internal_names.ETA_STR
and not dtype_out_vert):
reg_pfull_str = region.name + '_pressure'
arr = arr.drop([r for r in arr.coords.iterkeys()
if r not in (internal_names.PFULL_STR,
reg_pfull_str)])
# Rename pfull to pfull_ref always.
| python | {
"resource": ""
} |
q4960 | Calc._load_from_tar | train | def _load_from_tar(self, dtype_out_time, dtype_out_vert=False):
"""Load data save in tarball form on the file system."""
path = os.path.join(self.dir_tar_out, 'data.tar')
utils.io.dmget([path])
with tarfile.open(path, 'r') as data_tar:
ds | python | {
"resource": ""
} |
q4961 | Calc.load | train | def load(self, dtype_out_time, dtype_out_vert=False, region=False,
plot_units=False, mask_unphysical=False):
"""Load the data from the object if possible or from disk."""
msg = ("Loading data from disk for object={0}, dtype_out_time={1}, "
"dtype_out_vert={2}, and region="
"{3}".format(self, dtype_out_time, dtype_out_vert, region))
logging.info(msg + ' ({})'.format(ctime()))
# Grab from the object if its there.
try:
data = self.data_out[dtype_out_time]
except (AttributeError, KeyError):
# Otherwise get from disk. Try scratch first, then archive.
try:
data = self._load_from_disk(dtype_out_time, dtype_out_vert,
region=region)
except IOError:
data = | python | {
"resource": ""
} |
q4962 | conv_precip_frac | train | def conv_precip_frac(precip_largescale, precip_convective):
"""Fraction of total precip that is from convection parameterization.
Parameters
----------
precip_largescale, precip_convective : xarray.DataArrays
Precipitation from grid-scale condensation and from convective
parameterization, respectively.
Returns
-------
| python | {
"resource": ""
} |
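The body of `conv_precip_frac` is truncated just after its Returns header; the quantity its docstring describes is presumably convective precipitation divided by the total, which as a plain-numpy sketch (the NaN handling for zero totals is an added assumption) is:

import numpy as np

def conv_precip_frac_sketch(precip_largescale, precip_convective):
    """Fraction of total precipitation produced by the convection scheme."""
    total = precip_largescale + precip_convective
    safe_total = np.where(total > 0, total, 1.0)
    # Where there is no precipitation at all, return NaN rather than 0/0.
    return np.where(total > 0, precip_convective / safe_total, np.nan)

print(conv_precip_frac_sketch(np.array([1.0, 0.0]), np.array([3.0, 0.0])))
# [0.75  nan]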
q4963 | dumps | train | def dumps(x, float_bits=DEFAULT_FLOAT_BITS):
"""
Dump data structure to str.
Here float_bits is either 32 or 64.
"""
with lock:
if float_bits == 32:
encode_func[float] = encode_float32
elif float_bits == 64:
encode_func[float] = encode_float64
| python | {
"resource": ""
} |
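The `dumps` row above swaps the float encoder between 32- and 64-bit variants but is cut off before returning anything. As an illustration of that idea only (cobe's actual wire format is not reproduced here), the two encoders could be written with `struct`:

import struct

def encode_float32(value):
    # Big-endian IEEE-754 single precision: 4 bytes.
    return struct.pack('>f', value)

def encode_float64(value):
    # Big-endian IEEE-754 double precision: 8 bytes.
    return struct.pack('>d', value)

def dumps_float_sketch(x, float_bits=64):
    encoders = {32: encode_float32, 64: encode_float64}
    try:
        return encoders[float_bits](x)
    except KeyError:
        raise ValueError('float_bits must be 32 or 64, got {!r}'.format(float_bits))

print(len(dumps_float_sketch(3.14, float_bits=32)))  # 4
print(len(dumps_float_sketch(3.14)))                 # 8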
q4964 | Brain.stop_batch_learning | train | def stop_batch_learning(self):
"""Finish a series of batch learn operations."""
self._learning = False
self.graph.commit()
| python | {
"resource": ""
} |
q4965 | Brain.learn | train | def learn(self, text):
"""Learn a string of text. If the input is not already
Unicode, it will be decoded as utf-8."""
if type(text) != types.UnicodeType:
# Assume that non-Unicode text is encoded as utf-8, which
# should be somewhat safe in the modern world.
| python | {
"resource": ""
} |
q4966 | Brain._to_graph | train | def _to_graph(self, contexts):
"""This is an iterator that returns each edge of our graph
with its two nodes"""
prev = None
for context in contexts:
if prev is None:
| python | {
"resource": ""
} |
q4967 | Brain.reply | train | def reply(self, text, loop_ms=500, max_len=None):
"""Reply to a string of text. If the input is not already
Unicode, it will be decoded as utf-8."""
if type(text) != types.UnicodeType:
# Assume that non-Unicode text is encoded as utf-8, which
# should be somewhat safe in the modern world.
text = text.decode("utf-8", "ignore")
tokens = self.tokenizer.split(text)
input_ids = map(self.graph.get_token_by_text, tokens)
# filter out unknown words and non-words from the potential pivots
pivot_set = self._filter_pivots(input_ids)
# Conflate the known ids with the stems of their words
if self.stemmer is not None:
self._conflate_stems(pivot_set, tokens)
# If we didn't recognize any word tokens in the input, pick
# something random from the database and babble.
if len(pivot_set) == 0:
pivot_set = self._babble()
score_cache = {}
best_score = -1.0
best_reply = None
# Loop for approximately loop_ms milliseconds. This can either
# take more (if the first reply takes a long time to generate)
# or less (if the _generate_replies search ends early) time,
# but it should stay roughly accurate.
start = time.time()
end = start + loop_ms * 0.001
count = 0
all_replies = []
_start = time.time()
for edges, pivot_node in self._generate_replies(pivot_set):
reply = Reply(self.graph, tokens, input_ids, pivot_node, edges)
if max_len and self._too_long(max_len, reply):
continue
key = reply.edge_ids
if key not in score_cache:
with trace_us("Brain.evaluate_reply_us"):
score = self.scorer.score(reply)
score_cache[key] = score
else:
# skip scoring, we've already seen this reply
score = -1
if score > best_score:
best_reply = reply
best_score = score
# dump all replies to the console if debugging is enabled
| python | {
"resource": ""
} |
q4968 | Brain.init | train | def init(filename, order=3, tokenizer=None):
"""Initialize a brain. This brain's file must not already exist.
Keyword arguments:
order -- Order of the forward/reverse Markov chains (integer)
tokenizer -- One of Cobe, MegaHAL (default Cobe). See documentation
for cobe.tokenizers for details. (string)"""
log.info("Initializing a cobe brain: %s" % filename)
if tokenizer is None:
tokenizer = "Cobe"
if tokenizer not in ("Cobe", "MegaHAL"):
| python | {
"resource": ""
} |
q4969 | PulseExtStreamRestoreInfo.struct_from_value | train | def struct_from_value( cls, name, volume,
channel_list=None, mute=False, device=None ):
'Same arguments as with class instance init.'
chan_map = c.PA_CHANNEL_MAP()
if not channel_list: c.pa.channel_map_init_mono(chan_map)
else:
if not is_str(channel_list):
channel_list = b','.join(map(c.force_bytes, channel_list))
c.pa.channel_map_parse(chan_map, channel_list)
if not isinstance(volume, PulseVolumeInfo): | python | {
"resource": ""
} |
q4970 | Pulse.connect | train | def connect(self, autospawn=False, wait=False):
'''Connect to pulseaudio server.
"autospawn" option will start new pulse daemon, if necessary.
Specifying "wait" option will make function block until pulseaudio server appears.'''
if self._loop_closed:
raise PulseError('Eventloop object was already'
' destroyed and cannot be reused from this instance.')
if self.connected is not None: self._ctx_init()
flags, self.connected = 0, None
if not autospawn: flags |= c.PA_CONTEXT_NOAUTOSPAWN
if wait: flags |= | python | {
"resource": ""
} |
q4971 | Pulse.stream_restore_delete | train | def stream_restore_delete(obj_name_or_list):
'''Can be passed a string name,
a PulseExtStreamRestoreInfo object, or a list of any of these.'''
if is_str(obj_name_or_list, PulseExtStreamRestoreInfo):
obj_name_or_list = [obj_name_or_list]
name_list = list((obj.name if isinstance( obj,
PulseExtStreamRestoreInfo | python | {
"resource": ""
} |
q4972 | Pulse.default_set | train | def default_set(self, obj):
'Set the passed sink or source as the default used by the pulseaudio server.'
assert_pulse_object(obj)
method = {
PulseSinkInfo: | python | {
"resource": ""
} |
q4973 | MLink.update | train | def update(uid, post_data):
'''
Update the link.
'''
entry = TabLink.update(
name=post_data['name'],
link=post_data['link'],
order=post_data['order'],
logo=post_data['logo'] if 'logo' in post_data else '',
| python | {
"resource": ""
} |
q4974 | MLink.create_link | train | def create_link(id_link, post_data):
'''
Add a record for the link.
'''
if MLink.get_by_uid(id_link):
return False
try:
the_order = int(post_data['order'])
except:
the_order = 999
TabLink.create(name=post_data['name'],
link=post_data['link'],
| python | {
"resource": ""
} |
q4975 | WikiHandler.recent | train | def recent(self):
'''
List recent wiki.
'''
kwd = {
'pager': '',
'title': 'Recent Pages',
}
self.render('wiki_page/wiki_list.html',
view=MWiki.query_recent(),
| python | {
"resource": ""
} |
q4976 | WikiHandler.view_or_add | train | def view_or_add(self, title):
'''
Check whether a post with this title exists:
if so, show it; otherwise, add it.
'''
postinfo = MWiki.get_by_wiki(title)
if postinfo:
if postinfo.kind == self.kind:
| python | {
"resource": ""
} |
q4977 | WikiHandler.update | train | def update(self, uid):
'''
Update the wiki.
'''
postinfo = MWiki.get_by_uid(uid)
if self.check_post_role()['EDIT'] or postinfo.user_name == self.get_current_user():
pass
else:
return False
post_data = self.get_post_data()
post_data['user_name'] = self.userinfo.user_name
| python | {
"resource": ""
} |
q4978 | WikiHandler.view | train | def view(self, view):
'''
View the wiki.
'''
kwd = {
'pager': '',
'editable': self.editable(),
| python | {
"resource": ""
} |
q4979 | FilterHandler.echo_html | train | def echo_html(self, url_str):
'''
Show the HTML
'''
logger.info('info echo html: {0}'.format(url_str))
condition = self.gen_redis_kw()
url_arr = self.parse_url(url_str)
sig = url_arr[0]
num = (len(url_arr) - 2) // 2
catinfo = MCategory.get_by_uid(sig)
if catinfo.pid == '0000':
condition['def_cat_pid'] = sig
else:
condition['def_cat_uid'] = sig
fenye_num = 1
for idx in range(num):
ckey = url_arr[idx * 2 + 2]
tval = url_arr[idx * 2 + 3]
if tval == '0':
continue
if ckey == 'fenye':
# Pagination parameter; handled separately.
fenye_num = int(tval)
continue
else:
cval | python | {
"resource": ""
} |
q4980 | gen_array_crud | train | def gen_array_crud():
'''
Return the dictionary of the switcher from the XLSX file.
If the value of the column of the row is `1`, it will be added to the array.
'''
if WORK_BOOK:
pass
else:
return False
papa_id = 0
switch_dics = {}
kind_dics = {}
for work_sheet in WORK_BOOK:
kind_sig = str(work_sheet['A1'].value).strip()
# the number of categories on a website won't be greater than 1000.
for row_num in range(3, 1000):
# Parent category, column A
a_cell_value = work_sheet['A{0}'.format(row_num)].value
# Child category, column B
b_cell_val = work_sheet['B{0}'.format(row_num)].value
if a_cell_value or b_cell_val:
pass
else:
break
if a_cell_value and a_cell_value != '':
papa_id = a_cell_value.strip()[1:]
u_dic = __get_switch_arr(work_sheet, row_num)
switch_dics['dic_{0}00'.format(papa_id)] = | python | {
"resource": ""
} |
q4981 | __get_switch_arr | train | def __get_switch_arr(work_sheet, row_num):
'''
If the value of the column of the row is `1`, it will be added to the array.
'''
u_dic = []
for col_idx in FILTER_COLUMNS:
cell_val = work_sheet['{0}{1}'.format(col_idx, row_num)].value
if cell_val in [1, '1']:
| python | {
"resource": ""
} |
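The helper above collects the filter columns whose cell in a given row equals 1, but is truncated before the append. A self-contained openpyxl sketch of the same row scan, with made-up column letters standing in for the real FILTER_COLUMNS, is:

from openpyxl import Workbook

FILTER_COLUMNS = ['F', 'G', 'H']  # assumed column letters, for illustration only

def get_switch_arr_sketch(work_sheet, row_num):
    """Return the filter columns whose cell in `row_num` holds 1 (or '1')."""
    switches = []
    for col_idx in FILTER_COLUMNS:
        cell_val = work_sheet['{0}{1}'.format(col_idx, row_num)].value
        if cell_val in (1, '1'):
            switches.append(col_idx)
    return switches

# Build a tiny in-memory sheet to exercise the helper.
wb = Workbook()
ws = wb.active
ws['F3'] = 1
ws['G3'] = 0
ws['H3'] = '1'
print(get_switch_arr_sketch(ws, 3))   # ['F', 'H']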
q4982 | MUsage.add_or_update | train | def add_or_update(user_id, post_id, kind):
'''
Create the record if new, else update it.
'''
rec = MUsage.query_by_signature(user_id, post_id)
cate_rec = MInfor2Catalog.get_first_category(post_id)
if cate_rec:
cat_id = cate_rec.tag_id
else:
return False
if rec.count() > 0:
logger.info('Usage update: {uid}'.format(uid=post_id))
rec = rec.get()
query = TabUsage.update(kind=kind).where(TabUsage.uid == rec.uid)
query.execute()
MUsage.count_increate(rec.uid, cat_id, rec.count)
else:
| python | {
"resource": ""
} |
q4983 | run_send_all | train | def run_send_all(*args):
'''
Send email to all users.
'''
for user_rec in MUser.query_all():
email_add = user_rec.user_email
send_mail([email_add],
| python | {
"resource": ""
} |
q4984 | run_send_nologin | train | def run_send_nologin(*args):
'''
Send email to users who have not logged in recently.
'''
for user_rec in MUser.query_nologin():
email_add = user_rec.user_email
print(email_add)
send_mail([email_add],
| python | {
"resource": ""
} |
q4985 | gen_xlsx_category | train | def gen_xlsx_category():
'''
Generating categories from the XLSX file.
'''
if os.path.exists(XLSX_FILE):
pass
else:
return
# Ordering within the categories.
order_index = 1
all_cate_arr = []
for sheet_ranges in load_workbook(filename=XLSX_FILE):
kind_sig = str(sheet_ranges['A1'].value).strip()
for row_num in range(3, 10000):
# Parent category
a_cell_val = sheet_ranges['A{0}'.format(row_num)].value
b_cell_val = sheet_ranges['B{0}'.format(row_num)].value
c_cell_val = sheet_ranges['C{0}'.format(row_num)].value
if a_cell_val or b_cell_val or c_cell_val:
pass
else:
break
if a_cell_val and a_cell_val != '':
cell_arr = a_cell_val.strip()
p_uid = cell_arr[1:]  # all start with 't'
t_slug = sheet_ranges['C{0}'.format(row_num)].value.strip()
t_title = sheet_ranges['D{0}'.format(row_num)].value.strip()
u_uid = p_uid + (4 - len(p_uid)) * '0'
pp_uid = '0000'
elif b_cell_val and b_cell_val != '':
cell_arr = b_cell_val
| python | {
"resource": ""
} |
q4986 | gen_category | train | def gen_category(yaml_file, sig):
'''
Generating categories from the YAML file.
'''
out_dic = yaml.load(open(yaml_file))
for key in out_dic:
if key.endswith('00'):
uid = key[1:]
cur_dic = out_dic[key]
porder = cur_dic['order']
cat_dic = {
'uid': uid,
'slug': cur_dic['slug'],
'name': cur_dic['name'],
'count': 0,
'tmpl': 1,
'pid': '0000',
'order': porder * 100,
'kind': '{0}'.format(sig),
}
MCategory.add_or_update(uid, cat_dic)
else:
sub_arr = out_dic[key]
pid = key[1:3]
for sub_dic in sub_arr:
porder = out_dic['z' + pid + '00']['order']
for key2 in sub_dic:
| python | {
"resource": ""
} |
q4987 | gen_yaml_category | train | def gen_yaml_category():
'''
Find the YAML files and generate categories from them.
'''
for wroot, _, wfiles in os.walk('./database/meta'):
for wfile in wfiles:
| python | {
"resource": ""
} |
q4988 | get_cfg | train | def get_cfg():
'''
Get the configuration values.
'''
cfg_var = dir(cfg)
if 'DB_CFG' in cfg_var:
db_cfg = cfg.DB_CFG
else:
db_cfg = ConfigDefault.DB_CFG
if 'SMTP_CFG' in cfg_var:
smtp_cfg = cfg.SMTP_CFG
else:
smtp_cfg = ConfigDefault.SMTP_CFG
if 'SITE_CFG' in cfg_var:
site_cfg = cfg.SITE_CFG
else:
site_cfg = ConfigDefault.SITE_CFG
if 'ROLE_CFG' in cfg_var:
role_cfg = cfg.ROLE_CFG
else:
role_cfg = ConfigDefault.ROLE_CFG
role_cfg['view'] = role_cfg.get('view', '')
role_cfg['add'] = role_cfg.get('add', '1000')
role_cfg['edit'] = role_cfg.get('edit', '2000')
role_cfg['delete'] = role_cfg.get('delete', '3000')
role_cfg['admin'] = role_cfg.get('admin', '0300')
###################################################################
site_url = site_cfg['site_url'].strip('/')
site_cfg['site_url'] = site_url
infor = site_url.split(':')
if len(infor) == 1:
site_cfg['PORT'] = | python | {
"resource": ""
} |
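`get_cfg` above is truncated while splitting `site_url` into host and port. A minimal sketch of that split, mirroring the naive colon split shown (a scheme-less 'host[:port]' string is assumed, and the default port value here is made up):

def split_site_url_sketch(site_url, default_port='8888'):
    """Split 'host[:port]' into (host, port), falling back to a default port."""
    site_url = site_url.strip('/')
    parts = site_url.split(':')
    if len(parts) == 1:
        return parts[0], default_port
    return parts[0], parts[-1]

print(split_site_url_sketch('example.com'))        # ('example.com', '8888')
print(split_site_url_sketch('example.com:8080'))   # ('example.com', '8080')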
q4989 | PageHandler.view_or_add | train | def view_or_add(self, slug):
'''
When accessed by slug, add the page if there is no record in the database.
'''
rec_page = MWiki.get_by_uid(slug)
if rec_page:
if rec_page.kind == self.kind:
| python | {
"resource": ""
} |
q4990 | PageHandler.to_add | train | def to_add(self, citiao):
'''
To Add page.
'''
kwd = {
'cats': MCategory.query_all(),
'slug': citiao,
'pager': '',
| python | {
"resource": ""
} |
q4991 | PageHandler.__could_edit | train | def __could_edit(self, slug):
'''
Test if the user could edit the page.
'''
page_rec = MWiki.get_by_uid(slug)
if not page_rec:
return False
if self.check_post_role()['EDIT']:
| python | {
"resource": ""
} |
q4992 | PageHandler.update | train | def update(self, slug):
'''
Update the page.
'''
post_data = self.get_post_data()
post_data['user_name'] = self.userinfo.user_name
pageinfo = MWiki.get_by_uid(slug)
cnt_old = tornado.escape.xhtml_unescape(pageinfo.cnt_md).strip()
cnt_new = post_data['cnt_md'].strip()
if cnt_old == cnt_new:
pass | python | {
"resource": ""
} |
q4993 | PageHandler.to_modify | train | def to_modify(self, uid):
'''
Try to modify the page.
'''
kwd = {
'pager': '',
}
| python | {
"resource": ""
} |
q4994 | PageHandler.view | train | def view(self, rec):
'''
View the page.
'''
kwd = {
'pager': '',
}
self.render('wiki_page/page_view.html',
| python | {
"resource": ""
} |
q4995 | PageHandler.ajax_count_plus | train | def ajax_count_plus(self, slug):
'''
Increase the page count by one via AJAX.
'''
output = {
| python | {
"resource": ""
} |
q4996 | PageHandler.add_page | train | def add_page(self, slug):
'''
Add new page.
'''
post_data = self.get_post_data()
post_data['user_name'] = self.userinfo.user_name
if MWiki.get_by_uid(slug):
self.set_status(400)
return False
| python | {
"resource": ""
} |
q4997 | MPostHist.update_cnt | train | def update_cnt(uid, post_data):
'''
Update the content by ID.
'''
entry = TabPostHist.update(
user_name=post_data['user_name'],
| python | {
"resource": ""
} |
q4998 | MPostHist.query_by_postid | train | def query_by_postid(postid, limit=5):
'''
Query the history records of a given post.
'''
recs = TabPostHist.select().where(
TabPostHist.post_id == postid
| python | {
"resource": ""
} |
q4999 | MPostHist.get_last | train | def get_last(postid, limit=10):
'''
Get the latest history records of a post, up to the given limit.
'''
recs = TabPostHist.select().where(
TabPostHist.post_id == postid
| python | {
"resource": ""
} |