Search is not available for this dataset
text stringlengths 75 104k |
|---|
def set_metadata(self, set_id, fp):
    """
    Set the XML metadata on a set.

    :param set_id: id of the set to attach the metadata to.
    :param file fp: file-like object to read the XML metadata from.
    """
    target_url = self.client.get_url('SET', 'GET', 'single', {'id': set_id})
    self._metadata.set(target_url, fp)
def set_metadata(self, fp):
"""
Set the XML metadata on a set.
:param file fp: file-like object to read the XML metadata from.
"""
base_url = self._client.get_url('SET', 'GET', 'single', {'id': self.id})
self._manager._metadata.set(base_url, fp)
# reload myself
... |
def set(self, parent_url, fp):
"""
If the parent object already has XML metadata, it will be overwritten.
Accepts XML metadata in any of the three supported formats.
The format will be detected from the XML content.
The Metadata object becomes invalid after setting
:pa... |
def get_xml(self, fp, format=FORMAT_NATIVE):
"""
Returns the XML metadata for this source, converted to the requested format.
Converted metadata may not contain all the same information as the native format.
:param file fp: A path, or an open file-like object which the content should be... |
def get_formats(self):
    """ Return the available format names for this metadata """
    # A format is "available" when the object carries an attribute named
    # after it; preserve the fixed DC, FGDC, ISO ordering.
    candidates = (self.FORMAT_DC, self.FORMAT_FGDC, self.FORMAT_ISO)
    return [fmt for fmt in candidates if hasattr(self, fmt)]
def is_bound(method):
"""
Decorator that asserts the model instance is bound.
Requires:
1. an ``id`` attribute
2. a ``url`` attribute
2. a manager set
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if not self._is_bound:
raise ValueError("%r mus... |
def query_image_id(self, image_id):
"""Query OPUS via the image_id.
This is a query using the 'primaryfilespec' field of the OPUS database.
It returns a list of URLS into the `obsids` attribute.
This example queries for an image of Titan:
>>> opus = opusapi.OPUS()
>>> ... |
def create_request_with_query(self, kind, query, size="thumb", fmt="json"):
"""api/data.[fmt], api/images/[size].[fmt] api/files.[fmt]
kind = ['data', 'images', 'files']
"""
if kind == "data" or kind == "files":
url = "{}/{}.{}".format(base_url, kind, fmt)
elif kin... |
def get_between_times(self, t1, t2, target=None):
"""
Query for OPUS data between times t1 and t2.
Parameters
----------
t1, t2 : datetime.datetime, strings
Start and end time for the query. If type is datetime, will be
converted to isoformat string. If t... |
def show_images(self, size="small"):
"""Shows preview images using the Jupyter notebook HTML display.
Parameters
==========
size : {'small', 'med', 'thumb', 'full'}
Determines the size of the preview image to be shown.
"""
d = dict(small=256, med=512, thumb=1... |
def download_results(self, savedir=None, raw=True, calib=False, index=None):
"""Download the previously found and stored Opus obsids.
Parameters
==========
savedir: str or pathlib.Path, optional
If the database root folder as defined by the config.ini should not be used,
... |
def download_previews(self, savedir=None):
"""Download preview files for the previously found and stored Opus obsids.
Parameters
==========
savedir: str or pathlib.Path, optional
If the database root folder as defined by the config.ini should not be used,
provide... |
def which_epi_janus_resonance(name, time):
"""Find which swap situtation we are in by time.
Starting from 2006-01-21 where a Janus-Epimetheus swap occured, and
defining the next 4 years until the next swap as `scenario1, and the 4
years after that `scenario2`.
Calculate in units of 4 years, in whic... |
def list_drafts(self):
"""
A filterable list views of layers, returning the draft version of each layer.
If the most recent version of a layer or table has been published already,
it won’t be returned here.
"""
target_url = self.client.get_url('LAYER', 'GET', 'multidraft'... |
def list_versions(self, layer_id):
"""
Filterable list of versions of a layer, always ordered newest to oldest.
If the version’s source supports revisions, you can get a specific revision using
``.filter(data__source__revision=value)``. Specific values depend on the source type.
... |
def get_version(self, layer_id, version_id, expand=[]):
    """
    Get a specific version of a layer.

    :param layer_id: id of the layer.
    :param version_id: id of the version to fetch.
    """
    params = {'layer_id': layer_id, 'version_id': version_id}
    target_url = self.client.get_url('VERSION', 'GET', 'single', params)
    return self._get(target_url, expand=expand)
def get_draft(self, layer_id, expand=[]):
    """
    Get the current draft version of a layer.

    :param layer_id: id of the layer.
    :raises NotFound: if there is no draft version.
    """
    params = {'layer_id': layer_id}
    draft_url = self.client.get_url('VERSION', 'GET', 'draft', params)
    return self._get(draft_url, expand=expand)
def get_published(self, layer_id, expand=[]):
"""
Get the latest published version of this layer.
:raises NotFound: if there is no published version.
"""
target_url = self.client.get_url('VERSION', 'GET', 'published', {'layer_id': layer_id})
return self._get(target_url, e... |
def create_draft(self, layer_id):
"""
Creates a new draft version.
If anything in the data object has changed then an import will begin immediately.
Otherwise to force a re-import from the previous sources call :py:meth:`koordinates.layers.LayerManager.start_import`.
:rtype: La... |
def start_import(self, layer_id, version_id):
"""
Starts importing the specified draft version (cancelling any running import),
even if the data object hasn’t changed from the previous version.
"""
target_url = self.client.get_url('VERSION', 'POST', 'import', {'layer_id': layer_i... |
def start_update(self, layer_id):
"""
A shortcut to create a new version and start importing it.
Effectively the same as :py:meth:`koordinates.layers.LayerManager.create_draft` followed by :py:meth:`koordinates.layers.LayerManager.start_import`.
"""
target_url = self.client.get_u... |
def set_metadata(self, layer_id, version_id, fp):
"""
Set the XML metadata on a layer draft version.
:param file fp: file-like object to read the XML metadata from.
:raises NotAllowed: if the version is already published.
"""
base_url = self.client.get_url('VERSION', 'GE... |
def is_published_version(self):
    """ Return True if this version is the published version of a layer.

    Both ``this_version`` and ``published_version`` must be present and
    equal. Always returns a real bool; the previous chained-``and``
    expression leaked the first falsy operand (e.g. ``None`` when an
    attribute was missing) instead of ``False``.
    """
    pub_ver = getattr(self, 'published_version', None)
    this_ver = getattr(self, 'this_version', None)
    return bool(this_ver and pub_ver and this_ver == pub_ver)
def is_draft_version(self):
""" Return if this version is the draft version of a layer """
pub_ver = getattr(self, 'published_version', None)
latest_ver = getattr(self, 'latest_version', None)
this_ver = getattr(self, 'this_version', None)
return this_ver and latest_ver and (this... |
def list_versions(self):
"""
Filterable list of versions of a layer, always ordered newest to oldest.
If the version’s source supports revisions, you can get a specific revision using
``.filter(data__source__revision=value)``. Specific values depend on the source type.
Use ``dat... |
def get_version(self, version_id, expand=[]):
    """
    Get a specific version of this layer

    :param version_id: id of the version to fetch.
    """
    params = {'layer_id': self.id, 'version_id': version_id}
    target_url = self._client.get_url('VERSION', 'GET', 'single', params)
    return self._manager._get(target_url, expand=expand)
def get_draft_version(self, expand=[]):
"""
Get the current draft version of this layer.
:raises NotFound: if there is no draft version.
"""
target_url = self._client.get_url('VERSION', 'GET', 'draft', {'layer_id': self.id})
return self._manager._get(target_url, expand=ex... |
def start_import(self, version_id=None):
"""
Starts importing this draft layerversion (cancelling any running import), even
if the data object hasn’t changed from the previous version.
:raises Conflict: if this version is already published.
"""
if not version_id:
... |
def start_update(self):
"""
A shortcut to create a new version and start importing it.
Effectively the same as :py:meth:`.create_draft_version` followed by :py:meth:`koordinates.layers.Layer.start_import`.
:rtype: Layer
:return: the new version
:raises Conflict: if there... |
def publish(self, version_id=None):
"""
Creates a publish task just for this version, which publishes as soon as any import is complete.
:return: the publish task
:rtype: Publish
:raises Conflict: If the version is already published, or already has a publish job.
"""
... |
def save(self, with_data=False):
"""
Edits this draft layerversion.
# If anything in the data object has changed, cancel any existing import and start a new one.
:param bool with_data: if ``True``, send the data object, which will start a new import and cancel
any existing o... |
def delete_version(self, version_id=None):
"""
Deletes this draft version (revert to published)
:raises NotAllowed: if this version is already published.
:raises Conflict: if this version is already deleted.
"""
if not version_id:
version_id = self.version.id... |
def _get_item_class(self, url):
""" Return the model class matching a URL """
if '/layers/' in url:
return Layer
elif '/tables/' in url:
return Table
elif '/sets/' in url:
return Set
# elif '/documents/' in url:
# return Document
... |
def get_year_since_resonance(ringcube):
    "Calculate the fraction of the year since moon swap."
    # Reference epoch: the 2006-01-21 Janus/Epimetheus swap.
    swap_epoch = dt(2006, 1, 21)
    elapsed = ringcube.imagetime - swap_epoch
    # Express the elapsed whole days as a fraction of a Julian year.
    return elapsed.days / 365.25
def create_polynoms():
"""Create and return poly1d objects.
Uses the parameters from Morgan to create poly1d objects for
calculations.
"""
fname = pr.resource_filename('pyciss', 'data/soliton_prediction_parameters.csv')
res_df = pd.read_csv(fname)
polys = {}
for resorder, row in zip('65... |
def check_for_soliton(img_id):
"""Workhorse function.
Creates the polynom.
Calculates radius constraints from attributes in `ringcube` object.
Parameters
----------
ringcube : pyciss.ringcube.RingCube
A containter class for a ring-projected ISS image file.
Returns
-------
... |
def get_manager(self, model):
"""
Return the active manager for the given model.
:param model: Model class to look up the manager instance for.
:return: Manager instance for the model associated with this client.
"""
if isinstance(model, six.string_types):
# u... |
def _assemble_headers(self, method, user_headers=None):
"""
Takes the supplied headers and adds in any which
are defined at a client level and then returns
the result.
:param user_headers: a `dict` containing headers defined at the
request level, opt... |
def reverse_url(self, datatype, url, verb='GET', urltype='single', api_version=None):
"""
Extracts parameters from a populated URL
:param datatype: a string identifying the data the url accesses.
:param url: the fully-qualified URL to extract parameters from.
:param verb: the HT... |
def get_url(self, datatype, verb, urltype, params={}, api_host=None, api_version=None):
"""Returns a fully formed url
:param datatype: a string identifying the data the url will access.
:param verb: the HTTP verb needed for use with the url.
:param urltype: an adjective used to the natu... |
def open_store_variable(self, name, var):
    """Turn CDMRemote variable into something like a numpy.ndarray."""
    # Wrap the remote variable so values are only fetched when indexed.
    lazy_data = indexing.LazilyOuterIndexedArray(CDMArrayWrapper(name, self))
    attrs = {attr: getattr(var, attr) for attr in var.ncattrs()}
    return Variable(var.dimensions, lazy_data, attrs)
def get_attrs(self):
    """Get the global attributes from underlying data set."""
    names = self.ds.ncattrs()
    return FrozenOrderedDict((name, getattr(self.ds, name)) for name in names)
def get_dimensions(self):
    """Get the dimensions from underlying data set."""
    dims = self.ds.dimensions
    return FrozenOrderedDict((name, len(dim)) for name, dim in dims.items())
def _find_base_tds_url(catalog_url):
"""Identify the base URL of the THREDDS server from the catalog URL.
Will retain URL scheme, host, port and username/password when present.
"""
url_components = urlparse(catalog_url)
if url_components.path:
return catalog_url.split(url_components.path)[0... |
def filter_time_nearest(self, time, regex=None):
"""Filter keys for an item closest to the desired time.
Loops over all keys in the collection and uses `regex` to extract and build
`datetime`s. The collection of `datetime`s is compared to `start` and the value that
has a `datetime` clos... |
def filter_time_range(self, start, end, regex=None):
"""Filter keys for all items within the desired time range.
Loops over all keys in the collection and uses `regex` to extract and build
`datetime`s. From the collection of `datetime`s, all values within `start` and `end`
(inclusive) a... |
def pop(self, key, *args, **kwargs):
    """Remove and return the value associated with case-insensitive ``key``.

    Extra arguments (e.g. a default value) are forwarded to ``dict.pop``.
    Previously they were accepted but silently dropped, so
    ``d.pop('missing', None)`` raised ``KeyError`` instead of returning
    the default.
    """
    return super(CaseInsensitiveDict, self).pop(CaseInsensitiveStr(key), *args, **kwargs)
def _keys_to_lower(self):
    """Convert key set to lowercase."""
    # Re-insert every entry under a CaseInsensitiveStr key. Iterate over a
    # snapshot of the keys because the dict is mutated (delete + re-insert)
    # while we walk it.
    for k in list(self.keys()):
        # Go through the parent class accessors directly for the raw
        # read/delete, bypassing this subclass's key handling.
        val = super(CaseInsensitiveDict, self).__getitem__(k)
        super(CaseInsensitiveDict, self).__delitem__(k)
        # Insert via self.__setitem__ — presumably so the subclass can
        # normalize the wrapped key on the way in; confirm against the
        # class definition.
        self.__setitem__(CaseInsensitiveStr(k), val)
def resolve_url(self, catalog_url):
"""Resolve the url of the dataset when reading latest.xml.
Parameters
----------
catalog_url : str
The catalog url to be resolved
"""
if catalog_url != '':
resolver_base = catalog_url.split('catalog.xml')[0]
... |
def make_access_urls(self, catalog_url, all_services, metadata=None):
"""Make fully qualified urls for the access methods enabled on the dataset.
Parameters
----------
catalog_url : str
The top level server url
all_services : List[SimpleService]
list of :... |
def add_access_element_info(self, access_element):
    """Create an access method from a catalog element."""
    attribs = access_element.attrib
    # Map the service name to the url path used to reach the dataset.
    self.access_element_info[attribs['serviceName']] = attribs['urlPath']
def download(self, filename=None):
"""Download the dataset to a local file.
Parameters
----------
filename : str, optional
The full path to which the dataset will be saved
"""
if filename is None:
filename = self.name
with self.remote_ope... |
def remote_access(self, service=None, use_xarray=None):
"""Access the remote dataset.
Open the remote dataset and get a netCDF4-compatible `Dataset` object providing
index-based subsetting capabilities.
Parameters
----------
service : str, optional
The name ... |
def subset(self, service=None):
"""Subset the dataset.
Open the remote dataset and get a client for talking to ``service``.
Parameters
----------
service : str, optional
The name of the service for subsetting the dataset. Defaults to 'NetcdfSubset'
or 'N... |
def access_with_service(self, service, use_xarray=None):
"""Access the dataset using a particular service.
Return an Python object capable of communicating with the server using the particular
service. For instance, for 'HTTPServer' this is a file-like object capable of
HTTP communicati... |
def get_wind_components(speed, wdir):
r"""Calculate the U, V wind vector components from the speed and direction.
Parameters
----------
speed : array_like
The wind speed (magnitude)
wdir : array_like
The wind direction, specified as the direction from which the wind is
blowi... |
def _get_metadata(self):
"""Get header information and store as metadata for the endpoint."""
self.metadata = self.fetch_header()
self.variables = {g.name for g in self.metadata.grids} |
def fetch_header(self):
    """Make a header request to the endpoint."""
    header_query = self.query().add_query_parameter(req='header')
    messages = self._parse_messages(self.get_query(header_query).content)
    # The header is the first message in the parsed response.
    return messages[0]
def fetch_feature_type(self):
    """Request the featureType from the endpoint."""
    ft_query = self.query().add_query_parameter(req='featureType')
    response = self.get_query(ft_query)
    return response.content
def fetch_coords(self, query):
    """Pull down coordinate data from the endpoint."""
    coord_query = query.add_query_parameter(req='coord')
    response = self.get_query(coord_query)
    return self._parse_messages(response.content)
def request_data(cls, time, site_id, derived=False):
"""Retreive IGRA version 2 data for one station.
Parameters
--------
site_id : str
11-character IGRA2 station identifier.
time : datetime
The date and time of the desired observation. If list of two tim... |
def _get_data(self):
"""Process the IGRA2 text file for observations at site_id matching time.
Return:
-------
:class: `pandas.DataFrame` containing the body data.
:class: `pandas.DataFrame` containing the header data.
"""
# Split the list of times into b... |
def _get_data_raw(self):
"""Download observations matching the time range.
Returns a tuple with a string for the body, string for the headers,
and a list of dates.
"""
# Import need to be here so we can monkeypatch urlopen for testing and avoid
# downloading live data fo... |
def _select_date_range(self, lines):
"""Identify lines containing headers within the range begin_date to end_date.
Parameters
-----
lines: list
list of lines from the IGRA2 data file.
"""
headers = []
num_lev = []
dates = []
# Get in... |
def _get_fwf_params(self):
"""Produce a dictionary with names, colspecs, and dtype for IGRA2 data.
Returns a dict with entries 'body' and 'header'.
"""
def _cdec(power=1):
"""Make a function to convert string 'value*10^power' to float."""
def _cdec_power(val):
... |
def _clean_body_df(self, df):
"""Format the dataframe, remove empty rows, and add units attribute."""
if self.suffix == '-drvd.txt':
df = df.dropna(subset=('temperature', 'reported_relative_humidity',
'u_wind', 'v_wind'), how='all').reset_index(drop=True)
... |
def _clean_header_df(self, df):
"""Format the header dataframe and add units."""
if self.suffix == '-drvd.txt':
df.units = {'release_time': 'second',
'precipitable_water': 'millimeter',
'inv_pressure': 'hPa',
'inv_height... |
def realtime_observations(cls, buoy, data_type='txt'):
"""Retrieve the realtime buoy data from NDBC.
Parameters
----------
buoy : str
Name of buoy
data_type : str
Type of data requested, must be one of
'txt' standard meteorological data
... |
def _parse_met(content):
"""Parse standard meteorological data from NDBC buoys.
Parameters
----------
content : str
Data to parse
Returns
-------
:class:`pandas.DataFrame` containing the data
"""
col_names = ['year', 'month', 'da... |
def _parse_supl(content):
"""Parse supplemental measurements data.
Parameters
----------
content : str
Data to parse
Returns
-------
:class:`pandas.DataFrame` containing the data
"""
col_names = ['year', 'month', 'day', 'hour', '... |
def _check_if_url_valid(url):
"""Check if a url is valid (returns 200) or not.
Parameters
----------
url : str
URL to check
Returns
-------
bool if url is valid
"""
r = requests.head(url)
if r.status_code == 200:
... |
def buoy_data_types(cls, buoy):
"""Determine which types of data are available for a given buoy.
Parameters
----------
buoy : str
Buoy name
Returns
-------
dict of valid file extensions and their descriptions
"""
endpoint = cls()... |
def raw_buoy_data(cls, buoy, data_type='txt'):
"""Retrieve the raw buoy data contents from NDBC.
Parameters
----------
buoy : str
Name of buoy
data_type : str
Type of data requested, must be one of
'txt' standard meteorological data
... |
def create_session(self):
"""Create a new HTTP session with our user-agent set.
Returns
-------
session : requests.Session
The created session
See Also
--------
urlopen, set_session_options
"""
ret = requests.Session()
ret.he... |
def urlopen(self, url, **kwargs):
"""GET a file-like object for a URL using HTTP.
This is a thin wrapper around :meth:`requests.Session.get` that returns a file-like
object wrapped around the resulting content.
Parameters
----------
url : str
The URL to requ... |
def lonlat_box(self, west, east, south, north):
"""Add a latitude/longitude bounding box to the query.
This adds a request for a spatial bounding box, bounded by ('north', 'south')
for latitude and ('east', 'west') for the longitude. This modifies the query
in-place, but returns `self` ... |
def lonlat_point(self, lon, lat):
"""Add a latitude/longitude point to the query.
This adds a request for a (`lon`, `lat`) point. This modifies the query
in-place, but returns `self` so that multiple queries can be chained together on
one line.
This replaces any existing spatia... |
def time(self, time):
"""Add a request for a specific time to the query.
This modifies the query in-place, but returns `self` so that multiple queries
can be chained together on one line.
This replaces any existing temporal queries that have been set.
Parameters
------... |
def time_range(self, start, end):
"""Add a request for a time range to the query.
This modifies the query in-place, but returns `self` so that multiple queries
can be chained together on one line.
This replaces any existing temporal queries that have been set.
Parameters
... |
def get_query(self, query):
"""Make a GET request, including a query, to the endpoint.
The path of the request is to the base URL assigned to the endpoint.
Parameters
----------
query : DataQuery
The query to pass when making the request
Returns
---... |
def get_path(self, path, query=None):
"""Make a GET request, optionally including a query, to a relative path.
The path of the request includes a path on top of the base URL
assigned to the endpoint.
Parameters
----------
path : str
The path to request, rela... |
def get(self, path, params=None):
"""Make a GET request, optionally including a parameters, to a path.
The path of the request is the full URL.
Parameters
----------
path : str
The URL to request
params : DataQuery, optional
The query to pass whe... |
def path(self):
    """Return the full path to the Group, including any parent Groups."""
    # The root group (its own dataset) has an empty path.
    if self.dataset is self:
        return ''
    # Otherwise build the path by recursing up through the parent.
    parent_path = self.dataset.path
    return parent_path + '/' + self.name
def load_from_stream(self, group):
"""Load a Group from an NCStream object."""
self._unpack_attrs(group.atts)
self.name = group.name
for dim in group.dims:
new_dim = Dimension(self, dim.name)
self.dimensions[dim.name] = new_dim
new_dim.load_from_strea... |
def load_from_stream(self, var):
"""Populate the Variable from an NCStream object."""
dims = []
for d in var.shape:
dim = Dimension(None, d.name)
dim.load_from_stream(d)
dims.append(dim)
self.dimensions = tuple(dim.name for dim in dims)
self.s... |
def load_from_stream(self, dim):
    """Load from an NCStream object."""
    self.unlimited = dim.isUnlimited
    self.private = dim.isPrivate
    self.vlen = dim.isVlen
    # Variable-length dimensions carry no fixed size.
    if self.vlen:
        return
    self.size = dim.length
def _read_header(self):
"""Get the needed header information to initialize dataset."""
self._header = self.cdmrf.fetch_header()
self.load_from_stream(self._header) |
def load_from_stream(self, header):
"""Populate the CoverageDataset from the protobuf information."""
self._unpack_attrs(header.atts)
self.name = header.name
self.lon_lat_domain = header.latlonRect
self.proj_domain = header.projRect
self.date_range = header.dateRange
... |
def request_data(cls, time, site_id, **kwargs):
"""Retrieve upper air observations from Iowa State's archive for a single station.
Parameters
----------
time : datetime
The date and time of the desired observation.
site_id : str
The three letter ICAO ide... |
def request_all_data(cls, time, pressure=None, **kwargs):
"""Retrieve upper air observations from Iowa State's archive for all stations.
Parameters
----------
time : datetime
The date and time of the desired observation.
pressure : float, optional
The ma... |
def _get_data(self, time, site_id, pressure=None):
"""Download data from Iowa State's upper air archive.
Parameters
----------
time : datetime
Date and time for which data should be downloaded
site_id : str
Site id for which data should be downloaded
... |
def _get_data_raw(self, time, site_id, pressure=None):
r"""Download data from the Iowa State's upper air archive.
Parameters
----------
time : datetime
Date and time for which data should be downloaded
site_id : str
Site id for which data should be downlo... |
def parse_station_table(root):
    """Parse station list XML file."""
    # Build Station objects from every <station> element, keyed by id.
    parsed = (parse_xml_station(elem) for elem in root.findall('station'))
    return {station.id: station for station in parsed}
def parse_xml_station(elem):
"""Create a :class:`Station` instance from an XML tag."""
stid = elem.attrib['id']
name = elem.find('name').text
lat = float(elem.find('latitude').text)
lon = float(elem.find('longitude').text)
elev = float(elem.find('elevation').text)
return Station(id=stid, ele... |
def stations(self, *stns):
"""Specify one or more stations for the query.
This modifies the query in-place, but returns `self` so that multiple
queries can be chained together on one line.
This replaces any existing spatial queries that have been set.
Parameters
------... |
def validate_query(self, query):
"""Validate a query.
Determines whether `query` is well-formed. This includes checking for all
required parameters, as well as checking parameters for valid values.
Parameters
----------
query : RadarQuery
The query to valida... |
def get_catalog(self, query):
"""Fetch a parsed THREDDS catalog from the radar server.
Requests a catalog of radar data files data from the radar server given the
parameters in `query` and returns a :class:`~siphon.catalog.TDSCatalog` instance.
Parameters
----------
que... |
def acis_request(method, params):
"""Request data from the ACIS Web Services API.
Makes a request from the ACIS Web Services API for data
based on a given method (StnMeta,StnData,MultiStnData,GridData,General)
and parameters string. Information about the parameters can be obtained at:
http://www.rc... |
def parse_xml(data, handle_units):
    """Parse XML data returned by NCSS."""
    root = ET.fromstring(data)
    dataset = parse_xml_dataset(root, handle_units)
    return squish(dataset)
def parse_xml_point(elem):
"""Parse an XML point tag."""
point = {}
units = {}
for data in elem.findall('data'):
name = data.get('name')
unit = data.get('units')
point[name] = float(data.text) if name != 'date' else parse_iso_date(data.text)
if unit:
units[nam... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.