_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
def reapply_all(ast_node, lib2to3_node):
    """Reapply the typed_ast node into the lib2to3 tree.

    Post-processing callbacks returned by ``reapply`` are invoked in reverse
    order so that TypeVars and aliases that depend on one another can be
    placed correctly.
    """
    deferred = reapply(ast_node, lib2to3_node)
    for callback in reversed(deferred):
        callback()
def fix_remaining_type_comments(node):
    """Converts type comments in `node` to proper annotated assignments.

    Walks the tree in post-order, tracking the previously visited node so that
    adjacent patterns (assignment + NEWLINE, suite + funcdef) can be matched.
    """
    assert node.type == syms.file_input
    last_n = None
    for n in node.post_order():
        if last_n is not None:
            if n.type == token.NEWLINE and is_assignment(last_n):
                # `# type:` comments on assignments are presumably carried in
                # the NEWLINE leaf's prefix -- confirm in the helper.
                fix_variable_annotation_type_comment(n, last_n)
            elif n.type == syms.funcdef and last_n.type == syms.suite:
                fix_signature_annotation_type_comment(n, last_n, offset=1)
            elif n.type == syms.async_funcdef and last_n.type == syms.suite:
                # offset=2 -- presumably accounts for the extra `async` child
                # of async_funcdef; confirm against the helper's signature.
                fix_signature_annotation_type_comment(n, last_n, offset=2)
        last_n = n
def parse_signature_type_comment(type_comment):
    """Parse the fugly signature type comment into AST nodes.

    Caveats: ASTifying **kwargs is impossible with the current grammar so we
    hack it into unary subtraction (to differentiate from Starred in vararg).

    For example from:
        "(str, int, *int, **Any) -> 'SomeReturnType'"
    To:
        ([ast3.Name, ast.Name, ast3.Name, ast.Name], ast3.Str)

    Raises:
        ValueError: if the type comment is not a valid function type comment.
    """
    try:
        result = ast3.parse(type_comment, '<func_type>', 'func_type')
    except SyntaxError:
        # Suppress the SyntaxError context so the traceback is clean --
        # consistent with parse_type_comment() below.
        raise ValueError(
            f"invalid function signature type comment: {type_comment!r}"
        ) from None
    assert isinstance(result, ast3.FunctionType)
    if len(result.argtypes) == 1:
        # A lone argument type is unwrapped from its list.
        argtypes = result.argtypes[0]
    else:
        argtypes = result.argtypes
    return argtypes, result.returns
def parse_type_comment(type_comment):
    """Parse a type comment string into AST nodes.

    Raises:
        ValueError: if the comment is not a valid `eval`-mode expression.
    """
    try:
        parsed = ast3.parse(type_comment, '<type_comment>', 'eval')
    except SyntaxError:
        raise ValueError(f"invalid type comment: {type_comment!r}") from None
    assert isinstance(parsed, ast3.Expression)
    return parsed.body
def copy_arguments_to_annotations(args, type_comment, *, is_method=False):
    """Copies AST nodes from `type_comment` into the ast3.arguments in `args`.

    Does validation of argument count (allowing for untyped self/cls)
    and type (vararg and kwarg).

    Raises:
        ValueError: if the number of types in the comment doesn't match the
            number of arguments in the signature.
    """
    if isinstance(type_comment, ast3.Ellipsis):
        # A `(...) -> T` comment places no constraints on the arguments.
        return
    expected = len(args.args)
    if args.vararg:
        expected += 1
    expected += len(args.kwonlyargs)
    if args.kwarg:
        expected += 1
    actual = len(type_comment) if isinstance(type_comment, list) else 1
    if expected != actual:
        if is_method and expected - actual == 1:
            pass  # fine, we're just skipping `self`, `cls`, etc.
        else:
            raise ValueError(
                f"number of arguments in type comment doesn't match; " +
                f"expected {expected}, found {actual}"
            )
    if isinstance(type_comment, list):
        # Types are consumed left-to-right via pop(0) (next_value(0) below).
        next_value = type_comment.pop
    else:
        # If there's just one value, only one of the loops and ifs below will
        # be populated. We ensure this with the expected/actual length check
        # above.
        _tc = type_comment
        def next_value(index: int = 0) -> ast3.expr:
            return _tc
    # Skip untyped leading arguments (e.g. `self`) when the comment is shorter.
    for arg in args.args[expected - actual:]:
        ensure_no_annotation(arg.annotation)
        arg.annotation = next_value(0)
    if args.vararg:
        ensure_no_annotation(args.vararg.annotation)
        args.vararg.annotation = next_value(0)
    for arg in args.kwonlyargs:
        ensure_no_annotation(arg.annotation)
        arg.annotation = next_value(0)
    if args.kwarg:
        ensure_no_annotation(args.kwarg.annotation)
        args.kwarg.annotation = next_value(0)
def copy_type_comments_to_annotations(args):
    """Copy legacy per-argument type comments into annotations for every
    argument in the function signature (positional, *args, keyword-only,
    and **kwargs).
    """
    for positional in args.args:
        copy_type_comment_to_annotation(positional)
    if args.vararg:
        copy_type_comment_to_annotation(args.vararg)
    for kwonly in args.kwonlyargs:
        copy_type_comment_to_annotation(kwonly)
    if args.kwarg:
        copy_type_comment_to_annotation(args.kwarg)
def maybe_replace_any_if_equal(name, expected, actual):
    """Return the type given in `expected`.

    Raise ValueError if `expected` isn't equal to `actual`.  If --replace-any
    is used, the Any type in `actual` is considered equal.

    The implementation naively checks whether the string representation of
    `actual` is one of "Any", "typing.Any", or "t.Any".  This is done for two
    reasons:

    1. I'm lazy.
    2. We want people to be able to explicitly state that they want Any
       without it being replaced.  This way they can use an alias.
    """
    is_equal = expected == actual
    if not is_equal and Config.replace_any:
        actual_str = minimize_whitespace(str(actual))
        # Strip one level of quoting so forward references compare equal.
        if actual_str and actual_str[0] in {'"', "'"}:
            actual_str = actual_str[1:-1]
        is_equal = actual_str in {'Any', 'typing.Any', 't.Any'}
    if not is_equal:
        raise ValueError(
            "incompatible existing {}. Expected: {!r}, actual: {!r}".format(
                name,
                minimize_whitespace(str(expected)),
                minimize_whitespace(str(actual)),
            )
        )
    return expected or actual
def remove_function_signature_type_comment(body):
    """Remove the legacy signature type comment, leaving other comments if any."""
    for child in body.children:
        if child.type != token.INDENT:
            continue
        stripped = child.prefix.lstrip()
        if stripped.startswith('# type: '):
            # Drop only the first line (the type comment); keep the rest.
            _, _, remainder = stripped.partition('\n')
            child.prefix = remainder
        break
def get_offset_and_prefix(body, skip_assignments=False):
    """Returns the offset after which a statement can be inserted to the `body`.

    This offset is calculated to come after all imports, and maybe existing
    (possibly annotated) assignments if `skip_assignments` is True.

    Also returns the indentation prefix that should be applied to the
    inserted node.
    """
    assert body.type in (syms.file_input, syms.suite)
    _offset = 0
    prefix = ''
    for _offset, child in enumerate(body.children):
        if child.type == syms.simple_stmt:
            stmt = child.children[0]
            if stmt.type == syms.expr_stmt:
                expr = stmt.children
                if not skip_assignments:
                    break
                # Keep walking only past bare annotated assignments of the
                # form `NAME: type` (no `=`).
                # NOTE(review): `_eq` is presumably a module-level `=` Leaf
                # used for the membership test -- confirm its definition.
                if (
                    len(expr) != 2 or
                    expr[0].type != token.NAME or
                    expr[1].type != syms.annassign or
                    _eq in expr[1].children
                ):
                    break
            elif stmt.type not in (syms.import_name, syms.import_from, token.STRING):
                break
        elif child.type == token.INDENT:
            assert isinstance(child, Leaf)
            prefix = child.value
        elif child.type != token.NEWLINE:
            break
    # Swap prefixes with the child we stopped at so the insertion point
    # inherits the right leading whitespace.
    prefix, child.prefix = child.prefix, prefix
    return _offset, prefix
def fix_line_numbers(body):
    r"""Recomputes all line numbers based on the number of \n characters."""
    current_line = 0
    for node in body.pre_order():
        current_line += node.prefix.count('\n')
        if isinstance(node, Leaf):
            node.lineno = current_line
            current_line += str(node.value).count('\n')
def new(n, prefix=None):
    """lib2to3's AST requires unique objects as children."""
    if isinstance(n, Leaf):
        # Leaves are cheap: always clone, optionally overriding the prefix.
        return Leaf(n.type, n.value, prefix=n.prefix if prefix is None else prefix)

    # This is hacky; we assume complex nodes are just being reused once from
    # the original AST, so detaching the parent pointer is enough.
    n.parent = None
    if prefix is not None:
        n.prefix = prefix
    return n
def _load_info(self):
    '''Get user info for GBDX S3, put into instance vars for convenience.

    Args:
        None.

    Returns:
        Dictionary with S3 access key, S3 secret key, S3 session token,
        user bucket and user prefix (dict).
    '''
    response = self.gbdx_connection.get('%s/prefix?duration=36000' % self.base_url)
    response.raise_for_status()
    return response.json()
def histogram_equalize(self, use_bands, **kwargs):
    '''Equalize the histogram and normalize the value range.

    Equalization is computed over all bands together, not per-band.
    '''
    data = self._read(self[use_bands,...], **kwargs)
    # Move the band axis last -- presumably (bands, y, x) -> (y, x, bands).
    data = np.rollaxis(data.astype(np.float32), 0, 3)
    flattened = data.flatten()
    if 0 in data:
        # Zeros are treated as nodata and excluded from the histogram.
        masked = np.ma.masked_values(data, 0).compressed()
        image_histogram, bin_edges = np.histogram(masked, 256)
    else:
        image_histogram, bin_edges = np.histogram(flattened, 256)
    # Bin centers, then the normalized CDF used as the equalization mapping.
    bins = (bin_edges[:-1] + bin_edges[1:]) / 2.0
    cdf = image_histogram.cumsum()
    cdf = cdf / float(cdf[-1])
    image_equalized = np.interp(flattened, bins, cdf).reshape(data.shape)
    if 'stretch' in kwargs or 'gamma' in kwargs:
        # Optional contrast stretch / gamma applied after equalization.
        return self._histogram_stretch(image_equalized, **kwargs)
    else:
        return image_equalized
def histogram_match(self, use_bands, blm_source=None, **kwargs):
    '''Match the histogram to existing imagery.

    The reference imagery is a TMS basemap tile, or the catalog browse
    (thumbnail) image when ``blm_source='browse'``.  Requires rio_hist.
    '''
    assert has_rio, "To match image histograms please install rio_hist"
    data = self._read(self[use_bands,...], **kwargs)
    # Band axis moved last -- presumably (y, x, bands) after this.
    data = np.rollaxis(data.astype(np.float32), 0, 3)
    if 0 in data:
        # Zeros are treated as nodata.
        data = np.ma.masked_values(data, 0)
    bounds = self._reproject(box(*self.bounds), from_proj=self.proj, to_proj="EPSG:4326").bounds
    if blm_source == 'browse':
        from gbdxtools.images.browse_image import BrowseImage
        ref = BrowseImage(self.cat_id, bbox=bounds).read()
    else:
        from gbdxtools.images.tms_image import TmsImage
        tms = TmsImage(zoom=self._calc_tms_zoom(self.affine[0]), bbox=bounds, **kwargs)
        ref = np.rollaxis(tms.read(), 0, 3)
    # Match each band separately; reference scaled from 8-bit to [0, 1].
    out = np.dstack([rio_match(data[:,:,idx], ref[:,:,idx].astype(np.double)/255.0)
                    for idx in range(data.shape[-1])])
    if 'stretch' in kwargs or 'gamma' in kwargs:
        # Optional contrast stretch / gamma applied after matching.
        return self._histogram_stretch(out, **kwargs)
    else:
        return out
def histogram_stretch(self, use_bands, **kwargs):
    '''Entry point for contrast stretching.'''
    pixels = np.rollaxis(
        self._read(self[use_bands, ...], **kwargs).astype(np.float32), 0, 3)
    return self._histogram_stretch(pixels, **kwargs)
def ndvi(self, **kwargs):
    """
    Calculates Normalized Difference Vegetation Index using NIR and Red of an image.

    Returns: numpy array with ndvi values
    """
    bands = self._read(self[self._ndvi_bands, ...]).astype(np.float32)
    nir, red = bands[0, :, :], bands[1, :, :]
    return (nir - red) / (nir + red)
def ndwi(self):
    """
    Calculates Normalized Difference Water Index using Coastal and NIR2 bands
    for WV02, WV03.  For Landsat8 and Sentinel2 it is calculated using the
    Green and NIR bands.

    Returns: numpy array of ndwi values
    """
    bands = self._read(self[self._ndwi_bands, ...]).astype(np.float32)
    first, second = bands[0, :, :], bands[1, :, :]
    return (second - first) / (first + second)
def plot(self, spec="rgb", **kwargs):
    '''Plot the image with MatplotLib.

    Plot sizing includes default borders and spacing.  If the image is shown
    in Jupyter the outside whitespace will be automatically cropped to save
    size, resulting in a smaller sized image than expected.

    Histogram options:
        * 'equalize': performs histogram equalization on the image.
        * 'minmax': stretch the pixel range to the minimum and maximum input
          pixel values.  Equivalent to stretch=[0,100].
        * 'match': match the histogram to the Maps API imagery.  Pass the
          additional keyword blm_source='browse' to match to the Browse
          Service (image thumbnail) instead.
        * 'ignore': skip dynamic range adjustment, in the event the image is
          already correctly balanced and the values are in the correct range.

    Gamma values greater than 1 will brighten the image midtones, values less
    than 1 will darken the midtones.

    Plots generated with the histogram options of 'match' and 'equalize' can
    be combined with the stretch and gamma options.  The stretch and gamma
    adjustments will be applied after the histogram adjustments.

    Args:
        w (float or int): width of plot in inches at 72 dpi, default is 10
        h (float or int): height of plot in inches at 72 dpi, default is 10
        title (str): Title to use on the plot
        fontsize (int): Size of title font, default is 22.  Size is measured
            in points.
        bands (list): bands to use for plotting, such as bands=[4,2,1].
            Defaults to the image's natural RGB bands.  This option is useful
            for generating pseudocolor images when passed a list of three
            bands.  If only a single band is provided, a colormapped plot
            will be generated instead.
        cmap (str): MatPlotLib colormap name to use for single band images.
            Default is cmap='Greys_r'.
        histogram (str): either 'equalize', 'minmax', 'match', or 'ignore'
        stretch (list): stretch the histogram between two percentile values,
            default is [2,98]
        gamma (float): adjust image gamma, default is 1.0
    '''
    if self.shape[0] == 1 or ("bands" in kwargs and len(kwargs["bands"]) == 1):
        # Single-band image: render with a colormap instead of RGB.
        if "cmap" in kwargs:
            cmap = kwargs["cmap"]
            del kwargs["cmap"]
        else:
            cmap = "Greys_r"
        self._plot(tfm=self._single_band, cmap=cmap, **kwargs)
    else:
        if spec == "rgb" and self._has_token(**kwargs):
            self._plot(tfm=self.rgb, **kwargs)
        else:
            # Any other spec (e.g. "ndvi") is looked up as an attribute of
            # this image object.
            self._plot(tfm=getattr(self, spec), **kwargs)
def describe_images(self, idaho_image_results):
    """Describe the result set of a catalog search for IDAHO images.

    Args:
        idaho_image_results (dict): Result set of catalog search.

    Returns:
        results (json): The full catalog-search response for IDAHO images
        corresponding to the given catID.
    """
    results = idaho_image_results['results']
    # filter only idaho images:
    results = [r for r in results if 'IDAHOImage' in r['type']]
    self.logger.debug('Describing %s IDAHO images.' % len(results))
    # figure out which catids are represented in this set of images
    catids = set(r['properties']['catalogID'] for r in results)
    description = {}
    for catid in catids:
        # images associated with a single catid
        description[catid] = {'parts': {}}
        images = [r for r in results if r['properties']['catalogID'] == catid]
        for image in images:
            props = image['properties']
            description[catid]['sensorPlatformName'] = props['sensorPlatformName']
            # Part number is encoded in the last 3 digits of the vendor
            # dataset identifier, e.g. "vendor:..._P001" -> 1.
            part = int(props['vendorDatasetIdentifier'].split(':')[1][-3:])
            color = props['colorInterpretation']
            # setdefault replaces the previous bare `try/except` (a bare
            # `except:` that could mask unrelated errors such as
            # KeyboardInterrupt or typos in the lookup).
            part_entry = description[catid]['parts'].setdefault(part, {})
            part_entry[color] = {
                'id': image['identifier'],
                'bucket': props['tileBucketName'],
                'boundstr': props['footprintWkt'],
            }
    return description
def get_chip(self, coordinates, catid, chip_type='PAN', chip_format='TIF', filename='chip.tif'):
    """Downloads a native resolution, orthorectified chip in tif format
    from a user-specified catalog id.

    Args:
        coordinates (list): Rectangle coordinates in order West, South, East, North.
            West and East are longitudes, North and South are latitudes.
            The maximum chip size is (2048 pix)x(2048 pix)
        catid (str): The image catalog id.
        chip_type (str): 'PAN' (panchromatic), 'MS' (multispectral), 'PS' (pansharpened).
            'MS' is 4 or 8 bands depending on sensor.
        chip_format (str): 'TIF' or 'PNG'
        filename (str): Where to save chip.

    Returns:
        True if chip is successfully downloaded; else False.
    """
    def t2s1(t):
        # Tuple to string 1: "(1, 2)" -> "1 2"
        return str(t).strip('(,)').replace(',', '')
    def t2s2(t):
        # Tuple to string 2: "(1, 2)" -> "1,2"
        return str(t).strip('(,)').replace(' ', '')
    if len(coordinates) != 4:
        print('Wrong coordinate entry')
        return False
    W, S, E, N = coordinates
    # Closed ring, counter-clockwise from the SW corner.
    box = ((W, S), (W, N), (E, N), (E, S), (W, S))
    box_wkt = 'POLYGON ((' + ','.join([t2s1(corner) for corner in box]) + '))'
    # get IDAHO images which intersect box
    results = self.get_images_by_catid_and_aoi(catid=catid, aoi_wkt=box_wkt)
    description = self.describe_images(results)
    pan_id, ms_id, num_bands = None, None, 0
    # NOTE(review): the loop variable shadows the `catid` parameter, and
    # pan_id/ms_id keep the values from the last part seen -- confirm that a
    # single-part result set is the expected case here.
    for catid, images in description.items():
        for partnum, part in images['parts'].items():
            if 'PAN' in part.keys():
                pan_id = part['PAN']['id']
                bucket = part['PAN']['bucket']
            if 'WORLDVIEW_8_BAND' in part.keys():
                ms_id = part['WORLDVIEW_8_BAND']['id']
                num_bands = 8
                bucket = part['WORLDVIEW_8_BAND']['bucket']
            elif 'RGBN' in part.keys():
                ms_id = part['RGBN']['id']
                num_bands = 4
                bucket = part['RGBN']['bucket']
    # specify band information
    # NOTE(review): if the requested chip_type has no matching image id,
    # pan_id/ms_id stay None and the concatenations below raise TypeError.
    band_str = ''
    if chip_type == 'PAN':
        band_str = pan_id + '?bands=0'
    elif chip_type == 'MS':
        band_str = ms_id + '?'
    elif chip_type == 'PS':
        if num_bands == 8:
            band_str = ms_id + '?bands=4,2,1&panId=' + pan_id
        elif num_bands == 4:
            band_str = ms_id + '?bands=0,1,2&panId=' + pan_id
    # specify location information
    location_str = '&upperLeft={}&lowerRight={}'.format(t2s2((W, N)), t2s2((E, S)))
    service_url = 'https://idaho.geobigdata.io/v1/chip/bbox/' + bucket + '/'
    url = service_url + band_str + location_str
    url += '&format=' + chip_format + '&token=' + self.gbdx_connection.access_token
    r = requests.get(url)
    if r.status_code == 200:
        with open(filename, 'wb') as f:
            f.write(r.content)
        return True
    else:
        print('Cannot download chip')
        return False
def create_leaflet_viewer(self, idaho_image_results, filename):
    """Create a leaflet viewer html file for viewing idaho images.

    Args:
        idaho_image_results (dict): IDAHO image result set as returned from
            the catalog.
        filename (str): Where to save output html file.
    """
    description = self.describe_images(idaho_image_results)
    if len(description) > 0:
        functionstring = ''
        for catid, images in description.items():
            for partnum, part in images['parts'].items():
                num_images = len(list(part.keys()))
                partname = None
                if num_images == 1:
                    # there is only one image, use the PAN
                    partname = [p for p in list(part.keys())][0]
                    pan_image_id = ''
                elif num_images == 2:
                    # there are two images in this part, use the multi (or pansharpen)
                    # BUG FIX: was `p is not 'PAN'` -- identity comparison with
                    # a string literal is implementation-dependent (SyntaxWarning
                    # on CPython 3.8+); equality is the correct test.
                    partname = [p for p in list(part.keys()) if p != 'PAN'][0]
                    pan_image_id = part['PAN']['id']
                if not partname:
                    self.logger.debug("Cannot find part for idaho image.")
                    continue
                bandstr = {
                    'RGBN': '0,1,2',
                    'WORLDVIEW_8_BAND': '4,2,1',
                    'PAN': '0'
                }.get(partname, '0,1,2')
                part_boundstr_wkt = part[partname]['boundstr']
                part_polygon = from_wkt(part_boundstr_wkt)
                bucketname = part[partname]['bucket']
                image_id = part[partname]['id']
                W, S, E, N = part_polygon.bounds
                functionstring += "addLayerToMap('%s','%s',%s,%s,%s,%s,'%s');\n" % (
                    bucketname, image_id, W, S, E, N, pan_image_id)
        __location__ = os.path.realpath(
            os.path.join(os.getcwd(), os.path.dirname(__file__)))
        try:
            with open(os.path.join(__location__, 'leafletmap_template.html'), 'r') as htmlfile:
                data = htmlfile.read().decode("utf8")
        except AttributeError:
            # Python 3: str has no .decode(); re-read the file as text.
            with open(os.path.join(__location__, 'leafletmap_template.html'), 'r') as htmlfile:
                data = htmlfile.read()
        # NOTE(review): S, W and bandstr below carry the values from the last
        # part processed; if no parts were processed this raises NameError --
        # confirm intended.
        data = data.replace('FUNCTIONSTRING', functionstring)
        data = data.replace('CENTERLAT', str(S))
        data = data.replace('CENTERLON', str(W))
        data = data.replace('BANDS', bandstr)
        data = data.replace('TOKEN', self.gbdx_connection.access_token)
        with codecs.open(filename, 'w', 'utf8') as outputfile:
            self.logger.debug("Saving %s" % filename)
            outputfile.write(data)
    else:
        print('No items returned.')
def is_ordered(cat_id):
    """
    Checks to see if a CatalogID has been ordered or not.

    Args:
        catalogID (str): The catalog ID from the platform catalog.

    Returns:
        ordered (bool): Whether or not the image has been ordered
    """
    url = 'https://rda.geobigdata.io/v1/stripMetadata/{}'.format(cat_id)
    response = _req_with_retries(Auth().gbdx_connection, url)
    # A missing response (all retries failed) counts as "not ordered".
    return response is not None and response.status_code == 200
def deprecate_module_attr(mod, deprecated):
    """Return a wrapped object that warns about deprecated accesses"""
    flagged = set(deprecated)

    def _warn_if_flagged(attr):
        # Emit one warning per access of a deprecated attribute.
        if attr in flagged:
            warnings.warn("Property {} is deprecated".format(attr), GBDXDeprecation)

    class Wrapper(object):
        def __getattr__(self, attr):
            _warn_if_flagged(attr)
            return getattr(mod, attr)

        def __setattr__(self, attr, value):
            _warn_if_flagged(attr)
            return setattr(mod, attr, value)

    return Wrapper()
def get_matching_multiplex_port(self, name):
    """
    Given a name, figure out if a multiplex port prefixes this name and
    return it.  Otherwise return None.
    """
    for portname in self._portnames:
        # A strict prefix (not the port name itself) that resolves to an
        # existing multiplex port is a match.
        if not (name.startswith(portname) and name != portname):
            continue
        if not hasattr(self, portname):
            continue
        candidate = self.__getattribute__(portname)
        if candidate.is_multiplex:
            return candidate
    return None
def set(self, **kwargs):
    """
    Set input values on task

    Args:
        arbitrary_keys: values for the keys

    Returns:
        None
    """
    for name, value in kwargs.items():
        # Accept either a raw value or a Port-like object carrying `.value`.
        resolved = value.value if hasattr(value, 'value') else value
        self.inputs.__setattr__(name, resolved)
def savedata(self, output, location=None):
    '''
    Save output data from any task in this workflow to S3

    Args:
        output: Reference task output (e.g. task.outputs.output1).
        location (optional): Subfolder under which the output will be saved.
            It will be placed under the account directory in gbd-customer-data
            bucket: s3://gbd-customer-data/{account_id}/{location}
            Leave blank to save to:
            workflow_output/{workflow_id}/{task_name}/{port_name}

    Returns:
        None
    '''
    output.persist = True
    # Truthy check on purpose: an empty location falls back to the default
    # workflow_output path.
    if location:
        output.persist_location = location
def generate_workflow_description(self):
    '''
    Generate workflow json for launching the workflow against the gbdx api

    Args:
        None

    Returns:
        json string

    Raises:
        WorkflowError: if the workflow contains no tasks.
    '''
    if not self.tasks:
        raise WorkflowError('Workflow contains no tasks, and cannot be executed.')
    self.definition = self.workflow_skeleton()
    if self.batch_values:
        self.definition["batch_values"] = self.batch_values
    # Collect every input port value across all tasks so we can tell which
    # multiplex outputs are actually referenced.
    all_input_port_values = [t.inputs.__getattribute__(input_port_name).value for t in self.tasks for
                             input_port_name in t.inputs._portnames]
    for task in self.tasks:
        # only include multiplex output ports in this task if other tasks refer to them in their inputs.
        # 1. find the multplex output port_names in this task
        # 2. see if they are referred to in any other tasks inputs
        # 3. If not, exclude them from the workflow_def
        output_multiplex_ports_to_exclude = []
        multiplex_output_port_names = [portname for portname in task.outputs._portnames if
                                       task.outputs.__getattribute__(portname).is_multiplex]
        for p in multiplex_output_port_names:
            # Input ports reference outputs as "source:<task name>:<port>".
            output_port_reference = 'source:' + task.name + ':' + p
            if output_port_reference not in all_input_port_values:
                output_multiplex_ports_to_exclude.append(p)
        task_def = task.generate_task_workflow_json(
            output_multiplex_ports_to_exclude=output_multiplex_ports_to_exclude)
        self.definition['tasks'].append(task_def)
    if self.callback:
        self.definition['callback'] = self.callback
    return self.definition
def execute(self):
    '''
    Execute the workflow.

    Args:
        None

    Returns:
        Workflow_id
    '''
    self.generate_workflow_description()
    if self.batch_values:
        # Batch workflows go through the dedicated batch endpoint.
        self.id = self.workflow.launch_batch_workflow(self.definition)
    else:
        # Regular workflows use the standard launch endpoint.
        self.id = self.workflow.launch(self.definition)
    return self.id
def task_ids(self):
    '''
    Get the task IDs of a running workflow

    Args:
        None

    Returns:
        List of task IDs
    '''
    if not self.id:
        raise WorkflowError('Workflow is not running. Cannot get task IDs.')
    if self.batch_values:
        raise NotImplementedError("Query Each Workflow Id within the Batch Workflow for task IDs.")
    return [t['id'] for t in self.workflow.get(self.id)['tasks']]
def cancel(self):
    '''
    Cancel a running workflow.

    Args:
        None

    Returns:
        None
    '''
    if not self.id:
        raise WorkflowError('Workflow is not running. Cannot cancel.')
    # Batch workflows are cancelled through a separate endpoint.
    canceler = (self.workflow.batch_workflow_cancel
                if self.batch_values else self.workflow.cancel)
    canceler(self.id)
def stdout(self):
    '''Get stdout from all the tasks of a workflow.

    Returns:
        (list): tasks with their stdout

    Example:
        >>> workflow.stdout
        [
            {
                "id": "4488895771403082552",
                "taskType": "AOP_Strip_Processor",
                "name": "Task1",
                "stdout": "............"
            }
        ]
    '''
    if not self.id:
        raise WorkflowError('Workflow is not running. Cannot get stdout.')
    if self.batch_values:
        raise NotImplementedError("Query Each Workflow Id within the Batch Workflow for stdout.")
    wf = self.workflow.get(self.id)
    return [
        {
            'id': task['id'],
            'taskType': task['taskType'],
            'name': task['name'],
            'stdout': self.workflow.get_stdout(self.id, task['id']),
        }
        for task in wf['tasks']
    ]
def stderr(self):
    '''Get stderr from all the tasks of a workflow.

    Returns:
        (list): tasks with their stderr

    Example:
        >>> workflow.stderr
        [
            {
                "id": "4488895771403082552",
                "taskType": "AOP_Strip_Processor",
                "name": "Task1",
                "stderr": "............"
            }
        ]
    '''
    if not self.id:
        raise WorkflowError('Workflow is not running. Cannot get stderr.')
    if self.batch_values:
        raise NotImplementedError("Query Each Workflow Id within the Batch Workflow for stderr.")
    wf = self.workflow.get(self.id)
    return [
        {
            'id': task['id'],
            'taskType': task['taskType'],
            'name': task['name'],
            'stderr': self.workflow.get_stderr(self.id, task['id']),
        }
        for task in wf['tasks']
    ]
def layers(self):
    """ Renders the list of layers to add to the map.

    Returns:
        layers (list): list of layer entries suitable for use in mapbox-gl
        'map.addLayer()' call
    """
    return list(map(self._layer_def, self.styles))
def get_proj(prj_code):
    """
    Helper method for handling projection codes that are unknown to pyproj

    Args:
        prj_code (str): an epsg proj code

    Returns:
        projection: a pyproj projection
    """
    # Custom (non-EPSG) codes are looked up in CUSTOM_PRJ first.
    if prj_code in CUSTOM_PRJ:
        return pyproj.Proj(CUSTOM_PRJ[prj_code])
    return pyproj.Proj(init=prj_code)
def preview(image, **kwargs):
    '''Show a slippy map preview of the image.  Requires iPython.

    Args:
        image (image): image object to display
        zoom (int): zoom level to intialize the map, default is 16
        center (list): center coordinates to initialize the map, defaults to
            center of image
        bands (list): bands of image to display, defaults to the image's
            default RGB bands
    '''
    try:
        from IPython.display import Javascript, HTML, display
        from gbdxtools.rda.interface import RDA
        from gbdxtools import Interface
        gbdx = Interface()
    except:
        print("IPython is required to produce maps.")
        return
    zoom = kwargs.get("zoom", 16)
    bands = kwargs.get("bands")
    if bands is None:
        bands = image._rgb_bands
    wgs84_bounds = kwargs.get("bounds", list(loads(image.metadata["image"]["imageBoundsWGS84"]).bounds))
    center = kwargs.get("center", list(shape(image).centroid.bounds[0:2]))
    if image.proj != 'EPSG:4326':
        # Fetch proj4 info for non-WGS84 projections so OpenLayers can
        # reproject on the client.
        code = image.proj.split(':')[1]
        conn = gbdx.gbdx_connection
        proj_info = conn.get('https://ughlicoordinates.geobigdata.io/ughli/v1/projinfo/{}'.format(code)).json()
        tfm = partial(pyproj.transform, pyproj.Proj(init='EPSG:4326'), pyproj.Proj(init=image.proj))
        bounds = list(ops.transform(tfm, box(*wgs84_bounds)).bounds)
    else:
        proj_info = {}
        bounds = wgs84_bounds
    # Applying DRA to a DRA'ed image looks bad, skip if already in graph
    if not image.options.get('dra'):
        rda = RDA()
        # Need some simple DRA to get the image in range for display.
        dra = rda.HistogramDRA(image)
        image = dra.aoi(bbox=image.bounds)
    graph_id = image.rda_id
    node_id = image.rda.graph()['nodes'][0]['id']
    # Unique DOM id so multiple previews can coexist in one notebook.
    map_id = "map_{}".format(str(int(time.time())))
    scales = ','.join(['1'] * len(bands))
    offsets = ','.join(['0'] * len(bands))
    display(HTML(Template('''
       <div id="$map_id"/>
       <link href='https://openlayers.org/en/v4.6.4/css/ol.css' rel='stylesheet' />
       <script src="https://cdn.polyfill.io/v2/polyfill.min.js?features=requestAnimationFrame,Element.prototype.classList,URL"></script>
       <style>body{margin:0;padding:0;}#$map_id{position:relative;top:0;bottom:0;width:100%;height:400px;}</style>
       <style></style>
    ''').substitute({"map_id": map_id})))
    js = Template("""
        require.config({
            paths: {
                oljs: 'https://cdnjs.cloudflare.com/ajax/libs/openlayers/4.6.4/ol',
                proj4: 'https://cdnjs.cloudflare.com/ajax/libs/proj4js/2.4.4/proj4'
            }
        });
        require(['oljs', 'proj4'], function(oljs, proj4) {
            oljs.proj.setProj4(proj4)
            var md = $md;
            var georef = $georef;
            var graphId = '$graphId';
            var nodeId = '$nodeId';
            var extents = $bounds;
            var x1 = md.minTileX * md.tileXSize;
            var y1 = ((md.minTileY + md.numYTiles) * md.tileYSize + md.tileYSize);
            var x2 = ((md.minTileX + md.numXTiles) * md.tileXSize + md.tileXSize);
            var y2 = md.minTileY * md.tileYSize;
            var tileLayerResolutions = [georef.scaleX];
            var url = '$url' + '/tile/';
            url += graphId + '/' + nodeId;
            url += "/{x}/{y}.png?token=$token&display_bands=$bands&display_scales=$scales&display_offsets=$offsets";
            var proj = '$proj';
            var projInfo = $projInfo;
            if ( proj !== 'EPSG:4326' ) {
                var proj4def = projInfo["proj4"];
                proj4.defs(proj, proj4def);
                var area = projInfo["area_of_use"];
                var bbox = [area["area_west_bound_lon"], area["area_south_bound_lat"],
                            area["area_east_bound_lon"], area["area_north_bound_lat"]]
                var projection = oljs.proj.get(proj);
                var fromLonLat = oljs.proj.getTransform('EPSG:4326', projection);
                var extent = oljs.extent.applyTransform(
                    [bbox[0], bbox[1], bbox[2], bbox[3]], fromLonLat);
                projection.setExtent(extent);
            } else {
                var projection = oljs.proj.get(proj);
            }
            var rda = new oljs.layer.Tile({
              title: 'RDA',
              opacity: 1,
              extent: extents,
              source: new oljs.source.TileImage({
                      crossOrigin: null,
                      projection: projection,
                      extent: extents,
                      tileGrid: new oljs.tilegrid.TileGrid({
                          extent: extents,
                          origin: [extents[0], extents[3]],
                          resolutions: tileLayerResolutions,
                          tileSize: [md.tileXSize, md.tileYSize],
                      }),
                      tileUrlFunction: function (coordinate) {
                          if (coordinate === null) return undefined;
                          const x = coordinate[1] + md.minTileX;
                          const y = -(coordinate[2] + 1 - md.minTileY);
                          if (x < md.minTileX || x > md.maxTileX) return undefined;
                          if (y < md.minTileY || y > md.maxTileY) return undefined;
                          return url.replace('{x}', x).replace('{y}', y);
                      }
                  })
            });
            var map = new oljs.Map({
              layers: [ rda ],
              target: '$map_id',
              view: new oljs.View({
                  projection: projection,
                  center: $center,
                  zoom: $zoom
              })
            });
        });
    """).substitute({
        "map_id": map_id,
        "proj": image.proj,
        "projInfo": json.dumps(proj_info),
        "graphId": graph_id,
        "bounds": bounds,
        "bands": ",".join(map(str, bands)),
        "nodeId": node_id,
        "md": json.dumps(image.metadata["image"]),
        "georef": json.dumps(image.metadata["georef"]),
        "center": center,
        "zoom": zoom,
        "token": gbdx.gbdx_connection.access_token,
        "scales": scales,
        "offsets": offsets,
        "url": VIRTUAL_RDA_URL
    })
    display(Javascript(js))
q259035 | TaskRegistry.list | validation | def list(self):
"""Lists available and visible GBDX tasks.
Returns:
List of tasks
"""
# GET the registry root and fail fast on HTTP errors before parsing JSON.
r = self.gbdx_connection.get(self._base_url)
raise_for_status(r)
return r.json()['tasks'] | python | {
"resource": ""
} |
q259036 | TaskRegistry.register | validation | def register(self, task_json=None, json_filename=None):
"""Registers a new GBDX task.
Args:
task_json (dict): Dictionary representing task definition.
json_filename (str): A full path of a file with json representing the task definition.
Only one out of task_json and json_filename should be provided.
Returns:
Response (str).
"""
# Exactly one of task_json / json_filename must be supplied.
if not task_json and not json_filename:
raise Exception("Both task json and filename can't be none.")
if task_json and json_filename:
raise Exception("Both task json and filename can't be provided.")
if json_filename:
# NOTE(review): the file handle from open() is never closed — a with-block would be safer.
task_json = json.load(open(json_filename, 'r'))
r = self.gbdx_connection.post(self._base_url, json=task_json)
raise_for_status(r)
return r.text | python | {
"resource": ""
} |
q259037 | TaskRegistry.get_definition | validation | def get_definition(self, task_name):
"""Gets definition of a registered GBDX task.
Args:
task_name (str): Task name.
Returns:
Dictionary representing the task definition.
"""
# Task definitions are addressed as <base_url>/<task_name>.
r = self.gbdx_connection.get(self._base_url + '/' + task_name)
raise_for_status(r)
return r.json() | python | {
"resource": ""
} |
q259038 | TaskRegistry.delete | validation | def delete(self, task_name):
"""Deletes a GBDX task.
Args:
task_name (str): Task name.
Returns:
Response (str).
"""
# DELETE the named task; the raw response body is returned to the caller.
r = self.gbdx_connection.delete(self._base_url + '/' + task_name)
raise_for_status(r)
return r.text | python | {
"resource": ""
} |
q259039 | TaskRegistry.update | validation | def update(self, task_name, task_json):
"""Updates a GBDX task.
Args:
task_name (str): Task name.
task_json (dict): Dictionary representing updated task definition.
Returns:
Dictionary representing the updated task definition.
"""
# PUT the full replacement definition to <base_url>/<task_name>.
r = self.gbdx_connection.put(self._base_url + '/' + task_name, json=task_json)
raise_for_status(r)
return r.json() | python | {
"resource": ""
} |
q259040 | to_geotiff | validation | def to_geotiff(arr, path='./output.tif', proj=None, spec=None, bands=None, **kwargs):
''' Write out a geotiff file of the image
Args:
path (str): path to write the geotiff file to, default is ./output.tif
proj (str): EPSG string of projection to reproject to
spec (str): if set to 'rgb', write out color-balanced 8-bit RGB tif
bands (list): list of bands to export. If spec='rgb' will default to RGB bands
Returns:
str: path the geotiff was written to'''
assert has_rasterio, "To create geotiff images please install rasterio"
# Prefer the image's native tile size for the output block size; fall back to chunk_size.
try:
img_md = arr.rda.metadata["image"]
x_size = img_md["tileXSize"]
y_size = img_md["tileYSize"]
except (AttributeError, KeyError):
x_size = kwargs.get("chunk_size", 256)
y_size = kwargs.get("chunk_size", 256)
# Use an explicit 'transform' kwarg when given, else the array's affine; None if neither exists.
try:
tfm = kwargs['transform'] if 'transform' in kwargs else arr.affine
except:
tfm = None
# int8 is written as uint8 (presumably for GTiff driver compatibility — verify).
dtype = arr.dtype.name if arr.dtype.name != 'int8' else 'uint8'
if spec is not None and spec.lower() == 'rgb':
if bands is None:
bands = arr._rgb_bands
# skip if already DRA'ed
if not arr.options.get('dra'):
# add the RDA HistogramDRA op to get a RGB 8-bit image
from gbdxtools.rda.interface import RDA
rda = RDA()
dra = rda.HistogramDRA(arr)
# Reset the bounds and select the bands on the new Dask
arr = dra.aoi(bbox=arr.bounds)
arr = arr[bands,...].astype(np.uint8)
dtype = 'uint8'
else:
if bands is not None:
arr = arr[bands,...]
meta = {
'width': arr.shape[2],
'height': arr.shape[1],
'count': arr.shape[0],
'dtype': dtype,
'driver': 'GTiff',
'transform': tfm
}
if proj is not None:
meta["crs"] = {'init': proj}
if "tiled" in kwargs and kwargs["tiled"]:
meta.update(blockxsize=x_size, blockysize=y_size, tiled="yes")
# Stream the dask array into the file chunk-by-chunk instead of materializing it in memory.
with rasterio.open(path, "w", **meta) as dst:
writer = rio_writer(dst)
result = store(arr, writer, compute=False)
result.compute(scheduler=threaded_get)
return path | python | {
"resource": ""
} |
q259041 | Recipe.ingest_vectors | validation | def ingest_vectors(self, output_port_value):
''' append two required tasks to the given output to ingest to VS
'''
# append two tasks to self['definition']['tasks']
# Task 1: convert the given output into vector-services items.
ingest_task = Task('IngestItemJsonToVectorServices')
ingest_task.inputs.items = output_port_value
ingest_task.impersonation_allowed = True
# Task 2: stage the ingest result to S3; the destination uses runtime template variables.
stage_task = Task('StageDataToS3')
stage_task.inputs.destination = 's3://{vector_ingest_bucket}/{recipe_id}/{run_id}/{task_name}'
stage_task.inputs.data = ingest_task.outputs.result.value
self.definition['tasks'].append(ingest_task.generate_task_workflow_json())
self.definition['tasks'].append(stage_task.generate_task_workflow_json()) | python | {
"resource": ""
} |
q259042 | Recipe.get | validation | def get(self, recipe_id):
'''
Retrieves an AnswerFactory Recipe by id
Args:
recipe_id The id of the recipe
Returns:
A JSON representation of the recipe
'''
self.logger.debug('Retrieving recipe by id: ' + recipe_id)
url = '%(base_url)s/recipe/%(recipe_id)s' % {
'base_url': self.base_url, 'recipe_id': recipe_id
}
# Raises requests.HTTPError on a non-2xx response.
r = self.gbdx_connection.get(url)
r.raise_for_status()
return r.json() | python | {
"resource": ""
} |
q259043 | Recipe.save | validation | def save(self, recipe):
'''
Saves an AnswerFactory Recipe
Args:
recipe (dict): Dictionary specifying a recipe
Returns:
AnswerFactory Recipe id
'''
# Upsert semantics: PUT when the recipe already has an id, POST otherwise.
# test if this is a create vs. an update
if 'id' in recipe and recipe['id'] is not None:
# update -> use put op
self.logger.debug("Updating existing recipe: " + json.dumps(recipe))
url = '%(base_url)s/recipe/json/%(recipe_id)s' % {
'base_url': self.base_url, 'recipe_id': recipe['id']
}
r = self.gbdx_connection.put(url, json=recipe)
# On HTTP error, dump the response body for diagnostics, then re-raise.
try:
r.raise_for_status()
except:
print(r.text)
raise
return recipe['id']
else:
# create -> use post op
self.logger.debug("Creating new recipe: " + json.dumps(recipe))
url = '%(base_url)s/recipe/json' % {
'base_url': self.base_url
}
r = self.gbdx_connection.post(url, json=recipe)
try:
r.raise_for_status()
except:
print(r.text)
raise
# The create endpoint returns the saved recipe; surface its server-assigned id.
recipe_json = r.json()
return recipe_json['id'] | python | {
"resource": ""
} |
q259044 | Project.save | validation | def save(self, project):
'''
Saves an AnswerFactory Project
Args:
project (dict): Dictionary specifying an AnswerFactory Project.
Returns:
AnswerFactory Project id
'''
# Upsert semantics: PUT when the project already has an id, POST otherwise.
# test if this is a create vs. an update
if 'id' in project and project['id'] is not None:
# update -> use put op
self.logger.debug('Updating existing project: ' + json.dumps(project))
url = '%(base_url)s/%(project_id)s' % {
'base_url': self.base_url, 'project_id': project['id']
}
r = self.gbdx_connection.put(url, json=project)
# On HTTP error, dump the response body for diagnostics, then re-raise.
try:
r.raise_for_status()
except:
print(r.text)
raise
# updates only get the Accepted response -> return the original project id
return project['id']
else:
self.logger.debug('Creating new project: ' + json.dumps(project))
# create -> use post op
url = self.base_url
r = self.gbdx_connection.post(url, json=project)
try:
r.raise_for_status()
except:
print(r.text)
raise
project_json = r.json()
# create returns the saved project -> return the project id that's saved
return project_json['id'] | python | {
"resource": ""
} |
q259045 | Project.delete | validation | def delete(self, project_id):
'''
Deletes a project by id
Args:
project_id: The project id to delete
Returns:
Nothing
'''
self.logger.debug('Deleting project by id: ' + project_id)
url = '%(base_url)s/%(project_id)s' % {
'base_url': self.base_url, 'project_id': project_id
}
# Raises requests.HTTPError if the delete fails.
r = self.gbdx_connection.delete(url)
r.raise_for_status() | python | {
"resource": ""
} |
q259046 | LineStyle.paint | validation | def paint(self):
"""
Renders a javascript snippet suitable for use as a mapbox-gl line paint entry
Returns:
A dict that can be converted to a mapbox-gl javascript paint snippet
"""
# TODO Figure out why i cant use some of these props
snippet = {
'line-opacity': VectorStyle.get_style_value(self.opacity),
'line-color': VectorStyle.get_style_value(self.color),
#'line-cap': self.cap,
#'line-join': self.join,
'line-width': VectorStyle.get_style_value(self.width),
#'line-gap-width': self.gap_width,
#'line-blur': self.blur,
}
# Optional properties are included only when set, keeping the snippet minimal.
if self.translate:
snippet['line-translate'] = self.translate
if self.dasharray:
snippet['line-dasharray'] = VectorStyle.get_style_value(self.dasharray)
return snippet | python | {
"resource": ""
} |
q259047 | FillStyle.paint | validation | def paint(self):
"""
Renders a javascript snippet suitable for use as a mapbox-gl fill paint entry
Returns:
A dict that can be converted to a mapbox-gl javascript paint snippet
"""
snippet = {
'fill-opacity': VectorStyle.get_style_value(self.opacity),
'fill-color': VectorStyle.get_style_value(self.color),
'fill-outline-color': VectorStyle.get_style_value(self.outline_color)
}
# fill-translate is optional and only emitted when set.
if self.translate:
snippet['fill-translate'] = self.translate
return snippet | python | {
"resource": ""
} |
q259048 | FillExtrusionStyle.paint | validation | def paint(self):
"""
Renders a javascript snippet suitable for use as a mapbox-gl fill-extrusion paint entry
Returns:
A dict that can be converted to a mapbox-gl javascript paint snippet
"""
snippet = {
'fill-extrusion-opacity': VectorStyle.get_style_value(self.opacity),
'fill-extrusion-color': VectorStyle.get_style_value(self.color),
'fill-extrusion-base': VectorStyle.get_style_value(self.base),
'fill-extrusion-height': VectorStyle.get_style_value(self.height)
}
# fill-extrusion-translate is optional and only emitted when set.
if self.translate:
snippet['fill-extrusion-translate'] = self.translate
return snippet | python | {
"resource": ""
} |
q259049 | HeatmapStyle.paint | validation | def paint(self):
"""
Renders a javascript snippet suitable for use as a mapbox-gl heatmap paint entry
Returns:
A dict that can be converted to a mapbox-gl javascript paint snippet
"""
# All heatmap properties are required by this style, so the dict is built unconditionally.
snippet = {
'heatmap-radius': VectorStyle.get_style_value(self.radius),
'heatmap-opacity': VectorStyle.get_style_value(self.opacity),
'heatmap-color': VectorStyle.get_style_value(self.color),
'heatmap-intensity': VectorStyle.get_style_value(self.intensity),
'heatmap-weight': VectorStyle.get_style_value(self.weight)
}
return snippet | python | {
"resource": ""
} |
q259050 | Vectors.create | validation | def create(self,vectors):
""" Create a vectors in the vector service.
Args:
vectors: A single geojson vector or a list of geojson vectors. Item_type and ingest_source are required.
Returns:
(list): IDs of the vectors created
Example:
>>> vectors.create(
... {
... "type": "Feature",
... "geometry": {
... "type": "Point",
... "coordinates": [1.0,1.0]
... },
... "properties": {
... "text" : "item text",
... "name" : "item name",
... "item_type" : "type",
... "ingest_source" : "source",
... "attributes" : {
... "latitude" : 1,
... "institute_founded" : "2015-07-17",
... "mascot" : "moth"
... }
... }
... }
... )
"""
# Normalize a single feature dict into a one-element list.
if type(vectors) is dict:
vectors = [vectors]
# validate they all have item_type and ingest_source in properties
for vector in vectors:
if not 'properties' in list(vector.keys()):
raise Exception('Vector does not contain "properties" field.')
if not 'item_type' in list(vector['properties'].keys()):
raise Exception('Vector does not contain "item_type".')
if not 'ingest_source' in list(vector['properties'].keys()):
raise Exception('Vector does not contain "ingest_source".')
# POST all features in a single request; raise on HTTP error.
r = self.gbdx_connection.post(self.create_url, data=json.dumps(vectors))
r.raise_for_status()
return r.json() | python | {
"resource": ""
} |
q259051 | Vectors.create_from_wkt | validation | def create_from_wkt(self, wkt, item_type, ingest_source, **attributes):
'''
Create a single vector in the vector service
Args:
wkt (str): wkt representation of the geometry
item_type (str): item_type of the vector
ingest_source (str): source of the vector
attributes: a set of key-value pairs of attributes
Returns:
id (str): string identifier of the vector created
'''
# verify the "depth" of the attributes is single layer
# Convert WKT -> shapely geometry -> GeoJSON mapping for the feature body.
geojson = load_wkt(wkt).__geo_interface__
vector = {
'type': "Feature",
'geometry': geojson,
'properties': {
'item_type': item_type,
'ingest_source': ingest_source,
'attributes': attributes
}
}
# create() returns a list of ids; a single feature yields exactly one.
return self.create(vector)[0] | python | {
"resource": ""
} |
q259052 | Vectors.get | validation | def get(self, ID, index='vector-web-s'):
'''Retrieves a vector. Not usually necessary because searching is the best way to find & get stuff.
Args:
ID (str): ID of the vector object
index (str): Optional. Index the object lives in. defaults to 'vector-web-s'
Returns:
record (dict): A dict object identical to the json representation of the catalog record
'''
# get_url is a format string taking the index; the ID is appended to the resulting URL.
url = self.get_url % index
r = self.gbdx_connection.get(url + ID)
r.raise_for_status()
return r.json() | python | {
"resource": ""
} |
q259053 | Vectors.aggregate_query | validation | def aggregate_query(self, searchAreaWkt, agg_def, query=None, start_date=None, end_date=None, count=10, index=default_index):
"""Aggregates results of a query into buckets defined by the 'agg_def' parameter. The aggregations are
represented by dicts containing a 'name' key and a 'terms' key holding a list of the aggregation buckets.
Each bucket element is a dict containing a 'term' key containing the term used for this bucket, a 'count' key
containing the count of items that match this bucket, and an 'aggregations' key containing any child
aggregations.
Args:
searchAreaWkt (str): wkt representation of the geometry
agg_def (str or AggregationDef): the aggregation definitions
query (str): a valid Elasticsearch query string to constrain the items going into the aggregation
start_date (str): either an ISO-8601 date string or a 'now' expression (e.g. "now-6d" or just "now")
end_date (str): either an ISO-8601 date string or a 'now' expression (e.g. "now-6d" or just "now")
count (int): the number of buckets to include in the aggregations (the top N will be returned)
index (str): the index (or alias or wildcard index expression) to run aggregations against, set to None for the entire set of vector indexes
Returns:
results (list): A (usually single-element) list of dict objects containing the aggregation results.
"""
# The search area geometry travels in the request body as GeoJSON.
geojson = load_wkt(searchAreaWkt).__geo_interface__
aggs_str = str(agg_def) # could be string or AggregationDef
params = {
"count": count,
"aggs": aggs_str
}
# Optional constraints are only sent when provided.
if query:
params['query'] = query
if start_date:
params['start_date'] = start_date
if end_date:
params['end_date'] = end_date
url = self.aggregations_by_index_url % index if index else self.aggregations_url
r = self.gbdx_connection.post(url, params=params, json=geojson)
r.raise_for_status()
# OrderedDict hook preserves the server's bucket ordering in the parsed result.
return r.json(object_pairs_hook=OrderedDict)['aggregations'] | python | {
"resource": ""
} |
q259054 | Vectors.tilemap | validation | def tilemap(self, query, styles={}, bbox=[-180,-90,180,90], zoom=16,
api_key=os.environ.get('MAPBOX_API_KEY', None),
image=None, image_bounds=None,
index="vector-user-provided", name="GBDX_Task_Output", **kwargs):
"""
Renders a mapbox gl map from a vector service query
"""
# NOTE(review): mutable default arguments (styles={}, bbox=[...]) are shared across calls.
try:
from IPython.display import display
except:
print("IPython is required to produce maps.")
return
assert api_key is not None, "No Mapbox API Key found. You can either pass in a token or set the MAPBOX_API_KEY environment variable."
# Query the vector service within the bbox and center the map on the
# centroid of the union of the matched features.
wkt = box(*bbox).wkt
features = self.query(wkt, query, index=index)
union = cascaded_union([shape(f['geometry']) for f in features])
lon, lat = union.centroid.coords[0]
url = 'https://vector.geobigdata.io/insight-vector/api/mvt/{z}/{x}/{y}?';
url += 'q={}&index={}'.format(query, index);
if styles is not None and not isinstance(styles, list):
styles = [styles]
# A timestamp-based DOM id keeps repeated renders from colliding.
map_id = "map_{}".format(str(int(time.time())))
map_data = VectorTileLayer(url, source_name=name, styles=styles, **kwargs)
image_layer = self._build_image_layer(image, image_bounds)
template = BaseTemplate(map_id, **{
"lat": lat,
"lon": lon,
"zoom": zoom,
"datasource": json.dumps(map_data.datasource),
"layers": json.dumps(map_data.layers),
"image_layer": image_layer,
"mbkey": api_key,
"token": self.gbdx_connection.access_token
})
template.inject() | python | {
"resource": ""
} |
q259055 | Vectors.map | validation | def map(self, features=None, query=None, styles=None,
bbox=[-180,-90,180,90], zoom=10, center=None,
image=None, image_bounds=None, cmap='viridis',
api_key=os.environ.get('MAPBOX_API_KEY', None), **kwargs):
"""
Renders a mapbox gl map from a vector service query or a list of geojson features
Args:
features (list): a list of geojson features
query (str): a VectorServices query
styles (list): a list of VectorStyles to apply to the features
bbox (list): a bounding box to query for features ([minx, miny, maxx, maxy])
zoom (int): the initial zoom level of the map
center (list): a list of [lat, lon] used to center the map
api_key (str): a valid Mapbox API key
image (dict): a CatalogImage or a ndarray
image_bounds (list): a list of bounds for image positioning
Use outside of GBDX Notebooks requires a MapBox API key, sign up for free at https://www.mapbox.com/pricing/
Pass the key using the `api_key` keyword or set an environmental variable called `MAPBOX API KEY`
cmap (str): MatPlotLib colormap to use for rendering single band images (default: viridis)
"""
# NOTE(review): bbox uses a mutable default argument, shared across calls.
try:
from IPython.display import display
except:
print("IPython is required to produce maps.")
return
assert api_key is not None, "No Mapbox API Key found. You can either pass in a key or set the MAPBOX_API_KEY environment variable. Use outside of GBDX Notebooks requires a MapBox API key, sign up for free at https://www.mapbox.com/pricing/"
# Inputs: explicit features take precedence; otherwise run the query; an
# image alone is also acceptable (features stay None).
if features is None and query is not None:
wkt = box(*bbox).wkt
features = self.query(wkt, query, index=None)
elif features is None and query is None and image is None:
print('Must provide either a list of features or a query or an image')
return
if styles is not None and not isinstance(styles, list):
styles = [styles]
geojson = {"type":"FeatureCollection", "features": features}
# Determine map center: explicit center > feature-union centroid > image centroid/bounds.
if center is None and features is not None:
union = cascaded_union([shape(f['geometry']) for f in features])
lon, lat = union.centroid.coords[0]
elif center is None and image is not None:
try:
lon, lat = shape(image).centroid.coords[0]
except:
lon, lat = box(*image_bounds).centroid.coords[0]
else:
lat, lon = center
map_id = "map_{}".format(str(int(time.time())))
map_data = VectorGeojsonLayer(geojson, styles=styles, **kwargs)
image_layer = self._build_image_layer(image, image_bounds, cmap)
template = BaseTemplate(map_id, **{
"lat": lat,
"lon": lon,
"zoom": zoom,
"datasource": json.dumps(map_data.datasource),
"layers": json.dumps(map_data.layers),
"image_layer": image_layer,
"mbkey": api_key,
"token": 'dummy'
})
template.inject() | python | {
"resource": ""
} |
q259056 | DaskImage.read | validation | def read(self, bands=None, **kwargs):
"""Reads data from a dask array and returns the computed ndarray matching the given bands
Args:
bands (list): band indices to read from the image. Returns bands in the order specified in the list of bands.
Returns:
ndarray: a numpy array of image data
"""
arr = self
if bands is not None:
arr = self[bands, ...]
# Force evaluation of the lazy dask graph using the threaded scheduler.
return arr.compute(scheduler=threaded_get) | python | {
"resource": ""
} |
q259057 | DaskImage.randwindow | validation | def randwindow(self, window_shape):
"""Get a random window of a given shape from within an image
Args:
window_shape (tuple): The desired shape of the returned image as (height, width) in pixels.
Returns:
image: a new image object of the specified shape and same type
"""
# row/col are the window's bottom-right corner, drawn so the slice never leaves the image.
row = random.randrange(window_shape[0], self.shape[1])
col = random.randrange(window_shape[1], self.shape[2])
return self[:, row-window_shape[0]:row, col-window_shape[1]:col] | python | {
"resource": ""
} |
q259058 | DaskImage.iterwindows | validation | def iterwindows(self, count=64, window_shape=(256, 256)):
""" Iterate over random windows of an image
Args:
count (int): the number of the windows to generate. Defaults to 64, if `None` will continue to iterate over random windows until stopped.
window_shape (tuple): The desired shape of each image as (height, width) in pixels.
Yields:
image: an image of the given shape and same type.
"""
# count=None means an infinite stream of random windows.
if count is None:
while True:
yield self.randwindow(window_shape)
else:
# xrange is presumably a py2-compat alias provided at module level — verify.
for i in xrange(count):
yield self.randwindow(window_shape) | python | {
"resource": ""
} |
q259059 | DaskImage.window_at | validation | def window_at(self, geom, window_shape):
"""Return a subsetted window of a given size, centered on a geometry object
Useful for generating training sets from vector training data
Will throw a ValueError if the window is not within the image bounds
Args:
geom (shapely,geometry): Geometry to center the image on
window_shape (tuple): The desired shape of the image as (height, width) in pixels.
Returns:
image: image object of same type
"""
# Centroids of the input geometry may not be centered on the object.
# For a covering image we use the bounds instead.
# This is also a workaround for issue 387.
y_size, x_size = window_shape[0], window_shape[1]
bounds = box(*geom.bounds)
# Project the bounds into pixel space and center the window on that point.
px = ops.transform(self.__geo_transform__.rev, bounds).centroid
miny, maxy = int(px.y - y_size/2), int(px.y + y_size/2)
minx, maxx = int(px.x - x_size/2), int(px.x + x_size/2)
_, y_max, x_max = self.shape
if minx < 0 or miny < 0 or maxx > x_max or maxy > y_max:
raise ValueError("Input geometry resulted in a window outside of the image")
return self[:, miny:maxy, minx:maxx] | python | {
"resource": ""
} |
q259060 | DaskImage.window_cover | validation | def window_cover(self, window_shape, pad=True):
""" Iterate over a grid of windows of a specified shape covering an image.
The image is divided into a grid of tiles of size window_shape. Each iteration returns
the next window.
Args:
window_shape (tuple): The desired shape of each image as (height,
width) in pixels.
pad: (bool): Whether or not to pad edge cells. If False, cells that do not
have the desired shape will not be returned. Defaults to True.
Yields:
image: image object of same type.
"""
size_y, size_x = window_shape[0], window_shape[1]
_ndepth, _nheight, _nwidth = self.shape
nheight, _m = divmod(_nheight, size_y)
nwidth, _n = divmod(_nwidth, size_x)
img = self
if pad is True:
# Grow the image to the next whole multiple of the window size so the
# grid covers the full extent, then re-slice via the geo transform.
new_height, new_width = _nheight, _nwidth
if _m != 0:
new_height = (nheight + 1) * size_y
if _n != 0:
new_width = (nwidth + 1) * size_x
if (new_height, new_width) != (_nheight, _nwidth):
bounds = box(0, 0, new_width, new_height)
geom = ops.transform(self.__geo_transform__.fwd, bounds)
img = self[geom]
# Walk the grid row-major; when pad is False, drop partial edge tiles.
row_lims = range(0, img.shape[1], size_y)
col_lims = range(0, img.shape[2], size_x)
for maxy, maxx in product(row_lims, col_lims):
reg = img[:, maxy:(maxy + size_y), maxx:(maxx + size_x)]
if pad is False:
if reg.shape[1:] == window_shape:
yield reg
else:
yield reg | python | {
"resource": ""
} |
q259061 | GeoDaskImage.aoi | validation | def aoi(self, **kwargs):
""" Subsets the Image by the given bounds
Args:
bbox (list): optional. A bounding box array [minx, miny, maxx, maxy]
wkt (str): optional. A WKT geometry string
geojson (str): optional. A GeoJSON geometry dictionary
Returns:
image: an image instance of the same type
"""
# With no recognized geometry kwargs the image is returned unchanged.
g = self._parse_geoms(**kwargs)
if g is None:
return self
else:
return self[g] | python | {
"resource": ""
} |
q259062 | GeoDaskImage.pxbounds | validation | def pxbounds(self, geom, clip=False):
""" Returns the bounds of a geometry object in pixel coordinates
Args:
geom: Shapely geometry object or GeoJSON as Python dictionary or WKT string
clip (bool): Clip the bounds to the min/max extent of the image
Returns:
list: bounds in pixels [min x, min y, max x, max y] clipped to image bounds
"""
# Accept a GeoJSON dict (feature or bare geometry), a shapely geometry, or a WKT string.
try:
if isinstance(geom, dict):
if 'geometry' in geom:
geom = shape(geom['geometry'])
else:
geom = shape(geom)
elif isinstance(geom, BaseGeometry):
geom = shape(geom)
else:
geom = wkt.loads(geom)
except:
raise TypeError ("Invalid geometry object")
# if geometry doesn't overlap the image, return an error
if geom.disjoint(shape(self)):
raise ValueError("Geometry outside of image bounds")
# clip to pixels within the image
(xmin, ymin, xmax, ymax) = ops.transform(self.__geo_transform__.rev, geom).bounds
_nbands, ysize, xsize = self.shape
if clip:
xmin = max(xmin, 0)
ymin = max(ymin, 0)
xmax = min(xmax, xsize)
ymax = min(ymax, ysize)
return (xmin, ymin, xmax, ymax) | python | {
"resource": ""
} |
q259063 | GeoDaskImage.geotiff | validation | def geotiff(self, **kwargs):
""" Creates a geotiff on the filesystem
Args:
path (str): optional, path to write the geotiff file to, default is ./output.tif
proj (str): optional, EPSG string of projection to reproject to
spec (str): optional, if set to 'rgb', write out color-balanced 8-bit RGB tif
bands (list): optional, list of bands to export. If spec='rgb' will default to RGB bands,
otherwise will export all bands
Returns:
str: path the geotiff was written to """
# Default the target projection to the image's own before delegating to to_geotiff.
if 'proj' not in kwargs:
kwargs['proj'] = self.proj
return to_geotiff(self, **kwargs) | python | {
"resource": ""
} |
q259064 | GeoDaskImage._parse_geoms | validation | def _parse_geoms(self, **kwargs):
""" Finds supported geometry types, parses them and returns the bbox """
bbox = kwargs.get('bbox', None)
wkt_geom = kwargs.get('wkt', None)
geojson = kwargs.get('geojson', None)
# Precedence when several are given: bbox > wkt > geojson.
if bbox is not None:
g = box(*bbox)
elif wkt_geom is not None:
g = wkt.loads(wkt_geom)
elif geojson is not None:
g = shape(geojson)
else:
return None
# Reproject into the image's CRS unless the image has no projection.
if self.proj is None:
return g
else:
return self._reproject(g, from_proj=kwargs.get('from_proj', 'EPSG:4326')) | python | {
"resource": ""
} |
q259065 | TmsMeta._tile_coords | validation | def _tile_coords(self, bounds):
""" convert mercator bbox to tile index limits """
# Reproject the web-mercator bounds to lon/lat for mercantile.
tfm = partial(pyproj.transform,
pyproj.Proj(init="epsg:3857"),
pyproj.Proj(init="epsg:4326"))
bounds = ops.transform(tfm, box(*bounds)).bounds
# because tiles have a common corner, the tiles that cover a
# given tile includes the adjacent neighbors.
# https://github.com/mapbox/mercantile/issues/84#issuecomment-413113791
west, south, east, north = bounds
epsilon = 1.0e-10
if east != west and north != south:
# 2D bbox
# shrink the bounds a small amount so that
# shapes/tiles round trip.
west += epsilon
south += epsilon
east -= epsilon
north -= epsilon
params = [west, south, east, north, [self.zoom_level]]
tile_coords = [(tile.x, tile.y) for tile in mercantile.tiles(*params)]
# Reduce the covering tile set to its min/max x and y indices.
xtiles, ytiles = zip(*tile_coords)
minx = min(xtiles)
miny = min(ytiles)
maxx = max(xtiles)
maxy = max(ytiles)
return minx, miny, maxx, maxy | python | {
"resource": ""
} |
q259066 | Workflow.launch | validation | def launch(self, workflow):
"""Launches GBDX workflow.
Args:
workflow (dict): Dictionary specifying workflow tasks.
Returns:
Workflow id (str).
"""
# hit workflow api
try:
r = self.gbdx_connection.post(self.workflows_url, json=workflow)
# On HTTP error, print the status/body for diagnostics, then re-raise
# via a second raise_for_status call.
try:
r.raise_for_status()
except:
print("GBDX API Status Code: %s" % r.status_code)
print("GBDX API Response: %s" % r.text)
r.raise_for_status()
workflow_id = r.json()['id']
return workflow_id
except TypeError:
# Implicitly returns None when the request could not be made.
self.logger.debug('Workflow not launched!') | python | {
"resource": ""
} |
q259067 | Workflow.status | validation | def status(self, workflow_id):
"""Checks workflow status.
Args:
workflow_id (str): Workflow id.
Returns:
Workflow status (str).
"""
self.logger.debug('Get status of workflow: ' + workflow_id)
url = '%(wf_url)s/%(wf_id)s' % {
'wf_url': self.workflows_url, 'wf_id': workflow_id
}
r = self.gbdx_connection.get(url)
r.raise_for_status()
# Only the 'state' field of the workflow record is returned.
return r.json()['state'] | python | {
"resource": ""
} |
q259068 | Workflow.get_stdout | validation | def get_stdout(self, workflow_id, task_id):
"""Get stdout for a particular task.
Args:
workflow_id (str): Workflow id.
task_id (str): Task id.
Returns:
Stdout of the task (string).
"""
url = '%(wf_url)s/%(wf_id)s/tasks/%(task_id)s/stdout' % {
'wf_url': self.workflows_url, 'wf_id': workflow_id, 'task_id': task_id
}
r = self.gbdx_connection.get(url)
r.raise_for_status()
# The endpoint returns plain text, not JSON.
return r.text | python | {
"resource": ""
} |
q259069 | Workflow.cancel | validation | def cancel(self, workflow_id):
"""Cancels a running workflow.
Args:
workflow_id (str): Workflow id.
Returns:
Nothing
"""
self.logger.debug('Canceling workflow: ' + workflow_id)
url = '%(wf_url)s/%(wf_id)s/cancel' % {
'wf_url': self.workflows_url, 'wf_id': workflow_id
}
# POST with an empty body; raise on HTTP error.
r = self.gbdx_connection.post(url, data='')
r.raise_for_status() | python | {
"resource": ""
} |
q259070 | Workflow.launch_batch_workflow | validation | def launch_batch_workflow(self, batch_workflow):
"""Launches GBDX batch workflow.
Args:
batch_workflow (dict): Dictionary specifying batch workflow tasks.
Returns:
Batch Workflow id (str).
"""
# hit workflow api
url = '%(base_url)s/batch_workflows' % {
'base_url': self.base_url
}
# NOTE(review): unlike launch(), this does not call raise_for_status before
# parsing the response JSON.
try:
r = self.gbdx_connection.post(url, json=batch_workflow)
batch_workflow_id = r.json()['batch_workflow_id']
return batch_workflow_id
except TypeError as e:
# Implicitly returns None when the request/parse fails with TypeError.
self.logger.debug('Batch Workflow not launched, reason: {0}'.format(e)) | python | {
"resource": ""
} |
q259071 | Workflow.batch_workflow_status | validation | def batch_workflow_status(self, batch_workflow_id):
"""Checks GBDX batch workflow status.
Args:
batch workflow_id (str): Batch workflow id.
Returns:
Batch Workflow status (str).
"""
self.logger.debug('Get status of batch workflow: ' + batch_workflow_id)
url = '%(base_url)s/batch_workflows/%(batch_id)s' % {
'base_url': self.base_url, 'batch_id': batch_workflow_id
}
# NOTE(review): no raise_for_status here — an HTTP error surfaces as a JSON decode failure.
r = self.gbdx_connection.get(url)
return r.json() | python | {
"resource": ""
} |
q259072 | Ordering.order | validation | def order(self, image_catalog_ids, batch_size=100, callback=None):
'''Orders images from GBDX.
Args:
image_catalog_ids (str or list): A single catalog id or a list of
catalog ids.
batch_size (int): The image_catalog_ids will be split into
batches of batch_size. The ordering API max
batch size is 100, if batch_size is greater
than 100 it will be truncated.
callback (str): A url to call when ordering is completed.
Returns:
order_ids (str or list): If one batch, returns a string. If more
than one batch, returns a list of order ids,
one for each batch.
'''
# POST one batch of ids and append the resulting order_id (if any) to results_list.
def _order_single_batch(url_, ids, results_list):
data = json.dumps(ids) if callback is None else json.dumps({"acquisitionIds": ids, "callback": callback})
r = self.gbdx_connection.post(url_, data=data)
r.raise_for_status()
order_id = r.json().get("order_id")
if order_id:
results_list.append(order_id)
self.logger.debug('Place order')
# The callback variant uses a different endpoint.
url = ('%s/order' if callback is None else '%s/ordercb') % self.base_url
batch_size = min(100, batch_size)
if not isinstance(image_catalog_ids, list):
image_catalog_ids = [image_catalog_ids]
# Strip whitespace, drop empties, and de-duplicate the ids.
sanitized_ids = list(set((id for id in (_id.strip() for _id in image_catalog_ids) if id)))
res = []
# Use itertool batch recipe
# zip(*[iter(...)]*n) yields only full batches; the remainder is handled below.
acq_ids_by_batch = zip(*([iter(sanitized_ids)] * batch_size))
for ids_batch in acq_ids_by_batch:
_order_single_batch(url, ids_batch, res)
# Order reminder
remain_count = len(sanitized_ids) % batch_size
if remain_count > 0:
_order_single_batch(url, sanitized_ids[-remain_count:], res)
# NOTE(review): implicitly returns None when no order ids were collected.
if len(res) == 1:
return res[0]
elif len(res)>1:
return res | python | {
"resource": ""
} |
def status(self, order_id):
    '''Checks imagery order status. There can be more than one image per
       order and this function returns the status of all images
       within the order.

    Args:
        order_id (str): The id of the order placed.

    Returns:
        List of dictionaries, one per image. Each dictionary consists
        of the keys 'acquisition_id', 'location' and 'state'.
    '''
    self.logger.debug('Get status of order ' + order_id)
    endpoint = '%(base_url)s/order/%(order_id)s' % {
        'base_url': self.base_url, 'order_id': order_id
    }
    response = self.gbdx_connection.get(endpoint)
    response.raise_for_status()
    # Missing key means no acquisitions reported yet.
    return response.json().get("acquisitions", {})
def heartbeat(self):
    '''
    Check the heartbeat of the ordering API.

    Args: None

    Returns: True if the API answers "ok", False otherwise.
    '''
    url = '%s/heartbeat' % self.base_url
    # Auth is not required to hit the heartbeat
    r = requests.get(url)
    try:
        return r.json() == "ok"
    except Exception:
        # A non-JSON (or failed) response means the API is unhealthy.
        # A bare `except:` here would also swallow KeyboardInterrupt
        # and SystemExit, so catch Exception explicitly.
        return False
def get(self, catID, includeRelationships=False):
    '''Retrieves the strip footprint WKT string given a cat ID.

    Args:
        catID (str): The source catalog ID from the platform catalog.
        includeRelationships (bool): whether to include graph links to
            related objects. Default False.

    Returns:
        record (dict): A dict object identical to the json representation
            of the catalog record
    '''
    # Bug fix: includeRelationships was accepted but never sent to the API
    # (get_strip_metadata shows the API honors this query parameter).
    url = '%(base_url)s/record/%(catID)s?includeRelationships=%(flag)s' % {
        'base_url': self.base_url,
        'catID': catID,
        'flag': 'true' if includeRelationships else 'false',
    }
    r = self.gbdx_connection.get(url)
    r.raise_for_status()
    return r.json()
def get_strip_metadata(self, catID):
    '''Retrieves the strip catalog metadata given a cat ID.

    Args:
        catID (str): The source catalog ID from the platform catalog.

    Returns:
        metadata (dict): A metadata dictionary.

        TODO: have this return a class object with interesting information exposed.
    '''
    self.logger.debug('Retrieving strip catalog metadata')
    url = '%(base_url)s/record/%(catID)s?includeRelationships=false' % {
        'base_url': self.base_url, 'catID': catID
    }
    response = self.gbdx_connection.get(url)
    if response.status_code == 200:
        return response.json()['properties']
    # Non-200: log a hint, then surface the HTTP error to the caller.
    if response.status_code == 404:
        self.logger.debug('Strip not found: %s' % catID)
        response.raise_for_status()
    else:
        self.logger.debug('There was a problem retrieving catid: %s' % catID)
        response.raise_for_status()
def get_address_coords(self, address):
    ''' Use the google geocoder to get latitude and longitude for an address string

    Args:
        address: any address string

    Returns:
        A tuple of (lat,lng)
    '''
    url = "https://maps.googleapis.com/maps/api/geocode/json?&address=" + address
    response = requests.get(url)
    response.raise_for_status()
    # Take the first (best) geocoder match.
    location = response.json()['results'][0]['geometry']['location']
    return location['lat'], location['lng']
def search_address(self, address, filters=None, startDate=None, endDate=None, types=None):
    ''' Perform a catalog search over an address string

    Args:
        address: any address string
        filters: Array of filters. Optional. Example:
        [
            "(sensorPlatformName = 'WORLDVIEW01' OR sensorPlatformName ='QUICKBIRD02')",
            "cloudCover < 10",
            "offNadirAngle < 10"
        ]
        startDate: string. Optional. Example: "2004-01-01T00:00:00.000Z"
        endDate: string. Optional. Example: "2004-01-01T00:00:00.000Z"
        types: Array of types to search for. Optional. Example (and default): ["Acquisition"]

    Returns:
        catalog search resultset
    '''
    # Geocode first, then delegate to the point search.
    lat, lng = self.get_address_coords(address)
    return self.search_point(lat, lng, filters=filters, startDate=startDate,
                             endDate=endDate, types=types)
def search_point(self, lat, lng, filters=None, startDate=None, endDate=None, types=None, type=None):
    ''' Perform a catalog search over a specific point, specified by lat,lng

    Args:
        lat: latitude
        lng: longitude
        filters: Array of filters. Optional. Example:
        [
            "(sensorPlatformName = 'WORLDVIEW01' OR sensorPlatformName ='QUICKBIRD02')",
            "cloudCover < 10",
            "offNadirAngle < 10"
        ]
        startDate: string. Optional. Example: "2004-01-01T00:00:00.000Z"
        endDate: string. Optional. Example: "2004-01-01T00:00:00.000Z"
        types: Array of types to search for. Optional. Example (and default): ["Acquisition"]
        type: unused; kept for backward compatibility.

    Returns:
        catalog search resultset
    '''
    # Degenerate polygon: the same "lng lat" vertex repeated five times.
    vertex = '%s %s' % (lng, lat)
    searchAreaWkt = 'POLYGON ((' + ', '.join([vertex] * 5) + '))'
    return self.search(searchAreaWkt=searchAreaWkt, filters=filters,
                       startDate=startDate, endDate=endDate, types=types)
def get_data_location(self, catalog_id):
    """
    Find and return the S3 data location given a catalog_id.

    Args:
        catalog_id: The catalog ID

    Returns:
        A string containing the s3 location of the data associated with a
        catalog ID. Returns None if the catalog ID is not found, or if
        there is no data yet associated with it.
    """
    try:
        record = self.get(catalog_id)
    except Exception:
        # Record lookup failed (e.g. unknown catalog id): no location.
        # A bare `except:` here would also swallow KeyboardInterrupt.
        return None

    # Handle Landsat8
    if 'Landsat8' in record['type'] and 'LandsatAcquisition' in record['type']:
        bucket = record['properties']['bucketName']
        prefix = record['properties']['bucketPrefix']
        return 's3://' + bucket + '/' + prefix

    # Handle DG Acquisition
    if 'DigitalGlobeAcquisition' in record['type']:
        o = Ordering()
        res = o.location([catalog_id])
        return res['acquisitions'][0]['location']

    return None
def search(self, searchAreaWkt=None, filters=None, startDate=None, endDate=None, types=None):
    ''' Perform a catalog search

    Args:
        searchAreaWkt: WKT Polygon of area to search. Optional.
        filters: Array of filters. Optional. Example:
        [
            "(sensorPlatformName = 'WORLDVIEW01' OR sensorPlatformName ='QUICKBIRD02')",
            "cloudCover < 10",
            "offNadirAngle < 10"
        ]
        startDate: string. Optional. Example: "2004-01-01T00:00:00.000Z"
        endDate: string. Optional. Example: "2004-01-01T00:00:00.000Z"
        types: Array of types to search for. Optional. Example (and default): ["Acquisition"]

    Returns:
        catalog search resultset

    Raises:
        ValueError: if a date string does not match the expected format.
        Exception: if startDate comes after endDate.
    '''
    # Default to search for Acquisition type objects.
    if not types:
        types = ['Acquisition']

    # strptime also validates the format, so parse each date even when
    # only one endpoint of the time window is given.
    startDateTime = endDateTime = None
    if startDate:
        startDateTime = datetime.datetime.strptime(startDate, '%Y-%m-%dT%H:%M:%S.%fZ')
    if endDate:
        endDateTime = datetime.datetime.strptime(endDate, '%Y-%m-%dT%H:%M:%S.%fZ')
    if startDateTime and endDateTime and (endDateTime - startDateTime).days < 0:
        raise Exception("startDate must come before endDate.")

    postdata = {
        "searchAreaWkt": searchAreaWkt,
        "types": types,
        "startDate": startDate,
        "endDate": endDate,
    }
    if filters:
        postdata['filters'] = filters
    # (A redundant re-assignment of postdata['searchAreaWkt'] was removed;
    # the key is already set above.)

    url = '%(base_url)s/search' % {'base_url': self.base_url}
    headers = {'Content-Type': 'application/json'}
    r = self.gbdx_connection.post(url, headers=headers, data=json.dumps(postdata))
    r.raise_for_status()
    return r.json()['results']
def get_most_recent_images(self, results, types=None, sensors=None, N=1):
    ''' Return the most recent images from a catalog resultset.

    Args:
        results: a catalog resultset, as returned from a search
        types: array of types you want. optional.
        sensors: array of sensornames. optional.
        N: number of recent images to return. defaults to 1.

    Returns:
        list of the N most recent catalog items, or None if results is empty
    '''
    if not len(results):
        return None
    # None defaults instead of mutable [] defaults; both are falsy, so the
    # filtering behavior is unchanged for callers.
    # filter on type
    if types:
        results = [r for r in results if r['type'] in types]
    # filter on sensor
    if sensors:
        results = [r for r in results
                   if r['properties'].get('sensorPlatformName') in sensors]
    # sort by timestamp, newest first
    newlist = sorted(results, key=lambda k: k['properties'].get('timestamp'),
                     reverse=True)
    return newlist[:N]
def use(cls, name, method: [str, Set, List], url=None):
    """Register an interface entry mapping `name` to HTTP method(s) and url.

    Args:
        name: interface entry name.
        method: a single method name (str) or a list/set/tuple of them.
        url: optional url for the entry.

    Raises:
        TypeError: if `method` is not a str, list, set or tuple.
    """
    if not isinstance(method, (str, list, set, tuple)):
        # TypeError instead of BaseException: raising BaseException escapes
        # normal `except Exception` handlers. TypeError is still caught by
        # any existing `except BaseException` callers.
        raise TypeError('Invalid type of method: %s' % type(method).__name__)
    if isinstance(method, str):
        method = {method}
    # TODO: check methods available
    cls._interface[name] = [{'method': method, 'url': url}]
def validate(method):
    """
    Decorator that validates a config option name before delegating
    to the wrapped accessor.

    Raises:
        ValueError: if the option name is not in ``self.allowed_opts``.
    """
    @functools.wraps(method)
    def wrapper(self, name, *args):
        if name in self.allowed_opts:
            return method(self, name, *args)
        raise ValueError(
            'configuration option "{}" is not supported'.format(name))

    return wrapper
def run(self, ctx):
    """
    Runs the current phase.
    """
    # Reverse the engine assertions when the context demands it.
    if ctx.reverse:
        self.engine.reverse()

    if self.engine.empty:
        raise AssertionError('grappa: no assertions to run')

    try:
        # Run assertions in series, returning the error if present.
        return self.run_assertions(ctx)
    except Exception as err:
        # Legit grappa internal errors propagate untouched.
        if getattr(err, '__legit__', False):
            raise err
        # Anything else gets rendered.
        return self.render_error(ctx, err)
def run_matcher(self, subject, *expected, **kw):
    """
    Runs the operator matcher test function.
    """
    # Record the expectation for later error reporting.
    self.expected = expected

    # Matchers receive the expected values as extra positional arguments.
    call_args = (subject,)
    if self.kind == OperatorTypes.MATCHER:
        call_args += expected

    try:
        result = self.match(*call_args, **kw)
    except Exception as error:
        return self._make_error(error=error)

    # A matcher may return a (bool, reasons) tuple.
    reasons = []
    if isinstance(result, tuple):
        result, reasons = result

    # The test passes when the strict boolean outcome agrees with the
    # negation context.
    passed = (result is False and self.ctx.negate) or \
             (result is True and not self.ctx.negate)
    if passed:
        return True

    return self._make_error(reasons=reasons)
def run(self, *args, **kw):
    """
    Runs the current operator with the subject arguments to test.

    This method is implemented by matchers only.
    """
    log.debug('[operator] run "{}" with arguments: {}'.format(
        self.__class__.__name__, args
    ))

    # Attribute operators only receive the context; matchers get the
    # subject arguments.
    if self.kind == OperatorTypes.ATTRIBUTE:
        return self.match(self.ctx)
    return self.run_matcher(*args, **kw)
def operator(name=None, operators=None, aliases=None, kind=None):
    """
    Registers a new operator function in the test engine.

    Arguments:
        *args: variadic arguments.
        **kw: variadic keyword arguments.

    Returns:
        function
    """
    def delegator(assertion, subject, expected, *args, **kw):
        return assertion.test(subject, expected, *args, **kw)

    def decorator(fn):
        instance = Operator(fn=fn, aliases=aliases, kind=kind)

        # Primary DSL keyword: explicit name or the function's own name.
        primary = name if isinstance(name, six.string_types) else fn.__name__
        instance.operators = (primary,)

        # Extra DSL keywords, normalized to a tuple.
        extra = operators
        if isinstance(extra, list):
            extra = tuple(extra)
        if isinstance(extra, tuple):
            instance.operators += extra

        # Register operator in the global engine.
        Engine.register(instance)

        return functools.partial(delegator, instance)

    # Support usage both as @operator and @operator(...).
    return decorator(name) if inspect.isfunction(name) else decorator
def attribute(*args, **kw):
    """
    Registers a new attribute-only operator function in the test engine.

    Arguments:
        *args: variadic arguments.
        **kw: variadic keyword arguments.

    Returns:
        function
    """
    # Thin wrapper that forces the attribute operator kind.
    return operator(*args, kind=Operator.Type.ATTRIBUTE, **kw)
def use(plugin):
    """
    Register plugin in grappa.

    `plugin` argument can be a function or a object that implement `register`
    method, which should accept one argument: `grappa.Engine` instance.

    Arguments:
        plugin (function|module): grappa plugin object to register.

    Raises:
        ValueError: if `plugin` is not a valid interface.

    Example::

        import grappa

        class MyOperator(grappa.Operator):
            pass

        def my_plugin(engine):
            engine.register(MyOperator)

        grappa.use(my_plugin)
    """
    log.debug('register new plugin: {}'.format(plugin))

    # Plain function plugins are called directly with the engine.
    if inspect.isfunction(plugin):
        return plugin(Engine)

    # Object plugins must expose a register() hook.
    if plugin and hasattr(plugin, 'register'):
        return plugin.register(Engine)

    raise ValueError('invalid plugin: must be a function or '
                     'implement register() method')
def load():
    """
    Loads the built-in operators into the global test engine.
    """
    for entry in operators:
        module_name, symbols = entry[0], entry[1:]
        path = 'grappa.operators.{}'.format(module_name)

        # Dynamically import the operator module.
        module = __import__(path, None, None, symbols)

        # Register each exported operator class in the test engine.
        for symbol in symbols:
            Engine.register(getattr(module, symbol))
def register_operators(*operators):
    """
    Registers one or multiple operators in the test engine.

    Raises:
        NotImplementedError: if an argument does not implement the
            operator interface.
        ValueError: if one of the operator's DSL keywords is already
            registered by another operator.
    """
    def validate(operator):
        # Every argument must implement the operator interface.
        if isoperator(operator):
            return True
        raise NotImplementedError('invalid operator: {}'.format(operator))

    def register(operator):
        # Register operator by DSL keywords
        for name in operator.operators:
            # Check valid operators
            if name in Engine.operators:
                raise ValueError('operator name "{}" from {} is already '
                                 'in use by other operator'.format(
                                     name,
                                     operator.__name__
                                 ))
            # Register operator by name
            Engine.operators[name] = operator

    # A plain loop instead of a list comprehension evaluated only for its
    # side effects: same behavior, explicit intent.
    for operator in operators:
        if validate(operator):
            register(operator)
def set_rate(self, rate):
    """
    Set the playback rate of the video as a multiple of the default playback speed

    Examples:
        >>> player.set_rate(2)
        # Will play twice as fast as normal speed
        >>> player.set_rate(0.5)
        # Will play half speed
    """
    # Cache the rate the player actually reports back.
    new_rate = self._player_interface_property('Rate', dbus.Double(rate))
    self._rate = new_rate
    return new_rate
def play_pause(self):
    """
    Pause playback if currently playing, otherwise start playing if currently paused.
    """
    self._player_interface.PlayPause()
    # Toggle the cached state and fire the matching event.
    self._is_playing = not self._is_playing
    event = self.playEvent if self._is_playing else self.pauseEvent
    event(self)
def seek(self, relative_position):
    """
    Seek the video by `relative_position` seconds

    Args:
        relative_position (float): The position in seconds to seek to.
    """
    # The DBus interface expects the offset in microseconds.
    microseconds = 1000.0 * 1000 * relative_position
    self._player_interface.Seek(Int64(microseconds))
    self.seekEvent(self, relative_position)
def set_position(self, position):
    """
    Set the video to playback position to `position` seconds from the start of the video

    Args:
        position (float): The position in seconds.
    """
    # Position is expressed in microseconds on the DBus interface; the
    # object path argument is ignored by omxplayer.
    self._player_interface.SetPosition(ObjectPath("/not/used"),
                                       Int64(position * 1000.0 * 1000))
    self.positionEvent(self, position)
def set_video_pos(self, x1, y1, x2, y2):
    """
    Set the video position on the screen

    Args:
        x1 (int): Top left x coordinate (px)
        y1 (int): Top left y coordinate (px)
        x2 (int): Bottom right x coordinate (px)
        y2 (int): Bottom right y coordinate (px)
    """
    # omxplayer expects the coordinates as one space-separated string.
    position = ' '.join(str(coord) for coord in (x1, y1, x2, y2))
    self._player_interface.VideoPos(ObjectPath('/not/used'), String(position))
def play_sync(self):
    """
    Play the video and block whilst the video is playing
    """
    self.play()
    logger.info("Playing synchronously")
    poll_interval = 0.05
    try:
        # Give playback a moment to actually start before polling.
        time.sleep(poll_interval)
        logger.debug("Wait for playing to start")
        while self.is_playing():
            time.sleep(poll_interval)
    except DBusException:
        logger.error(
            "Cannot play synchronously any longer as DBus calls timed out."
        )
def play(self):
    """
    Play the video asynchronously returning control immediately to the calling code
    """
    # Guard clause: nothing to do if playback is already running.
    if self.is_playing():
        return
    self.play_pause()
    self._is_playing = True
    self.playEvent(self)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.