_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
def extract_contours(array, tile, interval=100, field='elev', base=0):
    """
    Extract contour lines from an array.

    Parameters
    ----------
    array : array
        input elevation data
    tile : Tile
        tile covering the array
    interval : integer
        elevation value interval when drawing contour lines
    field : string
        output field name containing elevation value
    base : integer
        elevation base value the intervals are computed from

    Returns
    -------
    contours : list
        contours as GeoJSON-like pairs of properties and geometry
    """
    import matplotlib.pyplot as plt
    levels = _get_contour_values(
        array.min(), array.max(), interval=interval, base=base)
    if not levels:
        return []
    contours = plt.contour(array, levels)
    out_contours = []
    # each collection corresponds to one requested level; zip() replaces
    # the original redundant manual index counter that tracked the same
    # position as the loop variable
    for elevation, collection in zip(levels, contours.collections):
        for path in collection.get_paths():
            # path.vertices columns are (col, row) in array space; convert
            # to map coordinates via the tile's georeference
            out_coords = [
                (
                    tile.left + (col * tile.pixel_x_size),
                    tile.top - (row * tile.pixel_y_size),
                )
                for row, col in zip(path.vertices[:, 1], path.vertices[:, 0])
            ]
            # a LineString needs at least two points; skip degenerate paths
            if len(out_coords) >= 2:
                out_contours.append(
                    dict(
                        properties={field: elevation},
                        geometry=mapping(LineString(out_coords))
                    )
                )
    return out_contours
"resource": ""
} |
q257401 | _get_contour_values | validation | def _get_contour_values(min_val, max_val, base=0, interval=100):
"""Return a list of values between min and max within an interval."""
i = base
out = []
if min_val < base:
while i >= min_val:
i -= interval
while i <= max_val:
if i >= min_val:
out.append(i)
i += interval
return out | python | {
"resource": ""
} |
def create(
    mapchete_file,
    process_file,
    out_format,
    out_path=None,
    pyramid_type=None,
    force=False
):
    """Create an empty Mapchete and process file in a given directory."""
    # refuse to overwrite existing files unless explicitly forced
    if (
        (os.path.isfile(process_file) or os.path.isfile(mapchete_file))
        and not force
    ):
        raise IOError("file(s) already exists")
    out_path = out_path or os.path.join(os.getcwd(), "output")
    # copy the process template into the current working directory
    process_template = pkg_resources.resource_filename(
        "mapchete.static", "process_template.py"
    )
    process_file = os.path.join(os.getcwd(), process_file)
    copyfile(process_template, process_file)
    # render the mapchete configuration template with output and pyramid
    # sections serialized as YAML
    mapchete_template = pkg_resources.resource_filename(
        "mapchete.static", "mapchete_template.mapchete"
    )
    output_options = dict(
        format=out_format, path=out_path, **FORMAT_MANDATORY[out_format]
    )
    substitutions = dict(
        process_file=process_file,
        output=dump({'output': output_options}, default_flow_style=False),
        pyramid=dump(
            {'pyramid': {'grid': pyramid_type}}, default_flow_style=False
        )
    )
    with open(mapchete_template, 'r') as template_src:
        rendered = Template(template_src.read()).substitute(substitutions)
    with open(mapchete_file, 'w') as target_config:
        target_config.write(rendered)
"resource": ""
} |
def get_path(self, tile):
    """
    Determine target file path.

    Parameters
    ----------
    tile : ``BufferedTile``
        must be member of output ``TilePyramid``

    Returns
    -------
    path : string
    """
    # layout: <base path>/<zoom>/<row>/<col><extension>
    return os.path.join(
        self.path,
        str(tile.zoom),
        str(tile.row),
        str(tile.col) + self.file_extension
    )
"resource": ""
} |
def prepare_path(self, tile):
    """
    Create directory and subdirectory if necessary.

    Parameters
    ----------
    tile : ``BufferedTile``
        must be member of output ``TilePyramid``
    """
    target_dir = os.path.dirname(self.get_path(tile))
    makedirs(target_dir)
"resource": ""
} |
def output_is_valid(self, process_data):
    """
    Check whether process output is allowed with output driver.

    Parameters
    ----------
    process_data : raw process output

    Returns
    -------
    True or False
    """
    data_type = self.METADATA["data_type"]
    if data_type == "raster":
        # raster output may be a plain/masked array or an (array, tags) pair
        return (
            is_numpy_or_masked_array(process_data)
            or is_numpy_or_masked_array_with_tags(process_data)
        )
    if data_type == "vector":
        return is_feature_list(process_data)
"resource": ""
} |
def output_cleaned(self, process_data):
    """
    Return verified and cleaned output.

    Parameters
    ----------
    process_data : raw process output

    Returns
    -------
    NumPy array or list of features.
    """
    data_type = self.METADATA["data_type"]
    if data_type == "raster":
        if is_numpy_or_masked_array(process_data):
            return process_data
        if is_numpy_or_masked_array_with_tags(process_data):
            # recurse on the array part, keep the tags alongside
            data, tags = process_data
            return self.output_cleaned(data), tags
    elif data_type == "vector":
        return list(process_data)
"resource": ""
} |
def extract_subset(self, input_data_tiles=None, out_tile=None):
    """
    Extract subset from multiple tiles.

    Parameters
    ----------
    input_data_tiles : list of (``Tile``, process data) tuples
    out_tile : ``Tile``

    Returns
    -------
    NumPy array or list of features.
    """
    data_type = self.METADATA["data_type"]
    if data_type == "raster":
        # mosaic all input tiles, then cut out the output tile window
        mosaic = create_mosaic(input_data_tiles)
        return extract_from_array(
            in_raster=prepare_array(
                mosaic.data,
                nodata=self.nodata,
                dtype=self.output_params["dtype"]
            ),
            in_affine=mosaic.affine,
            out_tile=out_tile
        )
    if data_type == "vector":
        # flatten all tile feature lists and keep intersecting features
        all_features = chain.from_iterable(
            features for _, features in input_data_tiles
        )
        return [
            feature for feature in all_features
            if shape(feature["geometry"]).intersects(out_tile.bbox)
        ]
"resource": ""
} |
def calculate_slope_aspect(elevation, xres, yres, z=1.0, scale=1.0):
    """
    Calculate slope and aspect map.

    Return a pair of arrays 2 pixels smaller than the input elevation
    array. Slope is returned in radians, from 0 for sheer face to pi/2
    for flat ground. Aspect is returned in radians, counterclockwise
    from -pi at north around to pi.
    Logic here is borrowed from hillshade.cpp:
    http://www.perrygeo.net/wordpress/?p=7

    Parameters
    ----------
    elevation : array
        input elevation data
    xres : float
        column width
    yres : float
        row height
    z : float
        vertical exaggeration factor
    scale : float
        scale factor of pixel size units versus height units (insert 112000
        when having elevation values in meters in a geodetic projection)

    Returns
    -------
    slope, aspect : tuple of arrays
    """
    z = float(z)
    scale = float(scale)
    rows, cols = elevation.shape[0] - 2, elevation.shape[1] - 2
    # one shifted view per 3x3 neighborhood position, keyed by (row, col)
    windows = {
        (r, c): z * elevation[r:(r + rows), c:(c + cols)]
        for r, c in product(range(3), range(3))
    }
    # Horn-style finite differences: left column minus right column ...
    grad_x = (
        (windows[(0, 0)] + 2 * windows[(1, 0)] + windows[(2, 0)])
        - (windows[(0, 2)] + 2 * windows[(1, 2)] + windows[(2, 2)])
    ) / (8.0 * xres * scale)
    # ... and bottom row minus top row
    grad_y = (
        (windows[(2, 0)] + 2 * windows[(2, 1)] + windows[(2, 2)])
        - (windows[(0, 0)] + 2 * windows[(0, 1)] + windows[(0, 2)])
    ) / (8.0 * yres * scale)
    # in radians, from 0 to pi/2
    slope = math.pi / 2 - np.arctan(np.sqrt(grad_x * grad_x + grad_y * grad_y))
    # in radians counterclockwise, from -pi at north back to pi
    aspect = np.arctan2(grad_x, grad_y)
    return slope, aspect
"resource": ""
} |
def hillshade(elevation, tile, azimuth=315.0, altitude=45.0, z=1.0, scale=1.0):
    """
    Return hillshaded numpy array.

    Parameters
    ----------
    elevation : array
        input elevation data
    tile : Tile
        tile covering the array
    azimuth : float
        horizontal angle of the light source in degrees (default: 315.0)
    altitude : float
        vertical angle of the light source in degrees (default: 45.0)
    z : float
        vertical exaggeration factor
    scale : float
        scale factor of pixel size units versus height units (insert 112000
        when having elevation values in meters in a geodetic projection)

    Returns
    -------
    hillshade : masked array
        uint8 shading, same shape as the input, masked like the input
    """
    azimuth = float(azimuth)
    altitude = float(altitude)
    z = float(z)
    scale = float(scale)
    xres = tile.tile.pixel_x_size
    # negative because rows grow downwards while y coordinates grow upwards
    yres = -tile.tile.pixel_y_size
    # slope/aspect arrays are 2 pixels smaller than the input
    slope, aspect = calculate_slope_aspect(
        elevation, xres, yres, z=z, scale=scale)
    deg2rad = math.pi / 180.0
    # standard Lambertian hillshade formula on slope/aspect
    shaded = np.sin(altitude * deg2rad) * np.sin(slope) \
        + np.cos(altitude * deg2rad) * np.cos(slope) \
        * np.cos((azimuth - 90.0) * deg2rad - aspect)
    # shaded now has values between -1.0 and +1.0
    # stretch to 0 - 255 and invert
    # NOTE(review): the product is in [-255, 0]; casting negative floats to
    # uint8 relies on wraparound behavior for the inversion — confirm this
    # is intentional before refactoring
    shaded = (((shaded+1.0)/2)*-255.0).astype("uint8")
    # add one pixel padding using the edge values
    # (restores the 2 pixels lost by calculate_slope_aspect); assumes
    # `elevation` is a masked array so that .mask exists — TODO confirm
    return ma.masked_array(
        data=np.pad(shaded, 1, mode='edge'), mask=elevation.mask
    )
"resource": ""
} |
def tile(self, zoom, row, col):
    """
    Return ``BufferedTile`` object of this ``BufferedTilePyramid``.

    Parameters
    ----------
    zoom : integer
        zoom level
    row : integer
        tile matrix row
    col : integer
        tile matrix column

    Returns
    -------
    buffered tile : ``BufferedTile``
    """
    # wrap the plain pyramid tile with this pyramid's pixelbuffer
    return BufferedTile(
        self.tile_pyramid.tile(zoom, row, col), pixelbuffer=self.pixelbuffer
    )
"resource": ""
} |
def tiles_from_bounds(self, bounds, zoom):
    """
    Return all tiles intersecting with bounds.

    Bounds values will be cleaned if they cross the antimeridian or are
    outside of the Northern or Southern tile pyramid bounds.

    Parameters
    ----------
    bounds : tuple
        (left, bottom, right, top) bounding values in tile pyramid CRS
    zoom : integer
        zoom level

    Yields
    ------
    intersecting tiles : generator
        generates ``BufferedTiles``
    """
    # delegate to the bbox-based lookup using the bounds as a box geometry
    bbox = box(*bounds)
    for intersecting_tile in self.tiles_from_bbox(bbox, zoom):
        yield self.tile(*intersecting_tile.id)
"resource": ""
} |
def tiles_from_bbox(self, geometry, zoom):
    """
    All metatiles intersecting with given bounding box.

    Parameters
    ----------
    geometry : ``shapely.geometry``
    zoom : integer
        zoom level

    Yields
    ------
    intersecting tiles : generator
        generates ``BufferedTiles``
    """
    # convert each plain pyramid tile into a buffered tile
    for plain_tile in self.tile_pyramid.tiles_from_bbox(geometry, zoom):
        yield self.tile(*plain_tile.id)
"resource": ""
} |
def tiles_from_geom(self, geometry, zoom):
    """
    Return all tiles intersecting with input geometry.

    Parameters
    ----------
    geometry : ``shapely.geometry``
    zoom : integer
        zoom level

    Yields
    ------
    intersecting tiles : ``BufferedTile``
    """
    # convert each plain pyramid tile into a buffered tile
    for plain_tile in self.tile_pyramid.tiles_from_geom(geometry, zoom):
        yield self.tile(*plain_tile.id)
"resource": ""
} |
def intersecting(self, tile):
    """
    Return all BufferedTiles intersecting with tile.

    Parameters
    ----------
    tile : ``BufferedTile``
        another tile
    """
    # wrap each intersecting plain tile into a buffered tile
    return [
        self.tile(*neighbour.id)
        for neighbour in self.tile_pyramid.intersecting(tile)
    ]
"resource": ""
} |
def to_dict(self):
    """
    Return dictionary representation of pyramid parameters.
    """
    return {
        "grid": self.grid.to_dict(),
        "metatiling": self.metatiling,
        "tile_size": self.tile_size,
        "pixelbuffer": self.pixelbuffer,
    }
"resource": ""
} |
def get_neighbors(self, connectedness=8):
    """
    Return tile neighbors.

    Tile neighbors are unique, i.e. in some edge cases, where both the left
    and right neighbor wrapped around the antimeridian is the same. Also,
    neighbors outside the northern and southern TilePyramid boundaries are
    excluded, because they are invalid.

    -------------
    | 8 | 1 | 5 |
    -------------
    | 4 | x | 2 |
    -------------
    | 7 | 3 | 6 |
    -------------

    Parameters
    ----------
    connectedness : int
        [4 or 8] return four direct neighbors or all eight.

    Returns
    -------
    list of BufferedTiles
    """
    # buffer each plain neighbor with this tile's pixelbuffer
    plain_neighbors = self._tile.get_neighbors(connectedness=connectedness)
    return [BufferedTile(n, self.pixelbuffer) for n in plain_neighbors]
"resource": ""
} |
def is_on_edge(self):
    """Determine whether tile touches or goes over pyramid edge."""
    pyramid = self.tile_pyramid
    touches_left = self.left <= pyramid.left
    touches_bottom = self.bottom <= pyramid.bottom
    touches_right = self.right >= pyramid.right
    touches_top = self.top >= pyramid.top
    return touches_left or touches_bottom or touches_right or touches_top
"resource": ""
} |
def execute(
    mp,
    resampling="nearest",
    scale_method=None,
    scales_minmax=None
):
    """
    Read, stretch and return raster data.

    Inputs:
    -------
    raster
        raster file

    Parameters:
    -----------
    resampling : str
        rasterio.Resampling method
    scale_method : str
        - dtype_scale: use dtype minimum and maximum values
        - minmax_scale: use dataset bands minimum and maximum values
        - crop: clip data to output dtype
    scales_minmax : tuple
        tuple of band specific scale values

    Output:
    -------
    np.ndarray
    """
    with mp.open("raster", resampling=resampling) as raster_file:
        # exit if input tile is empty
        if raster_file.is_empty():
            return "empty"
        raster_data = raster_file.read()
        # make sure we always iterate over a band axis
        if raster_data.ndim == 2:
            raster_data = ma.expand_dims(raster_data, axis=0)
        # without a scale method the min/max pairs are unused placeholders
        if not scale_method:
            scales_minmax = [(i, i) for i in range(len(raster_data))]
        scaled_bands = []
        band_masks = []
        for band, (scale_min, scale_max) in zip(raster_data, scales_minmax):
            if scale_method in ["dtype_scale", "minmax_scale"]:
                scaled_bands.append(_stretch_array(band, scale_min, scale_max))
            elif scale_method == "crop":
                scaled_bands.append(np.clip(band, scale_min, scale_max))
            else:
                scaled_bands.append(band)
            band_masks.append(band.mask)
        return ma.masked_array(np.stack(scaled_bands), np.stack(band_masks))
"resource": ""
} |
def open(self, tile, process, **kwargs):
    """
    Open process output as input for other process.

    Parameters
    ----------
    tile : ``Tile``
    process : ``MapcheteProcess``
    kwargs : keyword arguments
    """
    resampling = kwargs.get("resampling", None)
    return InputTile(tile, process, resampling)
"resource": ""
} |
def for_web(self, data):
    """
    Convert data to web output.

    Parameters
    ----------
    data : array

    Returns
    -------
    web data : array
    """
    # convert to RGBA, mask nodata pixels, then serialize as PNG
    rgba = self._prepare_array_for_png(data)
    masked = ma.masked_where(rgba == self.nodata, rgba)
    return memory_file(masked, self.profile()), 'image/png'
"resource": ""
} |
def serve(
    mapchete_file,
    port=None,
    internal_cache=None,
    zoom=None,
    bounds=None,
    overwrite=False,
    readonly=False,
    memory=False,
    input_file=None,
    debug=False,
    logfile=None
):
    """
    Serve a Mapchete process.

    Creates the Mapchete host and serves both web page with OpenLayers and the
    WMTS simple REST endpoint.

    Parameters
    ----------
    mapchete_file : path to the process configuration to serve
    port : port the Flask server listens on
    zoom, bounds : optional process limits
    overwrite, readonly, memory : mutually exclusive process mode flags
    input_file : optional single input file override
    debug : run Flask in debug mode
    """
    app = create_app(
        mapchete_files=[mapchete_file], zoom=zoom,
        bounds=bounds, single_input_file=input_file,
        mode=_get_mode(memory, readonly, overwrite), debug=debug
    )
    if os.environ.get("MAPCHETE_TEST") == "TRUE":
        logger.debug("don't run flask app, MAPCHETE_TEST environment detected")
    else:
        # pass the user-supplied debug flag instead of hard-coding True,
        # which ignored the `debug` parameter entirely
        app.run(
            threaded=True, debug=debug, port=port, host='0.0.0.0',
            extra_files=[mapchete_file]
        )
"resource": ""
} |
def create_app(
    mapchete_files=None, zoom=None, bounds=None, single_input_file=None,
    mode="continue", debug=None
):
    """
    Configure and create Flask app.

    Opens one Mapchete process per given configuration file and exposes an
    index page plus a WMTS-simple tile endpoint for each of them.
    """
    from flask import Flask, render_template_string
    app = Flask(__name__)
    # one open Mapchete process per file, keyed by file basename
    mapchete_processes = {
        os.path.splitext(os.path.basename(mapchete_file))[0]: mapchete.open(
            mapchete_file, zoom=zoom, bounds=bounds,
            single_input_file=single_input_file, mode=mode, with_cache=True,
            debug=debug)
        for mapchete_file in mapchete_files
    }
    # pyramid/grid parameters are taken from the first process; assumes all
    # given processes share the same pyramid — TODO confirm
    mp = next(iter(mapchete_processes.values()))
    pyramid_type = mp.config.process_pyramid.grid
    pyramid_srid = mp.config.process_pyramid.crs.to_epsg()
    process_bounds = ",".join([str(i) for i in mp.config.bounds_at_zoom()])
    # WMTS tile matrix set identifier used in the endpoint URL
    grid = "g" if pyramid_srid == 3857 else "WGS84"
    web_pyramid = BufferedTilePyramid(pyramid_type)
    @app.route('/', methods=['GET'])
    def index():
        """Render and hosts the appropriate OpenLayers instance."""
        return render_template_string(
            pkgutil.get_data(
                'mapchete.static', 'index.html').decode("utf-8"),
            srid=pyramid_srid,
            process_bounds=process_bounds,
            is_mercator=(pyramid_srid == 3857),
            process_names=mapchete_processes.keys()
        )
    @app.route(
        "/".join([
            "", "wmts_simple", "1.0.0", "<string:mp_name>", "default",
            grid, "<int:zoom>", "<int:row>", "<int:col>.<string:file_ext>"]),
        methods=['GET'])
    def get(mp_name, zoom, row, col, file_ext):
        """Return processed, empty or error (in pink color) tile."""
        logger.debug(
            "received tile (%s, %s, %s) for process %s", zoom, row, col,
            mp_name)
        # convert zoom, row, col into tile object using web pyramid
        return _tile_response(
            mapchete_processes[mp_name], web_pyramid.tile(zoom, row, col),
            debug)
    return app
"resource": ""
} |
def read_raster_window(
    input_files,
    tile,
    indexes=None,
    resampling="nearest",
    src_nodata=None,
    dst_nodata=None,
    gdal_opts=None
):
    """
    Return NumPy arrays from an input raster.

    NumPy arrays are reprojected and resampled to tile properties from input
    raster. If tile boundaries cross the antimeridian, data on the other side
    of the antimeridian will be read and concatenated to the numpy array
    accordingly.

    Parameters
    ----------
    input_files : string or list
        path to a raster file or list of paths to multiple raster files readable by
        rasterio.
    tile : Tile
        a Tile object
    indexes : list or int
        a list of band numbers; None will read all.
    resampling : string
        one of "nearest", "average", "bilinear" or "lanczos"
    src_nodata : int or float, optional
        if not set, the nodata value from the source dataset will be used
    dst_nodata : int or float, optional
        if not set, the nodata value from the source dataset will be used
    gdal_opts : dict
        GDAL options passed on to rasterio.Env()

    Returns
    -------
    raster : MaskedArray
    """
    # remote-vs-local GDAL settings are derived from the first input path
    first_file = input_files[0] if isinstance(input_files, list) else input_files
    env_options = get_gdal_options(
        gdal_opts, is_remote=path_is_remote(first_file, s3=True)
    )
    with rasterio.Env(**env_options) as env:
        logger.debug("reading %s with GDAL options %s", input_files, env.options)
        return _read_raster_window(
            input_files,
            tile,
            indexes=indexes,
            resampling=resampling,
            src_nodata=src_nodata,
            dst_nodata=dst_nodata
        )
"resource": ""
} |
def _get_warped_array(
    input_file=None,
    indexes=None,
    dst_bounds=None,
    dst_shape=None,
    dst_crs=None,
    resampling=None,
    src_nodata=None,
    dst_nodata=None
):
    """Extract a numpy array from a raster file."""
    # thin wrapper that logs any read error before re-raising
    try:
        return _rasterio_read(
            input_file=input_file,
            indexes=indexes,
            dst_bounds=dst_bounds,
            dst_shape=dst_shape,
            dst_crs=dst_crs,
            resampling=resampling,
            src_nodata=src_nodata,
            dst_nodata=dst_nodata
        )
    except Exception as error:
        logger.exception("error while reading file %s: %s", input_file, error)
        raise
"resource": ""
} |
def write_raster_window(
    in_tile=None, in_data=None, out_profile=None, out_tile=None, out_path=None,
    tags=None, bucket_resource=None
):
    """
    Write a window from a numpy array to an output file.

    Supports local paths and ``s3://`` URLs; empty (fully masked) windows
    are skipped without writing.

    Parameters
    ----------
    in_tile : ``BufferedTile``
        ``BufferedTile`` with a data attribute holding NumPy data
    in_data : array
    out_profile : dictionary
        metadata dictionary for rasterio
    out_tile : ``Tile``
        provides output boundaries; if None, in_tile is used
    out_path : string
        output path to write to
    tags : optional tags to be added to GeoTIFF file
    bucket_resource : boto3 bucket resource to write to in case of S3 output

    Raises
    ------
    TypeError : if out_path is not a string
    DeprecationWarning : if out_path is the deprecated "memoryfile" token
    """
    if not isinstance(out_path, str):
        raise TypeError("out_path must be a string")
    logger.debug("write %s", out_path)
    if out_path == "memoryfile":
        raise DeprecationWarning(
            "Writing to memoryfile with write_raster_window() is deprecated. "
            "Please use RasterWindowMemoryFile."
        )
    out_tile = in_tile if out_tile is None else out_tile
    _validate_write_window_params(in_tile, out_tile, in_data, out_profile)
    # extract data
    # only cut a window when the output tile differs from the input tile
    window_data = extract_from_array(
        in_raster=in_data,
        in_affine=in_tile.affine,
        out_tile=out_tile
    ) if in_tile != out_tile else in_data
    # use transform instead of affine
    if "affine" in out_profile:
        out_profile["transform"] = out_profile.pop("affine")
    # write if there is any band with non-masked data
    # (window_data.all() evaluates to the `masked` singleton only when
    # every element is masked)
    if window_data.all() is not ma.masked:
        try:
            if out_path.startswith("s3://"):
                # serialize to an in-memory GeoTIFF and upload via boto3;
                # the object key is the path without the "s3://<bucket>/" part
                with RasterWindowMemoryFile(
                    in_tile=out_tile,
                    in_data=window_data,
                    out_profile=out_profile,
                    out_tile=out_tile,
                    tags=tags
                ) as memfile:
                    logger.debug((out_tile.id, "upload tile", out_path))
                    bucket_resource.put_object(
                        Key="/".join(out_path.split("/")[3:]),
                        Body=memfile
                    )
            else:
                with rasterio.open(out_path, 'w', **out_profile) as dst:
                    logger.debug((out_tile.id, "write tile", out_path))
                    dst.write(window_data.astype(out_profile["dtype"], copy=False))
                    _write_tags(dst, tags)
        except Exception as e:
            logger.exception("error while writing file %s: %s", out_path, e)
            raise
    else:
        logger.debug((out_tile.id, "array window empty", out_path))
"resource": ""
} |
def extract_from_array(in_raster=None, in_affine=None, out_tile=None):
    """
    Extract raster data window array.

    Parameters
    ----------
    in_raster : array or ReferencedRaster
    in_affine : ``Affine`` required if in_raster is an array
    out_tile : ``BufferedTile``

    Returns
    -------
    extracted array : array

    Raises
    ------
    ValueError : if the output window is not fully inside the input array
    """
    if isinstance(in_raster, ReferencedRaster):
        # a ReferencedRaster carries its own affine
        in_affine, in_raster = in_raster.affine, in_raster.data
    # determine pixel ranges covering the output tile
    minrow, maxrow, mincol, maxcol = bounds_to_ranges(
        out_bounds=out_tile.bounds, in_affine=in_affine, in_shape=in_raster.shape
    )
    # refuse to extract if the output window leaves the input array
    if (
        minrow < 0
        or mincol < 0
        or maxrow > in_raster.shape[-2]
        or maxcol > in_raster.shape[-1]
    ):
        raise ValueError("extraction fails if output shape is not within input")
    return in_raster[..., minrow:maxrow, mincol:maxcol]
"resource": ""
} |
def resample_from_array(
    in_raster=None,
    in_affine=None,
    out_tile=None,
    in_crs=None,
    resampling="nearest",
    nodataval=0
):
    """
    Extract and resample from array to target tile.

    Parameters
    ----------
    in_raster : array, masked array, ReferencedRaster or tuple of bands
    in_affine : ``Affine``
    out_tile : ``BufferedTile``
    in_crs : CRS of input raster; defaults to the output tile CRS
    resampling : string
        one of rasterio's resampling methods (default: nearest)
    nodataval : integer or float
        raster nodata value (default: 0)

    Returns
    -------
    resampled array : masked array

    Raises
    ------
    TypeError : on unsupported input type or dimensionality
    """
    # TODO rename function
    if isinstance(in_raster, ma.MaskedArray):
        # keep the existing mask; the original chained a no-op `pass` with a
        # separate `if isinstance(..., np.ndarray)` that re-wrapped masked
        # arrays (MaskedArray is an ndarray subclass) — use elif as intended
        pass
    elif isinstance(in_raster, np.ndarray):
        in_raster = ma.MaskedArray(in_raster, mask=in_raster == nodataval)
    elif isinstance(in_raster, ReferencedRaster):
        in_affine = in_raster.affine
        in_crs = in_raster.crs
        in_raster = in_raster.data
    elif isinstance(in_raster, tuple):
        # stack single bands into one masked array
        in_raster = ma.MaskedArray(
            data=np.stack(in_raster),
            mask=np.stack([
                band.mask
                if isinstance(band, ma.masked_array)
                else np.where(band == nodataval, True, False)
                for band in in_raster
            ]),
            fill_value=nodataval
        )
    else:
        raise TypeError("wrong input data type: %s" % type(in_raster))
    # normalize to a 3D (bands, rows, cols) array
    if in_raster.ndim == 2:
        in_raster = ma.expand_dims(in_raster, axis=0)
    elif in_raster.ndim == 3:
        pass
    else:
        raise TypeError("input array must have 2 or 3 dimensions")
    if in_raster.fill_value != nodataval:
        ma.set_fill_value(in_raster, nodataval)
    out_shape = (in_raster.shape[0], ) + out_tile.shape
    dst_data = np.empty(out_shape, in_raster.dtype)
    # fill masked pixels with nodata before warping
    in_raster = ma.masked_array(
        data=in_raster.filled(), mask=in_raster.mask, fill_value=nodataval
    )
    reproject(
        in_raster,
        dst_data,
        src_transform=in_affine,
        src_crs=in_crs if in_crs else out_tile.crs,
        dst_transform=out_tile.affine,
        dst_crs=out_tile.crs,
        resampling=Resampling[resampling]
    )
    # re-derive the mask from the nodata value after warping
    return ma.MaskedArray(dst_data, mask=dst_data == nodataval)
"resource": ""
} |
def bounds_to_ranges(out_bounds=None, in_affine=None, in_shape=None):
    """
    Return bounds range values from geolocated input.

    Parameters
    ----------
    out_bounds : tuple
        left, bottom, right, top
    in_affine : Affine
        input geolocation
    in_shape : tuple
        input shape

    Returns
    -------
    minrow, maxrow, mincol, maxcol
    """
    # compute the pixel window for the bounds and round it to whole pixels
    window = from_bounds(
        *out_bounds, transform=in_affine, height=in_shape[-2], width=in_shape[-1]
    ).round_lengths(pixel_precision=0).round_offsets(pixel_precision=0)
    # flatten ((minrow, maxrow), (mincol, maxcol)) into four values
    return itertools.chain(*window.toranges())
"resource": ""
} |
def tiles_to_affine_shape(tiles):
    """
    Return Affine and shape of combined tiles.

    Parameters
    ----------
    tiles : iterable
        an iterable containing BufferedTiles

    Returns
    -------
    Affine, Shape

    Raises
    ------
    TypeError : if no tiles are provided
    """
    if not tiles:
        raise TypeError("no tiles provided")
    # all tiles of one zoom level share the same pixel size
    pixel_size = tiles[0].pixel_x_size
    left = min(t.left for t in tiles)
    bottom = min(t.bottom for t in tiles)
    right = max(t.right for t in tiles)
    top = max(t.top for t in tiles)
    affine = Affine(pixel_size, 0, left, 0, -pixel_size, top)
    shape = Shape(
        width=int(round((right - left) / pixel_size, 0)),
        height=int(round((top - bottom) / pixel_size, 0)),
    )
    return affine, shape
"resource": ""
} |
q257430 | _shift_required | validation | def _shift_required(tiles):
"""Determine if distance over antimeridian is shorter than normal distance."""
if tiles[0][0].tile_pyramid.is_global:
# get set of tile columns
tile_cols = sorted(list(set([t[0].col for t in tiles])))
# if tile columns are an unbroken sequence, tiles are connected and are not
# passing the Antimeridian
if tile_cols == list(range(min(tile_cols), max(tile_cols) + 1)):
return False
else:
# look at column gaps and try to determine the smallest distance
def gen_groups(items):
"""Groups tile columns by sequence."""
j = items[0]
group = [j]
for i in items[1:]:
# item is next in expected sequence
if i == j + 1:
group.append(i)
# gap occured, so yield existing group and create new one
else:
yield group
group = [i]
j = i
yield group
groups = list(gen_groups(tile_cols))
# in case there is only one group, don't shift
if len(groups) == 1:
return False
# distance between first column of first group and last column of last group
normal_distance = groups[-1][-1] - groups[0][0]
# distance between last column of first group and last column of first group
# but crossing the antimeridian
antimeridian_distance = (
groups[0][-1] + tiles[0][0].tile_pyramid.matrix_width(tiles[0][0].zoom)
) - groups[-1][0]
# return whether distance over antimeridian is shorter
return antimeridian_distance < normal_distance
else:
return False | python | {
"resource": ""
} |
def memory_file(data=None, profile=None):
    """
    Return a rasterio.io.MemoryFile instance from input.

    Parameters
    ----------
    data : array
        array to be written, shaped (bands, rows, cols)
    profile : dict
        rasterio profile for MemoryFile; updated in place with the array's
        width and height
    """
    memfile = MemoryFile()
    # height is the row count (shape[-2]) and width the column count
    # (shape[-1]); the original had these swapped, which only worked for
    # square tiles
    profile.update(width=data.shape[-1], height=data.shape[-2])
    with memfile.open(**profile) as dataset:
        dataset.write(data)
    return memfile
"resource": ""
} |
def prepare_array(data, masked=True, nodata=0, dtype="int16"):
    """
    Turn input data into a proper array for further usage.

    Output array is always 3-dimensional with the given data type. If the
    output is masked, the fill_value corresponds to the given nodata value
    and the nodata value will be burned into the data array.

    Parameters
    ----------
    data : array or iterable
        array (masked or normal) or iterable containing arrays
    nodata : integer or float
        nodata value (default: 0) used if input is not a masked array and
        for output array
    masked : bool
        return a NumPy Array or a NumPy MaskedArray (default: True)
    dtype : string
        data type of output array (default: "int16")

    Returns
    -------
    array : array

    Raises
    ------
    ValueError : on unsupported input type
    """
    # iterables are handled by a dedicated helper
    if isinstance(data, (list, tuple)):
        return _prepare_iterable(data, masked, nodata, dtype)
    # promote a single 2D band to a 3D stack
    if isinstance(data, np.ndarray) and data.ndim == 2:
        data = ma.expand_dims(data, axis=0)
    if isinstance(data, ma.MaskedArray):
        return _prepare_masked(data, masked, nodata, dtype)
    if isinstance(data, np.ndarray):
        cast = data.astype(dtype, copy=False)
        return ma.masked_values(cast, nodata, copy=False) if masked else cast
    raise ValueError(
        "data must be array, masked array or iterable containing arrays."
    )
"resource": ""
} |
def reproject_geometry(
    geometry, src_crs=None, dst_crs=None, error_on_clip=False, validity_check=True,
    antimeridian_cutting=False
):
    """
    Reproject a geometry to target CRS.

    Also, clips geometry if it lies outside the destination CRS boundary.
    Supported destination CRSes for clipping: 4326 (WGS84), 3857 (Spherical
    Mercator) and 3035 (ETRS89 / ETRS-LAEA).

    Parameters
    ----------
    geometry : ``shapely.geometry``
    src_crs : ``rasterio.crs.CRS`` or EPSG code
        CRS of source data
    dst_crs : ``rasterio.crs.CRS`` or EPSG code
        target CRS
    error_on_clip : bool
        raises a ``RuntimeError`` if a geometry is outside of CRS bounds
        (default: False)
    validity_check : bool
        checks if reprojected geometry is valid and throws ``TopologicalError``
        if invalid (default: True)
    antimeridian_cutting : bool
        cut geometry at Antimeridian; can result in a multipart output geometry

    Returns
    -------
    geometry : ``shapely.geometry``
    """
    src_crs = _validated_crs(src_crs)
    dst_crs = _validated_crs(dst_crs)
    def _reproject_geom(geometry, src_crs, dst_crs):
        # transform via GeoJSON mapping, then rebuild a shapely geometry;
        # optionally repair the result
        if geometry.is_empty:
            return geometry
        else:
            out_geom = to_shape(
                transform_geom(
                    src_crs.to_dict(),
                    dst_crs.to_dict(),
                    mapping(geometry),
                    antimeridian_cutting=antimeridian_cutting
                )
            )
            return _repair(out_geom) if validity_check else out_geom
    # return repaired geometry if no reprojection needed
    if src_crs == dst_crs or geometry.is_empty:
        return _repair(geometry)
    # geometry needs to be clipped to its CRS bounds
    elif (
        dst_crs.is_epsg_code and  # just in case for an CRS with EPSG code
        dst_crs.get("init") in CRS_BOUNDS and  # if CRS has defined bounds
        dst_crs.get("init") != "epsg:4326"  # and is not WGS84 (does not need clipping)
    ):
        # NOTE(review): classmethod called on an instance — works, but
        # CRS.from_epsg(4326) would be the conventional spelling
        wgs84_crs = CRS().from_epsg(4326)
        # get dst_crs boundaries
        crs_bbox = box(*CRS_BOUNDS[dst_crs.get("init")])
        # reproject geometry to WGS84
        geometry_4326 = _reproject_geom(geometry, src_crs, wgs84_crs)
        # raise error if geometry has to be clipped
        if error_on_clip and not geometry_4326.within(crs_bbox):
            raise RuntimeError("geometry outside target CRS bounds")
        # clip geometry dst_crs boundaries and return
        return _reproject_geom(crs_bbox.intersection(geometry_4326), wgs84_crs, dst_crs)
    # return without clipping if destination CRS does not have defined bounds
    else:
        return _reproject_geom(geometry, src_crs, dst_crs)
"resource": ""
} |
def segmentize_geometry(geometry, segmentize_value):
    """
    Segmentize Polygon outer ring by segmentize value.

    Only the Polygon geometry type is supported; interior rings are not kept.

    Parameters
    ----------
    geometry : ``shapely.geometry``
    segmentize_value: float

    Returns
    -------
    geometry : ``shapely.geometry``

    Raises
    ------
    TypeError : if geometry is not a Polygon
    """
    if geometry.geom_type != "Polygon":
        raise TypeError("segmentize geometry type must be Polygon")
    exterior_coords = geometry.exterior.coords
    densified_points = []
    # walk along each edge of the exterior ring
    for segment_start, segment_end in zip(exterior_coords[:-1], exterior_coords[1:]):
        segment = LineString([segment_start, segment_end])
        # drop intermediate points every segmentize_value along the edge
        # (step 0 reproduces the edge's start point)
        steps = int(segment.length / segmentize_value)
        for step in range(steps):
            densified_points.append(
                segment.interpolate(segmentize_value * step).coords[0]
            )
        # always keep the edge's end point
        densified_points.append(segment.coords[1])
    return Polygon(LinearRing(densified_points))
"resource": ""
} |
def read_vector_window(input_files, tile, validity_check=True):
    """
    Read a window of one or more input vector datasets.

    Also clips geometry.

    Parameters
    ----------
    input_files : string or list of strings
        path(s) to vector file(s)
    tile : ``Tile``
        tile extent to read data from
    validity_check : bool
        checks if reprojected geometry is valid and throws ``RuntimeError`` if
        invalid (default: True)

    Returns
    -------
    features : list
        a list of reprojected GeoJSON-like features
    """
    # accept a single path as well as a list of paths
    if not isinstance(input_files, list):
        input_files = [input_files]
    # concatenate features read from all input files; list() replaces the
    # redundant outer list comprehension of the previous implementation
    return list(
        chain.from_iterable(
            _read_vector_window(path, tile, validity_check=validity_check)
            for path in input_files
        )
    )
"resource": ""
} |
def write_vector_window(
    in_data=None, out_schema=None, out_tile=None, out_path=None, bucket_resource=None
):
    """
    Write features to GeoJSON file.

    Parameters
    ----------
    in_data : features
        iterable of GeoJSON-like feature mappings
    out_schema : dictionary
        output schema for fiona
    out_tile : ``BufferedTile``
        tile used for output extent
    out_path : string
        output path for GeoJSON file (local or ``s3://``)
    bucket_resource : boto3 bucket resource, optional
        used to upload the output when out_path points to S3
    """
    # Delete existing file.
    try:
        os.remove(out_path)
    except OSError:
        # file did not exist (or is remote) -- proceed anyway
        pass

    out_features = []
    for feature in in_data:
        try:
            # clip feature geometry to tile bounding box and append for writing
            # if the clipped feature still matches the target geometry type
            for out_geom in multipart_to_singleparts(
                clean_geometry_type(
                    to_shape(feature["geometry"]).intersection(out_tile.bbox),
                    out_schema["geometry"]
                )
            ):
                out_features.append({
                    "geometry": mapping(out_geom),
                    "properties": feature["properties"]
                })
        except Exception as e:
            # best effort: skip single broken features instead of failing the tile
            logger.warning("failed to prepare geometry for writing: %s", e)
            continue

    # write if there are output features
    if out_features:
        try:
            if out_path.startswith("s3://"):
                # write data to remote file
                with VectorWindowMemoryFile(
                    tile=out_tile,
                    features=out_features,
                    schema=out_schema,
                    driver="GeoJSON"
                ) as memfile:
                    logger.debug((out_tile.id, "upload tile", out_path))
                    bucket_resource.put_object(
                        Key="/".join(out_path.split("/")[3:]),
                        Body=memfile
                    )
            else:
                # write data to local file
                with fiona.open(
                    out_path, 'w', schema=out_schema, driver="GeoJSON",
                    crs=out_tile.crs.to_dict()
                ) as dst:
                    logger.debug((out_tile.id, "write tile", out_path))
                    dst.writerecords(out_features)
        except Exception as e:
            logger.error("error while writing file %s: %s", out_path, e)
            raise
    else:
        logger.debug((out_tile.id, "nothing to write", out_path))
"resource": ""
} |
def clean_geometry_type(geometry, target_type, allow_multipart=True):
    """
    Return geometry of a specific type if possible.

    Filters and splits up GeometryCollection into target types. This is
    necessary when after clipping and/or reprojecting the geometry types from
    source geometries change (i.e. a Polygon becomes a LineString or a
    LineString becomes Point) in some edge cases.

    Parameters
    ----------
    geometry : ``shapely.geometry``
    target_type : string
        target geometry type
    allow_multipart : bool
        allow multipart geometries (default: True)

    Returns
    -------
    cleaned geometry : ``shapely.geometry``

    Raises
    ------
    TypeError : if target_type is not supported
    GeometryTypeError : if geometry type does not match target_type
        (the previous docstring claimed None was returned here, which the
        code never did)
    """
    multipart_geoms = {
        "Point": MultiPoint,
        "LineString": MultiLineString,
        "Polygon": MultiPolygon,
        "MultiPoint": MultiPoint,
        "MultiLineString": MultiLineString,
        "MultiPolygon": MultiPolygon
    }
    # membership test directly on the dict instead of .keys()
    if target_type not in multipart_geoms:
        raise TypeError("target type is not supported: %s" % target_type)
    if geometry.geom_type == target_type:
        return geometry
    elif allow_multipart:
        target_multipart_type = multipart_geoms[target_type]
        if geometry.geom_type == "GeometryCollection":
            # recursively filter collection members into the target type
            return target_multipart_type([
                clean_geometry_type(g, target_type, allow_multipart)
                for g in geometry])
        elif any([
            isinstance(geometry, target_multipart_type),
            multipart_geoms[geometry.geom_type] == target_multipart_type
        ]):
            return geometry
    raise GeometryTypeError(
        "geometry type does not match: %s, %s" % (geometry.geom_type, target_type)
    )
"resource": ""
} |
def multipart_to_singleparts(geom):
    """
    Yield single part geometries if geom is multipart, otherwise yield geom.

    Parameters
    ----------
    geom : shapely geometry

    Yields
    ------
    shapely single part geometries
    """
    if isinstance(geom, base.BaseGeometry):
        if hasattr(geom, "geoms"):
            # iterate over .geoms explicitly: direct iteration over multipart
            # geometries is deprecated in Shapely 1.8 and removed in 2.0;
            # .geoms works on both version lines
            for subgeom in geom.geoms:
                yield subgeom
        else:
            yield geom
"resource": ""
} |
def execute(
    mp,
    td_resampling="nearest",
    td_matching_method="gdal",
    td_matching_max_zoom=None,
    td_matching_precision=8,
    td_fallback_to_higher_zoom=False,
    clip_pixelbuffer=0,
    **kwargs
):
    """
    Convert and optionally clip input raster data.

    Inputs:
    -------
    raster
        singleband or multiband data input
    clip (optional)
        vector data used to clip output

    Parameters
    ----------
    td_resampling : str (default: 'nearest')
        Resampling used when reading from TileDirectory.
    td_matching_method : str ('gdal' or 'min') (default: 'gdal')
        gdal: Uses GDAL's standard method. Here, the target resolution is
            calculated by averaging the extent's pixel sizes over both x and y
            axes. This approach returns a zoom level which may not have the
            best quality but will speed up reading significantly.
        min: Returns the zoom level which matches the minimum resolution of the
            extents four corner pixels. This approach returns the zoom level
            with the best possible quality but with low performance. If the
            tile extent is outside of the destination pyramid, a
            TopologicalError will be raised.
    td_matching_max_zoom : int (optional, default: None)
        If set, it will prevent reading from zoom levels above the maximum.
    td_matching_precision : int (default: 8)
        Round resolutions to n digits before comparing.
    td_fallback_to_higher_zoom : bool (default: False)
        In case no data is found at zoom level, try to read data from higher
        zoom levels. Enabling this setting can lead to many IO requests in
        areas with no data.
    clip_pixelbuffer : int
        Use pixelbuffer when clipping output by geometry. (default: 0)

    Output
    ------
    np.ndarray
        or the string "empty" when there is nothing to write for this tile
    """
    # read clip geometry
    if "clip" in mp.params["input"]:
        clip_geom = mp.open("clip").read()
        if not clip_geom:
            logger.debug("no clip data over tile")
            return "empty"
    else:
        clip_geom = []

    with mp.open(
        "raster",
        matching_method=td_matching_method,
        matching_max_zoom=td_matching_max_zoom,
        matching_precision=td_matching_precision,
        fallback_to_higher_zoom=td_fallback_to_higher_zoom,
        resampling=td_resampling
    ) as raster:
        raster_data = raster.read()
        # bail out early if the source window contains no valid pixels
        if raster.is_empty() or raster_data[0].mask.all():
            logger.debug("raster empty")
            return "empty"

    if clip_geom:
        # apply original nodata mask and clip
        clipped = mp.clip(
            np.where(raster_data[0].mask, mp.params["output"].nodata, raster_data),
            clip_geom,
            clip_buffer=clip_pixelbuffer,
            inverted=True
        )
        # NOTE(review): with inverted=True the clip mask appears to flag the
        # pixels to keep, so masked values pass through and everything else
        # becomes nodata -- confirm against mp.clip() semantics
        return np.where(clipped.mask, clipped, mp.params["output"].nodata)
    else:
        # no clip input: just replace masked pixels with the output nodata value
        return np.where(raster_data[0].mask, mp.params["output"].nodata, raster_data)
"resource": ""
} |
def get_best_zoom_level(input_file, tile_pyramid_type):
    """
    Determine the best base zoom level for a raster.

    "Best" means the maximum zoom level where no oversampling has to be done.

    Parameters
    ----------
    input_file : path to raster file
    tile_pyramid_type : ``TilePyramid`` projection (``geodetic`` or``mercator``)

    Returns
    -------
    zoom : integer
    """
    tile_pyramid = BufferedTilePyramid(tile_pyramid_type)
    with rasterio.open(input_file, "r") as src:
        # reproject the (segmentized) raster footprint into the pyramid CRS
        # and take its bounding box
        xmin, ymin, xmax, ymax = reproject_geometry(
            segmentize_geometry(
                box(
                    src.bounds.left, src.bounds.bottom, src.bounds.right,
                    src.bounds.top
                ),
                get_segmentize_value(input_file, tile_pyramid)
            ),
            src_crs=src.crs, dst_crs=tile_pyramid.crs
        ).bounds
        x_dif = xmax - xmin
        y_dif = ymax - ymin
        size = float(src.width + src.height)
        # average resolution, weighted by the pixel counts of each axis
        avg_resolution = (
            (x_dif / float(src.width)) * (float(src.width) / size) +
            (y_dif / float(src.height)) * (float(src.height) / size)
        )
        # find the first zoom whose pixel size is at most the raster resolution;
        # the level above it is the deepest level without oversampling
        for zoom in range(0, 40):
            if tile_pyramid.pixel_x_size(zoom) <= avg_resolution:
                return zoom-1
        # NOTE(review): implicitly returns None if no zoom in 0..39 matches,
        # and can return -1 when zoom 0 already matches -- confirm callers
        # handle these edge cases
"resource": ""
} |
def tile_to_zoom_level(tile, dst_pyramid=None, matching_method="gdal", precision=8):
    """
    Determine the best zoom level in target TilePyramid from given Tile.

    Parameters
    ----------
    tile : BufferedTile
    dst_pyramid : BufferedTilePyramid
    matching_method : str ('gdal' or 'min')
        gdal: Uses GDAL's standard method. Here, the target resolution is calculated by
            averaging the extent's pixel sizes over both x and y axes. This approach
            returns a zoom level which may not have the best quality but will speed up
            reading significantly.
        min: Returns the zoom level which matches the minimum resolution of the extent's
            four corner pixels. This approach returns the zoom level with the best
            possible quality but with low performance. If the tile extent is outside of
            the destination pyramid, a TopologicalError will be raised.
    precision : int
        Round resolutions to n digits before comparing.

    Returns
    -------
    zoom : int

    Raises
    ------
    TopologicalError : if the tile lies outside the destination pyramid
    ValueError : if matching_method is neither 'gdal' nor 'min'
    """
    def width_height(bounds):
        # width/height of bounds (in destination CRS units) after reprojection
        try:
            l, b, r, t = reproject_geometry(
                box(*bounds), src_crs=tile.crs, dst_crs=dst_pyramid.crs
            ).bounds
        except ValueError:
            raise TopologicalError("bounds cannot be translated into target CRS")
        return r - l, t - b

    if tile.tp.crs == dst_pyramid.crs:
        # same CRS: zoom levels map one to one
        return tile.zoom
    else:
        if matching_method == "gdal":
            # use rasterio/GDAL method to calculate default warp target properties
            transform, width, height = calculate_default_transform(
                tile.tp.crs,
                dst_pyramid.crs,
                tile.width,
                tile.height,
                *tile.bounds
            )
            # this is the resolution the tile would have in destination TilePyramid CRS
            tile_resolution = round(transform[0], precision)
        elif matching_method == "min":
            # calculate the minimum pixel size from the four tile corner pixels
            l, b, r, t = tile.bounds
            x = tile.pixel_x_size
            y = tile.pixel_y_size
            res = []
            for bounds in [
                (l, t - y, l + x, t),  # left top
                (l, b, l + x, b + y),  # left bottom
                (r - x, b, r, b + y),  # right bottom
                (r - x, t - y, r, t)  # right top
            ]:
                try:
                    w, h = width_height(bounds)
                    res.extend([w, h])
                except TopologicalError:
                    logger.debug("pixel outside of destination pyramid")
            if res:
                tile_resolution = round(min(res), precision)
            else:
                raise TopologicalError("tile outside of destination pyramid")
        else:
            raise ValueError("invalid method given: %s", matching_method)
        logger.debug(
            "we are looking for a zoom level interpolating to %s resolution",
            tile_resolution
        )
        # walk down the pyramid until its pixel size is <= the tile's resolution
        zoom = 0
        while True:
            td_resolution = round(dst_pyramid.pixel_x_size(zoom), precision)
            if td_resolution <= tile_resolution:
                break
            zoom += 1
        logger.debug("target zoom for %s: %s (%s)", tile_resolution, zoom, td_resolution)
        return zoom
"resource": ""
} |
def path_is_remote(path, s3=True):
    """
    Determine whether file path is remote or local.

    Parameters
    ----------
    path : str
        path to file
    s3 : bool
        also treat S3 paths (``s3://``, ``/vsis3/``) as remote (default: True)

    Returns
    -------
    is_remote : bool
    """
    remote_prefixes = ["http://", "https://", "/vsicurl/"]
    if s3:
        remote_prefixes.extend(["s3://", "/vsis3/"])
    # str.startswith accepts a tuple of alternatives
    return path.startswith(tuple(remote_prefixes))
"resource": ""
} |
def path_exists(path):
    """
    Check if file exists either remote or local.

    Parameters
    ----------
    path : str
        local path, ``http(s)://`` URL or ``s3://`` path

    Returns
    -------
    exists : bool

    Raises
    ------
    HTTPError : for HTTP errors other than 404
    """
    if path.startswith(("http://", "https://")):
        try:
            urlopen(path).info()
            return True
        except HTTPError as e:
            if e.code == 404:
                return False
            else:
                raise
    elif path.startswith("s3://"):
        bucket = get_boto3_bucket(path.split("/")[2])
        key = "/".join(path.split("/")[3:])
        # inspect every object sharing the prefix; only an exact key match
        # counts as existence
        for obj in bucket.objects.filter(Prefix=key):
            if obj.key == key:
                return True
        # unambiguous post-loop return: False for no exact match, including
        # an empty listing (the previous for/else construct could fall
        # through without returning)
        return False
    else:
        logger.debug("%s exists: %s", path, os.path.exists(path))
        return os.path.exists(path)
"resource": ""
} |
def absolute_path(path=None, base_dir=None):
    """
    Return absolute path if path is local.

    Parameters
    ----------
    path : str
        path to file
    base_dir : str
        base directory used for absolute path

    Returns
    -------
    absolute path

    Raises
    ------
    TypeError : if path is relative but base_dir is missing or not absolute
    """
    # remote paths are returned untouched
    if path_is_remote(path):
        return path
    # already absolute: nothing to do
    if os.path.isabs(path):
        return path
    # resolving a relative path requires an absolute base directory
    if base_dir is None or not os.path.isabs(base_dir):
        raise TypeError("base_dir must be an absolute path.")
    return os.path.abspath(os.path.join(base_dir, path))
"resource": ""
} |
def relative_path(path=None, base_dir=None):
    """
    Return relative path if path is local.

    Parameters
    ----------
    path : str
        path to file
    base_dir : str
        directory where path should be relative to

    Returns
    -------
    relative path
    """
    # remote and already-relative paths pass through unchanged
    keep_unchanged = path_is_remote(path) or not os.path.isabs(path)
    if keep_unchanged:
        return path
    return os.path.relpath(path, base_dir)
"resource": ""
} |
def write_json(path, params):
    """Write params as pretty-printed JSON, either locally or to S3.

    Parameters
    ----------
    path : str
        local path or ``s3://`` target
    params : dict
        JSON-serializable mapping to write
    """
    logger.debug("write %s to %s", params, path)
    if path.startswith("s3://"):
        bucket = get_boto3_bucket(path.split("/")[2])
        key = "/".join(path.split("/")[3:])
        logger.debug("upload %s", key)
        bucket.put_object(
            Key=key,
            Body=json.dumps(params, sort_keys=True, indent=4)
        )
    else:
        # make sure the target directory exists before writing
        makedirs(os.path.dirname(path))
        with open(path, 'w') as dst:
            json.dump(params, dst, sort_keys=True, indent=4)
"resource": ""
} |
def read_json(path):
    """Read and parse a JSON file from a local, HTTP(S) or S3 path.

    Parameters
    ----------
    path : str
        local path, ``http(s)://`` URL or ``s3://`` path

    Returns
    -------
    parsed JSON content

    Raises
    ------
    FileNotFoundError : if path cannot be found
    """
    if path.startswith(("http://", "https://")):
        try:
            return json.loads(urlopen(path).read().decode())
        except HTTPError:
            # fix: the message is now actually formatted; the old code passed
            # the template and argument as two separate exception args
            raise FileNotFoundError("%s not found" % path)
    elif path.startswith("s3://"):
        bucket = get_boto3_bucket(path.split("/")[2])
        key = "/".join(path.split("/")[3:])
        for obj in bucket.objects.filter(Prefix=key):
            if obj.key == key:
                return json.loads(obj.get()['Body'].read().decode())
        raise FileNotFoundError("%s not found" % path)
    else:
        try:
            with open(path, "r") as src:
                return json.loads(src.read())
        except OSError:
            # narrowed from a bare "except:", which also swallowed
            # KeyboardInterrupt and masked JSON syntax errors as "not found"
            raise FileNotFoundError("%s not found" % path)
"resource": ""
} |
def hook(self, event_type='push'):
    """
    Registers a function as a hook. Multiple hooks can be registered for a given type, but the
    order in which they are invoke is unspecified.

    :param event_type: The event type this hook will be invoked for.
    """
    def register(func):
        # append to the list of hooks registered for this event type
        hooks_for_type = self._hooks[event_type]
        hooks_for_type.append(func)
        return func
    return register
q257449 | Webhook._get_digest | validation | def _get_digest(self):
"""Return message digest if a secret key was provided"""
return hmac.new(
self._secret, request.data, hashlib.sha1).hexdigest() if self._secret else None | python | {
"resource": ""
} |
q257450 | Webhook._postreceive | validation | def _postreceive(self):
"""Callback from Flask"""
digest = self._get_digest()
if digest is not None:
sig_parts = _get_header('X-Hub-Signature').split('=', 1)
if not isinstance(digest, six.text_type):
digest = six.text_type(digest)
if (len(sig_parts) < 2 or sig_parts[0] != 'sha1'
or not hmac.compare_digest(sig_parts[1], digest)):
abort(400, 'Invalid signature')
event_type = _get_header('X-Github-Event')
data = request.get_json()
if data is None:
abort(400, 'Request body must contain json')
self._logger.info(
'%s (%s)', _format_event(event_type, data), _get_header('X-Github-Delivery'))
for hook in self._hooks.get(event_type, []):
hook(data)
return '', 204 | python | {
"resource": ""
} |
q257451 | long_description | validation | def long_description():
"""Generate .rst document for PyPi."""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--doc', dest="doc",
action="store_true", default=False)
args, sys.argv = parser.parse_known_args(sys.argv)
if args.doc:
import doc2md, pypandoc
md = doc2md.doc2md(doc2md.__doc__, "doc2md", toc=False)
long_description = pypandoc.convert(md, 'rst', format='md')
else:
return None | python | {
"resource": ""
} |
q257452 | unindent | validation | def unindent(lines):
"""
Remove common indentation from string.
Unlike doctrim there is no special treatment of the first line.
"""
try:
# Determine minimum indentation:
indent = min(len(line) - len(line.lstrip())
for line in lines if line)
except ValueError:
return lines
else:
return [line[indent:] for line in lines] | python | {
"resource": ""
} |
q257453 | find_sections | validation | def find_sections(lines):
"""
Find all section names and return a list with their names.
"""
sections = []
for line in lines:
if is_heading(line):
sections.append(get_heading(line))
return sections | python | {
"resource": ""
} |
q257454 | make_toc | validation | def make_toc(sections, maxdepth=0):
"""
Generate table of contents for array of section names.
"""
if not sections:
return []
outer = min(n for n,t in sections)
refs = []
for ind,sec in sections:
if maxdepth and ind-outer+1 > maxdepth:
continue
ref = sec.lower()
ref = ref.replace('`', '')
ref = ref.replace(' ', '-')
ref = ref.replace('?', '')
refs.append(" "*(ind-outer) + "- [%s](#%s)" % (sec, ref))
return refs | python | {
"resource": ""
} |
q257455 | doc2md | validation | def doc2md(docstr, title, min_level=1, more_info=False, toc=True, maxdepth=0):
"""
Convert a docstring to a markdown text.
"""
text = doctrim(docstr)
lines = text.split('\n')
sections = find_sections(lines)
if sections:
level = min(n for n,t in sections) - 1
else:
level = 1
shiftlevel = 0
if level < min_level:
shiftlevel = min_level - level
level = min_level
sections = [(lev+shiftlevel, tit) for lev,tit in sections]
head = next((i for i, l in enumerate(lines) if is_heading(l)), 0)
md = [
make_heading(level, title),
"",
] + lines[:head]
if toc:
md += make_toc(sections, maxdepth)
md += ['']
md += _doc2md(lines[head:], shiftlevel)
if more_info:
return (md, sections)
else:
return "\n".join(md) | python | {
"resource": ""
} |
q257456 | mod2md | validation | def mod2md(module, title, title_api_section, toc=True, maxdepth=0):
"""
Generate markdown document from module, including API section.
"""
docstr = module.__doc__
text = doctrim(docstr)
lines = text.split('\n')
sections = find_sections(lines)
if sections:
level = min(n for n,t in sections) - 1
else:
level = 1
api_md = []
api_sec = []
if title_api_section and module.__all__:
sections.append((level+1, title_api_section))
for name in module.__all__:
api_sec.append((level+2, "`" + name + "`"))
api_md += ['', '']
entry = module.__dict__[name]
if entry.__doc__:
md, sec = doc2md(entry.__doc__, "`" + name + "`",
min_level=level+2, more_info=True, toc=False)
api_sec += sec
api_md += md
sections += api_sec
# headline
head = next((i for i, l in enumerate(lines) if is_heading(l)), 0)
md = [
make_heading(level, title),
"",
] + lines[:head]
# main sections
if toc:
md += make_toc(sections, maxdepth)
md += ['']
md += _doc2md(lines[head:])
# API section
md += [
'',
'',
make_heading(level+1, title_api_section),
]
if toc:
md += ['']
md += make_toc(api_sec, 1)
md += api_md
return "\n".join(md) | python | {
"resource": ""
} |
q257457 | ProfileBlockAnalyzer.largest_finite_distance | validation | def largest_finite_distance(self):
"""
Compute the maximum temporal distance.
Returns
-------
max_temporal_distance : float
"""
block_start_distances = [block.distance_start for block in self._profile_blocks if
block.distance_start < float('inf')]
block_end_distances = [block.distance_end for block in self._profile_blocks if
block.distance_end < float('inf')]
distances = block_start_distances + block_end_distances
if len(distances) > 0:
return max(distances)
else:
return None | python | {
"resource": ""
} |
q257458 | ProfileBlockAnalyzer._temporal_distance_cdf | validation | def _temporal_distance_cdf(self):
"""
Temporal distance cumulative density function.
Returns
-------
x_values: numpy.array
values for the x-axis
cdf: numpy.array
cdf values
"""
distance_split_points = set()
for block in self._profile_blocks:
if block.distance_start != float('inf'):
distance_split_points.add(block.distance_end)
distance_split_points.add(block.distance_start)
distance_split_points_ordered = numpy.array(sorted(list(distance_split_points)))
temporal_distance_split_widths = distance_split_points_ordered[1:] - distance_split_points_ordered[:-1]
trip_counts = numpy.zeros(len(temporal_distance_split_widths))
delta_peaks = defaultdict(lambda: 0)
for block in self._profile_blocks:
if block.distance_start == block.distance_end:
delta_peaks[block.distance_end] += block.width()
else:
start_index = numpy.searchsorted(distance_split_points_ordered, block.distance_end)
end_index = numpy.searchsorted(distance_split_points_ordered, block.distance_start)
trip_counts[start_index:end_index] += 1
unnormalized_cdf = numpy.array([0] + list(numpy.cumsum(temporal_distance_split_widths * trip_counts)))
if not (numpy.isclose(
[unnormalized_cdf[-1]],
[self._end_time - self._start_time - sum(delta_peaks.values())], atol=1E-4
).all()):
print(unnormalized_cdf[-1], self._end_time - self._start_time - sum(delta_peaks.values()))
raise RuntimeError("Something went wrong with cdf computation!")
if len(delta_peaks) > 0:
for peak in delta_peaks.keys():
if peak == float('inf'):
continue
index = numpy.nonzero(distance_split_points_ordered == peak)[0][0]
unnormalized_cdf = numpy.insert(unnormalized_cdf, index, unnormalized_cdf[index])
distance_split_points_ordered = numpy.insert(distance_split_points_ordered, index,
distance_split_points_ordered[index])
# walk_waiting_time_fraction = walk_total_time / (self.end_time_dep - self.start_time_dep)
unnormalized_cdf[(index + 1):] = unnormalized_cdf[(index + 1):] + delta_peaks[peak]
norm_cdf = unnormalized_cdf / (unnormalized_cdf[-1] + delta_peaks[float('inf')])
return distance_split_points_ordered, norm_cdf | python | {
"resource": ""
} |
q257459 | ProfileBlockAnalyzer._temporal_distance_pdf | validation | def _temporal_distance_pdf(self):
"""
Temporal distance probability density function.
Returns
-------
non_delta_peak_split_points: numpy.array
non_delta_peak_densities: numpy.array
len(density) == len(temporal_distance_split_points_ordered) -1
delta_peak_loc_to_probability_mass : dict
"""
temporal_distance_split_points_ordered, norm_cdf = self._temporal_distance_cdf()
delta_peak_loc_to_probability_mass = {}
non_delta_peak_split_points = [temporal_distance_split_points_ordered[0]]
non_delta_peak_densities = []
for i in range(0, len(temporal_distance_split_points_ordered) - 1):
left = temporal_distance_split_points_ordered[i]
right = temporal_distance_split_points_ordered[i + 1]
width = right - left
prob_mass = norm_cdf[i + 1] - norm_cdf[i]
if width == 0.0:
delta_peak_loc_to_probability_mass[left] = prob_mass
else:
non_delta_peak_split_points.append(right)
non_delta_peak_densities.append(prob_mass / float(width))
assert (len(non_delta_peak_densities) == len(non_delta_peak_split_points) - 1)
return numpy.array(non_delta_peak_split_points), \
numpy.array(non_delta_peak_densities), delta_peak_loc_to_probability_mass | python | {
"resource": ""
} |
q257460 | remove_all_trips_fully_outside_buffer | validation | def remove_all_trips_fully_outside_buffer(db_conn, center_lat, center_lon, buffer_km, update_secondary_data=True):
"""
Not used in the regular filter process for the time being.
Parameters
----------
db_conn: sqlite3.Connection
connection to the GTFS object
center_lat: float
center_lon: float
buffer_km: float
"""
distance_function_str = add_wgs84_distance_function_to_db(db_conn)
stops_within_buffer_query_sql = "SELECT stop_I FROM stops WHERE CAST(" + distance_function_str + \
"(lat, lon, {lat} , {lon}) AS INT) < {d_m}"\
.format(lat=float(center_lat), lon=float(center_lon), d_m=int(1000*buffer_km))
select_all_trip_Is_where_stop_I_is_within_buffer_sql = "SELECT distinct(trip_I) FROM stop_times WHERE stop_I IN (" + stops_within_buffer_query_sql + ")"
trip_Is_to_remove_sql = "SELECT trip_I FROM trips WHERE trip_I NOT IN ( " + select_all_trip_Is_where_stop_I_is_within_buffer_sql + ")"
trip_Is_to_remove = pandas.read_sql(trip_Is_to_remove_sql, db_conn)["trip_I"].values
trip_Is_to_remove_string = ",".join([str(trip_I) for trip_I in trip_Is_to_remove])
remove_all_trips_fully_outside_buffer_sql = "DELETE FROM trips WHERE trip_I IN (" + trip_Is_to_remove_string + ")"
remove_all_stop_times_where_trip_I_fully_outside_buffer_sql = "DELETE FROM stop_times WHERE trip_I IN (" + trip_Is_to_remove_string + ")"
db_conn.execute(remove_all_trips_fully_outside_buffer_sql)
db_conn.execute(remove_all_stop_times_where_trip_I_fully_outside_buffer_sql)
delete_stops_not_in_stop_times_and_not_as_parent_stop(db_conn)
db_conn.execute(DELETE_ROUTES_NOT_PRESENT_IN_TRIPS_SQL)
db_conn.execute(DELETE_SHAPES_NOT_REFERENCED_IN_TRIPS_SQL)
db_conn.execute(DELETE_DAYS_ENTRIES_NOT_PRESENT_IN_TRIPS_SQL)
db_conn.execute(DELETE_DAY_TRIPS2_ENTRIES_NOT_PRESENT_IN_TRIPS_SQL)
db_conn.execute(DELETE_CALENDAR_ENTRIES_FOR_NON_REFERENCE_SERVICE_IS_SQL)
db_conn.execute(DELETE_CALENDAR_DATES_ENTRIES_FOR_NON_REFERENCE_SERVICE_IS_SQL)
db_conn.execute(DELETE_FREQUENCIES_ENTRIES_NOT_PRESENT_IN_TRIPS)
db_conn.execute(DELETE_AGENCIES_NOT_REFERENCED_IN_ROUTES_SQL)
if update_secondary_data:
update_secondary_data_copies(db_conn) | python | {
"resource": ""
} |
q257461 | remove_dangling_shapes | validation | def remove_dangling_shapes(db_conn):
"""
Remove dangling entries from the shapes directory.
Parameters
----------
db_conn: sqlite3.Connection
connection to the GTFS object
"""
db_conn.execute(DELETE_SHAPES_NOT_REFERENCED_IN_TRIPS_SQL)
SELECT_MIN_MAX_SHAPE_BREAKS_BY_TRIP_I_SQL = \
"SELECT trips.trip_I, shape_id, min(shape_break) as min_shape_break, max(shape_break) as max_shape_break FROM trips, stop_times WHERE trips.trip_I=stop_times.trip_I GROUP BY trips.trip_I"
trip_min_max_shape_seqs= pandas.read_sql(SELECT_MIN_MAX_SHAPE_BREAKS_BY_TRIP_I_SQL, db_conn)
rows = []
for row in trip_min_max_shape_seqs.itertuples():
shape_id, min_shape_break, max_shape_break = row.shape_id, row.min_shape_break, row.max_shape_break
if min_shape_break is None or max_shape_break is None:
min_shape_break = float('-inf')
max_shape_break = float('-inf')
rows.append( (shape_id, min_shape_break, max_shape_break) )
DELETE_SQL_BASE = "DELETE FROM shapes WHERE shape_id=? AND (seq<? OR seq>?)"
db_conn.executemany(DELETE_SQL_BASE, rows)
remove_dangling_shapes_references(db_conn) | python | {
"resource": ""
} |
q257462 | compute_pseudo_connections | validation | def compute_pseudo_connections(transit_connections, start_time_dep,
end_time_dep, transfer_margin,
walk_network, walk_speed):
"""
Given a set of transit events and the static walk network,
"transform" the static walking network into a set of "pseudo-connections".
As a first approximation, we add pseudo-connections to depart after each arrival of a transit connection
to it's arrival stop.
Parameters
----------
transit_connections: list[Connection]
start_time_dep : int
start time in unixtime seconds
end_time_dep: int
end time in unixtime seconds (no new connections will be scanned after this time)
transfer_margin: int
required extra margin required for transfers in seconds
walk_speed: float
walking speed between stops in meters / second
walk_network: networkx.Graph
each edge should have the walking distance as a data attribute ("d_walk") expressed in meters
Returns
-------
pseudo_connections: set[Connection]
"""
# A pseudo-connection should be created after (each) arrival to a transit_connection's arrival stop.
pseudo_connection_set = set() # use a set to ignore possible duplicates
for c in transit_connections:
if start_time_dep <= c.departure_time <= end_time_dep:
walk_arr_stop = c.departure_stop
walk_arr_time = c.departure_time - transfer_margin
for _, walk_dep_stop, data in walk_network.edges(nbunch=[walk_arr_stop], data=True):
walk_dep_time = walk_arr_time - data['d_walk'] / float(walk_speed)
if walk_dep_time > end_time_dep or walk_dep_time < start_time_dep:
continue
pseudo_connection = Connection(walk_dep_stop,
walk_arr_stop,
walk_dep_time,
walk_arr_time,
Connection.WALK_TRIP_ID,
Connection.WALK_SEQ,
is_walk=True)
pseudo_connection_set.add(pseudo_connection)
return pseudo_connection_set | python | {
"resource": ""
} |
q257463 | SpreadingStop.get_min_visit_time | validation | def get_min_visit_time(self):
"""
Get the earliest visit time of the stop.
"""
if not self.visit_events:
return float('inf')
else:
return min(self.visit_events, key=lambda event: event.arr_time_ut).arr_time_ut | python | {
"resource": ""
} |
q257464 | SpreadingStop.can_infect | validation | def can_infect(self, event):
"""
Whether the spreading stop can infect using this event.
"""
if event.from_stop_I != self.stop_I:
return False
if not self.has_been_visited():
return False
else:
time_sep = event.dep_time_ut-self.get_min_visit_time()
# if the gap between the earliest visit_time and current time is
# smaller than the min. transfer time, the stop can pass the spreading
# forward
if (time_sep >= self.min_transfer_time) or (event.trip_I == -1 and time_sep >= 0):
return True
else:
for visit in self.visit_events:
# if no transfer, please hop-on
if (event.trip_I == visit.trip_I) and (time_sep >= 0):
return True
return False | python | {
"resource": ""
} |
q257465 | DayTripsMaterializer.make_views | validation | def make_views(cls, conn):
"""Create day_trips and day_stop_times views.
day_trips: day_trips2 x trips = days x trips
day_stop_times: day_trips2 x trips x stop_times = days x trips x stop_times
"""
conn.execute('DROP VIEW IF EXISTS main.day_trips')
conn.execute('CREATE VIEW day_trips AS '
'SELECT day_trips2.*, trips.* '
#'days.day_start_ut+trips.start_time_ds AS start_time_ut, '
#'days.day_start_ut+trips.end_time_ds AS end_time_ut '
'FROM day_trips2 JOIN trips USING (trip_I);')
conn.commit()
conn.execute('DROP VIEW IF EXISTS main.day_stop_times')
conn.execute('CREATE VIEW day_stop_times AS '
'SELECT day_trips2.*, trips.*, stop_times.*, '
#'days.day_start_ut+trips.start_time_ds AS start_time_ut, '
#'days.day_start_ut+trips.end_time_ds AS end_time_ut, '
'day_trips2.day_start_ut+stop_times.arr_time_ds AS arr_time_ut, '
'day_trips2.day_start_ut+stop_times.dep_time_ds AS dep_time_ut '
'FROM day_trips2 '
'JOIN trips USING (trip_I) '
'JOIN stop_times USING (trip_I)')
conn.commit() | python | {
"resource": ""
} |
def createcolorbar(cmap, norm):
    """Create a colorbar for the given colormap and norm on the current axes."""
    bar_axes, _kw = matplotlib.colorbar.make_axes(matplotlib.pyplot.gca())
    colorbar = matplotlib.colorbar.ColorbarBase(bar_axes, cmap=cmap, norm=norm)
    return colorbar
"resource": ""
} |
def write_temporal_networks_by_route_type(gtfs, extract_output_dir):
    """
    Write one temporal-network CSV file per transit route type to disk.

    Parameters
    ----------
    gtfs: gtfspy.GTFS
    extract_output_dir: str
        directory where the per-mode ".tnet" files are written
    """
    util.makedirs(extract_output_dir)
    for mode in route_types.TRANSIT_ROUTE_TYPES:
        events_frame = temporal_network(gtfs, start_time_ut=None, end_time_ut=None, route_type=mode)
        mode_tag = route_types.ROUTE_TYPE_TO_LOWERCASE_TAG[mode]
        target_path = os.path.join(extract_output_dir, mode_tag + ".tnet")
        events_frame.to_csv(target_path, encoding='utf-8', index=False)
"resource": ""
} |
def _write_stop_to_stop_network_edges(net, file_name, data=True, fmt=None):
    """
    Write out a stop-to-stop network.

    Parameters
    ----------
    net: networkx.DiGraph
    file_name: str
        path of the file to write to
    data: bool, optional
        whether or not to write out any edge data present
        (only honoured for the "edg" format)
    fmt: str, optional
        If "csv" write out the network in csv format,
        otherwise an edge-list ("edg") file is written.
    """
    if fmt is None:
        fmt = "edg"
    # NOTE(review): edges_iter is the networkx 1.x API (renamed to
    # edges() in networkx >= 2.0) -- confirm the pinned networkx version.
    if fmt == "edg":
        if data:
            networkx.write_edgelist(net, file_name, data=True)
        else:
            networkx.write_edgelist(net, file_name)
    elif fmt == "csv":
        with open(file_name, 'w') as f:
            # writing out the header
            # (peek at one edge to learn which data keys exist; assumes all
            # edges carry the same keys -- TODO confirm)
            edge_iter = net.edges_iter(data=True)
            _, _, edg_data = next(edge_iter)
            edg_data_keys = list(sorted(edg_data.keys()))
            header = ";".join(["from_stop_I", "to_stop_I"] + edg_data_keys)
            f.write(header)
            for from_node_I, to_node_I, data in net.edges_iter(data=True):
                f.write("\n")
                values = [str(from_node_I), str(to_node_I)]
                data_values = []
                for key in edg_data_keys:
                    if key == "route_I_counts":
                        # strip spaces and the surrounding braces from the dict repr
                        route_I_counts_string = str(data[key]).replace(" ", "")[1:-1]
                        data_values.append(route_I_counts_string)
                    else:
                        data_values.append(str(data[key]))
                all_values = values + data_values
                f.write(";".join(all_values))
"resource": ""
} |
def write_gtfs(gtfs, output):
    """
    Write out the database according to the GTFS format.

    Parameters
    ----------
    gtfs: gtfspy.GTFS
    output: str
        Path where to put the GTFS files
        if output ends with ".zip" a ZIP-file is created instead.

    Returns
    -------
    None
    """
    output = os.path.abspath(output)
    # Unique temp-directory name so concurrent or failed exports do not collide.
    uuid_str = "tmp_" + str(uuid.uuid1())
    if output[-4:] == '.zip':
        # NOTE(review): 'zip' shadows the builtin zip() inside this function.
        zip = True
        out_basepath = os.path.dirname(os.path.abspath(output))
        if not os.path.exists(out_basepath):
            raise IOError(out_basepath + " does not exist, cannot write gtfs as a zip")
        tmp_dir = os.path.join(out_basepath, str(uuid_str))
        # zip_file_na,e = ../out_basedir + ".zip
    else:
        zip = False
        out_basepath = output
        tmp_dir = os.path.join(out_basepath + "_" + str(uuid_str))
    os.makedirs(tmp_dir, exist_ok=True)
    # Map from GTFS table name to the writer producing its .txt file.
    gtfs_table_to_writer = {
        "agency": _write_gtfs_agencies,
        "calendar": _write_gtfs_calendar,
        "calendar_dates": _write_gtfs_calendar_dates,
        # fare attributes and fare_rules omitted (seldomly used)
        "feed_info": _write_gtfs_feed_info,
        # "frequencies": not written, as they are incorporated into trips and routes,
        # Frequencies table is expanded into other tables on initial import. -> Thus frequencies.txt is not created
        "routes": _write_gtfs_routes,
        "shapes": _write_gtfs_shapes,
        "stops": _write_gtfs_stops,
        "stop_times": _write_gtfs_stop_times,
        "transfers": _write_gtfs_transfers,
        "trips": _write_gtfs_trips,
    }
    for table, writer in gtfs_table_to_writer.items():
        fname_to_write = os.path.join(tmp_dir, table + '.txt')
        print(fname_to_write)
        # NOTE(review): the opened file handle is never explicitly closed here;
        # it relies on the writer or garbage collection to flush and close.
        writer(gtfs, open(os.path.join(tmp_dir, table + '.txt'), 'w'))
    if zip:
        # Zip up the temporary directory and remove it.
        shutil.make_archive(output[:-4], 'zip', tmp_dir)
        shutil.rmtree(tmp_dir)
    else:
        print("moving " + str(tmp_dir) + " to " + out_basepath)
        os.rename(tmp_dir, out_basepath)
"resource": ""
} |
q257470 | _remove_I_columns | validation | def _remove_I_columns(df):
"""
Remove columns ending with I from a pandas.DataFrame
Parameters
----------
df: dataFrame
Returns
-------
None
"""
all_columns = list(filter(lambda el: el[-2:] == "_I", df.columns))
for column in all_columns:
del df[column] | python | {
"resource": ""
} |
def _scan_footpaths_to_departure_stop(self, connection_dep_stop, connection_dep_time, arrival_time_target):
    """Scan footpaths into the connection's departure stop and update
    the profiles of all walk-neighbors in self._stop_profiles."""
    incident_edges = self._walk_network.edges_iter(nbunch=[connection_dep_stop], data=True)
    for _, neighbor, edge_data in incident_edges:
        walk_distance = edge_data['d_walk']
        # Departing the neighbor earlier by the walking time still
        # catches this connection.
        neighbor_dep_time = connection_dep_time - walk_distance / self._walk_speed
        label = LabelTimeSimple(departure_time=neighbor_dep_time,
                                arrival_time_target=arrival_time_target)
        self._stop_profiles[neighbor].update_pareto_optimal_tuples(label)
"resource": ""
} |
def create_file(fname=None, fname_tmp=None, tmpdir=None,
                save_tmpfile=False, keepext=False):
    """Context manager for making files with possibility of failure.

    If you are creating a file, it is possible that the code will fail
    and leave a corrupt intermediate file. This is especially damaging
    if this is used as automatic input to another process. This context
    manager helps by creating a temporary filename, your code runs and
    creates that temporary file, and then if no exceptions are raised,
    the context manager will move the temporary file to the original
    filename you intended to open.

    Parameters
    ----------
    fname : str
        Target filename, this file will be created if all goes well
    fname_tmp : str
        If given, this is used as the temporary filename.
    tmpdir : str or bool
        If given, put temporary files in this directory.  If `True`,
        then find a good tmpdir that is not on local filesystem.
    save_tmpfile : bool
        If true, the temporary file is not deleteted if an exception
        is raised.
    keepext : bool, default False
        If true, have tmpfile have same extension as final file.

    Returns (as context manager value)
    ----------------------------------
    fname_tmp: str
        Temporary filename to be used.  Same as `fname_tmp`
        if given as an argument.

    Raises
    ------
    Re-raises any except occuring during the context block.
    """
    # Do nothing if requesting sqlite memory DB.
    if fname == ':memory:':
        yield fname
        return
    if fname_tmp is None:
        # no tmpfile name given - compute some basic info
        basename = os.path.basename(fname)
        root, ext = os.path.splitext(basename)
        dir_ = this_dir = os.path.dirname(fname)
        # Remove filename extension, in case this matters for
        # automatic things itself.
        if not keepext:
            root = root + ext
            ext = ''
        if tmpdir:
            # we should use a different temporary directory
            if tmpdir is True:
                # Find a directory ourself, searching some common
                # places.
                for dir__ in possible_tmpdirs:
                    if os.access(dir__, os.F_OK):
                        dir_ = dir__
                        break
        # Make the actual tmpfile, with our chosen tmpdir, directory,
        # extension. Set it to not delete automatically, since on
        # success we will move it to elsewhere.
        tmpfile = tempfile.NamedTemporaryFile(
            prefix='tmp-' + root + '-', suffix=ext, dir=dir_, delete=False)
        fname_tmp = tmpfile.name
    try:
        yield fname_tmp
    except Exception as e:
        if save_tmpfile:
            print("Temporary file is '%s'" % fname_tmp)
        else:
            os.unlink(fname_tmp)
        raise
    # Move the file back to the original location.
    try:
        os.rename(fname_tmp, fname)
        # We have to manually set permissions. tempfile does not use
        # umask, for obvious reasons.
        # NOTE(review): this path sets 0o777 & ~umask while the
        # cross-device fallback below uses 0o666 & ~umask -- confirm the
        # difference is intentional.
        os.chmod(fname, 0o777 & ~current_umask)
    # 'Invalid cross-device link' - you can't rename files across
    # filesystems. So, we have to fallback to moving it. But, we
    # want to move it using tmpfiles also, so that the final file
    # appearing is atomic. We use... tmpfiles.
    except OSError as e:
        # New temporary file in same directory
        tmpfile2 = tempfile.NamedTemporaryFile(
            prefix='tmp-' + root + '-', suffix=ext, dir=this_dir, delete=False)
        # Copy contents over
        shutil.copy(fname_tmp, tmpfile2.name)
        # Rename new tmpfile, unlink old one on other filesystem.
        os.rename(tmpfile2.name, fname)
        os.chmod(fname, 0o666 & ~current_umask)
        os.unlink(fname_tmp)
"resource": ""
} |
def execute(cur, *args):
    """Execute an sqlite query, printing parameterized statements first.

    Drop-in replacement for ``cur.execute(stmt, params)``:
    write ``util.execute(cur, stmt, params)`` instead.  When parameters
    are given, an interpolated preview of the statement is printed
    before execution.
    """
    statement = args[0]
    if len(args) > 1:
        # Escape literal '%' and turn '?' placeholders into %r so the
        # parameter tuple can be interpolated for display.
        preview = statement.replace('%', '%%').replace('?', '%r')
        print(preview % (args[1]))
    return cur.execute(*args)
"resource": ""
} |
def makedirs(path):
    """
    Create directories if they do not exist, otherwise do nothing.

    Return path for convenience.
    """
    # exist_ok avoids the check-then-create race of the previous
    # os.path.isdir() + os.makedirs() sequence; a pre-existing
    # non-directory at `path` still raises, as before.
    os.makedirs(path, exist_ok=True)
    return path
"resource": ""
} |
def _finalize_profiles(self):
    """
    Deal with the first walks by joining profiles to other stops within walking distance.
    """
    for stop, stop_profile in self._stop_profiles.items():
        assert (isinstance(stop_profile, NodeProfileMultiObjective))
        neighbor_label_bags = []
        walk_durations_to_neighbors = []
        departure_arrival_stop_pairs = []
        # Join with walk-neighbors only when this stop is not the target
        # itself (walk-to-target duration 0) and is part of the walk network.
        if stop_profile.get_walk_to_target_duration() != 0 and stop in self._walk_network.node:
            neighbors = networkx.all_neighbors(self._walk_network, stop)
            for neighbor in neighbors:
                neighbor_profile = self._stop_profiles[neighbor]
                assert (isinstance(neighbor_profile, NodeProfileMultiObjective))
                neighbor_real_connection_labels = neighbor_profile.get_labels_for_real_connections()
                neighbor_label_bags.append(neighbor_real_connection_labels)
                # Walking time = edge length / walking speed, truncated to whole seconds.
                walk_durations_to_neighbors.append(int(self._walk_network.get_edge_data(stop, neighbor)["d_walk"] /
                                                       self._walk_speed))
                departure_arrival_stop_pairs.append((stop, neighbor))
        stop_profile.finalize(neighbor_label_bags, walk_durations_to_neighbors, departure_arrival_stop_pairs)
"resource": ""
} |
def validate_day_start_ut(conn):
    """This validates the day_start_ut of the days table."""
    gtfs = GTFS(conn)
    rows = conn.execute('SELECT date, day_start_ut FROM days')
    # Every stored day_start_ut must agree with the value recomputed
    # from the date.
    for date, day_start_ut in rows:
        assert day_start_ut == gtfs.get_day_start_ut(date)
"resource": ""
} |
def main_make_views(gtfs_fname):
    """Re-create all views.
    """
    print("creating views")
    connection = GTFS(fname_or_conn=gtfs_fname).conn
    # Let every registered loader (re)build its views on this connection.
    for loader_cls in Loaders:
        loader_cls(None).make_views(connection)
    connection.commit()
"resource": ""
} |
def _validate_no_null_values(self):
    """
    Load each database table and record, in self.warnings_container, the
    rows that contain null values in fields that must not (or preferably
    should not) be null.
    """
    for table in DB_TABLE_NAMES:
        null_not_ok_warning = "Null values in must-have columns in table {table}".format(table=table)
        null_warn_warning = "Null values in good-to-have columns in table {table}".format(table=table)
        null_not_ok_fields = DB_TABLE_NAME_TO_FIELDS_WHERE_NULL_NOT_OK[table]
        null_warn_fields = DB_TABLE_NAME_TO_FIELDS_WHERE_NULL_OK_BUT_WARN[table]
        # CW, TODO: make this validation source by source
        df = self.gtfs.get_table(table)
        for warning, fields in zip([null_not_ok_warning, null_warn_warning], [null_not_ok_fields, null_warn_fields]):
            null_unwanted_df = df[fields]
            # Fix: pass axis as a keyword -- the positional form
            # DataFrame.any(1) was deprecated and removed in pandas 2.0.
            rows_having_null = null_unwanted_df.isnull().any(axis=1)
            if sum(rows_having_null) > 0:
                rows_having_unwanted_null = df[rows_having_null.values]
                self.warnings_container.add_warning(warning, rows_having_unwanted_null, len(rows_having_unwanted_null))
"resource": ""
} |
def _validate_danglers(self):
    """
    Check for rows that are not referenced in the tables that should be linked:

    stops <> stop_times using stop_I
    stop_times <> trips <> days, using trip_I
    trips <> routes, using route_I

    Each non-zero dangler count is recorded in self.warnings_container.
    """
    for query, warning in zip(DANGLER_QUERIES, DANGLER_WARNINGS):
        count = self.gtfs.execute_custom_query(query).fetchone()[0]
        if count > 0:
            if self.verbose:
                print(str(count) + " " + warning)
            self.warnings_container.add_warning(warning, self.location, count=count)
"resource": ""
} |
def print_coords(rows, prefix=''):
    """Print coordinates within a sequence.

    This is only used for debugging. Printed in a form that can be
    pasted into Python for visualization."""
    lats = [row['lat'] for row in rows]
    lons = [row['lon'] for row in rows]
    print('COORDS' + '-' * 5)
    print("%slat, %slon = %r, %r" % (prefix, prefix, lats, lons))
    print('-' * 5)
"resource": ""
} |
def find_segments(stops, shape):
    """Find corresponding shape points for a list of stops and create shape break points.

    Parameters
    ----------
    stops: stop-sequence (list)
        List of stop points
    shape: list of shape points
        shape-sequence of shape points

    Returns
    -------
    break_points: list[int]
        stops[i] corresponds to shape[break_points[i]]. This list can
        be used to partition the shape points into segments between
        one stop and the next.
    badness: float
        Lower indicates better fit to the shape. This is the sum of
        distances (in meters) between every each stop and its closest
        shape point. This is not needed in normal use, but in the
        cases where you must determine the best-fitting shape for a
        stop-sequence, use this.
    """
    if not shape:
        return [], 0
    break_points = []
    last_i = 0
    cumul_d = 0
    badness = 0
    d_last_stop = float('inf')
    lstlat, lstlon = None, None
    break_shape_points = []
    for stop in stops:
        stlat, stlon = stop['lat'], stop['lon']
        best_d = float('inf')
        # print stop
        # Heuristic bail-out: a badness this large means the shape cannot
        # plausibly match this stop sequence, so give up early.
        if badness > 500 and badness > 30 * len(break_points):
            return [], badness
        for i in range(last_i, len(shape)):
            d = wgs84_distance(stlat, stlon, shape[i]['lat'], shape[i]['lon'])
            if lstlat:
                d_last_stop = wgs84_distance(lstlat, lstlon, shape[i]['lat'], shape[i]['lon'])
            # If we are getting closer to next stop, record this as
            # the best stop so far.continue
            if d < best_d:
                best_d = d
                best_i = i
                # print best_d, i, last_i, len(shape)
            cumul_d += d
            # We have to be very careful about our stop condition.
            # This is trial and error, basically.
            if (d_last_stop < d) or (d > 500) or (i < best_i + 100):
                continue
            # We have decided our best stop, stop looking and continue
            # the outer loop.
            else:
                badness += best_d
                break_points.append(best_i)
                last_i = best_i
                lstlat, lstlon = stlat, stlon
                break_shape_points.append(shape[best_i])
                break
        else:
            # Executed if we did *not* break the inner loop
            # (ran out of shape points): accept the best match found so far.
            badness += best_d
            break_points.append(best_i)
            last_i = best_i
            lstlat, lstlon = stlat, stlon
            break_shape_points.append(shape[best_i])
            pass
    # print "Badness:", badness
    # print_coords(stops, 'stop')
    # print_coords(shape, 'shape')
    # print_coords(break_shape_points, 'break')
    return break_points, badness
"resource": ""
} |
def return_segments(shape, break_points):
    """Break a shape into segments between stops using break_points.

    This function can use the `break_points` outputs from
    `find_segments`, and cuts the shape-sequence into pieces
    corresponding to each stop.  A None break point inherits its value
    from its neighbor; a trailing empty segment is always appended.
    """
    segments = []
    prev_break = 0
    cur_break = 0
    for idx in range(len(break_points) - 1):
        first = break_points[idx]
        second = break_points[idx + 1]
        prev_break = first if first is not None else cur_break
        cur_break = second if second is not None else prev_break
        segments.append(shape[prev_break:cur_break + 1])
    segments.append([])
    return segments
"resource": ""
} |
def get_trip_points(cur, route_id, offset=0, tripid_glob=''):
    """Get all scheduled stops on a particular route_id.

    Given a route_id, return the trip-stop-list with
    latitude/longitudes. This is a bit more tricky than it seems,
    because we have to go from table route->trips->stop_times. This
    functions finds an arbitrary trip (in trip table) with this route ID
    and, and then returns all stop points for that trip.

    Parameters
    ----------
    cur : sqlite3.Cursor
        cursor to sqlite3 DB containing GTFS
    route_id : string or any
        route_id to get stop points of
    offset : int
        LIMIT offset if you don't want the first trip returned.
    tripid_glob : string
        If given, allows you to limit tripids which can be selected.
        Mainly useful in debugging.

    Returns
    -------
    stop-list
        List of stops in stop-seq format.
    """
    extra_where = ''
    if tripid_glob:
        extra_where = "AND trip_id GLOB '%s'" % tripid_glob
    # Adjacent string literals concatenate before the single '%' below
    # is applied, so '%s' inside the WHERE clause receives extra_where.
    # NOTE(review): table names 'route' and 'stop' are singular here,
    # unlike 'trips'/'stop_times' -- verify against the schema in use.
    cur.execute('SELECT seq, lat, lon '
                'FROM (select trip_I from route '
                ' LEFT JOIN trips USING (route_I) '
                ' WHERE route_id=? %s limit 1 offset ? ) '
                'JOIN stop_times USING (trip_I) '
                'LEFT JOIN stop USING (stop_id) '
                'ORDER BY seq' % extra_where, (route_id, offset))
    stop_points = [dict(seq=row[0], lat=row[1], lon=row[2]) for row in cur]
    return stop_points
"resource": ""
} |
q257484 | interpolate_shape_times | validation | def interpolate_shape_times(shape_distances, shape_breaks, stop_times):
"""
Interpolate passage times for shape points.
Parameters
----------
shape_distances: list
list of cumulative distances along the shape
shape_breaks: list
list of shape_breaks
stop_times: list
list of stop_times
Returns
-------
shape_times: list of ints (seconds) / numpy array
interpolated shape passage times
The values of stop times before the first shape-break are given the first
stopping time, and the any shape points after the last break point are
given the value of the last shape point.
"""
shape_times = np.zeros(len(shape_distances))
shape_times[:shape_breaks[0]] = stop_times[0]
for i in range(len(shape_breaks)-1):
cur_break = shape_breaks[i]
cur_time = stop_times[i]
next_break = shape_breaks[i+1]
next_time = stop_times[i+1]
if cur_break == next_break:
shape_times[cur_break] = stop_times[i]
else:
cur_distances = shape_distances[cur_break:next_break+1]
norm_distances = ((np.array(cur_distances)-float(cur_distances[0])) /
float(cur_distances[-1] - cur_distances[0]))
times = (1.-norm_distances)*cur_time+norm_distances*next_time
shape_times[cur_break:next_break] = times[:-1]
# deal final ones separately:
shape_times[shape_breaks[-1]:] = stop_times[-1]
return list(shape_times) | python | {
"resource": ""
} |
q257485 | NodeProfileSimple.evaluate_earliest_arrival_time_at_target | validation | def evaluate_earliest_arrival_time_at_target(self, dep_time, transfer_margin):
"""
Get the earliest arrival time at the target, given a departure time.
Parameters
----------
dep_time : float, int
time in unix seconds
transfer_margin: float, int
transfer margin in seconds
Returns
-------
arrival_time : float
Arrival time in the given time unit (seconds after unix epoch).
"""
minimum = dep_time + self._walk_to_target_duration
dep_time_plus_transfer_margin = dep_time + transfer_margin
for label in self._labels:
if label.departure_time >= dep_time_plus_transfer_margin and label.arrival_time_target < minimum:
minimum = label.arrival_time_target
return float(minimum) | python | {
"resource": ""
} |
def _run(self):
    """
    Run the actual simulation.
    """
    # Guard against reuse: the heap and visit state are consumed by a run.
    if self._has_run:
        raise RuntimeError("This spreader instance has already been run: "
                           "create a new Spreader object for a new run.")
    i = 1
    # Process events in temporal order until the heap is exhausted,
    # every stop is infected, or the maximum duration is exceeded.
    while self.event_heap.size() > 0 and len(self._uninfected_stops) > 0:
        event = self.event_heap.pop_next_event()
        this_stop = self._stop_I_to_spreading_stop[event.from_stop_I]
        if event.arr_time_ut > self.start_time_ut + self.max_duration_ut:
            break
        if this_stop.can_infect(event):
            target_stop = self._stop_I_to_spreading_stop[event.to_stop_I]
            already_visited = target_stop.has_been_visited()
            target_stop.visit(event)
            if not already_visited:
                # Newly infected stop: record it and enqueue walk events
                # to nearby stops.
                self._uninfected_stops.remove(event.to_stop_I)
                print(i, self.event_heap.size())
                transfer_distances = self.gtfs.get_straight_line_transfer_distances(event.to_stop_I)
                self.event_heap.add_walk_events_to_heap(transfer_distances, event, self.start_time_ut,
                                                        self.walk_speed, self._uninfected_stops,
                                                        self.max_duration_ut)
                i += 1
    self._has_run = True
"resource": ""
} |
def add_walk_distances_to_db_python(gtfs, osm_path, cutoff_distance_m=1000):
    """
    Computes the walk paths between stops, and updates these to the gtfs database.

    Parameters
    ----------
    gtfs: gtfspy.GTFS or str
        A GTFS object or a string representation.
    osm_path: str
        path to the OpenStreetMap file
    cutoff_distance_m: number
        maximum allowed distance in meters

    Returns
    -------
    None

    See Also
    --------
    gtfspy.calc_transfers
    compute_walk_paths_java
    """
    if isinstance(gtfs, str):
        gtfs = GTFS(gtfs)
    assert (isinstance(gtfs, GTFS))
    print("Reading in walk network")
    walk_network = create_walk_network_from_osm(osm_path)
    print("Matching stops to the OSM network")
    stop_I_to_nearest_osm_node, stop_I_to_nearest_osm_node_distance = match_stops_to_nodes(gtfs, walk_network)
    transfers = gtfs.get_straight_line_transfer_distances()
    # Group target stops by origin stop so Dijkstra runs once per origin.
    from_I_to_to_stop_Is = {stop_I: set() for stop_I in stop_I_to_nearest_osm_node}
    for transfer_tuple in transfers.itertuples():
        from_I = transfer_tuple.from_stop_I
        to_I = transfer_tuple.to_stop_I
        from_I_to_to_stop_Is[from_I].add(to_I)
    print("Computing walking distances")
    for from_I, to_stop_Is in from_I_to_to_stop_Is.items():
        from_node = stop_I_to_nearest_osm_node[from_I]
        from_dist = stop_I_to_nearest_osm_node_distance[from_I]
        # Cutoff shrunk by the stop-to-node distance so the total stays bounded.
        shortest_paths = networkx.single_source_dijkstra_path_length(walk_network,
                                                                     from_node,
                                                                     cutoff=cutoff_distance_m - from_dist,
                                                                     weight="distance")
        for to_I in to_stop_Is:
            to_distance = stop_I_to_nearest_osm_node_distance[to_I]
            to_node = stop_I_to_nearest_osm_node[to_I]
            osm_distance = shortest_paths.get(to_node, float('inf'))
            # total = stop->node + network path + node->stop.
            total_distance = from_dist + osm_distance + to_distance
            from_stop_I_transfers = transfers[transfers['from_stop_I'] == from_I]
            straigth_distance = from_stop_I_transfers[from_stop_I_transfers["to_stop_I"] == to_I]["d"].values[0]
            assert (straigth_distance < total_distance + 2)  # allow for a maximum of 2 meters in calculations
            if total_distance <= cutoff_distance_m:
                # Values interpolated here are internally generated ints,
                # not user input.
                gtfs.conn.execute("UPDATE stop_distances "
                                  "SET d_walk = " + str(int(total_distance)) +
                                  " WHERE from_stop_I=" + str(from_I) + " AND to_stop_I=" + str(to_I))
    gtfs.conn.commit()
"resource": ""
} |
def stop_to_stop_network_for_route_type(gtfs,
                                        route_type,
                                        link_attributes=None,
                                        start_time_ut=None,
                                        end_time_ut=None):
    """
    Get a stop-to-stop network describing a single mode of travel.

    Parameters
    ----------
    gtfs : gtfspy.GTFS
    route_type : int
        See gtfspy.route_types.TRANSIT_ROUTE_TYPES for the list of possible types.
    link_attributes: list[str], optional
        defaulting to use the following link attributes:
            "n_vehicles" : Number of vehicles passed
            "duration_min" : minimum travel time between stops
            "duration_max" : maximum travel time between stops
            "duration_median" : median travel time between stops
            "duration_avg" : average travel time between stops
            "d" : distance along straight line (wgs84_distance)
            "distance_shape" : minimum distance along shape
            "capacity_estimate" : approximate capacity passed through the stop
            "route_I_counts" : dict from route_I to counts
    start_time_ut: int
        start time of the time span (in unix time)
    end_time_ut: int
        end time of the time span (in unix time)

    Returns
    -------
    net: networkx.DiGraph
        A directed graph Directed graph
    """
    if link_attributes is None:
        link_attributes = DEFAULT_STOP_TO_STOP_LINK_ATTRIBUTES
    assert(route_type in route_types.TRANSIT_ROUTE_TYPES)
    stops_dataframe = gtfs.get_stops_for_route_type(route_type)
    net = networkx.DiGraph()
    _add_stops_to_net(net, stops_dataframe)
    events_df = gtfs.get_transit_events(start_time_ut=start_time_ut,
                                        end_time_ut=end_time_ut,
                                        route_type=route_type)
    # Fewer than two stops implies there can be no events for this mode.
    if len(net.nodes()) < 2:
        assert events_df.shape[0] == 0
    # group events by links, and loop over them (i.e. each link):
    link_event_groups = events_df.groupby(['from_stop_I', 'to_stop_I'], sort=False)
    for key, link_events in link_event_groups:
        from_stop_I, to_stop_I = key
        assert isinstance(link_events, pd.DataFrame)
        # 'dep_time_ut' 'arr_time_ut' 'shape_id' 'route_type' 'trip_I' 'duration' 'from_seq' 'to_seq'
        # NOTE(review): this branch is unreachable -- link_attributes was
        # defaulted above, so it can never be None here.
        if link_attributes is None:
            net.add_edge(from_stop_I, to_stop_I)
        else:
            link_data = {}
            if "duration_min" in link_attributes:
                link_data['duration_min'] = float(link_events['duration'].min())
            if "duration_max" in link_attributes:
                link_data['duration_max'] = float(link_events['duration'].max())
            if "duration_median" in link_attributes:
                link_data['duration_median'] = float(link_events['duration'].median())
            if "duration_avg" in link_attributes:
                link_data['duration_avg'] = float(link_events['duration'].mean())
            # statistics on numbers of vehicles:
            if "n_vehicles" in link_attributes:
                link_data['n_vehicles'] = int(link_events.shape[0])
            if "capacity_estimate" in link_attributes:
                link_data['capacity_estimate'] = route_types.ROUTE_TYPE_TO_APPROXIMATE_CAPACITY[route_type] \
                                                 * int(link_events.shape[0])
            if "d" in link_attributes:
                # Straight-line (great-circle) distance between the stop coordinates.
                from_lat = net.node[from_stop_I]['lat']
                from_lon = net.node[from_stop_I]['lon']
                to_lat = net.node[to_stop_I]['lat']
                to_lon = net.node[to_stop_I]['lon']
                distance = wgs84_distance(from_lat, from_lon, to_lat, to_lon)
                link_data['d'] = int(distance)
            if "distance_shape" in link_attributes:
                assert "shape_id" in link_events.columns.values
                # Use the first event that actually carries a shape_id.
                found = None
                for i, shape_id in enumerate(link_events["shape_id"].values):
                    if shape_id is not None:
                        found = i
                        break
                if found is None:
                    link_data["distance_shape"] = None
                else:
                    link_event = link_events.iloc[found]
                    distance = gtfs.get_shape_distance_between_stops(
                        link_event["trip_I"],
                        int(link_event["from_seq"]),
                        int(link_event["to_seq"])
                    )
                    link_data['distance_shape'] = distance
            if "route_I_counts" in link_attributes:
                link_data["route_I_counts"] = link_events.groupby("route_I").size().to_dict()
            net.add_edge(from_stop_I, to_stop_I, attr_dict=link_data)
    return net
"resource": ""
} |
def combined_stop_to_stop_transit_network(gtfs, start_time_ut=None, end_time_ut=None):
    """
    Compute stop-to-stop networks for all travel modes and combine them
    into a single multigraph (walk mode excluded).  Each edge carries a
    'route_type' attribute identifying its mode.

    Parameters
    ----------
    gtfs: gtfspy.GTFS

    Returns
    -------
    net: networkx.MultiDiGraph
        edge keys are GTFS route_types (route_types.TRANSIT_ROUTE_TYPES)
    """
    combined = networkx.MultiDiGraph()
    for mode in route_types.TRANSIT_ROUTE_TYPES:
        mode_net = stop_to_stop_network_for_route_type(gtfs, mode,
                                                       start_time_ut=start_time_ut,
                                                       end_time_ut=end_time_ut)
        # Tag each edge with its mode before merging.
        for _, _, edge_data in mode_net.edges(data=True):
            edge_data['route_type'] = mode
        combined.add_edges_from(mode_net.edges(data=True))
        combined.add_nodes_from(mode_net.nodes(data=True))
    return combined
"resource": ""
} |
def temporal_network(gtfs,
                     start_time_ut=None,
                     end_time_ut=None,
                     route_type=None):
    """
    Compute the temporal network of the data, and return it as a pandas.DataFrame

    Parameters
    ----------
    gtfs : gtfspy.GTFS
    start_time_ut: int | None
        start time of the time span (in unix time)
    end_time_ut: int | None
        end time of the time span (in unix time)
    route_type: int | None
        Specifies which mode of public transport are included, or whether all modes should be included.
        The int should be one of the standard GTFS route_types:
        (see also gtfspy.route_types.TRANSIT_ROUTE_TYPES )
        If route_type is not specified, all modes are included.

    Returns
    -------
    events_df: pandas.DataFrame
        Columns: departure_stop, arrival_stop, departure_time_ut, arrival_time_ut, route_type, route_I, trip_I
    """
    events_df = gtfs.get_transit_events(start_time_ut=start_time_ut,
                                        end_time_ut=end_time_ut,
                                        route_type=route_type)
    # Drop columns not needed in the temporal-network representation.
    # Fix: use the columns= keyword -- the positional-axis form
    # DataFrame.drop('x', 1) was deprecated and removed in pandas 2.0.
    events_df.drop(columns=['to_seq', 'shape_id', 'duration', 'route_id'], inplace=True)
    events_df.rename(
        columns={
            'from_seq': "seq"
        },
        inplace=True
    )
    return events_df
"resource": ""
} |
def plot_temporal_distance_cdf(self):
    """
    Plot the temporal distance cumulative density function.

    Returns
    -------
    fig: matplotlib.Figure
    """
    xs, cdf = self.profile_block_analyzer._temporal_distance_cdf()
    figure = plt.figure()
    axes = figure.add_subplot(111)
    # Convert seconds to minutes for the x-axis.
    xs_minutes = numpy.array(xs) / 60.0
    axes.plot(xs_minutes, cdf, "-k")
    axes.fill_between(xs_minutes, cdf, color="red", alpha=0.2)
    axes.set_ylabel("CDF(t)")
    axes.set_xlabel("Temporal distance t (min)")
    return figure
"resource": ""
} |
q257492 | ForwardJourney.get_transfer_stop_pairs | validation | def get_transfer_stop_pairs(self):
"""
Get stop pairs through which transfers take place
Returns
-------
transfer_stop_pairs: list
"""
transfer_stop_pairs = []
previous_arrival_stop = None
current_trip_id = None
for leg in self.legs:
if leg.trip_id is not None and leg.trip_id != current_trip_id and previous_arrival_stop is not None:
transfer_stop_pair = (previous_arrival_stop, leg.departure_stop)
transfer_stop_pairs.append(transfer_stop_pair)
previous_arrival_stop = leg.arrival_stop
current_trip_id = leg.trip_id
return transfer_stop_pairs | python | {
"resource": ""
} |
def from_directory_as_inmemory_db(cls, gtfs_directory):
    """
    Instantiate a GTFS object from a GTFS directory, backed by an
    in-memory sqlite database.

    Parameters
    ----------
    gtfs_directory: str
        path to the directory for importing the database
    """
    # Imported here to avoid a circular import between this module and
    # gtfspy.import_gtfs.
    from gtfspy.import_gtfs import import_gtfs
    connection = sqlite3.connect(":memory:")
    import_gtfs(gtfs_directory,
                connection,
                preserve_connection=True,
                print_progress=False)
    return cls(connection)
"resource": ""
} |
def get_main_database_path(self):
    """Return the filesystem path of the connection's 'main' database.

    Returns
    -------
    path : unicode
        path to the database; an empty string for in-memory databases
    """
    cursor = self.conn.cursor()
    cursor.execute("PRAGMA database_list")
    # database_list rows are (seq, name, file); pick the 'main' schema.
    for _seq, name, path in cursor.fetchall():
        if name == "main":
            return path
"resource": ""
} |
def get_shape_distance_between_stops(self, trip_I, from_stop_seq, to_stop_seq):
    """Get the distance along a shape between stops.

    Parameters
    ----------
    trip_I : int
        trip_ID along which we travel
    from_stop_seq : int
        the sequence number of the 'origin' stop
    to_stop_seq : int
        the sequence number of the 'destination' stop

    Returns
    -------
    distance : float, None
        If the shape calculation succeeded, return a float, otherwise return None
        (i.e. in the case where the shapes table is empty)
    """
    # Use parameterized queries instead of str.format: formatting values
    # directly into SQL is an injection hazard and breaks on non-int input.
    shape_break_query = "SELECT shape_break FROM stop_times WHERE trip_I=? AND seq=?"
    shape_breaks = []
    for seq in (from_stop_seq, to_stop_seq):
        shape_breaks.append(self.conn.execute(shape_break_query, (trip_I, seq)).fetchone())
    # NOTE(review): shape_breaks is fetched but never used below; the distance
    # is computed from the *stop* sequence numbers instead -- confirm this is
    # the intended behavior before changing it.
    distance_query = (
        "SELECT max(d) - min(d) "
        "FROM shapes JOIN trips ON(trips.shape_id=shapes.shape_id) "
        "WHERE trip_I=? AND shapes.seq>=? AND shapes.seq<=?"
    )
    # An empty match yields a single row containing NULL -> returns None.
    return self.conn.execute(distance_query, (trip_I, from_stop_seq, to_stop_seq)).fetchone()[0]
"resource": ""
} |
def get_timezone_name(self):
    """Get the name of the GTFS timezone.

    Returns
    -------
    timezone_name : str
        name of the time zone, e.g. "Europe/Helsinki"

    Raises
    ------
    ValueError
        If the agencies table is empty, or its timezone value is NULL.
    """
    row = self.conn.execute('SELECT timezone FROM agencies LIMIT 1').fetchone()
    # The previous check only caught an empty agencies table; a NULL timezone
    # column produced the row (None,) and silently returned None instead of
    # raising as documented. Treat both cases as "timezone not defined".
    if row is None or row[0] is None:
        raise ValueError("This database does not have a timezone defined.")
    return row[0]
"resource": ""
} |
def get_trip_trajectories_within_timespan(self, start, end, use_shapes=True, filter_name=None):
    """
    Get complete trip data for visualizing public transport operation based on gtfs.

    Parameters
    ----------
    start: number
        Earliest position data to return (in unix time)
    end: number
        Latest position data to return (in unix time)
    use_shapes: bool, optional
        Whether or not shapes should be included
    filter_name: str
        Pick only routes having this name.

    Returns
    -------
    trips: dict
        trips['trips'] is a list whose each element (e.g. el = trips['trips'][0])
        is a dict with the following properties:
            el['lats'] -- list of latitudes
            el['lons'] -- list of longitudes
            el['times'] -- list of passage_times
            el['route_type'] -- type of vehicle as specified by GTFS
            el['name'] -- name of the route
    """
    trips = []
    trip_df = self.get_tripIs_active_in_range(start, end)
    print("gtfs_viz.py: fetched " + str(len(trip_df)) + " trip ids")
    shape_cache = {}
    # loop over all trips:
    for row in trip_df.itertuples():
        trip_I = row.trip_I
        day_start_ut = row.day_start_ut
        shape_id = row.shape_id
        trip = {}
        name, route_type = self.get_route_name_and_type_of_tripI(trip_I)
        trip['route_type'] = int(route_type)
        trip['name'] = str(name)
        if filter_name and (name != filter_name):
            continue
        stop_lats = []
        stop_lons = []
        stop_dep_times = []
        shape_breaks = []
        stop_seqs = []
        # get stop_data and store it:
        stop_time_df = self.get_trip_stop_time_data(trip_I, day_start_ut)
        for stop_row in stop_time_df.itertuples():
            stop_lats.append(float(stop_row.lat))
            stop_lons.append(float(stop_row.lon))
            stop_dep_times.append(float(stop_row.dep_time_ut))
            # seq / shape_break may be missing (NULL) -> store None instead.
            try:
                stop_seqs.append(int(stop_row.seq))
            except TypeError:
                stop_seqs.append(None)
            if use_shapes:
                try:
                    shape_breaks.append(int(stop_row.shape_break))
                except (TypeError, ValueError):
                    shape_breaks.append(None)
        if use_shapes:
            # get shape data (from cache, if possible)
            if shape_id not in shape_cache:
                shape_cache[shape_id] = shapes.get_shape_points2(self.conn.cursor(), shape_id)
            shape_data = shape_cache[shape_id]
            try:
                trip['times'] = shapes.interpolate_shape_times(shape_data['d'], shape_breaks, stop_dep_times)
                trip['lats'] = shape_data['lats']
                trip['lons'] = shape_data['lons']
                start_break = shape_breaks[0]
                end_break = shape_breaks[-1]
                trip['times'] = trip['times'][start_break:end_break + 1]
                trip['lats'] = trip['lats'][start_break:end_break + 1]
                trip['lons'] = trip['lons'][start_break:end_break + 1]
            except Exception:
                # In case interpolation fails, fall back to stop-level data.
                # (Was a bare `except:`, which would also have swallowed
                # KeyboardInterrupt and SystemExit.)
                trip['times'] = stop_dep_times
                trip['lats'] = stop_lats
                trip['lons'] = stop_lons
        else:
            trip['times'] = stop_dep_times
            trip['lats'] = stop_lats
            trip['lons'] = stop_lons
        trips.append(trip)
    return {"trips": trips}
"resource": ""
} |
def get_stop_count_data(self, start_ut, end_ut):
    """Count departures per stop within a time span.

    Parameters
    ----------
    start_ut : int
        start time in unixtime
    end_ut : int
        end time in unixtime

    Returns
    -------
    stopData : pandas.DataFrame
        one row per stop with columns
        stop_I, count, lat, lon, name
        of types (int, int, float, float, str)
    """
    # TODO! this function could perhaps be made a single sql query now with the new tables?
    active_trips = self.get_tripIs_active_in_range(start_ut, end_ut)
    departure_counts = Counter()
    # Tally every departure that falls inside [start_ut, end_ut].
    for trip_row in active_trips.itertuples():
        stop_times = self.get_trip_stop_time_data(trip_row.trip_I, trip_row.day_start_ut)
        for st in stop_times.itertuples(index=False):
            if start_ut <= st.dep_time_ut <= end_ut:
                departure_counts[st.stop_I] += 1
    stop_data = self.stops()
    count_column = [departure_counts[stop_I] for stop_I in stop_data["stop_I"].values]
    stop_data.loc[:, "count"] = pd.Series(count_column, index=stop_data.index)
    return stop_data
"resource": ""
} |
def get_all_route_shapes(self, use_shapes=True):
    """Get the shapes of all routes.

    Parameters
    ----------
    use_shapes : bool, optional
        by default True (i.e. use shapes as the name of the function indicates)
        if False (fall back to lats and longitudes)

    Returns
    -------
    routeShapes: list of dicts that should have the following keys
        name, type, agency, lats, lons
        with types
        list, list, str, list, list
    """
    cur = self.conn.cursor()
    # One (arbitrary) shape_id per route_I ("one direction") -> covers less
    # than half of the per-direction route variants.
    query = "SELECT routes.name as name, shape_id, route_I, trip_I, routes.type, " \
            " agency_id, agencies.name as agency_name, max(end_time_ds-start_time_ds) as trip_duration " \
            "FROM trips " \
            "LEFT JOIN routes " \
            "USING(route_I) " \
            "LEFT JOIN agencies " \
            "USING(agency_I) " \
            "GROUP BY routes.route_I"
    route_rows = pd.read_sql_query(query, self.conn)
    route_shapes = []
    for row in route_rows.itertuples():
        datum = {
            "name": str(row.name),
            "type": int(row.type),
            "route_I": row.route_I,
            "agency": str(row.agency_id),
            "agency_name": str(row.agency_name),
        }
        # this function should be made also non-shape friendly (at this point)
        if use_shapes and row.shape_id:
            shape = shapes.get_shape_points2(cur, row.shape_id)
            raw_lats = shape['lats']
            raw_lons = shape['lons']
        else:
            stop_coords = self.get_trip_stop_coordinates(row.trip_I)
            raw_lats = list(stop_coords['lat'])
            raw_lons = list(stop_coords['lon'])
        datum['lats'] = [float(lat) for lat in raw_lats]
        datum['lons'] = [float(lon) for lon in raw_lons]
        route_shapes.append(datum)
    return route_shapes
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.