Search is not available for this dataset.

| text (string, lengths 75–104k) |
|---|
def index_from_id(self, Id):
    """Return the row index of the acces with the given ``Id``, or None.

    Only works with pseudo-acces (objects exposing an ``Id`` attribute).

    :param Id: identifier to look for
    :return: zero-based row index, or None when no acces matches
    """
    try:
        # list.index raises ValueError (not IndexError) on a missing value;
        # catching the right exception makes the documented None fallback work.
        return [a.Id for a in self].index(Id)
    except ValueError:
        return None
|
def append(self, acces, **kwargs):
    """Append ``acces``, enforcing Id uniqueness (hence linear cost).

    kwargs, when given, are stored as the info dict for this acces.
    """
    if any(current.Id == acces.Id for current in self):
        raise ValueError("Acces id already in list !")
    list.append(self, acces)
    if kwargs:
        self.infos[acces.Id] = kwargs
|
def remove_id(self, key):
    """Suppress the acces whose id equals ``key`` (and its stored info)."""
    # Drop any associated info first; the default avoids a KeyError.
    self.infos.pop(key, "")
    survivors = [acces for acces in self if acces.Id != key]
    list.__init__(self, survivors)
|
def get_info(self, key=None, Id=None) -> dict:
    """Return the info dict associated with ``Id`` or the row index ``key``.

    ``key`` wins over ``Id`` when both are given; an empty dict is returned
    when no info is stored.
    """
    lookup_id = self[key].Id if key is not None else Id
    return self.infos.get(lookup_id, {})
|
def recherche(self, pattern, entete):
    """Perform a field-by-field search, using the match functions from formats.

    Matching fields are flagged in info['font'].
    :param pattern: String to look for (split on spaces; all parts must match)
    :param entete: Fields to look into
    :return: Nothing. The collection is changed in place
    """
    sub_patterns = pattern.split(" ")
    kept = []
    for acces in self:
        fonts = dict.fromkeys(entete, False)
        matches_all = True
        for sub_pattern in sub_patterns:
            hit = False
            for att in entete:
                search_function = formats.ASSOCIATION[att][1]
                if bool(search_function(acces[att], sub_pattern)):
                    hit = True
                    fonts[att] = True
            if not hit:
                matches_all = False
                break
        if matches_all:
            kept.append(acces)
            # Merge existing info with the freshly computed font flags.
            self.infos[acces.Id] = dict(self.get_info(Id=acces.Id), font=fonts)
    list.__init__(self, kept)
|
def extend(self, collection):
    """Merge ``collection`` into self, skipping already-known ids."""
    known_ids = {acces.Id for acces in self}
    for acces in collection:
        if acces.Id in known_ids:
            continue
        list.append(self, acces)
        # Carry over any info stored for this acces in the other collection.
        info = collection.get_info(Id=acces.Id)
        if info:
            self.infos[acces.Id] = info
|
def isotime(at=None, subsecond=False):
    """Stringify time in ISO 8601 format."""
    if not at:
        at = utcnow()
    fmt = _ISO8601_TIME_FORMAT_SUBSECOND if subsecond else _ISO8601_TIME_FORMAT
    stamp = at.strftime(fmt)
    # Naive datetimes are assumed to be UTC.
    tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
    stamp += ('Z' if tz == 'UTC' else tz)
    return stamp
|
def parse_isotime(timestr):
    """Parse time from ISO 8601 format.

    :param timestr: ISO 8601 time string
    :return: an aware datetime
    :raises ValueError: when the string cannot be parsed
    """
    try:
        return iso8601.parse_date(timestr)
    except (iso8601.ParseError, TypeError) as e:
        # Normalize parser failures and bad input types to ValueError so
        # callers only have one exception to handle (the two previous
        # handlers had identical bodies).
        raise ValueError(six.text_type(e))
|
def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
    """Returns formatted utcnow."""
    # Falsy ``at`` (None) means "now".
    target = at or utcnow()
    return target.strftime(fmt)
|
def normalize_time(timestamp):
    """Normalize time in arbitrary timezone to UTC naive object."""
    offset = timestamp.utcoffset()
    if offset is not None:
        # Strip tzinfo, then shift back by the offset to land on UTC.
        return timestamp.replace(tzinfo=None) - offset
    # Naive datetimes are returned untouched.
    return timestamp
|
def is_older_than(before, seconds):
    """Return True if before is older than seconds."""
    # Accept either a datetime or a formatted time string.
    if isinstance(before, six.string_types):
        before = parse_strtime(before)
    before = before.replace(tzinfo=None)
    return utcnow() - before > datetime.timedelta(seconds=seconds)
|
def is_newer_than(after, seconds):
    """Return True if after is newer than seconds."""
    # Accept either a datetime or a formatted time string.
    if isinstance(after, six.string_types):
        after = parse_strtime(after)
    after = after.replace(tzinfo=None)
    return after - utcnow() > datetime.timedelta(seconds=seconds)
|
def utcnow_ts():
    """Timestamp version of our utcnow function."""
    if utcnow.override_time is not None:
        return calendar.timegm(utcnow().timetuple())
    # NOTE(kgriffs): This is several times faster
    # than going through calendar.timegm(...)
    return int(time.time())
|
def utcnow():
    """Overridable version of utils.utcnow.

    ``utcnow.override_time`` may hold a single datetime or a list of
    datetimes to return in order (used by tests).
    """
    override = utcnow.override_time
    if not override:
        return datetime.datetime.utcnow()
    try:
        # A list: consume and return the next queued value.
        return override.pop(0)
    except AttributeError:
        # A single datetime has no pop().
        return override
|
def advance_time_delta(timedelta):
    """Advance overridden time using a datetime.timedelta.

    Works whether ``utcnow.override_time`` is a single datetime or a list
    of datetimes.
    """
    assert utcnow.override_time is not None
    try:
        # datetimes are immutable: ``dt += timedelta`` inside a plain loop
        # only rebinds the loop variable and never changed the list, so
        # rebuild the list with the shifted values instead.
        utcnow.override_time = [dt + timedelta
                                for dt in utcnow.override_time]
    except TypeError:
        # Not iterable: a single datetime.
        utcnow.override_time += timedelta
|
def marshall_now(now=None):
    """Make an rpc-safe datetime with microseconds.
    Note: tzinfo is stripped, but not required for relative times.
    """
    if not now:
        now = utcnow()
    fields = ('day', 'month', 'year', 'hour', 'minute', 'second',
              'microsecond')
    return {name: getattr(now, name) for name in fields}
|
def unmarshall_time(tyme):
    """Unmarshall a datetime dict."""
    fields = ('year', 'month', 'day', 'hour', 'minute', 'second',
              'microsecond')
    # Pick only the known keys so extra entries in the dict are ignored.
    return datetime.datetime(**{name: tyme[name] for name in fields})
|
def total_seconds(delta):
    """Return the total seconds of datetime.timedelta object.
    datetime.timedelta had no total_seconds() on Python 2.6, so fall back
    to computing it manually.
    """
    try:
        return delta.total_seconds()
    except AttributeError:
        day_seconds = delta.days * 24 * 3600
        return day_seconds + delta.seconds + delta.microseconds / float(10 ** 6)
|
def is_soon(dt, window):
    """Determines if time is going to happen in the next window seconds.
    :params dt: the time
    :params window: minimum seconds to remain to consider the time not soon
    :return: True if expiration is within the given duration
    """
    deadline = utcnow() + datetime.timedelta(seconds=window)
    return normalize_time(dt) <= deadline
|
def download_file_powershell(url, target):
    '''
    Download the file at url to target using Powershell (which will validate
    trust). Raise an exception if the command cannot complete.
    '''
    target = os.path.abspath(target)
    # %r quoting yields valid PowerShell string literals for both arguments.
    download_expr = (
        '(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)'
        % vars()
    )
    subprocess.check_call(['powershell', '-Command', download_expr])
|
def download_file_insecure(url, target):
    '''
    Use Python to download the file, even though it cannot authenticate the
    connection.
    '''
    # Python 3 location first, Python 2 fallback second.
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen
    src = None
    dst = None
    try:
        src = urlopen(url)
        # Read/write all in one block, so we don't create a corrupt file
        # if the download is interrupted.
        payload = src.read()
        dst = open(target, 'wb')
        dst.write(payload)
    finally:
        if src:
            src.close()
        if dst:
            dst.close()
|
def _build_install_args(options):
'''
Build the arguments to 'python setup.py install' on the setuptools package
'''
install_args = []
if options.user_install:
if sys.version_info < (2, 6):
log.warn('--user requires Python 2.6 or later')
raise SystemExit(1)
install_args.append('--user')
return install_args
|
def write(name, value):
    """Temporarily change or set the environment variable during the execution of a function.
    Args:
        name: The name of the environment variable
        value: A value to set for the environment variable
    Returns:
        The function return value.
    """
    def wrapped(func):
        @functools.wraps(func)
        def _decorator(*args, **kwargs):
            existing_env = core.read(name, allow_none=True)
            core.write(name, value)
            try:
                return func(*args, **kwargs)
            finally:
                # Restore the previous value even when func raises, so the
                # change really is temporary as documented (previously an
                # exception left the new value in place).
                core.write(name, existing_env)
        return _decorator
    return wrapped
|
def isset(name):
    """Only execute the function if the variable is set.
    Args:
        name: The name of the environment variable
    Returns:
        The function return value or `None` if the function was skipped.
    """
    def wrapped(func):
        @functools.wraps(func)
        def _decorator(*args, **kwargs):
            if not core.isset(name):
                return None
            return func(*args, **kwargs)
        return _decorator
    return wrapped
|
def bool(name, execute_bool=True, default=None):
    """Only execute the function if the boolean variable is set.
    Args:
        name: The name of the environment variable
        execute_bool: The boolean value to execute the function on
        default: The default value if the environment variable is not set (respects `execute_bool`)
    Returns:
        The function return value or `None` if the function was skipped.
    """
    def wrapped(func):
        @functools.wraps(func)
        def _decorator(*args, **kwargs):
            if core.isset(name):
                # An explicitly-set variable is authoritative: previously a
                # set-but-mismatching variable could still trigger execution
                # through the default branch, contradicting the docstring.
                if core.bool(name) == execute_bool:
                    return func(*args, **kwargs)
            elif default is not None and default == execute_bool:
                return func(*args, **kwargs)
        return _decorator
    return wrapped
|
def read_cell(self, x, y):
    """
    Reads the cell at position x+1 and y+1; return value
    :param x: line index
    :param y: coll index
    :return: {header: value}
    """
    # Header entries may be (name, ...) tuples; use the first element then.
    if isinstance(self.header[y], tuple):
        header = self.header[y][0]
    else:
        header = self.header[y]
    # The sheet API is 1-based while our indexes are 0-based.
    x += 1
    y += 1
    cell_value = self._sheet.cell(x, y).value
    if self.strip:
        # BUG FIX: the strip branch used to return None; strip, write the
        # cleaned value back to the sheet, and still return the mapping.
        cell_value = cell_value.strip()
        self._sheet.cell(x, y).value = cell_value
    return {header: cell_value}
|
def write_cell(self, x, y, value):
    """
    Writing value in the cell of x+1 and y+1 position
    :param x: line index
    :param y: coll index
    :param value: value to be written
    :return:
    """
    # The sheet API is 1-based while our indexes are 0-based.
    self._sheet.update_cell(x + 1, y + 1, value)
|
def _open(self):
"""
Open the file; get sheets
:return:
"""
if not hasattr(self, '_file'):
self._file = self.gc.open(self.name)
self.sheet_names = self._file.worksheets()
|
def _open_sheet(self):
"""
Read the sheet, get value the header, get number columns and rows
:return:
"""
if self.sheet_name and not self.header:
self._sheet = self._file.worksheet(self.sheet_name.title)
self.ncols = self._sheet.col_count
self.nrows = self._sheet.row_count
for i in range(1, self.ncols+1):
self.header = self.header + [self._sheet.cell(1, i).value]
|
def _import(self):
    """
    Import the runtime dependencies, keep references on the instance, and
    log in.
    :return:
    """
    import os.path
    import gspread
    # Stored so other methods can reach them without re-importing.
    self.path = os.path
    self.gspread = gspread
    self._login()
|
def _login(self):
    """
    Login with your Google account
    :return:
    """
    # TODO(dmvieira) login changed to oauth2
    # NOTE(review): gspread.login() was removed from newer gspread releases
    # in favour of oauth2 credentials — confirm the pinned gspread version.
    self.gc = self.gspread.login(self.email, self.password)
|
def flags(self, index: QModelIndex):
    """All fields are selectable; configured fields are also editable."""
    field = self.header[index.column()]
    if self.IS_EDITABLE and field in self.EDITABLE_FIELDS:
        return Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsEditable
    return super().flags(index) | Qt.ItemIsSelectable
|
def sort(self, section: int, order=None):
    """Order is defined by the current state of sorting."""
    attr = self.header[section]
    previous_section, previous_ascending = self.sort_state
    # Clicking the same column flips direction; a new column starts ascending.
    ascending = (not previous_ascending) if section == previous_section else True
    self.beginResetModel()
    self.collection.sort(attr, ascending)
    self.sort_state = (section, ascending)
    self.endResetModel()
|
def remove_line(self, section):
    """Remove row ``section`` by popping it from the collection.

    Base implementation; re-implement to add global behaviour.
    """
    self.beginResetModel()
    self.collection.pop(section)
    self.endResetModel()
|
def _update(self):
    """Emit dataChanged signal on all cells"""
    # NOTE(review): both corners use len(...), i.e. one past the last valid
    # row/column index; Qt convention is createIndex(rowCount-1,
    # columnCount-1) for the bottom-right cell — confirm this is intended.
    self.dataChanged.emit(self.createIndex(0, 0), self.createIndex(
        len(self.collection), len(self.header)))
|
def get_item(self, index):
    """ Acces shortcut
    :param index: Number of row or index of cell
    :return: Dict-like item, or None for an invalid row
    """
    row = index.row() if hasattr(index, "row") else index
    try:
        return self.collection[row]
    except IndexError:
        # Invalid index, for example after a row removal.
        return None
|
def set_collection(self, collection):
    """Reset sort state, set collection and emit resetModel signal."""
    self.beginResetModel()
    self.collection = collection
    # No column sorted yet.
    self.sort_state = (-1, False)
    self.endResetModel()
|
def set_item(self, index, new_item):
    """ Changes item at index in collection. Emit dataChanged signal.
    :param index: Number of row or index of cell
    :param new_item: Dict-like object
    """
    row = index.row() if hasattr(index, "row") else index
    self.collection[row] = new_item
    # Signal the whole changed row: the bottom-right corner needs the last
    # *column* index; the previous code used rowCount(), which is only
    # correct for square models (columns come from self.header).
    self.dataChanged.emit(self.index(row, 0),
                          self.index(row, self.columnCount() - 1))
|
def set_data(self, index, value):
    """Uses given data setter, and emit modelReset signal."""
    acces = self.get_item(index)
    field = self.header[index.column()]
    self.beginResetModel()
    self.set_data_hook(acces, field, value)
    self.endResetModel()
|
def _set_id(self, Id, is_added, index):
"""Update selected_ids and emit dataChanged"""
if is_added:
self.selected_ids.add(Id)
else:
self.selected_ids.remove(Id)
self.dataChanged.emit(index, index)
|
def setData(self, index: QModelIndex, value, role=None):
    """Update selected_ids on click on index cell."""
    # Only checkbox toggles on valid cells are handled here.
    if not index.isValid() or role != Qt.CheckStateRole:
        return False
    checked_id = self.get_item(index).Id
    self._set_id(checked_id, value == Qt.Checked, index)
    return True
|
def set_by_Id(self, Id, is_added):
    """Update selected_ids with given Id."""
    row = self.collection.index_from_id(Id)
    # Unknown Id: nothing to select.
    if row is not None:
        self._set_id(Id, is_added, self.index(row, 0))
|
def _setup_delegate(self):
    """Add resize behavior on edit."""
    delegate = self.DELEGATE_CLASS(self)
    self.setItemDelegate(delegate)

    def _resize_row(index):
        self.resizeRowToContents(index.row())

    delegate.sizeHintChanged.connect(_resize_row)
    if self.RESIZE_COLUMN:
        delegate.sizeHintChanged.connect(
            lambda index: self.resizeColumnToContents(index.column()))
    # After the editor closes, the delegate remembers the edited row.
    delegate.closeEditor.connect(
        lambda editor: self.resizeRowToContents(delegate.row_done_))
|
def _draw_placeholder(self):
    """To be used in QTreeView."""
    if self.model().rowCount() != 0:
        return
    # Empty model: paint the placeholder text centered in the viewport.
    painter = QPainter(self.viewport())
    painter.setFont(_custom_font(is_italic=True))
    painter.drawText(self.rect().adjusted(0, 0, -5, -5),
                     Qt.AlignCenter | Qt.TextWordWrap, self.PLACEHOLDER)
|
def get_current_item(self):
    """Returns (first) selected item or None."""
    selection = self.selectedIndexes()
    if selection:
        return self.model().get_item(selection[0])
    return None
|
def model_from_list(l, header):
    """Return a model with a collection from a list of entry."""
    entries = (PseudoAccesCategorie(n) for n in l)
    return MultiSelectModel(groups.sortableListe(entries), header)
|
def _parse_status_code(response):
    """
    Return error string code if the response is an error, otherwise ``"OK"``
    """
    # A bare string carries the status itself.
    if isinstance(response, string_types):
        return response
    # A one-element list of strings also carries a status.
    if isinstance(response, list) and len(response) == 1 \
            and isinstance(response[0], string_types):
        return response[0]
    # Anything else (a struct of any kind) means success.
    return "OK"
|
def remove_zone_record(self, id, domain, subdomain=None):
    """
    Remove the zone record with the given ID that belongs to the given
    domain and sub domain. If no sub domain is given the wildcard sub-domain
    is assumed.
    """
    # "@" denotes the wildcard/apex sub-domain.
    subdomain = "@" if subdomain is None else subdomain
    _validate_int("id", id)
    self._call("removeZoneRecord", domain, subdomain, id)
|
def parse_module_class(self):
    """Split the fully qualified class name into (module path, class name).

    :raises ValueError: when ``self.class_name`` does not match CLASS_REGEX
    """
    cname = self.class_name
    match = re.match(self.CLASS_REGEX, cname)
    if match is None:
        raise ValueError(f'not a fully qualified class name: {cname}')
    return match.groups()
|
def get_module_class(self):
    """Return the module and class as a tuple of the given class in the
    initializer.

    When ``self.reload`` is set, the module is reloaded before the class
    lookup.
    """
    pkg, cname = self.parse_module_class()
    logger.debug(f'pkg: {pkg}, class: {cname}')
    parts = pkg.split('.')
    # Import the root package, then walk down the dotted path.
    mod = __import__(parts[0])
    for part in parts[1:]:
        mod = getattr(mod, part)
    logger.debug(f'mod: {mod}')
    if self.reload:
        importlib.reload(mod)
    cls = getattr(mod, cname)
    logger.debug(f'class: {cls}')
    return mod, cls
|
def instance(self, *args, **kwargs):
    """Create an instance of the specified class in the initializer.
    :param args: the arguments given to the initializer of the new class
    :param kwargs: the keyword arguments given to the initializer of the
        new class
    """
    # Only the class is needed here; the module is discarded.
    _, cls = self.get_module_class()
    inst = cls(*args, **kwargs)
    logger.debug(f'inst: {inst}')
    return inst
|
def set_log_level(self, level=logging.INFO):
    """Convenience helper: set the log level of the module named in the
    initializer of this class.
    :param level: an instance of ``logging.<level>``
    """
    module_name = self.parse_module_class()[0]
    logging.getLogger(module_name).setLevel(level)
|
def register(cls, instance_class, name=None):
    """Register a class with the factory.
    :param instance_class: the class to register with the factory (not a
        string)
    :param name: the name to use as the key for instance class lookups;
        defaults to the name of the class
    """
    key = instance_class.__name__ if name is None else name
    cls.INSTANCE_CLASSES[key] = instance_class
|
def _find_class(self, class_name):
    "Resolve the class from the name."
    # Module globals first; registered instance classes override them.
    candidates = dict(globals())
    candidates.update(self.INSTANCE_CLASSES)
    logger.debug(f'looking up class: {class_name}')
    cls = candidates[class_name]
    logger.debug(f'found class: {cls}')
    return cls
|
def _class_name_params(self, name):
    "Get the class name and parameters to use for ``__init__``."
    sec = self.pattern.format(**{'name': name})
    logger.debug(f'section: {sec}')
    params = dict(self.config.populate({}, section=sec))
    # The class name is metadata, not an __init__ parameter.
    class_name = params.pop('class_name')
    return class_name, params
|
def _has_init_config(self, cls):
"""Return whether the class has a ``config`` parameter in the ``__init__``
method.
"""
args = inspect.signature(cls.__init__)
return self.config_param_name in args.parameters
|
def _has_init_name(self, cls):
"""Return whether the class has a ``name`` parameter in the ``__init__``
method.
"""
args = inspect.signature(cls.__init__)
return self.name_param_name in args.parameters
|
def _instance(self, cls, *args, **kwargs):
    """Return the instance.
    :param cls: the class to create the instance from
    :param args: given to the ``__init__`` method
    :param kwargs: given to the ``__init__`` method
    """
    logger.debug(f'args: {args}, kwargs: {kwargs}')
    created = cls(*args, **kwargs)
    return created
|
def instance(self, name=None, *args, **kwargs):
    """Create a new instance using key ``name``.
    :param name: the name of the class (by default) or the key name of the
        class used to find the class
    :param args: given to the ``__init__`` method
    :param kwargs: given to the ``__init__`` method
    """
    logger.info(f'new instance of {name}')
    t0 = time()
    name = self.default_name if name is None else name
    logger.debug(f'creating instance of {name}')
    class_name, params = self._class_name_params(name)
    cls = self._find_class(class_name)
    params.update(kwargs)
    if self._has_init_config(cls):
        logger.debug('found config parameter')
        params['config'] = self.config
    if self._has_init_name(cls):
        logger.debug('found name parameter')
        params['name'] = name
    # BUG FIX: ``logger.level >= logging.DEBUG`` was true for almost every
    # configured level (DEBUG is the *lowest* numeric level); use
    # isEnabledFor so the parameter dump only runs when debugging is on.
    if logger.isEnabledFor(logging.DEBUG):
        for k, v in params.items():
            logger.debug(f'populating {k} -> {v} ({type(v)})')
    inst = self._instance(cls, *args, **params)
    logger.info(f'created {name} instance of {cls.__name__} ' +
                f'in {(time() - t0):.2f}s')
    return inst
|
def load(self, name=None, *args, **kwargs):
    "Load the instance of the object from the stash."
    inst = self.stash.load(name)
    if inst is None:
        # Cache miss: build a fresh instance instead.
        inst = self.instance(name, *args, **kwargs)
    logger.debug(f'loaded (conf mng) instance: {inst}')
    return inst
|
def dump(self, name: str, inst):
    """Save the object instance to the stash.

    :param name: the key under which ``inst`` is stored
    :param inst: the object to persist
    """
    self.stash.dump(name, inst)
|
def from_env(cls, default_timeout=DEFAULT_TIMEOUT_SECONDS):
    """Return a client configured from environment variables.
    Essentially copying this:
    https://github.com/docker/docker-py/blob/master/docker/client.py#L43.
    The environment variables looked for are the following:
    .. envvar:: SALTANT_API_URL
        The URL of the saltant API. For example,
        https://shahlabjobs.ca/api/.
    .. envvar:: SALTANT_AUTH_TOKEN
        The registered saltant user's authentication token.
    Example:
        >>> from saltant.client import from_env
        >>> client = from_env()
    Args:
        default_timeout (int, optional): The maximum number of
            seconds to wait for a request to complete. Defaults to
            90 seconds.
    Returns:
        :class:`Client`: A saltant API client object.
    Raises:
        :class:`saltant.exceptions.BadEnvironmentError`: The user
            has an incorrectly configured environment.
    """
    def require_env(var_name):
        # A missing variable is a user environment problem, not a KeyError.
        try:
            return os.environ[var_name]
        except KeyError:
            raise BadEnvironmentError("%s not defined!" % var_name)

    return cls(
        base_api_url=require_env("SALTANT_API_URL"),
        auth_token=require_env("SALTANT_AUTH_TOKEN"),
        default_timeout=default_timeout,
    )
|
def clear_global(self):
    """Clear only any cached global data.
    """
    vname = self.varname
    logger.debug(f'global clearning {vname}')
    module_globals = globals()
    if vname in module_globals:
        logger.debug('removing global instance var: {}'.format(vname))
        del module_globals[vname]
|
def clear(self):
    """Clear the data, and thus, force it to be created on the next fetch. This is
    done by removing the attribute from ``owner``, deleting it from globals
    and removing the file from the disk.
    """
    vname = self.varname
    # 1) remove the on-disk cache
    if self.path.exists():
        logger.debug('deleting cached work: {}'.format(self.path))
        self.path.unlink()
    # 2) remove the instance attribute
    owner = self.owner
    if owner is not None and hasattr(owner, vname):
        logger.debug('removing instance var: {}'.format(vname))
        delattr(owner, vname)
    # 3) remove the cached global
    self.clear_global()
|
def _load_or_create(self, *argv, **kwargs):
"""Invoke the file system operations to get the data, or create work.
If the file does not exist, calling ``__do_work__`` and save it.
"""
if self.path.exists():
self._info('loading work from {}'.format(self.path))
with open(self.path, 'rb') as f:
obj = pickle.load(f)
else:
self._info('saving work to {}'.format(self.path))
with open(self.path, 'wb') as f:
obj = self._do_work(*argv, **kwargs)
pickle.dump(obj, f)
return obj
|
def has_data(self):
    """Return whether or not the stash has any data available or not."""
    if not hasattr(self, '_has_data'):
        # Probe the delegate for at least one key; cache the answer.
        try:
            next(iter(self.delegate.keys()))
        except StopIteration:
            self._has_data = False
        else:
            self._has_data = True
    return self._has_data
|
def _get_instance_path(self, name):
    "Return a path to the pickled data with key ``name``."
    fname = self.pattern.format(**{'name': name})
    logger.debug(f'path {self.create_path}: {self.create_path.exists()}')
    # Make sure the parent directory exists before handing back the path.
    self._create_path_dir()
    return Path(self.create_path, fname)
|
def shelve(self):
    """Return an opened shelve object and mark the stash as open.
    """
    logger.info('creating shelve data')
    fname = str(self.create_path.absolute())
    opened = sh.open(fname, writeback=self.writeback)
    self.is_open = True
    return opened
|
def delete(self, name=None):
    """Delete the shelve data file.

    :param name: unused; kept for interface compatibility
    """
    logger.info('clearing shelve data')
    self.close()
    # Depending on the platform/dbm backend, shelve stores either
    # ``<name>`` or ``<name>.db`` — try both forms.
    for path in Path(self.create_path.parent, self.create_path.name), \
            Path(self.create_path.parent, self.create_path.name + '.db'):
        logger.debug(f'clearing {path} if exists: {path.exists()}')
        if path.exists():
            path.unlink()
            # NOTE(review): stops after the first existing file — if both
            # forms ever coexist the second is left behind; confirm intended.
            break
|
def close(self):
    "Close the shelve object, which is needed for data consistency."
    if self.is_open:
        logger.info('closing shelve data')
        try:
            # NOTE(review): ``self.shelve`` here looks like the *method*
            # defined above (so ``.close`` would raise AttributeError unless
            # a property shadows it elsewhere); ``self._shelve`` is also not
            # assigned anywhere in this file — confirm against the full class.
            self.shelve.close()
            self._shelve.clear()
        except Exception:
            # NOTE(review): is_open is only cleared on failure; on a clean
            # close it stays True — verify this is intentional.
            self.is_open = False
|
def _map(self, data_item):
    "Map ``data_item`` separately in each thread."
    delegate = self.delegate
    logger.debug(f'mapping: {data_item}')
    item_id = data_item.id
    # Only write when clobbering is allowed or the item is new.
    if self.clobber or not self.exists(item_id):
        logger.debug(f'exist: {item_id}: {self.exists(item_id)}')
        delegate.dump(item_id, data_item)
|
def load_all(self, workers=None, limit=None, n_expected=None):
    """Load all instances witih multiple threads.
    :param workers: number of workers to use to load instances, which
                    defaults to what was given in the class initializer
    :param limit: return a maximum, which defaults to no limit
    :param n_expected: rerun the iteration on the data if we didn't find
                       enough data, or more specifically, number of found
                       data points is less than ``n_expected``; defaults to
                       all
    """
    # NOTE(review): if ``has_data`` is the plain method above (not a
    # property), this bare attribute reference is always truthy — confirm
    # a property shadows it in the actual class.
    if not self.has_data:
        self._preempt(True)
        # we did the best we could (avoid repeat later in this method)
        n_expected = 0
    keys = tuple(self.delegate.keys())
    if n_expected is not None and len(keys) < n_expected:
        self._preempt(True)
        keys = self.delegate.keys()
    # BUG FIX: itertools.islice takes (iterable, stop); the arguments were
    # reversed, which raises ValueError for any integer ``limit``.
    keys = it.islice(keys, limit) if limit is not None else keys
    pool = self._create_thread_pool(workers)
    logger.debug(f'workers={workers}, keys: {keys}')
    try:
        return iter(pool.map(self.delegate.load, keys))
    finally:
        pool.close()
|
def _make_persistent(self, model_name, pkg_name):
    """Monkey-patch object persistence (ex: to/from database) into a
    bravado-core model class

    :param model_name: name of the bravado-core model to patch
    :param pkg_name: dotted path of the class providing the static
        ``load_from_db`` and ``save_to_db`` hooks
    :raises PyMacaronCoreException: when either hook is missing
    """
    #
    # WARNING: ugly piece of monkey-patching below. Hopefully will replace
    # with native bravado-core code in the future...
    #
    # Load class at path pkg_name
    c = get_function(pkg_name)
    # Both static persistence hooks must exist on the target class.
    for name in ('load_from_db', 'save_to_db'):
        if not hasattr(c, name):
            raise PyMacaronCoreException("Class %s has no static method '%s'" % (pkg_name, name))
    log.info("Making %s persistent via %s" % (model_name, pkg_name))
    # Replace model generator with one that adds 'save_to_db' to every instance
    model = getattr(self.model, model_name)
    n = self._wrap_bravado_model_generator(model, c.save_to_db, pkg_name)
    setattr(self.model, model_name, n)
    # Add class method load_from_db to model generator
    model = getattr(self.model, model_name)
    setattr(model, 'load_from_db', c.load_from_db)
|
def spawn_api(self, app, decorator=None):
    """Auto-generate server endpoints implementing the API into this Flask app."""
    if decorator:
        assert type(decorator).__name__ == 'function'
    self.is_server = True
    self.app = app
    if self.local:
        # Re-generate client callers, this time as local and passing them the app
        self._generate_client_callers(app)
    return spawn_server_api(self.name, app, self.api_spec,
                            self.error_callback, decorator)
|
def json_to_model(self, model_name, j, validate=False):
    """Convert a json struct into an instance of the named model."""
    spec = self.api_spec
    if validate:
        spec.validate(model_name, j)
    return spec.json_to_model(model_name, j)
|
def assemble(
    iterable, patterns=None, minimum_items=2, case_sensitive=True,
    assume_padded_when_ambiguous=False
):
    '''Assemble items in *iterable* into discreet collections.
    *patterns* may be specified as a list of regular expressions to limit
    the returned collection possibilities. Use this when interested in
    collections that only match specific patterns. Each pattern must contain
    the expression from :py:data:`DIGITS_PATTERN` exactly once.
    A selection of common expressions are available in :py:data:`PATTERNS`.
    .. note::
        If a pattern is supplied as a string it will be automatically compiled
        to a :py:class:`re.RegexObject` instance for convenience.
    When *patterns* is not specified, collections are formed by examining all
    possible groupings of the items in *iterable* based around common numerical
    components.
    *minimum_items* dictates the minimum number of items a collection must have
    in order to be included in the result. The default is 2, filtering out
    single item collections.
    If *case_sensitive* is False, then items will be treated as part of the same
    collection when they only differ in casing. To avoid ambiguity, the
    resulting collection will always be lowercase. For example, "item.0001.dpx"
    and "Item.0002.dpx" would be part of the same collection, "item.%04d.dpx".
    .. note::
        Any compiled *patterns* will also respect the set case sensitivity.
    For certain collections it may be ambiguous whether they are padded or not.
    For example, 1000-1010 can be considered either an unpadded collection or a
    four padded collection. By default, Clique is conservative and assumes that
    the collection is unpadded. To change this behaviour, set
    *assume_padded_when_ambiguous* to True and any ambiguous collection will have
    a relevant padding set.
    .. note::
        *assume_padded_when_ambiguous* has no effect on collections that are
        unambiguous. For example, 1-100 will always be considered unpadded
        regardless of the *assume_padded_when_ambiguous* setting.
    Return tuple of two lists (collections, remainder) where 'collections' is a
    list of assembled :py:class:`~clique.collection.Collection` instances and
    'remainder' is a list of items that did not belong to any collection.
    '''
    collection_map = defaultdict(set)
    collections = []
    remainder = []
    # Compile patterns.
    flags = 0
    if not case_sensitive:
        flags |= re.IGNORECASE
    compiled_patterns = []
    if patterns is not None:
        # An explicitly empty pattern list means "no collections at all".
        if not patterns:
            return collections, list(iterable)
        for pattern in patterns:
            # NOTE(review): ``basestring`` is a Python 2 name — presumably
            # aliased for Python 3 elsewhere in this module; confirm.
            if isinstance(pattern, basestring):
                compiled_patterns.append(re.compile(pattern, flags=flags))
            else:
                compiled_patterns.append(pattern)
    else:
        compiled_patterns.append(re.compile(DIGITS_PATTERN, flags=flags))
    # Process iterable.
    for item in iterable:
        matched = False
        for pattern in compiled_patterns:
            for match in pattern.finditer(item):
                index = match.group('index')
                head = item[:match.start('index')]
                tail = item[match.end('index'):]
                if not case_sensitive:
                    head = head.lower()
                    tail = tail.lower()
                padding = match.group('padding')
                if padding:
                    padding = len(index)
                else:
                    padding = 0
                # Items sharing head, tail and padding belong to the same
                # candidate collection; record the numeric index.
                key = (head, tail, padding)
                collection_map[key].add(int(index))
                matched = True
        if not matched:
            remainder.append(item)
    # Form collections.
    merge_candidates = []
    for (head, tail, padding), indexes in collection_map.items():
        collection = Collection(head, tail, padding, indexes)
        collections.append(collection)
        # Unpadded collections may later merge into padded ones.
        if collection.padding == 0:
            merge_candidates.append(collection)
    # Merge together collections that align on padding boundaries. For example,
    # 0998-0999 and 1000-1001 can be merged into 0998-1001. Note that only
    # indexes within the padding width limit are merged. If a collection is
    # entirely merged into another then it will not be included as a separate
    # collection in the results.
    fully_merged = []
    for collection in collections:
        if collection.padding == 0:
            continue
        for candidate in merge_candidates:
            if (
                candidate.head == collection.head and
                candidate.tail == collection.tail
            ):
                merged_index_count = 0
                for index in candidate.indexes:
                    # Only indexes whose width equals the padding fit.
                    if len(str(abs(index))) == collection.padding:
                        collection.indexes.add(index)
                        merged_index_count += 1
                if merged_index_count == len(candidate.indexes):
                    fully_merged.append(candidate)
    # Filter out fully merged collections.
    collections = [collection for collection in collections
                   if collection not in fully_merged]
    # Filter out collections that do not have at least as many indexes as
    # minimum_items. In addition, add any members of a filtered collection,
    # which are not members of an unfiltered collection, to the remainder.
    filtered = []
    remainder_candidates = []
    for collection in collections:
        if len(collection.indexes) >= minimum_items:
            filtered.append(collection)
        else:
            for member in collection:
                remainder_candidates.append(member)
    for candidate in remainder_candidates:
        # Check if candidate has already been added to remainder to avoid
        # duplicate entries.
        if candidate in remainder:
            continue
        has_membership = False
        for collection in filtered:
            if candidate in collection:
                has_membership = True
                break
        if not has_membership:
            remainder.append(candidate)
    # Set padding for all ambiguous collections according to the
    # assume_padded_when_ambiguous setting.
    if assume_padded_when_ambiguous:
        for collection in filtered:
            if (
                not collection.padding and collection.indexes
            ):
                # Ambiguous when first and last index have the same width.
                indexes = list(collection.indexes)
                first_index_width = len(str(indexes[0]))
                last_index_width = len(str(indexes[-1]))
                if first_index_width == last_index_width:
                    collection.padding = first_index_width
    return filtered, remainder
|
def parse(value, pattern='{head}{padding}{tail} [{ranges}]'):
    '''Parse *value* into a :py:class:`~clique.collection.Collection`.
    Use *pattern* to extract information from *value*. It may make use of the
    following keys:
    * *head* - Common leading part of the collection.
    * *tail* - Common trailing part of the collection.
    * *padding* - Padding value in ``%0d`` format.
    * *range* - Total range in the form ``start-end``.
    * *ranges* - Comma separated ranges of indexes.
    * *holes* - Comma separated ranges of missing indexes.
    .. note::
        *holes* only makes sense if *range* or *ranges* is also present.
    '''
    # Construct regular expression for given pattern.
    expressions = {
        'head': '(?P<head>.*)',
        'tail': '(?P<tail>.*)',
        'padding': r'%(?P<padding>\d*)d',
        'range': r'(?P<range>\d+-\d+)?',
        'ranges': r'(?P<ranges>[\d ,\-]+)?',
        'holes': r'(?P<holes>[\d ,\-]+)'
    }
    pattern_regex = re.escape(pattern)
    for key, expression in expressions.items():
        # BUG FIX: the replace target must be escaped exactly the way
        # re.escape escaped the pattern. Since Python 3.7 re.escape no
        # longer escapes '{' or '}', so a hard-coded '\{key\}' literal
        # never matched; escape the placeholder the same way instead.
        pattern_regex = pattern_regex.replace(
            re.escape('{{{0}}}'.format(key)),
            expression
        )
    pattern_regex = '^{0}$'.format(pattern_regex)
    # Match pattern against value and use results to construct collection.
    match = re.search(pattern_regex, value)
    if match is None:
        raise ValueError('Value did not match pattern.')
    groups = match.groupdict()
    if 'padding' in groups and groups['padding']:
        groups['padding'] = int(groups['padding'])
    else:
        groups['padding'] = 0
    # Create collection and then add indexes.
    collection = Collection(
        groups.get('head', ''),
        groups.get('tail', ''),
        groups['padding']
    )
    if groups.get('range', None) is not None:
        start, end = map(int, groups['range'].split('-'))
        collection.indexes.update(range(start, end + 1))
    if groups.get('ranges', None) is not None:
        parts = [part.strip() for part in groups['ranges'].split(',')]
        for part in parts:
            index_range = list(map(int, part.split('-', 2)))
            if len(index_range) > 1:
                # Index range.
                for index in range(index_range[0], index_range[1] + 1):
                    collection.indexes.add(index)
            else:
                # Single index.
                collection.indexes.add(index_range[0])
    if 'holes' in groups:
        parts = [part.strip() for part in groups['holes'].split(',')]
        for part in parts:
            # BUG FIX: wrap map() in list() — on Python 3 a map object has
            # no len(), so every 'holes' pattern raised TypeError (the
            # 'ranges' branch above already did this correctly).
            index_range = list(map(int, part.split('-', 2)))
            if len(index_range) > 1:
                # Index range.
                for index in range(index_range[0], index_range[1] + 1):
                    collection.indexes.remove(index)
            else:
                # Single index.
                collection.indexes.remove(index_range[0])
    return collection
|
def add(self, item):
    '''Add *item*, keeping the member list sorted.

    Duplicate items are ignored so that members remain unique.
    '''
    # ``item not in self`` is the idiomatic membership test
    # (was ``not item in self``).
    if item not in self:
        index = bisect.bisect_right(self._members, item)
        self._members.insert(index, item)
|
def discard(self, item):
    '''Remove *item* if present; silently do nothing otherwise.'''
    position = self._index(item)
    if position < 0:
        return
    del self._members[position]
|
def _index(self, item):
'''Return index of *item* in member list or -1 if not present.'''
index = bisect.bisect_left(self._members, item)
if index != len(self) and self._members[index] == item:
return index
return -1
|
def _update_expression(self):
'''Update internal expression.'''
self._expression = re.compile(
'^{0}(?P<index>(?P<padding>0*)\d+?){1}$'
.format(re.escape(self.head), re.escape(self.tail))
)
|
def match(self, item):
    '''Return match data when *item* fits this collection's expression.

    If a match is successful return data about the match otherwise return
    None.
    '''
    candidate = self._expression.match(item)
    if candidate is None:
        return None

    index = candidate.group('index')
    padded = bool(candidate.group('padding'))

    # An unpadded collection rejects zero-padded items; a padded one
    # requires the index to be exactly the padding width.
    if self.padding == 0 and padded:
        return None
    if self.padding != 0 and len(index) != self.padding:
        return None

    return candidate
|
def add(self, item):
    '''Add *item* to collection.

    raise :py:class:`~clique.error.CollectionError` if *item* cannot be
    added to the collection.
    '''
    matched = self.match(item)
    if matched is not None:
        self.indexes.add(int(matched.group('index')))
        return
    raise clique.error.CollectionError(
        'Item does not match collection expression.'
    )
|
def remove(self, item):
    '''Remove *item* from collection.

    raise :py:class:`~clique.error.CollectionError` if *item* cannot be
    removed from the collection.
    '''
    matched = self.match(item)
    if matched is None:
        raise clique.error.CollectionError(
            'Item not present in collection.'
        )
    try:
        self.indexes.remove(int(matched.group('index')))
    except KeyError:
        # The item matched the expression but its index is not a member.
        raise clique.error.CollectionError(
            'Item not present in collection.'
        )
|
def format(self, pattern='{head}{padding}{tail} [{ranges}]'):
    '''Return string representation as specified by *pattern*.

    Pattern can be any format accepted by Python's standard format function
    and will receive the following keyword arguments as context:

    * *head* - Common leading part of the collection.
    * *tail* - Common trailing part of the collection.
    * *padding* - Padding value in ``%0d`` format.
    * *range* - Total range in the form ``start-end``
    * *ranges* - Comma separated ranges of indexes.
    * *holes* - Comma separated ranges of missing indexes.
    '''
    context = {
        'head': self.head,
        'tail': self.tail,
        'padding': '%0{0}d'.format(self.padding) if self.padding else '%d',
    }

    # Expensive context entries are only computed when the pattern
    # actually references them.
    if '{holes}' in pattern:
        context['holes'] = self.holes().format('{ranges}')

    if '{range}' in pattern or '{ranges}' in pattern:
        indexes = list(self.indexes)
        if not indexes:
            context['range'] = ''
        elif len(indexes) == 1:
            context['range'] = '{0}'.format(indexes[0])
        else:
            context['range'] = '{0}-{1}'.format(indexes[0], indexes[-1])

    if '{ranges}' in pattern:
        separated = self.separate()
        if len(separated) > 1:
            ranges = [group.format('{range}') for group in separated]
        else:
            ranges = [context['range']]
        context['ranges'] = ', '.join(ranges)

    return pattern.format(**context)
|
def is_contiguous(self):
    '''Return whether entire collection is contiguous.'''
    # Contiguous means every consecutive pair of indexes differs by one.
    ordered = list(self.indexes)
    return all(
        later == earlier + 1
        for earlier, later in zip(ordered, ordered[1:])
    )
|
def holes(self):
    '''Return holes in collection.

    Return :py:class:`~clique.collection.Collection` of missing indexes.
    '''
    ordered = list(self.indexes)
    missing = set()
    # Any gap between consecutive indexes contributes the indexes between
    # them (exclusive of both endpoints' own values).
    for earlier, later in zip(ordered, ordered[1:]):
        if later != earlier + 1:
            missing.update(range(earlier + 1, later))
    return Collection(self.head, self.tail, self.padding, indexes=missing)
|
def is_compatible(self, collection):
    '''Return whether *collection* is compatible with this collection.

    To be compatible *collection* must have the same head, tail and padding
    properties as this collection.
    '''
    # All checks are evaluated eagerly (as in a list) rather than
    # short-circuited, preserving the original attribute-access behavior.
    checks = [
        isinstance(collection, Collection),
        collection.head == self.head,
        collection.tail == self.tail,
        collection.padding == self.padding,
    ]
    return all(checks)
|
def merge(self, collection):
    '''Merge *collection* into this collection.

    If the *collection* is compatible with this collection then update
    indexes with all indexes in *collection*.

    raise :py:class:`~clique.error.CollectionError` if *collection* is not
    compatible with this collection.
    '''
    compatible = self.is_compatible(collection)
    if not compatible:
        raise clique.error.CollectionError(
            'Collection is not compatible with this collection.'
        )
    self.indexes.update(collection.indexes)
|
def separate(self):
    '''Return contiguous parts of collection as separate collections.

    Return as list of :py:class:`~clique.collection.Collection` instances.
    '''
    collections = []
    # ``start``/``end`` track the contiguous run currently being built.
    start = None
    end = None
    for index in self.indexes:
        if start is None:
            # First index seen: open the initial run.
            start = index
            end = start
            continue
        if index != (end + 1):
            # Gap found: close the current run as its own collection and
            # open a new run beginning at this index.
            collections.append(
                Collection(self.head, self.tail, self.padding,
                           indexes=set(range(start, end + 1)))
            )
            start = index
        end = index
    if start is None:
        # No indexes at all: still return a single, empty collection.
        collections.append(
            Collection(self.head, self.tail, self.padding)
        )
    else:
        # Close the final run.
        collections.append(
            Collection(self.head, self.tail, self.padding,
                       indexes=range(start, end + 1))
        )
    return collections
|
def format_check(settings):
    """
    Check the format of an osmnet_config object.

    Parameters
    ----------
    settings : dict
        osmnet_config as a dictionary

    Returns
    -------
    Nothing

    Raises
    ------
    AssertionError
        If a key is unknown or a value has the wrong type.
    """
    valid_keys = ['logs_folder', 'log_file', 'log_console', 'log_name',
                  'log_filename', 'keep_osm_tags']
    for key in list(settings.keys()):
        assert key in valid_keys, \
            ('{} not found in list of valid configuation keys').format(key)
        assert isinstance(key, str), ('{} must be a string').format(key)
        if key == 'keep_osm_tags':
            assert isinstance(settings[key], list), \
                ('{} must be a list').format(key)
            # Each OSM tag must itself be a string. The previous check
            # iterated the *characters* of each tag (always strings), so
            # it could never fail and raised TypeError for non-iterables;
            # validate the tag objects directly instead.
            for value in settings[key]:
                assert isinstance(value, str), \
                    'all elements must be a string'
        if key == 'log_file' or key == 'log_console':
            assert isinstance(settings[key], bool), \
                ('{} must be boolean').format(key)
|
def to_dict(self):
    """
    Return a dict representation of an osmnet osmnet_config instance.
    """
    keys = ('logs_folder', 'log_file', 'log_console', 'log_name',
            'log_filename', 'keep_osm_tags')
    return {key: getattr(self, key) for key in keys}
|
def great_circle_dist(lat1, lon1, lat2, lon2):
    """
    Get the distance (in meters) between two lat/lon points
    via the Haversine formula.

    Parameters
    ----------
    lat1, lon1, lat2, lon2 : float
        Latitude and longitude in degrees.

    Returns
    -------
    dist : float
        Distance in meters.
    """
    # Mean Earth radius in meters.
    radius = 6372795
    # Work entirely in radians.
    phi1, lam1, phi2, lam2 = (
        math.radians(value) for value in (lat1, lon1, lat2, lon2)
    )
    dphi = phi2 - phi1
    dlam = lam2 - lam1
    # Haversine formula, see:
    # http://en.wikipedia.org/wiki/Haversine_formula#The_haversine_formula
    hav = (
        math.pow(math.sin(dphi / 2), 2)
        + math.cos(phi1) * math.cos(phi2) * math.pow(math.sin(dlam / 2), 2)
    )
    return 2 * radius * math.asin(math.sqrt(hav))
|
def osm_filter(network_type):
    """
    Create a filter to query Overpass API for the specified OSM network type.

    Parameters
    ----------
    network_type : string, {'walk', 'drive'} denoting the type of street
        network to extract

    Returns
    -------
    osm_filter : string
    """
    # drive: select only roads that are drivable by normal 2 wheel drive
    # passenger vehicles both private and public roads. Filter out
    # un-drivable roads and service roads tagged as parking, driveway,
    # or emergency-access.
    drive_filter = ('["highway"!~"cycleway|footway|path|pedestrian|steps'
                    '|track|proposed|construction|bridleway|abandoned'
                    '|platform|raceway|service"]'
                    '["motor_vehicle"!~"no"]["motorcar"!~"no"]'
                    '["service"!~"parking|parking_aisle|driveway'
                    '|emergency_access"]')
    # walk: select only roads and pathways that allow pedestrian access
    # both private and public pathways and roads. Filter out limited
    # access roadways and allow service roads.
    walk_filter = ('["highway"!~"motor|proposed|construction|abandoned'
                   '|platform|raceway"]["foot"!~"no"]'
                   '["pedestrians"!~"no"]')
    filters = {'drive': drive_filter, 'walk': walk_filter}
    if network_type not in filters:
        raise ValueError('unknown network_type "{}"'.format(network_type))
    return filters[network_type]
|
def osm_net_download(lat_min=None, lng_min=None, lat_max=None, lng_max=None,
                     network_type='walk', timeout=180, memory=None,
                     max_query_area_size=50*1000*50*1000,
                     custom_osm_filter=None):
    """
    Download OSM ways and nodes within a bounding box from the Overpass API.

    Parameters
    ----------
    lat_min : float
        southern latitude of bounding box
    lng_min : float
        eastern longitude of bounding box
    lat_max : float
        northern latitude of bounding box
    lng_max : float
        western longitude of bounding box
    network_type : string
        Specify the network type where value of 'walk' includes roadways
        where pedestrians are allowed and pedestrian
        pathways and 'drive' includes driveable roadways.
    timeout : int
        the timeout interval for requests and to pass to Overpass API
    memory : int
        server memory allocation size for the query, in bytes. If none,
        server will use its default allocation size
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        Overpass API (default is 50,000 * 50,000 units (ie, 50km x 50km in
        area, if units are meters))
    custom_osm_filter : string, optional
        specify custom arguments for the way["highway"] query to OSM. Must
        follow Overpass API schema. For
        example to request highway ways that are service roads use:
        '["highway"="service"]'

    Returns
    -------
    response_json : dict
    """
    # create a filter to exclude certain kinds of ways based on the requested
    # network_type
    if custom_osm_filter is None:
        request_filter = osm_filter(network_type)
    else:
        request_filter = custom_osm_filter

    response_jsons_list = []
    response_jsons = []

    # server memory allocation in bytes formatted for Overpass API query
    if memory is None:
        maxsize = ''
    else:
        maxsize = '[maxsize:{}]'.format(memory)

    # define the Overpass API query
    # way["highway"] denotes ways with highway keys and {filters} returns
    # ways with the requested key/value. the '>' makes it recurse so we get
    # ways and way nodes. maxsize is in bytes.

    # turn bbox into a polygon and project to local UTM
    polygon = Polygon([(lng_max, lat_min), (lng_min, lat_min),
                       (lng_min, lat_max), (lng_max, lat_max)])
    geometry_proj, crs_proj = project_geometry(polygon,
                                               crs={'init': 'epsg:4326'})

    # subdivide the bbox area poly if it exceeds the max area size
    # (in meters), then project back to WGS84
    geometry_proj_consolidated_subdivided = consolidate_subdivide_geometry(
        geometry_proj, max_query_area_size=max_query_area_size)
    geometry, crs = project_geometry(geometry_proj_consolidated_subdivided,
                                     crs=crs_proj, to_latlong=True)
    log('Requesting network data within bounding box from Overpass API '
        'in {:,} request(s)'.format(len(geometry)))
    start_time = time.time()

    # loop through each polygon in the geometry, issuing one Overpass
    # request per sub-polygon
    for poly in geometry:
        # represent bbox as lng_max, lat_min, lng_min, lat_max and round
        # lat-longs to 8 decimal places to create
        # consistent URL strings
        # NOTE(review): shapely ``bounds`` is (minx, miny, maxx, maxy), so
        # ``lng_max`` receives the *minimum* longitude here. The swapped
        # naming is consistent with how the query template consumes the
        # values, but verify before renaming anything.
        lng_max, lat_min, lng_min, lat_max = poly.bounds
        query_template = '[out:json][timeout:{timeout}]{maxsize};' \
                         '(way["highway"]' \
                         '{filters}({lat_min:.8f},{lng_max:.8f},' \
                         '{lat_max:.8f},{lng_min:.8f});>;);out;'
        query_str = query_template.format(lat_max=lat_max, lat_min=lat_min,
                                          lng_min=lng_min, lng_max=lng_max,
                                          filters=request_filter,
                                          timeout=timeout, maxsize=maxsize)
        response_json = overpass_request(data={'data': query_str},
                                         timeout=timeout)
        response_jsons_list.append(response_json)
    log('Downloaded OSM network data within bounding box from Overpass '
        'API in {:,} request(s) and'
        ' {:,.2f} seconds'.format(len(geometry), time.time()-start_time))

    # stitch together individual json results
    for json in response_jsons_list:
        try:
            response_jsons.extend(json['elements'])
        except KeyError:
            # responses without an 'elements' key contribute nothing
            pass

    # remove duplicate records resulting from the json stitching
    start_time = time.time()
    record_count = len(response_jsons)

    if record_count == 0:
        # NOTE(review): ``query_str`` here is the query from the *last*
        # loop iteration only (and would be unbound if ``geometry`` were
        # empty).
        raise Exception('Query resulted in no data. Check your query '
                        'parameters: {}'.format(query_str))
    else:
        # de-duplicate nodes and ways separately on their OSM id, keeping
        # the first occurrence of each
        response_jsons_df = pd.DataFrame.from_records(response_jsons,
                                                      index='id')
        nodes = response_jsons_df[response_jsons_df['type'] == 'node']
        nodes = nodes[~nodes.index.duplicated(keep='first')]
        ways = response_jsons_df[response_jsons_df['type'] == 'way']
        ways = ways[~ways.index.duplicated(keep='first')]
        response_jsons_df = pd.concat([nodes, ways], axis=0)
        response_jsons_df.reset_index(inplace=True)
        response_jsons = response_jsons_df.to_dict(orient='records')

    if record_count - len(response_jsons) > 0:
        log('{:,} duplicate records removed. Took {:,.2f} seconds'.format(
            record_count - len(response_jsons), time.time() - start_time))

    return {'elements': response_jsons}
|
def overpass_request(data, pause_duration=None, timeout=180,
                     error_pause_duration=None):
    """
    Send a request to the Overpass API via HTTP POST and return the
    JSON response

    Parameters
    ----------
    data : dict or OrderedDict
        key-value pairs of parameters to post to Overpass API
    pause_duration : int
        how long to pause in seconds before requests, if None, will query
        Overpass API status endpoint
        to find when next slot is available
    timeout : int
        the timeout interval for the requests library
    error_pause_duration : int
        how long to pause in seconds before re-trying requests if error

    Returns
    -------
    response_json : dict
    """
    # define the Overpass API URL, then construct a GET-style URL
    url = 'http://www.overpass-api.de/api/interpreter'

    start_time = time.time()
    log('Posting to {} with timeout={}, "{}"'.format(url, timeout, data))
    response = requests.post(url, data=data, timeout=timeout)

    # get the response size and the domain, log result
    size_kb = len(response.content) / 1000.
    domain = re.findall(r'//(?s)(.*?)/', url)[0]
    log('Downloaded {:,.1f}KB from {} in {:,.2f} seconds'
        .format(size_kb, domain, time.time()-start_time))

    try:
        response_json = response.json()
        if 'remark' in response_json:
            # Bug fix: ``level=lg.WARNING`` was previously passed inside
            # the ``str.format`` call (where unused keyword arguments are
            # silently ignored) instead of to ``log``, so the remark was
            # never logged at warning level.
            log('Server remark: "{}"'.format(response_json['remark']),
                level=lg.WARNING)
    except Exception:
        # 429 = 'too many requests' and 504 = 'gateway timeout' from server
        # overload. handle these errors by recursively
        # calling overpass_request until a valid response is achieved
        if response.status_code in [429, 504]:
            # pause for error_pause_duration seconds before re-trying request
            if error_pause_duration is None:
                error_pause_duration = get_pause_duration()
            log('Server at {} returned status code {} and no JSON data. '
                'Re-trying request in {:.2f} seconds.'
                .format(domain, response.status_code, error_pause_duration),
                level=lg.WARNING)
            time.sleep(error_pause_duration)
            response_json = overpass_request(data=data,
                                             pause_duration=pause_duration,
                                             timeout=timeout)

        # else, this was an unhandled status_code, throw an exception
        else:
            log('Server at {} returned status code {} and no JSON data'
                .format(domain, response.status_code), level=lg.ERROR)
            raise Exception('Server returned no JSON data.\n{} {}\n{}'
                            .format(response, response.reason,
                                    response.text))

    return response_json
|
def get_pause_duration(recursive_delay=5, default_duration=10):
    """
    Check the Overpass API status endpoint to determine how long to wait until
    next slot is available.

    Parameters
    ----------
    recursive_delay : int
        how long to wait between recursive calls if server is currently
        running a query
    default_duration : int
        if fatal error, function falls back on returning this value

    Returns
    -------
    pause_duration : int
    """
    try:
        response = requests.get('http://overpass-api.de/api/status')
        # assumes the fourth line of the status page describes slot
        # availability -- TODO confirm against current API output format
        status = response.text.split('\n')[3]
        status_first_token = status.split(' ')[0]
    except Exception:
        # if status endpoint cannot be reached or output parsed, log error
        # and return default duration
        log('Unable to query http://overpass-api.de/api/status',
            level=lg.ERROR)
        return default_duration

    try:
        # if first token is numeric, it indicates the number of slots
        # available - no wait required. ``available_slots`` itself is
        # unused; the int() conversion doubles as the numeric test.
        available_slots = int(status_first_token)
        pause_duration = 0
    except Exception:
        # if first token is 'Slot', it tells you when your slot will be free
        if status_first_token == 'Slot':
            utc_time_str = status.split(' ')[3]
            utc_time = date_parser.parse(utc_time_str).replace(tzinfo=None)
            # wait until the indicated UTC time, but never less than 1s
            pause_duration = math.ceil(
                (utc_time - dt.datetime.utcnow()).total_seconds())
            pause_duration = max(pause_duration, 1)

        # if first token is 'Currently', it is currently running a query so
        # check back in recursive_delay seconds
        elif status_first_token == 'Currently':
            time.sleep(recursive_delay)
            pause_duration = get_pause_duration()

        else:
            # any other status is unrecognized - log an error and return
            # default duration
            log('Unrecognized server status: "{}"'.format(status),
                level=lg.ERROR)
            return default_duration

    return pause_duration
|
def consolidate_subdivide_geometry(geometry, max_query_area_size):
    """
    Consolidate a geometry into a convex hull, then subdivide it into
    smaller sub-polygons if its area exceeds max size (in geometry's units).

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to consolidate and subdivide
    max_query_area_size : float
        max area for any part of the geometry, in the units the geometry is
        in: any polygon bigger will get divided up for multiple queries to
        the Overpass API (default is 50,000 * 50,000 units
        (ie, 50km x 50km in area, if units are meters))

    Returns
    -------
    geometry : Polygon or MultiPolygon
    """
    # let the linear length of the quadrats (with which to subdivide the
    # geometry) be the square root of max area size
    quadrat_width = math.sqrt(max_query_area_size)

    if not isinstance(geometry, (Polygon, MultiPolygon)):
        raise ValueError('Geometry must be a shapely Polygon or MultiPolygon')

    # if geometry is a MultiPolygon OR a single Polygon whose area exceeds
    # the max size, get the convex hull around the geometry
    if isinstance(
            geometry, MultiPolygon) or \
            (isinstance(
                geometry, Polygon) and geometry.area > max_query_area_size):
        geometry = geometry.convex_hull

    # if geometry area exceeds max size, subdivide it into smaller sub-polygons
    if geometry.area > max_query_area_size:
        geometry = quadrat_cut_geometry(geometry, quadrat_width=quadrat_width)

    # normalise the return type so callers can always iterate the result
    if isinstance(geometry, Polygon):
        geometry = MultiPolygon([geometry])

    return geometry
|
def quadrat_cut_geometry(geometry, quadrat_width, min_num=3,
                         buffer_amount=1e-9):
    """
    Split a Polygon or MultiPolygon up into sub-polygons of a specified size,
    using quadrats.

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to split up into smaller sub-polygons
    quadrat_width : float
        the linear width of the quadrats with which to cut up the geometry
        (in the units the geometry is in)
    min_num : float
        the minimum number of linear quadrat lines (e.g., min_num=3 would
        produce a quadrat grid of 4 squares)
    buffer_amount : float
        buffer the quadrat grid lines by quadrat_width times buffer_amount

    Returns
    -------
    multipoly : shapely MultiPolygon
    """
    # create n evenly spaced points between the min and max x and y bounds
    # NOTE(review): shapely ``bounds`` is (minx, miny, maxx, maxy), so
    # ``lng_max`` here actually holds the *minimum* longitude and
    # ``lng_min`` the maximum. The arithmetic below is consistent with
    # this swapped naming, but the names are misleading -- verify before
    # renaming.
    lng_max, lat_min, lng_min, lat_max = geometry.bounds
    x_num = math.ceil((lng_min-lng_max) / quadrat_width) + 1
    y_num = math.ceil((lat_max-lat_min) / quadrat_width) + 1
    x_points = np.linspace(lng_max, lng_min, num=max(x_num, min_num))
    y_points = np.linspace(lat_min, lat_max, num=max(y_num, min_num))

    # create a quadrat grid of lines at each of the evenly spaced points
    vertical_lines = [LineString([(x, y_points[0]), (x, y_points[-1])])
                      for x in x_points]
    horizont_lines = [LineString([(x_points[0], y), (x_points[-1], y)])
                      for y in y_points]
    lines = vertical_lines + horizont_lines

    # buffer each line to distance of the quadrat width divided by 1 billion,
    # take their union, then cut geometry into pieces by these quadrats
    buffer_size = quadrat_width * buffer_amount
    lines_buffered = [line.buffer(buffer_size) for line in lines]
    quadrats = unary_union(lines_buffered)
    multipoly = geometry.difference(quadrats)

    return multipoly
|
def project_geometry(geometry, crs, to_latlong=False):
    """
    Project a shapely Polygon or MultiPolygon from WGS84 to UTM, or vice-versa

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to project
    crs : int
        the starting coordinate reference system of the passed-in geometry
    to_latlong : bool
        if True, project from crs to WGS84, if False, project
        from crs to local UTM zone

    Returns
    -------
    geometry_proj, crs : tuple (projected shapely geometry, crs of the
        projected geometry)
    """
    # wrap the bare geometry in a single-row GeoDataFrame so that the
    # project_gdf helper can perform the CRS transformation
    gdf = gpd.GeoDataFrame()
    gdf.crs = crs
    gdf.name = 'geometry to project'
    gdf['geometry'] = None
    gdf.loc[0, 'geometry'] = geometry
    # delegate the actual projection, then unwrap the single geometry
    gdf_proj = project_gdf(gdf, to_latlong=to_latlong)
    geometry_proj = gdf_proj['geometry'].iloc[0]
    return geometry_proj, gdf_proj.crs
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.