_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
def cancel(batch_fn, cancel_fn, ops):
    """Cancel operations.

    Args:
      batch_fn: API-specific batch function.
      cancel_fn: API-specific cancel function.
      ops: A list of operations to cancel.

    Returns:
      A list of operations canceled and a list of error messages.
    """
    # Canceling many operations one-by-one can be slow.
    # The Pipelines API doesn't directly support a list of operations to
    # cancel, but the requests can be performed in batch.
    batch_size = 256
    canceled = []
    errors = []
    for start in range(0, len(ops), batch_size):
        chunk = ops[start:start + batch_size]
        chunk_canceled, chunk_errors = _cancel_batch(batch_fn, cancel_fn, chunk)
        canceled.extend(chunk_canceled)
        errors.extend(chunk_errors)
    return canceled, errors
"resource": ""
} |
def retry_api_check(exception):
    """Return True if we should retry. False otherwise.

    Args:
      exception: An exception to test for transience.

    Returns:
      True if we should retry. False otherwise.
    """
    retryable = False
    if isinstance(exception, apiclient.errors.HttpError):
        retryable = exception.resp.status in TRANSIENT_HTTP_ERROR_CODES
    elif isinstance(exception, socket.error):
        retryable = exception.errno in TRANSIENT_SOCKET_ERROR_CODES
    elif isinstance(exception, oauth2client.client.AccessTokenRefreshError):
        retryable = True
    # For a given installation, an SSLError could be a permanent error, but
    # it has only been observed as transient.
    elif isinstance(exception, SSLError):
        retryable = True
    # This has been observed as a transient error:
    # ServerNotFoundError: Unable to find the server at genomics.googleapis.com
    elif isinstance(exception, ServerNotFoundError):
        retryable = True
    if retryable:
        _print_error('Retrying...')
    return retryable
"resource": ""
} |
def retry_auth_check(exception):
    """Specific check for auth error codes.

    Return True if we should retry.
    False otherwise.

    Args:
      exception: An exception to test for transience.

    Returns:
      True if we should retry. False otherwise.
    """
    is_http_error = isinstance(exception, apiclient.errors.HttpError)
    if is_http_error and exception.resp.status in HTTP_AUTH_ERROR_CODES:
        _print_error('Retrying...')
        return True
    return False
"resource": ""
} |
def setup_service(api_name, api_version, credentials=None):
    """Configures genomics API client.

    Args:
      api_name: Name of the Google API (for example: "genomics")
      api_version: Version of the API (for example: "v2alpha1")
      credentials: Credentials to be used for the gcloud API calls.

    Returns:
      A configured Google Genomics API client with appropriate credentials.
    """
    # Fall back to Application Default Credentials when none are supplied.
    creds = (credentials or
             oauth2client.client.GoogleCredentials.get_application_default())
    return apiclient.discovery.build(api_name, api_version, credentials=creds)
"resource": ""
} |
def execute(api):
    """Executes operation.

    Args:
      api: The base API object

    Returns:
      A response body object

    Raises:
      Exception: whatever ``api.execute()`` raised, re-raised unchanged
        after logging so the caller's retry logic can handle it.
    """
    try:
        return api.execute()
    except Exception as exception:
        now = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
        _print_error('%s: Exception %s: %s' % (now, type(exception).__name__,
                                               str(exception)))
        # Bare `raise` re-raises the active exception with its original
        # traceback intact (`raise exception` would append this frame).
        raise
"resource": ""
} |
def _eval_arg_type(arg_type, T=Any, arg=None, sig=None):
    """Returns a type from a snippit of python source. Should normally be
    something just like 'str' or 'Object'.

    arg_type    the source to be evaluated
    T           the default type
    arg         context of where this type was extracted
    sig         context from where the arg was extracted

    Returns a type or a Type
    """
    try:
        # SECURITY NOTE(review): eval() executes arbitrary Python. This is
        # only safe while `arg_type` comes from trusted in-repo signature
        # strings -- never feed it user-supplied input.
        T = eval(arg_type)
    except Exception as e:
        raise ValueError('The type of {0} could not be evaluated in {1} for {2}: {3}' \
            .format(arg_type, arg, sig, text_type(e)))
    else:
        # Exact-type check on purpose (not isinstance): only a builtin
        # `type` or the project's `Type` wrapper is a valid annotation.
        if type(T) not in (type, Type):
            raise TypeError('{0} is not a valid type in {1} for {2}' \
                .format(repr(T), arg, sig))
        return T
"resource": ""
} |
def jsonify_status_code(status_code, *args, **kw):
    """Returns a jsonified response with the specified HTTP status code.

    The positional and keyword arguments are passed directly to the
    :func:`flask.jsonify` function which creates the response.
    """
    # `is_batch` is consumed here; it must not leak through to json.dumps().
    if kw.pop('is_batch', False):
        response = flask_make_response(json.dumps(*args, **kw))
        response.mimetype = 'application/json'
    else:
        response = jsonify(*args, **kw)
    response.status_code = status_code
    return response
"resource": ""
} |
def send_payload(self, params):
    """Performs the actual sending action and returns the result
    """
    # Build a JSON-RPC request envelope; a random UUID correlates the
    # response with this request.
    data = json.dumps({
        'jsonrpc': self.version,
        'method': self.service_name,
        'params': params,
        'id': text_type(uuid.uuid4())
    })
    # urlopen() requires bytes for a POST body.
    data_binary = data.encode('utf-8')
    url_request = Request(self.service_url, data_binary, headers=self.headers)
    # NOTE(review): no timeout is passed to urlopen -- this can block
    # indefinitely on an unresponsive server; confirm that is intended.
    return urlopen(url_request).read()
"resource": ""
} |
def json_rpc_format(self):
    """Return the Exception data in a format for JSON-RPC
    """
    error = {
        'name': text_type(self.__class__.__name__),
        'code': self.code,
        'message': '{0}'.format(text_type(self.message)),
        'data': self.data
    }
    # Only leak stack traces and interpreter details in Flask debug mode;
    # this information would be sensitive in production.
    if current_app.config['DEBUG']:
        import sys, traceback
        # NOTE(review): format_exc() is only meaningful while an exception
        # is being handled -- presumably this runs inside an except block;
        # verify against callers.
        error['stack'] = traceback.format_exc()
        error['executable'] = sys.executable
    return error
"resource": ""
} |
def from_file(cls, file):
    """Try loading given config file.

    :param str file: full path to the config file to load
    :raises ValueError: if the file is missing, unparsable, or fails the
        sanity check
    """
    if not os.path.exists(file):
        raise ValueError("Config file not found.")
    try:
        parser = configparser.ConfigParser()
        parser.read(file)
        configuration = cls(file, parser)
        if configuration.check_config_sanity():
            return configuration
        raise ValueError("Error in config file.")
    except configparser.Error:
        raise ValueError("Config file is invalid.")
"resource": ""
} |
def discover(cls):
    """Make a guess about the config file location an try loading it."""
    location = os.path.join(Config.config_dir, Config.config_name)
    return cls.from_file(location)
"resource": ""
} |
def create_config(cls, cfgfile, nick, twtfile, twturl, disclose_identity, add_news):
    """Create a new config file at the default location.

    :param str cfgfile: path to the config file
    :param str nick: nickname to use for own tweets
    :param str twtfile: path to the local twtxt file
    :param str twturl: URL to the remote twtxt file
    :param bool disclose_identity: if true the users id will be disclosed
    :param bool add_news: if true follow twtxt news feed
    """
    # Ensure the parent directory exists before writing the file.
    cfgfile_dir = os.path.dirname(cfgfile)
    if not os.path.exists(cfgfile_dir):
        os.makedirs(cfgfile_dir)
    cfg = configparser.ConfigParser()
    cfg.add_section("twtxt")
    cfg.set("twtxt", "nick", nick)
    cfg.set("twtxt", "twtfile", twtfile)
    cfg.set("twtxt", "twturl", twturl)
    # configparser only stores strings, so the bool is stringified.
    cfg.set("twtxt", "disclose_identity", str(disclose_identity))
    cfg.set("twtxt", "character_limit", "140")
    cfg.set("twtxt", "character_warning", "140")
    cfg.add_section("following")
    if add_news:
        cfg.set("following", "twtxt", "https://buckket.org/twtxt_news.txt")
    conf = cls(cfgfile, cfg)
    # Persist the freshly built configuration to disk immediately.
    conf.write_config()
    return conf
"resource": ""
} |
def write_config(self):
    """Writes `self.cfg` to `self.config_file`."""
    with open(self.config_file, "w") as fh:
        self.cfg.write(fh)
"resource": ""
} |
def validate_config_key(ctx, param, value):
    """Validate a configuration key according to `section.item`."""
    if not value:
        return value
    # Split once from the left so the item itself may contain dots.
    parts = value.split(".", 1)
    if len(parts) != 2:
        raise click.BadArgumentUsage("Given key does not contain a section name.")
    return parts[0], parts[1]
"resource": ""
} |
def expand_mentions(text, embed_names=True):
    """Searches the given text for mentions and expands them.

    For example:
    "@source.nick" will be expanded to "@<source.nick source.url>".
    """
    if embed_names:
        mention_format = "@<{name} {url}>"
    else:
        # str.format ignores the unused `name` kwarg in this case.
        mention_format = "@<{url}>"

    def handle_mention(match):
        # Look up the followed source by nick; unknown nicks are left as a
        # plain "@nick" mention instead of an expanded one.
        source = get_source_by_name(match.group(1))
        if source is None:
            return "@{0}".format(match.group(1))
        return mention_format.format(
            name=source.nick,
            url=source.url)

    return short_mention_re.sub(handle_mention, text)
"resource": ""
} |
def make_aware(dt):
    """Appends tzinfo and assumes UTC, if datetime object has no tzinfo already."""
    if dt.tzinfo:
        return dt
    return dt.replace(tzinfo=timezone.utc)
"resource": ""
} |
def from_file(cls, file, *args, **kwargs):
    """Try loading given cache file.

    :param str file: path of the shelve cache file to open
    :raises OSError: if the underlying shelve database cannot be opened
    """
    # Keep the try body minimal: only shelve.open() is expected to fail
    # with OSError here.
    try:
        cache = shelve.open(file)
    except OSError:
        logger.debug("Loading {0} failed".format(file))
        # Bare `raise` keeps the original traceback; `raise e` would
        # append this frame to it.
        raise
    return cls(file, cache, *args, **kwargs)
"resource": ""
} |
def discover(cls, *args, **kwargs):
    """Make a guess about the cache file location an try loading it."""
    location = os.path.join(Cache.cache_dir, Cache.cache_name)
    return cls.from_file(location, *args, **kwargs)
} |
def is_cached(self, url):
    """Checks if specified URL is cached.

    :param str url: URL to look up
    :returns: True if `url` has a cache entry, False otherwise (including
        when the cache backend is not usable as a container).
    :rtype: bool
    """
    try:
        # `in` already yields a bool -- no conditional expression needed.
        return url in self.cache
    except TypeError:
        # e.g. the cache backend is None / not a container.
        return False
"resource": ""
} |
def add_tweets(self, url, last_modified, tweets):
    """Adds new tweets to the cache."""
    entry = {"last_modified": last_modified, "tweets": tweets}
    try:
        self.cache[url] = entry
        self.mark_updated()
    except TypeError:
        return False
    return True
"resource": ""
} |
def get_tweets(self, url, limit=None):
    """Retrieves tweets from the cache."""
    try:
        entry = self.cache[url]
        tweets = entry["tweets"]
        self.mark_updated()
    except KeyError:
        return []
    # Newest first; `[:None]` is a full copy, so no limit means all.
    return sorted(tweets, reverse=True)[:limit]
"resource": ""
} |
def remove_tweets(self, url):
    """Tries to remove cached tweets."""
    try:
        # pop() without a default raises KeyError just like `del`.
        self.cache.pop(url)
        self.mark_updated()
    except KeyError:
        return False
    return True
"resource": ""
} |
def timeline(ctx, pager, limit, twtfile, sorting, timeout, porcelain, source, cache, force_update):
    """Retrieve your personal timeline."""
    if source:
        # Prefer a followed source by nick; fall back to treating the
        # argument as a raw URL.
        source_obj = ctx.obj["conf"].get_source_by_nick(source)
        if not source_obj:
            logger.debug("Not following {0}, trying as URL".format(source))
            source_obj = Source(source, source)
        sources = [source_obj]
    else:
        sources = ctx.obj["conf"].following
    tweets = []
    if cache:
        try:
            # NOTE: rebinds `cache` from the bool flag to the Cache object.
            with Cache.discover(update_interval=ctx.obj["conf"].timeline_update_interval) as cache:
                force_update = force_update or not cache.is_valid
                if force_update:
                    tweets = get_remote_tweets(sources, limit, timeout, cache)
                else:
                    logger.debug("Multiple calls to 'timeline' within {0} seconds. Skipping update".format(
                        cache.update_interval))
                    # Behold, almighty list comprehensions! (I might have gone overboard here…)
                    tweets = list(chain.from_iterable([cache.get_tweets(source.url) for source in sources]))
        except OSError as e:
            # Cache file unusable -- degrade gracefully to a live fetch.
            logger.debug(e)
            tweets = get_remote_tweets(sources, limit, timeout)
    else:
        tweets = get_remote_tweets(sources, limit, timeout)
    if twtfile and not source:
        # Include the user's own tweets unless a specific source was given.
        source = Source(ctx.obj["conf"].nick, ctx.obj["conf"].twturl, file=twtfile)
        tweets.extend(get_local_tweets(source, limit))
    if not tweets:
        return
    tweets = sort_and_truncate_tweets(tweets, sorting, limit)
    if pager:
        click.echo_via_pager(style_timeline(tweets, porcelain))
    else:
        click.echo(style_timeline(tweets, porcelain))
"resource": ""
} |
def config(ctx, key, value, remove, edit):
    """Get or set config item."""
    conf = ctx.obj["conf"]
    if not edit and not key:
        raise click.BadArgumentUsage("You have to specify either a key or use --edit.")
    if edit:
        # Opens the config file in the user's $EDITOR.
        return click.edit(filename=conf.config_file)
    if remove:
        try:
            conf.cfg.remove_option(key[0], key[1])
        except Exception as e:
            # Best-effort removal: unknown section/option is only logged.
            logger.debug(e)
        else:
            conf.write_config()
        return
    if not value:
        # No value given -> read mode: print the current setting.
        try:
            click.echo(conf.cfg.get(key[0], key[1]))
        except Exception as e:
            logger.debug(e)
        return
    # Write mode: create the section on demand, then persist.
    if not conf.cfg.has_section(key[0]):
        conf.cfg.add_section(key[0])
    conf.cfg.set(key[0], key[1], value)
    conf.write_config()
"resource": ""
} |
def relative_datetime(self):
    """Return human-readable relative time string."""
    now = datetime.now(timezone.utc)
    delta = humanize.naturaldelta(now - self.created_at)
    if self.created_at > now:
        tense = "from now"
    else:
        tense = "ago"
    return "{0} {1}".format(delta, tense)
"resource": ""
} |
def save(url, *args, **kwargs):
    """ Parse the options, set defaults and then fire up PhantomJS. """
    # The device profile supplies fallbacks for anything not set explicitly.
    device = heimdallDevice(kwargs.get('device', None))
    kwargs['width'] = kwargs.get('width', None) or device.width
    kwargs['height'] = kwargs.get('height', None) or device.height
    kwargs['user_agent'] = kwargs.get('user_agent', None) or device.user_agent
    screenshot_image = screenshot(url, **kwargs)
    if kwargs.get('optimize'):
        # Re-save through Pillow to shrink the file size in place.
        image = Image.open(screenshot_image.path)
        image.save(screenshot_image.path, optimize=True)
    return screenshot_image
"resource": ""
} |
def screenshot(url, *args, **kwargs):
    """ Call PhantomJS with the specified flags and options. """
    phantomscript = os.path.join(os.path.dirname(__file__),
                                 'take_screenshot.js')
    directory = kwargs.get('save_dir', '/tmp')
    image_name = kwargs.get('image_name', None) or _image_name_from_url(url)
    ext = kwargs.get('format', 'png').lower()
    save_path = os.path.join(directory, image_name) + '.' + ext
    crop_to_visible = kwargs.get('crop_to_visible', False)
    # '--ssl-protocol=any' lets PhantomJS talk to sites on any TLS version.
    # NOTE(review): width/height/user_agent are required keys here
    # (KeyError if missing) -- save() is expected to have filled them in.
    cmd_args = [
        'phantomjs',
        '--ssl-protocol=any',
        phantomscript,
        url,
        '--width',
        str(kwargs['width']),
        '--height',
        str(kwargs['height']),
        '--useragent',
        str(kwargs['user_agent']),
        '--dir',
        directory,
        '--ext',
        ext,
        '--name',
        str(image_name),
    ]
    if crop_to_visible:
        cmd_args.append('--croptovisible')
    # TODO:
    # - quality
    # - renderafter
    # - maxexecutiontime
    # - resourcetimeout
    # PhantomJS stdout is captured but currently unused.
    output = subprocess.Popen(cmd_args,
                              stdout=subprocess.PIPE).communicate()[0]
    return Screenshot(save_path, directory, image_name + '.' + ext, ext)
"resource": ""
} |
q257327 | _image_name_from_url | validation | def _image_name_from_url(url):
""" Create a nice image name from the url. """
find = r'https?://|[^\w]'
replace = '_'
return re.sub(find, replace, url).strip('_') | python | {
"resource": ""
} |
def worker(f):
    """
    Decorator. Abortable worker. If wrapped task will be cancelled by
    dispatcher, decorator will send ftp codes of successful interrupt.

    ::

        >>> @worker
        ... async def worker(self, connection, rest):
        ...     ...
    """
    @functools.wraps(f)
    async def wrapper(cls, connection, rest):
        try:
            await f(cls, connection, rest)
        except asyncio.CancelledError:
            # Task was cancelled by the dispatcher: report a clean abort
            # to the client (426 then 226) instead of propagating.
            connection.response("426", "transfer aborted")
            connection.response("226", "abort successful")
    return wrapper
"resource": ""
} |
def get_permissions(self, path):
    """
    Return nearest parent permission for `path`.

    :param path: path which permission you want to know
    :type path: :py:class:`str` or :py:class:`pathlib.PurePosixPath`

    :rtype: :py:class:`aioftp.Permission`
    """
    path = pathlib.PurePosixPath(path)
    # Only permissions whose own path is an ancestor of `path` qualify.
    parents = filter(lambda p: p.is_parent(path), self.permissions)
    # "Nearest" parent = fewest remaining path components between the
    # permission's path and `path`; a default (empty) Permission applies
    # when nothing matches.
    perm = min(
        parents,
        key=lambda p: len(path.relative_to(p.path).parts),
        default=Permission(),
    )
    return perm
"resource": ""
} |
def release(self):
    """
    Release, incrementing the internal counter by one.

    :raises ValueError: if releasing would exceed the configured maximum.
    """
    if self.value is None:
        # Unlimited connections: nothing to track.
        return
    # Validate BEFORE mutating so a failed release leaves the counter
    # unchanged (previously the counter was incremented first, leaving it
    # corrupted whenever the error was raised).
    if self.value >= self.maximum_value:
        raise ValueError("Too many releases")
    self.value += 1
"resource": ""
} |
def register_memory():
    """Register an approximation of memory used by FTP server process
    and all of its children.
    """
    # XXX How to get a reliable representation of memory being used is
    # not clear. (rss - shared) seems kind of ok but we might also use
    # the private working set via get_memory_maps().private*.
    def get_mem(proc):
        if os.name == 'posix':
            mem = proc.memory_info_ex()
            counter = mem.rss
            if 'shared' in mem._fields:
                # Subtract memory shared with other processes so the
                # figure approximates this process's own footprint.
                counter -= mem.shared
            return counter
        else:
            # TODO figure out what to do on Windows
            return proc.get_memory_info().rss

    if SERVER_PROC is not None:
        # Sum the server process plus all of its children.
        mem = get_mem(SERVER_PROC)
        for child in SERVER_PROC.children():
            mem += get_mem(child)
        # Append a human-readable figure to the module-level results list.
        server_memory.append(bytes2human(mem))
"resource": ""
} |
def connect():
    """Connect to FTP server, login and return an ftplib.FTP instance."""
    # FTP_TLS when the benchmark is configured for SSL, plain FTP otherwise;
    # HOST/PORT/USER/PASSWORD/TIMEOUT are module-level settings.
    ftp_class = ftplib.FTP if not SSL else ftplib.FTP_TLS
    ftp = ftp_class(timeout=TIMEOUT)
    ftp.connect(HOST, PORT)
    ftp.login(USER, PASSWORD)
    if SSL:
        ftp.prot_p()  # secure data connection
    return ftp
"resource": ""
} |
def bytes_per_second(ftp, retr=True):
    """Return the number of bytes transmitted in 1 second."""
    tot_bytes = 0
    if retr:
        # Download benchmark: repeatedly RETR the test file for ~1 second.
        def request_file():
            ftp.voidcmd('TYPE I')
            conn = ftp.transfercmd("retr " + TESTFN)
            return conn

        with contextlib.closing(request_file()) as conn:
            register_memory()
            stop_at = time.time() + 1.0
            while stop_at > time.time():
                chunk = conn.recv(BUFFER_LEN)
                if not chunk:
                    # EOF: finish this transfer and start a new one; the
                    # restart overhead is excluded from the measured second.
                    a = time.time()
                    ftp.voidresp()
                    conn.close()
                    conn = request_file()
                    stop_at += time.time() - a
                tot_bytes += len(chunk)

        # Drain the in-flight transfer so the control channel is left clean.
        try:
            while chunk:
                chunk = conn.recv(BUFFER_LEN)
            ftp.voidresp()
            conn.close()
        except (ftplib.error_temp, ftplib.error_perm):
            pass
    else:
        # Upload benchmark: STOR fixed-size chunks for ~1 second.
        ftp.voidcmd('TYPE I')
        with contextlib.closing(ftp.transfercmd("STOR " + TESTFN)) as conn:
            register_memory()
            chunk = b'x' * BUFFER_LEN
            stop_at = time.time() + 1
            while stop_at > time.time():
                tot_bytes += conn.send(chunk)
        ftp.voidresp()
    return tot_bytes
"resource": ""
} |
def async_enterable(f):
    """
    Decorator. Bring coroutine result up, so it can be used as async context

    ::

        >>> async def foo():
        ...
        ...     ...
        ...     return AsyncContextInstance(...)
        ...
        ... ctx = await foo()
        ... async with ctx:
        ...
        ...     # do

    ::

        >>> @async_enterable
        ... async def foo():
        ...
        ...     ...
        ...     return AsyncContextInstance(...)
        ...
        ... async with foo() as ctx:
        ...
        ...     # do
        ...
        ... ctx = await foo()
        ... async with ctx:
        ...
        ...     # do

    """
    @functools.wraps(f)
    def wrapper(*args, **kwargs):

        class AsyncEnterableInstance:

            # Entering awaits the coroutine first, then delegates to the
            # context object it returned.
            async def __aenter__(self):
                self.context = await f(*args, **kwargs)
                return await self.context.__aenter__()

            async def __aexit__(self, *args, **kwargs):
                await self.context.__aexit__(*args, **kwargs)

            # Awaiting the wrapper directly still yields the raw context,
            # so both `await foo()` and `async with foo()` work.
            def __await__(self):
                return f(*args, **kwargs).__await__()

        return AsyncEnterableInstance()

    return wrapper
"resource": ""
} |
def setlocale(name):
    """
    Context manager with threading lock for set locale on enter, and set it
    back to original state on exit.

    ::

        >>> with setlocale("C"):
        ...     ...
    """
    # Locale state is process-global, hence the lock around the swap.
    # NOTE(review): this generator is presumably wrapped with
    # @contextlib.contextmanager at its definition site -- confirm.
    with LOCALE_LOCK:
        old_locale = locale.setlocale(locale.LC_ALL)
        try:
            yield locale.setlocale(locale.LC_ALL, name)
        finally:
            # Always restore, even if the body raised.
            locale.setlocale(locale.LC_ALL, old_locale)
"resource": ""
} |
def append(self, data, start):
    """
    Count `data` for throttle

    :param data: bytes of data for count
    :type data: :py:class:`bytes`

    :param start: start of read/write time from
        :py:meth:`asyncio.BaseEventLoop.time`
    :type start: :py:class:`float`
    """
    # No accounting when throttling is disabled (limit unset or zero).
    if self._limit is not None and self._limit > 0:
        if self._start is None:
            self._start = start
        if start - self._start > self.reset_rate:
            # Decay the byte counter by what the limit would have allowed
            # over the elapsed window, then restart the window.
            self._sum -= round((start - self._start) * self._limit)
            self._start = start
        self._sum += len(data)
"resource": ""
} |
def limit(self, value):
    """
    Set throttle limit

    :param value: bytes per second
    :type value: :py:class:`int` or :py:class:`None`
    """
    # Changing the limit resets all accumulated throttling state.
    self._limit = value
    self._sum = 0
    self._start = None
"resource": ""
} |
def clone(self):
    """
    Clone throttles without memory
    """
    # Fresh Throttle clones keep the configured limits but drop the
    # accumulated byte counts (the "memory").
    return StreamThrottle(
        read=self.read.clone(),
        write=self.write.clone()
    )
"resource": ""
} |
def append(self, name, data, start):
    """
    Update timeout for all throttles

    :param name: name of throttle to append to ("read" or "write")
    :type name: :py:class:`str`

    :param data: bytes of data for count
    :type data: :py:class:`bytes`

    :param start: start of read/write time from
        :py:meth:`asyncio.BaseEventLoop.time`
    :type start: :py:class:`float`
    """
    # Account the data on the named side (read/write) of every throttle.
    for stream_throttle in self.throttles.values():
        side = getattr(stream_throttle, name)
        side.append(data, start)
"resource": ""
} |
def check_codes(self, expected_codes, received_code, info):
    """
    Checks if any of expected matches received.

    :param expected_codes: tuple of expected codes
    :type expected_codes: :py:class:`tuple`

    :param received_code: received code for matching
    :type received_code: :py:class:`aioftp.Code`

    :param info: list of response lines from server
    :type info: :py:class:`list`

    :raises aioftp.StatusCodeError: if received code does not matches any
        expected code
    """
    matched = any(received_code.matches(code) for code in expected_codes)
    if not matched:
        raise errors.StatusCodeError(expected_codes, received_code, info)
"resource": ""
} |
def parse_directory_response(s):
    """
    Parsing directory server response.

    :param s: response line
    :type s: :py:class:`str`

    :rtype: :py:class:`pathlib.PurePosixPath`
    """
    # RFC 959 quotes the path in responses such as: 257 "/foo" created.
    # Inside the quoted part a literal double-quote is escaped by doubling
    # it ("") -- hence the quote-run counter below.
    seq_quotes = 0
    start = False
    directory = ""
    for ch in s:
        if not start:
            # Skip everything up to the opening quote.
            if ch == "\"":
                start = True
        else:
            if ch == "\"":
                seq_quotes += 1
            else:
                if seq_quotes == 1:
                    # Single quote followed by another char: path is closed.
                    break
                elif seq_quotes == 2:
                    # Doubled quote: emit one literal '"' and continue.
                    seq_quotes = 0
                    directory += '"'
                directory += ch
    return pathlib.PurePosixPath(directory)
"resource": ""
} |
def parse_list_line_windows(self, b):
    """
    Parsing Microsoft Windows `dir` output

    :param b: response line
    :type b: :py:class:`bytes` or :py:class:`str`

    :return: (path, info)
    :rtype: (:py:class:`pathlib.PurePosixPath`, :py:class:`dict`)
    """
    line = b.decode(encoding=self.encoding).rstrip("\r\n")
    # The timestamp field ends with AM/PM, so the first "M" bounds it.
    date_time_end = line.index("M")
    date_time_str = line[:date_time_end + 1].strip().split(" ")
    date_time_str = " ".join([x for x in date_time_str if len(x) > 0])
    line = line[date_time_end + 1:].lstrip()
    # Force the "C" locale so %p (AM/PM) parses regardless of user locale.
    with setlocale("C"):
        strptime = datetime.datetime.strptime
        date_time = strptime(date_time_str, "%m/%d/%Y %I:%M %p")
    info = {}
    info["modify"] = self.format_date_time(date_time)
    next_space = line.index(" ")
    if line.startswith("<DIR>"):
        info["type"] = "dir"
    else:
        info["type"] = "file"
        info["size"] = line[:next_space].replace(",", "")
        if not info["size"].isdigit():
            # ValueError signals "not a parsable list line" to the caller.
            raise ValueError
    # This here could cause a problem if a filename started with
    # whitespace, but if we were to try to detect such a condition
    # we would have to make strong assumptions about the input format
    filename = line[next_space:].lstrip()
    if filename == "." or filename == "..":
        raise ValueError
    return pathlib.PurePosixPath(filename), info
"resource": ""
} |
def upload_stream(self, destination, *, offset=0):
    """
    Create stream for write data to `destination` file.

    :param destination: destination path of file on server side
    :type destination: :py:class:`str` or :py:class:`pathlib.PurePosixPath`

    :param offset: byte offset for stream start position
    :type offset: :py:class:`int`

    :rtype: :py:class:`aioftp.DataConnectionThrottleStreamIO`
    """
    command = "STOR " + str(destination)
    # "1xx" -- any preliminary positive reply opens the data stream.
    return self.get_stream(command, "1xx", offset=offset)
"resource": ""
} |
def jenks_breaks(values, nb_class):
    """
    Compute jenks natural breaks on a sequence of `values`, given `nb_class`,
    the number of desired class.

    Parameters
    ----------
    values : array-like
        The Iterable sequence of numbers (integer/float) to be used.
    nb_class : int
        The desired number of class (as some other functions requests
        a `k` value, `nb_class` is like `k` + 1). Have to be lesser than
        the length of `values` and greater than 2.

    Returns
    -------
    breaks : tuple of floats
        The computed break values, including minimum and maximum, in order
        to have all the bounds for building `nb_class` class,
        so the returned tuple has a length of `nb_class` + 1.

    Examples
    --------
    Using nb_class = 3, expecting 4 break values , including min and max :

    >>> jenks_breaks(
    ...     [1.3, 7.1, 7.3, 2.3, 3.9, 4.1, 7.8, 1.2, 4.3, 7.3, 5.0, 4.3],
    ...     nb_class=3)  # Should output (1.2, 2.3, 5.0, 7.8)
    """
    # Strings are iterable but make no sense here, so reject them explicitly.
    if not isinstance(values, Iterable) or isinstance(values, (str, bytes)):
        raise TypeError("A sequence of numbers is expected")
    # Accept float class counts that are whole numbers (e.g. 3.0).
    if isinstance(nb_class, float) and int(nb_class) == nb_class:
        nb_class = int(nb_class)
    if not isinstance(nb_class, int):
        raise TypeError(
            "Number of class have to be a positive integer: "
            "expected an instance of 'int' but found {}"
            .format(type(nb_class)))
    nb_values = len(values)
    # Drop NaN/Inf entries (vectorized when a numpy array was passed and
    # numpy is available; `np` may be None otherwise).
    if np and isinstance(values, np.ndarray):
        values = values[np.argwhere(np.isfinite(values)).reshape(-1)]
    else:
        values = [i for i in values if isfinite(i)]
    if len(values) != nb_values:
        warnings.warn('Invalid values encountered (NaN or Inf) were ignored')
        nb_values = len(values)
    if nb_class >= nb_values or nb_class < 2:
        raise ValueError("Number of class have to be an integer "
                         "greater than 2 and "
                         "smaller than the number of values to use")
    # Delegate the actual optimization to the compiled extension.
    return jenks._jenks_breaks(values, nb_class)
"resource": ""
} |
def grab(self, bbox=None):
    """Grabs an image directly to a buffer.

    :param bbox: Optional tuple or list containing (x1, y1, x2, y2) coordinates
        of sub-region to capture.
    :return: PIL RGB image
    :raises: ValueError, if image data does not have 3 channels (RGB), each with 8
        bits.
    :rtype: Image
    """
    w = Gdk.get_default_root_window()
    if bbox is not None:
        # Convert (x1, y1, x2, y2) to (x, y, width, height) as Gdk expects.
        g = [bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]]
    else:
        g = w.get_geometry()
    pb = Gdk.pixbuf_get_from_window(w, *g)
    if pb.get_bits_per_sample() != 8:
        raise ValueError('Expected 8 bits per pixel.')
    elif pb.get_n_channels() != 3:
        raise ValueError('Expected RGB image.')
    # Read the entire buffer into a python bytes object.
    # read_pixel_bytes: New in version 2.32.
    pixel_bytes = pb.read_pixel_bytes().get_data()  # type: bytes
    width, height = g[2], g[3]
    # Probably for SSE alignment reasons, the pixbuf has extra data in each line.
    # The args after "raw" help handle this; see
    # http://effbot.org/imagingbook/decoder.htm#the-raw-decoder
    return Image.frombytes(
        'RGB', (width, height), pixel_bytes, 'raw', 'RGB', pb.get_rowstride(), 1)
"resource": ""
} |
def grab(bbox=None, childprocess=None, backend=None):
    """Copy the contents of the screen to PIL image memory.

    :param bbox: optional bounding box (x1,y1,x2,y2)
    :param childprocess: pyscreenshot can cause an error,
        if it is used on more different virtual displays
        and back-end is not in different process.
        Some back-ends are always different processes: scrot, imagemagick
        The default is False if the program was started inside IDLE,
        otherwise it is True.
    :param backend: back-end can be forced if set (examples:scrot, wx,..),
        otherwise back-end is automatic
    """
    if childprocess is None:
        # Resolve the documented IDLE-sensitive default lazily.
        childprocess = childprocess_default_value()
    return _grab(
        to_file=False, childprocess=childprocess, backend=backend, bbox=bbox)
"resource": ""
} |
def backend_version(backend, childprocess=None):
    """Back-end version.

    :param backend: back-end (examples:scrot, wx,..)
    :param childprocess: see :py:func:`grab`
    :return: version as string
    """
    if childprocess is None:
        childprocess = childprocess_default_value()
    if childprocess:
        # Query the back-end inside a child process for isolation.
        return run_in_childprocess(_backend_version, None, backend)
    return _backend_version(backend)
"resource": ""
} |
def open(
    config, mode="continue", zoom=None, bounds=None, single_input_file=None,
    with_cache=False, debug=False
):
    """
    Open a Mapchete process.

    Parameters
    ----------
    config : MapcheteConfig object, config dict or path to mapchete file
        Mapchete process configuration
    mode : string
        * ``memory``: Generate process output on demand without reading
          pre-existing data or writing new data.
        * ``readonly``: Just read data without processing new data.
        * ``continue``: (default) Don't overwrite existing output.
        * ``overwrite``: Overwrite existing output.
    zoom : list or integer
        process zoom level or a pair of minimum and maximum zoom level
    bounds : tuple
        left, bottom, right, top process boundaries in output pyramid
    single_input_file : string
        single input file if supported by process
    with_cache : bool
        process output data cached in memory
    debug : bool
        forwarded to MapcheteConfig

    Returns
    -------
    Mapchete
        a Mapchete process object
    """
    # NOTE: shadows the builtin open() within this module.
    return Mapchete(
        MapcheteConfig(
            config, mode=mode, zoom=zoom, bounds=bounds,
            single_input_file=single_input_file, debug=debug),
        with_cache=with_cache)
"resource": ""
} |
q257349 | _get_zoom_level | validation | def _get_zoom_level(zoom, process):
"""Determine zoom levels."""
if zoom is None:
return reversed(process.config.zoom_levels)
if isinstance(zoom, int):
return [zoom]
elif len(zoom) == 2:
return reversed(range(min(zoom), max(zoom)+1))
elif len(zoom) == 1:
return zoom | python | {
"resource": ""
} |
def _process_worker(process, process_tile):
    """Worker function running the process."""
    logger.debug((process_tile.id, "running on %s" % current_process().name))
    # skip execution if overwrite is disabled and tile exists
    if (
        process.config.mode == "continue" and
        process.config.output.tiles_exist(process_tile)
    ):
        logger.debug((process_tile.id, "tile exists, skipping"))
        return ProcessInfo(
            tile=process_tile,
            processed=False,
            process_msg="output already exists",
            written=False,
            write_msg="nothing written"
        )
    # execute on process tile
    else:
        with Timer() as t:
            try:
                output = process.execute(process_tile, raise_nodata=True)
            except MapcheteNodataTile:
                # An empty tile is a valid result; the write step below
                # handles `None` output.
                output = None
        processor_message = "processed in %s" % t
        logger.debug((process_tile.id, processor_message))
        writer_info = process.write(process_tile, output)
        return ProcessInfo(
            tile=process_tile,
            processed=True,
            process_msg=processor_message,
            written=writer_info.written,
            write_msg=writer_info.write_msg
        )
"resource": ""
} |
def get_process_tiles(self, zoom=None):
    """
    Yield process tiles.

    Tiles intersecting with the input data bounding boxes as well as
    process bounds, if provided, are considered process tiles. This is to
    avoid iterating through empty tiles.

    Parameters
    ----------
    zoom : integer
        zoom level process tiles should be returned from; if none is given,
        return all process tiles

    yields
    ------
    BufferedTile objects
    """
    # "zoom or zoom == 0" distinguishes an explicit zoom level (including
    # level 0) from the default None.
    if zoom or zoom == 0:
        for tile in self.config.process_pyramid.tiles_from_geom(
            self.config.area_at_zoom(zoom), zoom
        ):
            yield tile
    else:
        # No zoom given: iterate all configured levels, highest first.
        for zoom in reversed(self.config.zoom_levels):
            for tile in self.config.process_pyramid.tiles_from_geom(
                self.config.area_at_zoom(zoom), zoom
            ):
                yield tile
"resource": ""
} |
def batch_process(
    self, zoom=None, tile=None, multi=cpu_count(), max_chunksize=1
):
    """
    Process a large batch of tiles.

    Parameters
    ----------
    process : MapcheteProcess
        process to be run
    zoom : list or int
        either single zoom level or list of minimum and maximum zoom level;
        None processes all (default: None)
    tile : tuple
        zoom, row and column of tile to be processed (cannot be used with
        zoom)
    multi : int
        number of workers (default: number of CPU cores)
    max_chunksize : int
        maximum number of process tiles to be queued for each worker;
        (default: 1)
    """
    # Drain the generator; the per-tile process info is discarded.
    for _ in self.batch_processor(zoom, tile, multi, max_chunksize):
        pass
"resource": ""
} |
def batch_processor(
    self, zoom=None, tile=None, multi=cpu_count(), max_chunksize=1
):
    """
    Process a large batch of tiles and yield report messages per tile.

    Parameters
    ----------
    zoom : list or int
        either single zoom level or list of minimum and maximum zoom level;
        None processes all (default: None)
    tile : tuple
        zoom, row and column of tile to be processed (cannot be used with
        zoom)
    multi : int
        number of workers (default: number of CPU cores)
    max_chunksize : int
        maximum number of process tiles to be queued for each worker;
        (default: 1)

    Raises
    ------
    ValueError
        if both zoom and tile are given, or if multi is smaller than 1
    """
    # bug fix: "if zoom and tile" missed zoom level 0, which is falsy
    if zoom is not None and tile is not None:
        raise ValueError("use either zoom or tile")
    # run single tile
    if tile:
        yield _run_on_single_tile(self, tile)
    # run concurrently
    elif multi > 1:
        for process_info in _run_with_multiprocessing(
            self, list(_get_zoom_level(zoom, self)), multi, max_chunksize
        ):
            yield process_info
    # run sequentially
    elif multi == 1:
        for process_info in _run_without_multiprocessing(
            self, list(_get_zoom_level(zoom, self))
        ):
            yield process_info
    else:
        # previously a worker count below 1 silently yielded nothing
        raise ValueError("multi must be a positive integer")
"resource": ""
} |
def execute(self, process_tile, raise_nodata=False):
    """
    Run the Mapchete process on one process tile.

    Parameters
    ----------
    process_tile : Tile or tile index tuple
        Member of the process tile pyramid (not necessarily the output
        pyramid, if output has a different metatiling setting)

    Returns
    -------
    data : NumPy array or features
        process output
    """
    if self.config.mode not in ["memory", "continue", "overwrite"]:
        raise ValueError("process mode must be memory, continue or overwrite")
    if isinstance(process_tile, tuple):
        process_tile = self.config.process_pyramid.tile(*process_tile)
    elif not isinstance(process_tile, BufferedTile):
        raise TypeError("process_tile must be tuple or BufferedTile")
    # tiles outside the configured zoom levels deliver empty output
    if process_tile.zoom not in self.config.zoom_levels:
        return self.config.output.empty(process_tile)
    return self._execute(process_tile, raise_nodata=raise_nodata)
"resource": ""
} |
def read(self, output_tile):
    """
    Read from written process output.

    Parameters
    ----------
    output_tile : BufferedTile or tile index tuple
        Member of the output tile pyramid (not necessarily the process
        pyramid, if output has a different metatiling setting)

    Returns
    -------
    data : NumPy array or features
        process output
    """
    if self.config.mode not in ["readonly", "continue", "overwrite"]:
        raise ValueError("process mode must be readonly, continue or overwrite")
    if isinstance(output_tile, tuple):
        output_tile = self.config.output_pyramid.tile(*output_tile)
    elif not isinstance(output_tile, BufferedTile):
        raise TypeError("output_tile must be tuple or BufferedTile")
    return self.config.output.read(output_tile)
"resource": ""
} |
def write(self, process_tile, data):
    """
    Write data into output format.

    Parameters
    ----------
    process_tile : BufferedTile or tile index tuple
        process tile
    data : NumPy array or features
        data to be written

    Returns
    -------
    ProcessInfo
        report on whether and how the tile was written
    """
    if isinstance(process_tile, tuple):
        process_tile = self.config.process_pyramid.tile(*process_tile)
    elif not isinstance(process_tile, BufferedTile):
        raise ValueError("invalid process_tile type: %s" % type(process_tile))
    if self.config.mode not in ["continue", "overwrite"]:
        raise ValueError("cannot write output in current process mode")

    # in continue mode, existing output is never overwritten
    if self.config.mode == "continue" and (
        self.config.output.tiles_exist(process_tile)
    ):
        message = "output exists, not overwritten"
        logger.debug((process_tile.id, message))
        return ProcessInfo(
            tile=process_tile,
            processed=False,
            process_msg=None,
            written=False,
            write_msg=message
        )
    # empty process output is not written
    if data is None:
        message = "output empty, nothing written"
        logger.debug((process_tile.id, message))
        return ProcessInfo(
            tile=process_tile,
            processed=False,
            process_msg=None,
            written=False,
            write_msg=message
        )
    with Timer() as t:
        self.config.output.write(process_tile=process_tile, data=data)
    message = "output written in %s" % t
    logger.debug((process_tile.id, message))
    return ProcessInfo(
        tile=process_tile,
        processed=False,
        process_msg=None,
        written=True,
        write_msg=message
    )
"resource": ""
} |
def get_raw_output(self, tile, _baselevel_readonly=False):
    """
    Get output raw data.

    This function won't work with multiprocessing, as it uses the
    ``threading.Lock()`` class.

    Parameters
    ----------
    tile : tuple, Tile or BufferedTile
        If a tile index is given, a tile from the output pyramid will be
        assumed. Tile cannot be bigger than process tile!

    Returns
    -------
    data : NumPy array or features
        process output
    """
    if not isinstance(tile, (BufferedTile, tuple)):
        raise TypeError("'tile' must be a tuple or BufferedTile")
    if isinstance(tile, tuple):
        tile = self.config.output_pyramid.tile(*tile)
    if _baselevel_readonly:
        # map tile onto the baselevel pyramid instead of the output pyramid
        tile = self.config.baselevels["tile_pyramid"].tile(*tile.id)
    # Return empty data if zoom level is outside of process zoom levels.
    if tile.zoom not in self.config.zoom_levels:
        return self.config.output.empty(tile)
    # TODO implement reprojection
    if tile.crs != self.config.process_pyramid.crs:
        raise NotImplementedError(
            "reprojection between processes not yet implemented"
        )
    if self.config.mode == "memory":
        # Determine affected process Tile and check whether it is already
        # cached.
        process_tile = self.config.process_pyramid.intersecting(tile)[0]
        return self._extract(
            in_tile=process_tile,
            in_data=self._execute_using_cache(process_tile),
            out_tile=tile
        )
    # TODO: cases where tile intersects with multiple process tiles
    process_tile = self.config.process_pyramid.intersecting(tile)[0]
    # get output_tiles that intersect with current tile
    if tile.pixelbuffer > self.config.output.pixelbuffer:
        # requested tile buffer is larger than the output buffer, so collect
        # all output tiles covering the buffered bounds
        output_tiles = list(self.config.output_pyramid.tiles_from_bounds(
            tile.bounds, tile.zoom
        ))
    else:
        output_tiles = self.config.output_pyramid.intersecting(tile)
    # NOTE: the branches below cover all remaining modes (readonly,
    # continue, overwrite); mode is validated elsewhere, so no final else
    if self.config.mode == "readonly" or _baselevel_readonly:
        if self.config.output.tiles_exist(process_tile):
            return self._read_existing_output(tile, output_tiles)
        else:
            return self.config.output.empty(tile)
    elif self.config.mode == "continue" and not _baselevel_readonly:
        if self.config.output.tiles_exist(process_tile):
            return self._read_existing_output(tile, output_tiles)
        else:
            return self._process_and_overwrite_output(tile, process_tile)
    elif self.config.mode == "overwrite" and not _baselevel_readonly:
        return self._process_and_overwrite_output(tile, process_tile)
"resource": ""
} |
def _extract(self, in_tile=None, in_data=None, out_tile=None):
    """Cut out_tile data out of a single (tile, data) pair."""
    tile_data_pairs = [(in_tile, in_data)]
    return self.config.output.extract_subset(
        input_data_tiles=tile_data_pairs,
        out_tile=out_tile
    )
"resource": ""
} |
def read(self, **kwargs):
    """
    Read existing output data from a previous run.

    Returns
    -------
    process output : NumPy array (raster) or feature iterator (vector)
    """
    # collect all output tiles covering the current process tile
    if self.tile.pixelbuffer > self.config.output.pixelbuffer:
        output_tiles = list(
            self.config.output_pyramid.tiles_from_bounds(
                self.tile.bounds, self.tile.zoom
            )
        )
    else:
        output_tiles = self.config.output_pyramid.intersecting(self.tile)
    tile_data_pairs = [
        (output_tile, self.config.output.read(output_tile))
        for output_tile in output_tiles
    ]
    return self.config.output.extract_subset(
        input_data_tiles=tile_data_pairs,
        out_tile=self.tile,
    )
"resource": ""
} |
def open(self, input_id, **kwargs):
    """
    Open input data.

    Parameters
    ----------
    input_id : string
        input identifier from configuration file or file path
    kwargs : driver specific parameters (e.g. resampling)

    Returns
    -------
    tiled input data : InputTile
        reprojected input data within tile
    """
    # non-string identifiers are already initialized input objects
    if not isinstance(input_id, str):
        return input_id.open(self.tile, **kwargs)
    try:
        reader = self.params["input"][input_id]
    except KeyError:
        raise ValueError("%s not found in config as input file" % input_id)
    return reader.open(self.tile, **kwargs)
"resource": ""
} |
def hillshade(
    self, elevation, azimuth=315.0, altitude=45.0, z=1.0, scale=1.0
):
    """
    Calculate hillshading from elevation data.

    Parameters
    ----------
    elevation : array
        input elevation data
    azimuth : float
        horizontal angle of light source (315: North-West)
    altitude : float
        vertical angle of light source (90 would result in slope shading)
    z : float
        vertical exaggeration factor
    scale : float
        scale factor of pixel size units versus height units (insert 112000
        when having elevation values in meters in a geodetic projection)

    Returns
    -------
    hillshade : array
    """
    # delegate to the commons implementation, passing this process object
    shaded = commons_hillshade.hillshade(
        elevation, self, azimuth, altitude, z, scale
    )
    return shaded
"resource": ""
} |
def contours(
    self, elevation, interval=100, field='elev', base=0
):
    """
    Extract contour lines from elevation data.

    Parameters
    ----------
    elevation : array
        input elevation data
    interval : integer
        elevation value interval when drawing contour lines
    field : string
        output field name containing elevation value
    base : integer
        elevation base value the intervals are computed from

    Returns
    -------
    contours : iterable
        contours as GeoJSON-like pairs of properties and geometry
    """
    # delegate to the commons implementation for the current tile
    extracted = commons_contours.extract_contours(
        elevation, self.tile, interval=interval, field=field, base=base
    )
    return extracted
"resource": ""
} |
def clip(
    self, array, geometries, inverted=False, clip_buffer=0
):
    """
    Clip array by geometry.

    Parameters
    ----------
    array : array
        raster data to be clipped
    geometries : iterable
        geometries used to clip source array
    inverted : bool
        invert clipping (default: False)
    clip_buffer : int
        buffer (in pixels) geometries before applying clip

    Returns
    -------
    clipped array : array
    """
    # convert the pixel buffer into map units before delegating
    buffer_map_units = clip_buffer * self.tile.pixel_x_size
    return commons_clip.clip_array_with_vector(
        array, self.tile.affine, geometries,
        inverted=inverted, clip_buffer=buffer_map_units
    )
"resource": ""
} |
def clip_array_with_vector(
    array, array_affine, geometries, inverted=False, clip_buffer=0
):
    """
    Clip input array with a vector list.

    Parameters
    ----------
    array : array
        input raster data (2D single band or 3D multiband)
    array_affine : Affine
        Affine object describing the raster's geolocation
    geometries : iterable
        iterable of dictionaries, where every entry has a 'geometry' and
        'properties' key.
    inverted : bool
        invert clip (default: False)
    clip_buffer : integer
        buffer (in map units) geometries before clipping

    Returns
    -------
    clipped array : numpy.ma.MaskedArray
    """
    # buffer input geometries and clean up
    buffered_geometries = []
    for feature in geometries:
        feature_geom = to_shape(feature["geometry"])
        if feature_geom.is_empty:
            continue
        if feature_geom.geom_type == "GeometryCollection":
            # for GeometryCollections apply buffer to every subgeometry
            # and make union; use .geoms, as iterating the collection
            # directly is not supported by newer Shapely versions
            buffered_geom = unary_union([
                g.buffer(clip_buffer) for g in feature_geom.geoms
            ])
        else:
            buffered_geom = feature_geom.buffer(clip_buffer)
        if not buffered_geom.is_empty:
            buffered_geometries.append(buffered_geom)

    # if no geometries remain, return fully masked (or fully unmasked if
    # inverted) array
    if not buffered_geometries:
        return ma.masked_array(
            array, mask=np.full(array.shape, not inverted, dtype=bool)
        )

    # mask raster by buffered geometries
    if array.ndim == 2:
        return ma.masked_array(
            array, geometry_mask(
                buffered_geometries, array.shape, array_affine,
                invert=inverted
            )
        )
    elif array.ndim == 3:
        mask = geometry_mask(
            buffered_geometries, (array.shape[1], array.shape[2]),
            array_affine, invert=inverted
        )
        # bug fix: np.stack requires a sequence; the original passed a
        # generator, which newer NumPy versions reject. Repeat the 2D mask
        # once per band instead.
        return ma.masked_array(array, mask=np.stack([mask] * array.shape[0]))
    else:
        # previously other dimensions fell through and returned None
        raise ValueError("array must be 2- or 3-dimensional")
"resource": ""
} |
def pyramid(
    input_raster,
    output_dir,
    pyramid_type=None,
    output_format=None,
    resampling_method=None,
    scale_method=None,
    zoom=None,
    bounds=None,
    overwrite=False,
    debug=False
):
    """Create tile pyramid out of input raster."""
    # normalize falsy bounds to None and delegate to raster2pyramid
    raster2pyramid(
        input_raster,
        output_dir,
        dict(
            pyramid_type=pyramid_type,
            scale_method=scale_method,
            output_format=output_format,
            resampling=resampling_method,
            zoom=zoom,
            bounds=bounds if bounds else None,
            overwrite=overwrite
        )
    )
"resource": ""
} |
def raster2pyramid(input_file, output_dir, options):
    """
    Create a tile pyramid out of an input raster dataset.

    Parameters
    ----------
    input_file : path
        source raster dataset readable by rasterio
    output_dir : path
        directory the tile pyramid is written to
    options : dict
        keys: pyramid_type, scale_method, output_format, resampling, zoom,
        bounds, overwrite
    """
    pyramid_type = options["pyramid_type"]
    scale_method = options["scale_method"]
    output_format = options["output_format"]
    resampling = options["resampling"]
    zoom = options["zoom"]
    bounds = options["bounds"]
    mode = "overwrite" if options["overwrite"] else "continue"
    # Prepare process parameters
    minzoom, maxzoom = _get_zoom(zoom, input_file, pyramid_type)
    with rasterio.open(input_file, "r") as input_raster:
        output_bands = input_raster.count
        input_dtype = input_raster.dtypes[0]
        output_dtype = input_raster.dtypes[0]
        nodataval = input_raster.nodatavals[0]
        # NOTE(review): a dataset without a nodata value (None) silently
        # becomes 0 here — confirm this default is intended
        nodataval = nodataval if nodataval else 0
        # clamp band count and dtype for PNG output
        if output_format == "PNG" and output_bands > 3:
            output_bands = 3
            output_dtype = 'uint8'
        # per-band (min, max) tuples used by the scaling method
        scales_minmax = ()
        if scale_method == "dtype_scale":
            for index in range(1, output_bands+1):
                scales_minmax += (DTYPE_RANGES[input_dtype], )
        elif scale_method == "minmax_scale":
            for index in range(1, output_bands+1):
                band = input_raster.read(index)
                scales_minmax += ((band.min(), band.max()), )
        elif scale_method == "crop":
            for index in range(1, output_bands+1):
                scales_minmax += ((0, 255), )
        # 8 bit input needs no rescaling
        if input_dtype == "uint8":
            scale_method = None
            scales_minmax = ()
            for index in range(1, output_bands+1):
                scales_minmax += ((None, None), )
    # Create configuration
    config = dict(
        process="mapchete.processes.pyramid.tilify",
        output={
            "path": output_dir,
            "format": output_format,
            "bands": output_bands,
            "dtype": output_dtype
        },
        pyramid=dict(pixelbuffer=5, grid=pyramid_type),
        scale_method=scale_method,
        scales_minmax=scales_minmax,
        input={"raster": input_file},
        config_dir=os.getcwd(),
        zoom_levels=dict(min=minzoom, max=maxzoom),
        nodataval=nodataval,
        resampling=resampling,
        bounds=bounds,
        baselevel={"zoom": maxzoom, "resampling": resampling},
        mode=mode
    )
    # create process
    with mapchete.open(config, zoom=zoom, bounds=bounds) as mp:
        # prepare output directory
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        # run process
        mp.batch_process(zoom=[minzoom, maxzoom])
"resource": ""
} |
q257367 | _get_zoom | validation | def _get_zoom(zoom, input_raster, pyramid_type):
"""Determine minimum and maximum zoomlevel."""
if not zoom:
minzoom = 1
maxzoom = get_best_zoom_level(input_raster, pyramid_type)
elif len(zoom) == 1:
minzoom = zoom[0]
maxzoom = zoom[0]
elif len(zoom) == 2:
if zoom[0] < zoom[1]:
minzoom = zoom[0]
maxzoom = zoom[1]
else:
minzoom = zoom[1]
maxzoom = zoom[0]
return minzoom, maxzoom | python | {
"resource": ""
} |
def validate_values(config, values):
    """
    Validate whether values are found in config and have the right type.

    Parameters
    ----------
    config : dict
        configuration dictionary
    values : list
        list of (str, type) tuples of values and value types expected in config

    Returns
    -------
    True if config is valid.

    Raises
    ------
    ValueError if a value is missing, TypeError if it has the wrong type.
    """
    if not isinstance(config, dict):
        raise TypeError("config must be a dictionary")
    for key, expected_type in values:
        if key not in config:
            raise ValueError("%s not given" % key)
        if not isinstance(config[key], expected_type):
            raise TypeError("%s must be %s" % (key, expected_type))
    return True
"resource": ""
} |
def get_hash(x):
    """Return hash of x (strings directly, dicts via their YAML dump)."""
    if isinstance(x, dict):
        return hash(yaml.dump(x))
    if isinstance(x, str):
        return hash(x)
    # any other type implicitly returns None (kept for compatibility)
"resource": ""
} |
def get_zoom_levels(process_zoom_levels=None, init_zoom_levels=None):
    """Validate zoom levels and return the effective zoom level list."""
    process_levels = _validate_zooms(process_zoom_levels)
    if init_zoom_levels is None:
        return process_levels
    init_levels = _validate_zooms(init_zoom_levels)
    # init zooms may only restrict, never extend, the process zooms
    if not set(init_levels).issubset(set(process_levels)):
        raise MapcheteConfigError(
            "init zooms must be a subset of process zoom")
    return init_levels
"resource": ""
} |
def snap_bounds(bounds=None, pyramid=None, zoom=None):
    """
    Snap bounds to tile boundaries of a specific zoom level.

    Parameters
    ----------
    bounds : bounds to be snapped
    pyramid : TilePyramid
    zoom : int

    Returns
    -------
    Bounds(left, bottom, right, top)
    """
    if not isinstance(bounds, (tuple, list)):
        raise TypeError("bounds must be either a tuple or a list")
    if len(bounds) != 4:
        raise ValueError("bounds has to have exactly four values")
    if not isinstance(pyramid, BufferedTilePyramid):
        raise TypeError("pyramid has to be a BufferedTilePyramid")
    left, bottom, right, top = Bounds(*bounds)
    # snap lower left and upper right corners to their covering tiles
    snapped_lb = pyramid.tile_from_xy(left, bottom, zoom, on_edge_use="rt").bounds
    snapped_rt = pyramid.tile_from_xy(right, top, zoom, on_edge_use="lb").bounds
    return Bounds(
        snapped_lb.left, snapped_lb.bottom, snapped_rt.right, snapped_rt.top
    )
"resource": ""
} |
def clip_bounds(bounds=None, clip=None):
    """
    Clip bounds by a clip extent.

    Parameters
    ----------
    bounds : bounds to be clipped
    clip : clip bounds

    Returns
    -------
    Bounds(left, bottom, right, top)
    """
    bounds, clip = Bounds(*bounds), Bounds(*clip)
    left = max(bounds.left, clip.left)
    bottom = max(bounds.bottom, clip.bottom)
    right = min(bounds.right, clip.right)
    top = min(bounds.top, clip.top)
    return Bounds(left, bottom, right, top)
"resource": ""
} |
def _validate_zooms(zooms):
    """
    Return a list of zoom levels.

    Following inputs are converted:
    - int --> [int]
    - dict{min, max} --> range(min, max + 1)
    - [int] --> [int]
    - [int, int] --> range(smaller int, bigger int + 1)
    - [int, ...] --> [int, ...] (each element validated)
    """
    if isinstance(zooms, dict):
        if any([a not in zooms for a in ["min", "max"]]):
            raise MapcheteConfigError("min and max zoom required")
        zmin = _validate_zoom(zooms["min"])
        zmax = _validate_zoom(zooms["max"])
        if zmin > zmax:
            raise MapcheteConfigError(
                "max zoom must not be smaller than min zoom")
        return list(range(zmin, zmax + 1))
    elif isinstance(zooms, list):
        if len(zooms) == 2:
            zmin, zmax = sorted([_validate_zoom(z) for z in zooms])
            return list(range(zmin, zmax + 1))
        else:
            # bug fix: lists of length 1 or >2 previously bypassed
            # _validate_zoom() on their elements
            return [_validate_zoom(z) for z in zooms]
    else:
        return [_validate_zoom(zooms)]
"resource": ""
} |
def _raw_at_zoom(config, zooms):
    """Return dict mapping each zoom level to its parameter snapshot."""
    params_per_zoom = {}
    for zoom in zooms:
        zoom_params = {}
        for name, element in config.items():
            # reserved parameters are handled elsewhere
            if name in _RESERVED_PARAMETERS:
                continue
            value = _element_at_zoom(name, element, zoom)
            if value is not None:
                zoom_params[name] = value
        params_per_zoom[zoom] = zoom_params
    return params_per_zoom
"resource": ""
} |
def _element_at_zoom(name, element, zoom):
    """
    Return the element filtered by zoom level.

    - An input integer or float gets returned as is.
    - An input string is checked whether it starts with "zoom". Then, the
      provided zoom level gets parsed and compared with the actual zoom
      level. If zoom levels match, the element gets returned.

    TODOs/gotchas:
    - Elements are unordered, which can lead to unexpected results when
      defining the YAML config.
    - Provided zoom levels for one element in config file are not allowed
      to "overlap", i.e. there is not yet a decision mechanism implemented
      which handles this case.
    """
    # If element is a dictionary, analyze subitems.
    if isinstance(element, dict):
        if "format" in element:
            # we have an input or output driver here
            return element
        out_elements = {}
        for sub_name, sub_element in element.items():
            out_element = _element_at_zoom(sub_name, sub_element, zoom)
            if name == "input":
                # input entries are kept even if they resolved to None so
                # that file groups stay complete
                out_elements[sub_name] = out_element
            elif out_element is not None:
                out_elements[sub_name] = out_element
        # If there is only one subelement, collapse unless it is
        # input. In such case, return a dictionary.
        if len(out_elements) == 1 and name != "input":
            return next(iter(out_elements.values()))
        # If subelement is empty, return None
        if len(out_elements) == 0:
            return None
        return out_elements
    # If element is a zoom level statement, filter element.
    elif isinstance(name, str):
        if name.startswith("zoom"):
            # NOTE(review): str.strip("zoom") strips the character set
            # {z, o, m} from both ends, not the literal prefix — keys such
            # as "zoomoo<=5" would be mangled; confirm keys are well-formed
            return _filter_by_zoom(
                conf_string=name.strip("zoom").strip(), zoom=zoom,
                element=element)
        # If element is a string but not a zoom level statement, return
        # element.
        else:
            return element
    # Return all other types as they are.
    else:
        return element
"resource": ""
} |
def _filter_by_zoom(element=None, conf_string=None, zoom=None):
    """Return element if zoom matches the "<op><level>" config string, else None."""
    # operator order is important: "<=" and ">=" must be tried before "<"
    # and ">", otherwise _strip_zoom() could not parse those config strings
    operators = [
        ("=", operator.eq),
        ("<=", operator.le),
        (">=", operator.ge),
        ("<", operator.lt),
        (">", operator.gt),
    ]
    for op_str, op_func in operators:
        if not conf_string.startswith(op_str):
            continue
        if op_func(zoom, _strip_zoom(conf_string, op_str)):
            return element
        return None
"resource": ""
} |
q257377 | _strip_zoom | validation | def _strip_zoom(input_string, strip_string):
"""Return zoom level as integer or throw error."""
try:
return int(input_string.strip(strip_string))
except Exception as e:
raise MapcheteConfigError("zoom level could not be determined: %s" % e) | python | {
"resource": ""
} |
q257378 | _flatten_tree | validation | def _flatten_tree(tree, old_path=None):
"""Flatten dict tree into dictionary where keys are paths of old dict."""
flat_tree = []
for key, value in tree.items():
new_path = "/".join([old_path, key]) if old_path else key
if isinstance(value, dict) and "format" not in value:
flat_tree.extend(_flatten_tree(value, old_path=new_path))
else:
flat_tree.append((new_path, value))
return flat_tree | python | {
"resource": ""
} |
q257379 | _unflatten_tree | validation | def _unflatten_tree(flat):
"""Reverse tree flattening."""
tree = {}
for key, value in flat.items():
path = key.split("/")
# we are at the end of a branch
if len(path) == 1:
tree[key] = value
# there are more branches
else:
# create new dict
if not path[0] in tree:
tree[path[0]] = _unflatten_tree({"/".join(path[1:]): value})
# add keys to existing dict
else:
branch = _unflatten_tree({"/".join(path[1:]): value})
if not path[1] in tree[path[0]]:
tree[path[0]][path[1]] = branch[path[1]]
else:
tree[path[0]][path[1]].update(branch[path[1]])
return tree | python | {
"resource": ""
} |
def bounds(self):
    """Process bounds as defined in the configuration."""
    raw_bounds = self._raw["bounds"]
    # fall back to the full pyramid extent when no bounds are configured
    if raw_bounds is None:
        return self.process_pyramid.bounds
    return Bounds(*_validate_bounds(raw_bounds))
"resource": ""
} |
def init_bounds(self):
    """
    Process bounds this process is currently initialized with.

    This gets triggered by using the ``init_bounds`` kwarg. If not set, it
    will be equal to self.bounds.
    """
    raw_init_bounds = self._raw["init_bounds"]
    if raw_init_bounds is None:
        return self.bounds
    return Bounds(*_validate_bounds(raw_init_bounds))
"resource": ""
} |
def effective_bounds(self):
    """
    Effective process bounds required to initialize inputs.

    Process bounds sometimes have to be larger, because all intersecting
    process tiles have to be covered as well.
    """
    # snap to the lowest zoom level in use so that all intersecting process
    # tiles are covered
    snap_zoom = min(
        self.baselevels["zooms"] if self.baselevels else self.init_zoom_levels
    )
    clipped = clip_bounds(
        bounds=self.init_bounds, clip=self.process_pyramid.bounds
    )
    return snap_bounds(
        bounds=clipped, pyramid=self.process_pyramid, zoom=snap_zoom
    )
"resource": ""
} |
def output(self):
    """Output writer object of the configured driver."""
    # merge raw output config with pyramid-derived parameters
    output_params = dict(
        self._raw["output"],
        grid=self.output_pyramid.grid,
        pixelbuffer=self.output_pyramid.pixelbuffer,
        metatiling=self.output_pyramid.metatiling
    )
    if "path" in output_params:
        # resolve output path relative to the configuration directory
        output_params.update(
            path=absolute_path(path=output_params["path"], base_dir=self.config_dir)
        )
    if "format" not in output_params:
        raise MapcheteConfigError("output format not specified")
    if output_params["format"] not in available_output_formats():
        raise MapcheteConfigError(
            "format %s not available in %s" % (
                output_params["format"], str(available_output_formats())
            )
        )
    writer = load_output_writer(output_params)
    try:
        # let the driver validate its own parameters
        writer.is_valid_with_config(output_params)
    except Exception as e:
        logger.exception(e)
        raise MapcheteConfigError(
            "driver %s not compatible with configuration: %s" % (
                writer.METADATA["driver_name"], e
            )
        )
    return writer
"resource": ""
} |
def input(self):
    """
    Input items used for process stored in a dictionary.

    Keys are the hashes of the input parameters, values the respective
    InputData classes.

    Raises
    ------
    MapcheteDriverError
        if an input reader fails to load
    MapcheteConfigError
        if an input definition is neither a string nor a dict
    """
    # the delimiters are used by some input drivers
    delimiters = dict(
        zoom=self.init_zoom_levels,
        bounds=self.init_bounds,
        process_bounds=self.bounds,
        effective_bounds=self.effective_bounds
    )
    # get input items only of initialized zoom levels
    raw_inputs = {
        # convert input definition to hash
        get_hash(v): v
        for zoom in self.init_zoom_levels
        if "input" in self._params_at_zoom[zoom]
        # to preserve file groups, "flatten" the input tree and use
        # the tree paths as keys
        for key, v in _flatten_tree(self._params_at_zoom[zoom]["input"])
        if v is not None
    }
    initalized_inputs = {}
    for k, v in raw_inputs.items():
        # for files and tile directories
        if isinstance(v, str):
            logger.debug("load input reader for simple input %s", v)
            try:
                reader = load_input_reader(
                    dict(
                        path=absolute_path(path=v, base_dir=self.config_dir),
                        pyramid=self.process_pyramid,
                        pixelbuffer=self.process_pyramid.pixelbuffer,
                        delimiters=delimiters
                    ),
                    readonly=self.mode == "readonly")
            except Exception as e:
                logger.exception(e)
                raise MapcheteDriverError("error when loading input %s: %s" % (v, e))
            logger.debug("input reader for simple input %s is %s", v, reader)
        # for abstract inputs
        elif isinstance(v, dict):
            logger.debug("load input reader for abstract input %s", v)
            try:
                reader = load_input_reader(
                    dict(
                        abstract=deepcopy(v),
                        pyramid=self.process_pyramid,
                        pixelbuffer=self.process_pyramid.pixelbuffer,
                        delimiters=delimiters,
                        conf_dir=self.config_dir
                    ),
                    readonly=self.mode == "readonly")
            except Exception as e:
                logger.exception(e)
                raise MapcheteDriverError("error when loading input %s: %s" % (v, e))
            logger.debug("input reader for abstract input %s is %s", v, reader)
        else:
            # bug fix: the type was previously passed as a second constructor
            # argument instead of being %-formatted into the message
            raise MapcheteConfigError("invalid input type %s" % type(v))
        # trigger bbox creation
        reader.bbox(out_crs=self.process_pyramid.crs)
        initalized_inputs[k] = reader
    return initalized_inputs
"resource": ""
} |
def baselevels(self):
    """
    Optional baselevels configuration.

    baselevels:
        min: <zoom>
        max: <zoom>
        lower: <resampling method>
        higher: <resampling method>

    Returns
    -------
    dict
        empty if no baselevels are configured, otherwise keys: zooms,
        lower, higher, tile_pyramid
    """
    if "baselevels" not in self._raw:
        return {}
    baselevels = self._raw["baselevels"]
    minmax = {k: v for k, v in baselevels.items() if k in ["min", "max"]}
    if not minmax:
        raise MapcheteConfigError("no min and max values given for baselevels")
    for v in minmax.values():
        if not isinstance(v, int) or v < 0:
            raise MapcheteConfigError(
                "invalid baselevel zoom parameter given: %s" % minmax.values()
            )
    # missing "min"/"max" fall back to the process zoom range
    zooms = list(range(
        minmax.get("min", min(self.zoom_levels)),
        minmax.get("max", max(self.zoom_levels)) + 1)
    )
    # raise when every process zoom is also a baselevel zoom, i.e. no zoom
    # level would remain to generate baselevel data from
    if not set(self.zoom_levels).difference(set(zooms)):
        raise MapcheteConfigError("baselevels zooms fully cover process zooms")
    return dict(
        zooms=zooms,
        lower=baselevels.get("lower", "nearest"),
        higher=baselevels.get("higher", "nearest"),
        tile_pyramid=BufferedTilePyramid(
            self.output_pyramid.grid,
            pixelbuffer=self.output_pyramid.pixelbuffer,
            metatiling=self.process_pyramid.metatiling
        )
    )
"resource": ""
} |
def params_at_zoom(self, zoom):
    """
    Return configuration parameters snapshot for zoom as dictionary.

    Parameters
    ----------
    zoom : int
        zoom level

    Returns
    -------
    configuration snapshot : dictionary
        zoom level dependent process configuration
    """
    if zoom not in self.init_zoom_levels:
        raise ValueError(
            "zoom level not available with current configuration")
    out = dict(self._params_at_zoom[zoom], input={}, output=self.output)
    if "input" in self._params_at_zoom[zoom]:
        # map flattened input definitions to their initialized readers
        flat_inputs = {
            k: None if v is None else self.input[get_hash(v)]
            for k, v in _flatten_tree(self._params_at_zoom[zoom]["input"])
        }
        out["input"] = _unflatten_tree(flat_inputs)
    return out
"resource": ""
} |
def area_at_zoom(self, zoom=None):
    """
    Return process bounding box for zoom level.

    Parameters
    ----------
    zoom : int or None
        if None, the union of all zoom level areas is returned

    Returns
    -------
    process area : shapely geometry
    """
    if zoom is None:
        # compute and cache the union of all zoom level areas
        if not self._cache_full_process_area:
            logger.debug("calculate process area ...")
            self._cache_full_process_area = cascaded_union(
                [self._area_at_zoom(z) for z in self.init_zoom_levels]
            ).buffer(0)
        return self._cache_full_process_area
    if zoom not in self.init_zoom_levels:
        raise ValueError(
            "zoom level not available with current configuration")
    return self._area_at_zoom(zoom)
"resource": ""
} |
def bounds_at_zoom(self, zoom=None):
    """
    Return process bounds for zoom level.

    Parameters
    ----------
    zoom : integer or list

    Returns
    -------
    process bounds : tuple
        left, bottom, right, top; empty tuple if the area is empty
    """
    area = self.area_at_zoom(zoom)
    if area.is_empty:
        return ()
    return Bounds(*area.bounds)
"resource": ""
} |
def zoom_index_gen(
    mp=None,
    out_dir=None,
    zoom=None,
    geojson=False,
    gpkg=False,
    shapefile=False,
    txt=False,
    vrt=False,
    fieldname="location",
    basepath=None,
    for_gdal=True,
    threading=False,
):
    """
    Generate indexes for given zoom level.

    Parameters
    ----------
    mp : Mapchete object
        process output to be indexed
    out_dir : path
        optionally override process output directory
    zoom : int
        zoom level to be processed
    geojson : bool
        generate GeoJSON index (default: False)
    gpkg : bool
        generate GeoPackage index (default: False)
    shapefile : bool
        generate Shapefile index (default: False)
    txt : bool
        generate tile path list textfile (default: False)
    vrt : bool
        GDAL-style VRT file (default: False)
    fieldname : str
        field name which contains paths of tiles (default: "location")
    basepath : str
        if set, use custom base path instead of output path
    for_gdal : bool
        use GDAL compatible remote paths, i.e. add "/vsicurl/" before path
        (default: True)
    threading : bool
        NOTE(review): currently unused by this function; lookup is always
        done via a ThreadPoolExecutor below

    Yields
    ------
    BufferedTile
        each checked tile, for progress reporting
    """
    for zoom in get_zoom_levels(process_zoom_levels=zoom):
        # ExitStack closes all opened index writers on exit
        with ExitStack() as es:
            # get index writers for all enabled formats
            index_writers = []
            if geojson:
                index_writers.append(
                    es.enter_context(
                        VectorFileWriter(
                            driver="GeoJSON",
                            out_path=_index_file_path(out_dir, zoom, "geojson"),
                            crs=mp.config.output_pyramid.crs,
                            fieldname=fieldname
                        )
                    )
                )
            if gpkg:
                index_writers.append(
                    es.enter_context(
                        VectorFileWriter(
                            driver="GPKG",
                            out_path=_index_file_path(out_dir, zoom, "gpkg"),
                            crs=mp.config.output_pyramid.crs,
                            fieldname=fieldname
                        )
                    )
                )
            if shapefile:
                index_writers.append(
                    es.enter_context(
                        VectorFileWriter(
                            driver="ESRI Shapefile",
                            out_path=_index_file_path(out_dir, zoom, "shp"),
                            crs=mp.config.output_pyramid.crs,
                            fieldname=fieldname
                        )
                    )
                )
            if txt:
                index_writers.append(
                    es.enter_context(
                        TextFileWriter(out_path=_index_file_path(out_dir, zoom, "txt"))
                    )
                )
            if vrt:
                index_writers.append(
                    es.enter_context(
                        VRTFileWriter(
                            out_path=_index_file_path(out_dir, zoom, "vrt"),
                            output=mp.config.output,
                            out_pyramid=mp.config.output_pyramid
                        )
                    )
                )
            logger.debug("use the following index writers: %s", index_writers)

            def _worker(tile):
                # if there are indexes to write to, check if output exists
                tile_path = _tile_path(
                    orig_path=mp.config.output.get_path(tile),
                    basepath=basepath,
                    for_gdal=for_gdal
                )
                indexes = [
                    i for i in index_writers
                    if not i.entry_exists(tile=tile, path=tile_path)
                ]
                if indexes:
                    output_exists = mp.config.output.tiles_exist(output_tile=tile)
                else:
                    # no index needs this tile, so skip the existence check
                    output_exists = None
                return tile, tile_path, indexes, output_exists

            # run existence checks concurrently; they are I/O bound
            with concurrent.futures.ThreadPoolExecutor() as executor:
                for task in concurrent.futures.as_completed(
                    (
                        executor.submit(_worker, i)
                        for i in mp.config.output_pyramid.tiles_from_geom(
                            mp.config.area_at_zoom(zoom), zoom
                        )
                    )
                ):
                    tile, tile_path, indexes, output_exists = task.result()
                    # only write entries if there are indexes to write to and output
                    # exists
                    if indexes and output_exists:
                        logger.debug("%s exists", tile_path)
                        logger.debug("write to %s indexes" % len(indexes))
                        for index in indexes:
                            index.write(tile, tile_path)
                    # yield tile for progress information
                    yield tile
"resource": ""
} |
def profile(self):
    """Return a deep copy of the raster file's metadata dictionary."""
    with rasterio.open(self.path, "r") as raster:
        # deepcopy so callers can mutate the result without touching
        # rasterio's internal metadata
        return deepcopy(raster.meta)
"resource": ""
} |
def execute(mp):
    """
    Example process for testing.

    Inputs:
    -------
    file1
        raster file

    Parameters:
    -----------

    Output:
    -------
    np.ndarray
    """
    with mp.open("file1", resampling="bilinear") as raster_file:
        # Returning "empty" assures a transparent tile instead of a pink
        # error tile is returned when using mapchete serve.
        if raster_file.is_empty():
            return "empty"
        return raster_file.read()
"resource": ""
} |
def is_valid_with_config(self, config):
    """
    Check if output format is valid with other process parameters.

    Parameters
    ----------
    config : dictionary
        output configuration parameters

    Returns
    -------
    is_valid : bool

    Raises
    ------
    TypeError
        if the configured geometry type is not one of the supported names
    """
    # validate presence and types of required top-level and schema keys
    validate_values(config, [("schema", dict), ("path", str)])
    validate_values(config["schema"], [("properties", dict), ("geometry", str)])
    allowed_geometry_types = {
        "Geometry", "Point", "MultiPoint", "Line", "MultiLine",
        "Polygon", "MultiPolygon",
    }
    if config["schema"]["geometry"] not in allowed_geometry_types:
        raise TypeError("invalid geometry type")
    return True
"resource": ""
} |
def read(self, validity_check=True, no_neighbors=False, **kwargs):
    """
    Read data from process output.

    Parameters
    ----------
    validity_check : bool
        run geometry validity check (default: True)
    no_neighbors : bool
        don't include neighbor tiles if there is a pixelbuffer (default:
        False)

    Returns
    -------
    features : list
        GeoJSON-like list of features
    """
    if not no_neighbors:
        return self._from_cache(validity_check=validity_check)
    # reading without neighbor tiles is not implemented yet
    raise NotImplementedError()
"resource": ""
} |
def available_output_formats():
    """
    Return all available output formats.

    Returns
    -------
    formats : list
        all available output formats
    """
    loaded_drivers = (
        entry_point.load()
        for entry_point in pkg_resources.iter_entry_points(DRIVERS_ENTRY_POINT)
    )
    # only drivers which declare METADATA and can write ("w" or "rw") qualify
    return [
        driver.METADATA["driver_name"]
        for driver in loaded_drivers
        if hasattr(driver, "METADATA") and driver.METADATA["mode"] in ("w", "rw")
    ]
"resource": ""
} |
def available_input_formats():
    """
    Return all available input formats.

    Returns
    -------
    formats : list
        all available input formats
    """
    formats = []
    for entry_point in pkg_resources.iter_entry_points(DRIVERS_ENTRY_POINT):
        logger.debug("driver found: %s", entry_point)
        driver = entry_point.load()
        # only drivers which declare METADATA and can read ("r" or "rw") qualify
        if hasattr(driver, "METADATA") and driver.METADATA["mode"] in ("r", "rw"):
            formats.append(driver.METADATA["driver_name"])
    return formats
"resource": ""
} |
def load_output_writer(output_params, readonly=False):
    """
    Return output class of driver.

    Parameters
    ----------
    output_params : dict
        driver-specific output configuration
    readonly : bool
        initialize writer in read-only mode (default: False)

    Returns
    -------
    output : ``OutputData``
        output writer object

    Raises
    ------
    MapcheteDriverError
        if no driver matching the configured format is registered
    """
    if not isinstance(output_params, dict):
        raise TypeError("output_params must be a dictionary")
    driver_name = output_params["format"]
    for entry_point in pkg_resources.iter_entry_points(DRIVERS_ENTRY_POINT):
        driver = entry_point.load()
        if (
            hasattr(driver, "OutputData")
            and hasattr(driver, "METADATA")
            and driver.METADATA["driver_name"] == driver_name
        ):
            return driver.OutputData(output_params, readonly=readonly)
    raise MapcheteDriverError("no loader for driver '%s' could be found." % driver_name)
"resource": ""
} |
def load_input_reader(input_params, readonly=False):
    """
    Return input class of driver.

    Parameters
    ----------
    input_params : dict
        driver-specific input configuration; must contain either an
        "abstract" mapping with a "format" key or a "path" entry
    readonly : bool
        initialize reader in read-only mode (default: False)

    Returns
    -------
    input_reader : ``InputData``
        driver-specific input reader instance

    Raises
    ------
    MapcheteDriverError
        if input parameters are invalid or no matching driver is found
    """
    logger.debug("find input reader with params %s", input_params)
    if not isinstance(input_params, dict):
        raise TypeError("input_params must be a dictionary")
    # determine driver name either from the "abstract" configuration or by
    # guessing it from the given file path
    if "abstract" in input_params:
        driver_name = input_params["abstract"]["format"]
    elif "path" in input_params:
        if os.path.splitext(input_params["path"])[1]:
            driver_name = driver_from_file(input_params["path"])
        else:
            # a path without a file extension is assumed to be a tile directory
            logger.debug("%s is a directory", input_params["path"])
            driver_name = "TileDirectory"
    else:
        raise MapcheteDriverError("invalid input parameters %s" % input_params)
    for entry_point in pkg_resources.iter_entry_points(DRIVERS_ENTRY_POINT):
        driver_ = entry_point.load()
        if hasattr(driver_, "METADATA") and (
            driver_.METADATA["driver_name"] == driver_name
        ):
            # reuse the already loaded driver module instead of calling
            # entry_point.load() a second time
            return driver_.InputData(input_params, readonly=readonly)
    raise MapcheteDriverError("no loader for driver '%s' could be found." % driver_name)
"resource": ""
} |
def driver_from_file(input_file):
    """
    Guess driver from file extension.

    Parameters
    ----------
    input_file : str
        path to the input file

    Returns
    -------
    driver : string
        driver name

    Raises
    ------
    MapcheteDriverError
        if no driver is registered for the file extension (also raised for
        paths without any extension, where the original
        ``splitext(...)[1].split(".")[1]`` crashed with an IndexError)
    """
    file_ext = os.path.splitext(input_file)[1].lstrip(".")
    # compute the extension-to-driver mapping once instead of twice
    ext_to_driver = _file_ext_to_driver()
    if file_ext not in ext_to_driver:
        raise MapcheteDriverError(
            "no driver could be found for file extension %s" % file_ext
        )
    driver = ext_to_driver[file_ext]
    if len(driver) > 1:
        warnings.warn(
            DeprecationWarning(
                "more than one driver for file found, taking %s" % driver[0]
            )
        )
    return driver[0]
"resource": ""
} |
def write_output_metadata(output_params):
    """Dump output JSON and verify parameters if output metadata exist."""
    if "path" in output_params:
        # metadata.json lives next to the tiles in the output directory
        metadata_path = os.path.join(output_params["path"], "metadata.json")
        logger.debug("check for output %s", metadata_path)
        try:
            existing_params = read_output_metadata(metadata_path)
            logger.debug("%s exists", metadata_path)
            logger.debug("existing output parameters: %s", pformat(existing_params))
            # NOTE(review): assumes read_output_metadata() already returns a
            # BufferedTilePyramid under "pyramid" — confirm against its source
            existing_tp = existing_params["pyramid"]
            current_params = params_to_dump(output_params)
            logger.debug("current output parameters: %s", pformat(current_params))
            current_tp = BufferedTilePyramid(**current_params["pyramid"])
            # refuse to reuse an output directory with a different pyramid
            if existing_tp != current_tp:
                raise MapcheteConfigError(
                    "pyramid definitions between existing and new output do not match: "
                    "%s != %s" % (existing_tp, current_tp)
                )
            # refuse to reuse an output directory with a different format
            existing_format = existing_params["driver"]["format"]
            current_format = current_params["driver"]["format"]
            if existing_format != current_format:
                raise MapcheteConfigError(
                    "existing output format does not match new output format: "
                    "%s != %s" % (
                        (existing_format, current_format)
                    )
                )
        except FileNotFoundError:
            # no existing metadata: write it for the first time
            logger.debug("%s does not exist", metadata_path)
            dump_params = params_to_dump(output_params)
            # dump output metadata
            write_json(metadata_path, dump_params)
    else:
        logger.debug("no path parameter found")
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.