repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
mozilla/inventory | mozdns/soa/models.py | 2 | 9413 | from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.db import models
from django.db.models import Q, F
from mozdns.mixins import ObjectUrlMixin, DisplayMixin
from mozdns.validation import validate_ttl, validate_soa_serial, validate_name
from settings import MOZDNS_BASE_URL
from core.keyvalue.models import KeyValue
from core.keyvalue.utils import AuxAttr
from core.task.models import Task
import reversion
from gettext import gettext as _
from string import Template
import datetime
# TODO, put these defaults in a config file.
# All values below are in seconds (standard SOA timer units).
ONE_WEEK = 604800
DEFAULT_EXPIRE = ONE_WEEK * 2
DEFAULT_RETRY = 3 * 60 # 3 min
DEFAULT_REFRESH = 180 # 3 min
DEFAULT_MINIMUM = 180 # 3 min
class SOA(models.Model, ObjectUrlMixin, DisplayMixin):
    """
    SOA stands for Start of Authority
    "An SOA record is required in each *db.DOMAIN* and *db.ADDR* file."
    -- O'Reilly DNS and BIND
    The structure of an SOA::
        <name> [<ttl>] [<class>] SOA <origin> <person> (
            <serial>
            <refresh>
            <retry>
            <expire>
            <minimum> )
    >>> SOA(primary=primary, contact=contact, retry=retry, # noqa
    ... refresh=refresh, description=description) # noqa
    Each DNS zone must have it's own SOA object. Use the description field to
    remind yourself which zone an SOA corresponds to if different SOA's have a
    similar ``primary`` and ``contact`` value.
    """
    id = models.AutoField(primary_key=True)
    ttl = models.PositiveIntegerField(
        default=3600, blank=True, null=True, validators=[validate_ttl],
        help_text='Time to Live of this record'
    )
    primary = models.CharField(max_length=100, validators=[validate_name])
    contact = models.CharField(max_length=100, validators=[validate_name])
    # BUGFIX: ``default`` must be a callable.  The previous code passed
    # int(datetime.datetime.now().strftime('%Y%m%d01')) directly, which was
    # evaluated exactly once at module import, so every SOA created by a
    # long-running process received the serial stamp of the day the process
    # started.  Django calls a callable default per new instance.
    serial = models.PositiveIntegerField(
        null=False,
        default=lambda: int(datetime.datetime.now().strftime('%Y%m%d01')),
        validators=[validate_soa_serial]
    )
    # Indicates when the zone data is no longer authoritative. Used by slave.
    expire = models.PositiveIntegerField(null=False, default=DEFAULT_EXPIRE)
    # The time between retries if a slave fails to contact the master
    # when refresh (below) has expired.
    retry = models.PositiveIntegerField(null=False, default=DEFAULT_RETRY)
    # The time when the slave will try to refresh the zone from the master
    refresh = models.PositiveIntegerField(null=False, default=DEFAULT_REFRESH)
    minimum = models.PositiveIntegerField(null=False, default=DEFAULT_MINIMUM)
    description = models.CharField(max_length=200, null=True, blank=True)
    # This indicates if this SOA's zone needs to be rebuilt
    dirty = models.BooleanField(default=False)
    is_signed = models.BooleanField(default=False)
    search_fields = ('description',)
    # NOTE(review): the template omits {minimum}; confirm the zone builder
    # appends it elsewhere before changing this string.
    template = _("{root_domain}. {ttl_} {rdclass:$rdclass_just} "
                 "{rdtype:$rdtype_just} {primary}. {contact}. "
                 "({serial} {refresh} {retry} {expire})")
    attrs = None
    class Meta:
        db_table = 'soa'
        # We are using the description field here to stop the same SOA from
        # being assigned to multiple zones. See the documentation in the
        # Domain models.py file for more info.
        unique_together = ('primary', 'contact', 'description')
    @classmethod
    def calc_serial(cls, cur_serial, date):
        """
        "The convention is to use a date based sn (serial) value to
        simplify the task of incrementing the sn - the most popular
        convention being yyyymmddss where yyyy = year, mm = month and dd =
        day ss = a sequence number in case you update it more than once in
        the day! Using this date format convention the value 2005021002
        indicates the last update was on the 10th February 2005 and it was
        the third update that day. The date format is just a convention,
        not a requirement, so BIND (or any other DNS software) will not
        validate the contents of this field."
        -- http://www.zytrax.com/books/dns/ch8/soa.html
        Calculate the correct serial given that today is YYYYMMDD and the
        current serial is a 10 digit number.
        Cases:
            cur_serial isn't a date -> +1 serial. Never go backwards.
            cur_serial > date -> +1 serial. Never go backwards.
            cur_serial == date -> +1 serial
            cur_serial < date -> now_date_stamp + '00'
        Everything comes in as string and leaves as an int
        :param cur_serial: The current 10 digit serial number
        :type cur_serial: str
        :param date: A date
        :type date: datetime.date object
        """
        date_serial = int(date.strftime('%Y%m%d00'))
        if int(cur_serial) < date_serial:
            # The calendar moved past the serial's date stamp: restart at 00.
            return date_serial
        else:
            # Same day, or a non-date/future serial: never go backwards.
            return int(cur_serial) + 1
    @property
    def rdtype(self):
        """DNS record type rendered by this model."""
        return 'SOA'
    @property
    def root_domain(self):
        """Return the root domain of this zone: the one domain in this SOA
        whose master domain is *not* in the same SOA, or None if the zone
        has no domains yet."""
        try:
            return self.domain_set.get(
                ~Q(master_domain__soa=F('soa')), soa__isnull=False
            )
        except ObjectDoesNotExist:
            return None
    def get_incremented_serial(self):
        """Return calc_serial() applied to today's date."""
        return self.__class__.calc_serial(
            str(self.serial), datetime.date.today()
        )
    def bind_render_record(self, show_ttl=False):
        """Render this SOA as a BIND zone-file line."""
        template = Template(self.template).substitute(**self.justs)
        if show_ttl:
            ttl_ = self.ttl
        else:
            # NOTE(review): a set ttl is still rendered here; only a null
            # ttl collapses to ''.  Confirm this is the intended behavior.
            ttl_ = '' if self.ttl is None else self.ttl
        return template.format(
            root_domain=self.root_domain, rdtype=self.rdtype, rdclass='IN',
            ttl_=ttl_, **vars(self)
        )
    def update_attrs(self):
        """(Re)build the key/value attribute accessor for this SOA."""
        self.attrs = AuxAttr(SOAKeyValue, self, 'soa')
    def details(self):
        """Field label/value pairs for the detail templates."""
        return (
            ('Primary', self.primary),
            ('Contact', self.contact),
            ('Serial', self.serial),
            ('Expire', self.expire),
            ('Retry', self.retry),
            ('Refresh', self.refresh),
            ('Description', self.description),
        )
    def get_debug_build_url(self):
        """URL of the debug zone-build view for this SOA."""
        return MOZDNS_BASE_URL + '/bind/build_debug/{0}/'.format(self.pk)
    def get_fancy_edit_url(self):
        """URL of the rich edit form for this SOA."""
        return '/mozdns/soa/{0}/update'.format(self.pk)
    def delete(self, *args, **kwargs):
        """Delete this SOA, refusing while domains still live in its zone."""
        if self.domain_set.exists():
            raise ValidationError(
                "Domains exist in this SOA's zone. Delete "
                "those domains or remove them from this zone before "
                "deleting this SOA.")
        # Removing a zone affects the whole tree; schedule a full rebuild.
        Task.schedule_all_dns_rebuild(self)
        super(SOA, self).delete(*args, **kwargs)
    def has_record_set(self, view=None, exclude_ns=False):
        """Return True if any domain in this zone has records (optionally
        restricted to one view and/or ignoring NS records)."""
        return any(
            domain.has_record_set(view=view, exclude_ns=exclude_ns)
            for domain in self.domain_set.all()
        )
    def schedule_rebuild(self, commit=True):
        """Mark this zone dirty and queue a single-zone rebuild task."""
        Task.schedule_zone_rebuild(self)
        self.dirty = True
        if commit:
            self.save()
    def schedule_full_rebuild(self, commit=True):
        """Mark this zone dirty and queue a rebuild of all of DNS."""
        Task.schedule_all_dns_rebuild(self)
        self.dirty = True
        if commit:
            self.save()
    def save(self, *args, **kwargs):
        """Validate, detect meaningful field changes, and schedule rebuilds."""
        self.full_clean()
        new = self.pk is None
        if new:
            self.dirty = True
        elif not self.dirty:
            db_self = SOA.objects.get(pk=self.pk)
            fields = [field.name for field in self.__class__._meta.fields]
            # Introspect and determine if we need to rebuild.  serial and
            # dirty are skipped so rebuilds don't cause an endless cycle.
            for field in fields:
                if field in ('serial', 'dirty'):
                    continue
                if getattr(db_self, field) != getattr(self, field):
                    self.schedule_rebuild(commit=False)
        super(SOA, self).save(*args, **kwargs)
        if new:
            # Need to call this after save because new objects won't have a pk
            self.schedule_full_rebuild(commit=False)
    def __str__(self):
        # NOTE(review): description may be None/'' — str(soa) callers beware.
        return self.description
    def __repr__(self):
        return "<SOA '{0}'>".format(self)
reversion.register(SOA)
class SOAKeyValue(KeyValue):
    """Key/value attribute attached to an SOA (see core.keyvalue)."""
    obj = models.ForeignKey(SOA, related_name='keyvalue_set', null=False)
    def _aa_disabled(self):
        """
        Disabled - The Value of this Key determines whether or not an SOA will
        be asked to build a zone file. Values that represent true are 'True,
        TRUE, true, 1' and 'yes'. Values that represent false are 'False,
        FALSE, false, 0' and 'no'.
        """
        truthy = ["true", "1", "yes"]
        falsy = ["false", "0", "no"]
        normalized = self.value.lower()
        # Canonicalize any accepted spelling to "True"/"False".
        if normalized in truthy:
            self.value = "True"
            return
        if normalized in falsy:
            self.value = "False"
            return
        raise ValidationError("Disabled should be set to either {0} OR "
                              "{1}".format(", ".join(truthy),
                                           ", ".join(falsy)))
reversion.register(SOAKeyValue)
| bsd-3-clause | 82dc659a9130e4d18c31b76b3b376d4a | 34.790875 | 78 | 0.598109 | 4.069607 | false | false | false | false |
mozilla/inventory | vendor-local/src/django-extensions/django_extensions/management/commands/runserver_plus.py | 16 | 4895 | from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
import os
import sys
try:
from django.contrib.staticfiles.handlers import StaticFilesHandler
USE_STATICFILES = 'django.contrib.staticfiles' in settings.INSTALLED_APPS
except ImportError, e:
USE_STATICFILES = False
def null_technical_500_response(request, exc_type, exc_value, tb):
    # Re-raise the original exception (Python 2 three-argument raise keeps
    # the traceback) so Werkzeug's debugger handles it instead of Django's
    # technical 500 page.
    raise exc_type, exc_value, tb
class Command(BaseCommand):
    """runserver_plus: Django dev server backed by the Werkzeug debugger.

    Python 2 code (print statements, old-style option parsing via optparse).
    """
    # Command-line options; extends the stock BaseCommand option set.
    option_list = BaseCommand.option_list + (
        make_option('--noreload', action='store_false', dest='use_reloader', default=True,
            help='Tells Django to NOT use the auto-reloader.'),
        make_option('--browser', action='store_true', dest='open_browser',
            help='Tells Django to open a browser.'),
        make_option('--adminmedia', dest='admin_media_path', default='',
            help='Specifies the directory from which to serve admin media.'),
        make_option('--threaded', action='store_true', dest='threaded',
            help='Run in multithreaded mode.'),
    )
    # Static-file options only make sense when django.contrib.staticfiles
    # imported successfully at module load (USE_STATICFILES flag above).
    if USE_STATICFILES:
        option_list += (
            make_option('--nostatic', action="store_false", dest='use_static_handler', default=True,
                help='Tells Django to NOT automatically serve static files at STATIC_URL.'),
            make_option('--insecure', action="store_true", dest='insecure_serving', default=False,
                help='Allows serving static files even if DEBUG is False.'),
        )
    help = "Starts a lightweight Web server for development."
    args = '[optional port number, or ipaddr:port]'
    # Validation is called explicitly each time the server is reloaded.
    requires_model_validation = False
    def handle(self, addrport='', *args, **options):
        # Parse the address/port, build the WSGI handler chain, then hand
        # everything to Werkzeug's run_simple.
        import django
        # NOTE(review): ``run`` and ``WSGIServerException`` are imported but
        # never used below (run_simple serves instead).
        from django.core.servers.basehttp import run, WSGIServerException
        # AdminMediaHandler disappeared in later Django; feature-detect it.
        try:
            from django.core.servers.basehttp import AdminMediaHandler
            USE_ADMINMEDIAHANDLER = True
        except ImportError:
            USE_ADMINMEDIAHANDLER = False
        from django.core.handlers.wsgi import WSGIHandler
        try:
            from werkzeug import run_simple, DebuggedApplication
        except ImportError:
            raise CommandError("Werkzeug is required to use runserver_plus. Please visit http://werkzeug.pocoo.org/ or install via pip. (pip install Werkzeug)")
        # usurp django's handler
        from django.views import debug
        debug.technical_500_response = null_technical_500_response
        if args:
            raise CommandError('Usage is runserver %s' % self.args)
        # Accept '', 'port', or 'addr:port'; default to 127.0.0.1:8000.
        if not addrport:
            addr = ''
            port = '8000'
        else:
            try:
                addr, port = addrport.split(':')
            except ValueError:
                addr, port = '', addrport
        if not addr:
            addr = '127.0.0.1'
        if not port.isdigit():
            raise CommandError("%r is not a valid port number." % port)
        threaded = options.get('threaded', False)
        use_reloader = options.get('use_reloader', True)
        open_browser = options.get('open_browser', False)
        quit_command = (sys.platform == 'win32') and 'CTRL-BREAK' or 'CONTROL-C'
        def inner_run():
            # Validate models, print a banner, wrap the handler stack
            # (admin media -> static files), then serve forever.
            print "Validating models..."
            self.validate(display_num_errors=True)
            print "\nDjango version %s, using settings %r" % (django.get_version(), settings.SETTINGS_MODULE)
            print "Development server is running at http://%s:%s/" % (addr, port)
            print "Using the Werkzeug debugger (http://werkzeug.pocoo.org/)"
            print "Quit the server with %s." % quit_command
            path = options.get('admin_media_path', '')
            if not path:
                # Django >= 1.3 keeps admin media under static/; fall back
                # to the old contrib/admin/media location otherwise.
                admin_media_path = os.path.join(django.__path__[0], 'contrib/admin/static/admin')
                if os.path.isdir(admin_media_path):
                    path = admin_media_path
                else:
                    path = os.path.join(django.__path__[0], 'contrib/admin/media')
            handler = WSGIHandler()
            if USE_ADMINMEDIAHANDLER:
                handler = AdminMediaHandler(handler, path)
            if USE_STATICFILES:
                use_static_handler = options.get('use_static_handler', True)
                insecure_serving = options.get('insecure_serving', False)
                if use_static_handler and (settings.DEBUG or insecure_serving):
                    handler = StaticFilesHandler(handler)
            if open_browser:
                import webbrowser
                url = "http://%s:%s/" % (addr, port)
                webbrowser.open(url)
            # DebuggedApplication(…, True) enables interactive tracebacks.
            run_simple(addr, int(port), DebuggedApplication(handler, True),
                use_reloader=use_reloader, use_debugger=True, threaded=threaded)
        inner_run()
| bsd-3-clause | 3a5174b7087dd57601902b7fb70bf312 | 43.908257 | 161 | 0.60858 | 4.366637 | false | false | false | false |
mozilla/inventory | core/site/views.py | 2 | 1480 | from django.shortcuts import get_object_or_404
from django.shortcuts import render
from core.site.models import Site
from core.site.forms import SiteForm
from core.site.utils import get_vlans
from core.views import CoreDeleteView, CoreListView
from core.views import CoreCreateView, CoreUpdateView
class SiteView(object):
    """Mixin holding the model/queryset/form shared by all Site CBVs."""
    model = Site
    form_class = SiteForm
    # Alphabetical ordering for list pages.
    queryset = Site.objects.all().order_by('name')
class SiteDeleteView(SiteView, CoreDeleteView):
    # Redirect target after a successful delete.
    success_url = '/core/site/'
def delete_site(request, site_pk):
    """Render the delete-confirmation page for a Site.

    NOTE(review): both the GET and POST branches render the same template
    and nothing is ever deleted here; the fetched Site is used only for the
    404 check and is not passed to the template.  Confirm whether the actual
    deletion is meant to happen via SiteDeleteView instead.
    """
    get_object_or_404(Site, pk=site_pk)
    if request.method == 'POST':
        return render(request, 'site/site_confirm_delete.html')
    else:
        return render(request, 'site/site_confirm_delete.html')
class SiteListView(SiteView, CoreListView):
    # List page for Sites (ordered queryset from SiteView).
    template_name = 'site/site_list.html'
class SiteCreateView(SiteView, CoreCreateView):
    # Create form; uses the generic core form template.
    template_name = 'core/core_form.html'
class SiteUpdateView(SiteView, CoreUpdateView):
    # Edit form with a site-specific template.
    template_name = 'site/site_edit.html'
def site_detail(request, site_pk):
    """Render the detail page for one Site with its vlans, children,
    key/value attributes and the available system statuses."""
    from systems.models import SystemStatus
    # TODO, make this a top level import when SystemStatus is in it's own app
    site = get_object_or_404(Site, pk=site_pk)
    context = {
        'site': site,
        'vlans': get_vlans(site),
        'child_sites': site.site_set.all(),
        'attrs': site.keyvalue_set.all(),
        'statuses': SystemStatus.objects.all(),
    }
    return render(request, 'site/site_detail.html', context)
| bsd-3-clause | bcbf5b02d6c56f4a85adbdfd873f05ae | 26.407407 | 77 | 0.703378 | 3.490566 | false | false | false | false |
mozilla/inventory | vendor-local/src/django-piston/piston/oauth.py | 24 | 23013 | """
The MIT License
Copyright (c) 2007 Leah Culver
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import cgi
import urllib
import time
import random
import urlparse
import hmac
import binascii
# Protocol-level defaults used throughout this module (OAuth 1.0).
VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class OAuthError(RuntimeError):
    """Generic exception class."""
    # NOTE: the default message's spelling ("occured") is preserved verbatim
    # because it is a runtime string callers may match against.
    def __init__(self, message='OAuth error occured.'):
        self.message = message
def build_authenticate_header(realm=''):
    """Optional WWW-Authenticate header (401 error)"""
    header_value = 'OAuth realm="%s"' % realm
    return {'WWW-Authenticate': header_value}
def escape(s):
    """Escape a URL including any /."""
    # Python 2 urllib.quote: with safe='~' only '~' is left unescaped, so
    # even '/' gets percent-encoded (OAuth parameter encoding).
    return urllib.quote(s, safe='~')
def _utf8_str(s):
    """Convert unicode to utf-8."""
    # Python 2: unicode objects are encoded; everything else is coerced
    # with str().
    if isinstance(s, unicode):
        return s.encode("utf-8")
    else:
        return str(s)
def generate_timestamp():
    """Get seconds since epoch (UTC)."""
    # Truncated to an int per the oauth_timestamp parameter format.
    return int(time.time())
def generate_nonce(length=8):
    """Generate pseudorandom number."""
    # A string of `length` random decimal digits.
    digits = [str(random.randint(0, 9)) for _ in range(length)]
    return ''.join(digits)
def generate_verifier(length=8):
    """Generate pseudorandom number."""
    # Same digit-string scheme as generate_nonce.
    digits = [str(random.randint(0, 9)) for _ in range(length)]
    return ''.join(digits)
class OAuthConsumer(object):
    """Consumer of OAuth authentication.
    OAuthConsumer is a data type that represents the identity of the Consumer
    via its shared secret with the Service Provider.
    """
    # Class-level defaults mirror the public attribute names.
    key = None
    secret = None
    def __init__(self, key, secret):
        self.key, self.secret = key, secret
class OAuthToken(object):
    """OAuthToken is a data type that represents an End User via either an access
    or request token.
    key -- the token
    secret -- the token secret
    """
    key = None
    secret = None
    callback = None
    callback_confirmed = None
    verifier = None
    def __init__(self, key, secret):
        self.key = key
        self.secret = secret
    def set_callback(self, callback):
        # 1.0a: remember the callback and mark it confirmed for to_string().
        self.callback = callback
        self.callback_confirmed = 'true'
    def set_verifier(self, verifier=None):
        # Use the supplied verifier or make a fresh pseudorandom one.
        if verifier is not None:
            self.verifier = verifier
        else:
            self.verifier = generate_verifier()
    def get_callback_url(self):
        # Return the callback with oauth_verifier appended to its query
        # string; falls through to the raw callback when no verifier is set.
        if self.callback and self.verifier:
            # Append the oauth_verifier.
            parts = urlparse.urlparse(self.callback)
            scheme, netloc, path, params, query, fragment = parts[:6]
            if query:
                query = '%s&oauth_verifier=%s' % (query, self.verifier)
            else:
                query = 'oauth_verifier=%s' % self.verifier
            return urlparse.urlunparse((scheme, netloc, path, params,
                query, fragment))
        return self.callback
    def to_string(self):
        # Serialize as application/x-www-form-urlencoded (Python 2 urllib).
        data = {
            'oauth_token': self.key,
            'oauth_token_secret': self.secret,
        }
        if self.callback_confirmed is not None:
            data['oauth_callback_confirmed'] = self.callback_confirmed
        return urllib.urlencode(data)
    def from_string(s):
        """ Returns a token from something like:
        oauth_token_secret=xxx&oauth_token=xxx
        """
        # Raises KeyError if oauth_token/oauth_token_secret are missing.
        params = cgi.parse_qs(s, keep_blank_values=False)
        key = params['oauth_token'][0]
        secret = params['oauth_token_secret'][0]
        token = OAuthToken(key, secret)
        try:
            token.callback_confirmed = params['oauth_callback_confirmed'][0]
        except KeyError:
            pass # 1.0, no callback confirmed.
        return token
    from_string = staticmethod(from_string)
    def __str__(self):
        return self.to_string()
class OAuthRequest(object):
    """OAuthRequest represents the request and can be serialized.
    OAuth parameters:
        - oauth_consumer_key
        - oauth_token
        - oauth_signature_method
        - oauth_signature
        - oauth_timestamp
        - oauth_nonce
        - oauth_version
        - oauth_verifier
        ... any additional parameters, as defined by the Service Provider.
    """
    parameters = None # OAuth parameters.
    http_method = HTTP_METHOD
    http_url = None
    version = VERSION
    def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None):
        self.http_method = http_method
        self.http_url = http_url
        self.parameters = parameters or {}
    def set_parameter(self, parameter, value):
        self.parameters[parameter] = value
    def get_parameter(self, parameter):
        # Missing parameters surface as OAuthError, not KeyError.
        try:
            return self.parameters[parameter]
        except:
            raise OAuthError('Parameter not found: %s' % parameter)
    def _get_timestamp_nonce(self):
        # Convenience pair used by the server-side signature checks.
        return self.get_parameter('oauth_timestamp'), self.get_parameter(
            'oauth_nonce')
    def get_nonoauth_parameters(self):
        """Get any non-OAuth parameters."""
        parameters = {}
        for k, v in self.parameters.iteritems():
            # Ignore oauth parameters.
            if k.find('oauth_') < 0:
                parameters[k] = v
        return parameters
    def to_header(self, realm=''):
        """Serialize as a header for an HTTPAuth request."""
        auth_header = 'OAuth realm="%s"' % realm
        # Add the oauth parameters.
        if self.parameters:
            for k, v in self.parameters.iteritems():
                if k[:6] == 'oauth_':
                    auth_header += ', %s="%s"' % (k, escape(str(v)))
        return {'Authorization': auth_header}
    def to_postdata(self):
        """Serialize as post data for a POST request."""
        return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) \
            for k, v in self.parameters.iteritems()])
    def to_url(self):
        """Serialize as a URL for a GET request."""
        return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata())
    def get_normalized_parameters(self):
        """Return a string that contains the parameters that must be signed."""
        # NOTE(review): this deletes oauth_signature from self.parameters in
        # place — a side effect on the request object.
        params = self.parameters
        try:
            # Exclude the signature if it exists.
            del params['oauth_signature']
        except:
            pass
        # Escape key values before sorting.
        key_values = [(escape(_utf8_str(k)), escape(_utf8_str(v))) \
            for k,v in params.items()]
        # Sort lexicographically, first after key, then after value.
        key_values.sort()
        # Combine key value pairs into a string.
        return '&'.join(['%s=%s' % (k, v) for k, v in key_values])
    def get_normalized_http_method(self):
        """Uppercases the http method."""
        return self.http_method.upper()
    def get_normalized_http_url(self):
        """Parses the URL and rebuilds it to be scheme://host/path."""
        parts = urlparse.urlparse(self.http_url)
        scheme, netloc, path = parts[:3]
        # Exclude default port numbers.
        if scheme == 'http' and netloc[-3:] == ':80':
            netloc = netloc[:-3]
        elif scheme == 'https' and netloc[-4:] == ':443':
            netloc = netloc[:-4]
        return '%s://%s%s' % (scheme, netloc, path)
    def sign_request(self, signature_method, consumer, token):
        """Set the signature parameter to the result of build_signature."""
        # Set the signature method.
        self.set_parameter('oauth_signature_method',
            signature_method.get_name())
        # Set the signature.
        self.set_parameter('oauth_signature',
            self.build_signature(signature_method, consumer, token))
    def build_signature(self, signature_method, consumer, token):
        """Calls the build signature method within the signature method."""
        return signature_method.build_signature(self, consumer, token)
    def from_request(http_method, http_url, headers=None, parameters=None,
            query_string=None):
        """Combines multiple parameter sources."""
        # Precedence (later wins): explicit parameters < Authorization
        # header < query_string < URL query component.
        if parameters is None:
            parameters = {}
        # Headers
        if headers and 'Authorization' in headers:
            auth_header = headers['Authorization']
            # Check that the authorization header is OAuth.
            if auth_header[:6] == 'OAuth ':
                auth_header = auth_header[6:]
                try:
                    # Get the parameters from the header.
                    header_params = OAuthRequest._split_header(auth_header)
                    parameters.update(header_params)
                except:
                    raise OAuthError('Unable to parse OAuth parameters from '
                        'Authorization header.')
        # GET or POST query string.
        if query_string:
            query_params = OAuthRequest._split_url_string(query_string)
            parameters.update(query_params)
        # URL parameters.
        param_str = urlparse.urlparse(http_url)[4] # query
        url_params = OAuthRequest._split_url_string(param_str)
        parameters.update(url_params)
        if parameters:
            return OAuthRequest(http_method, http_url, parameters)
        return None
    from_request = staticmethod(from_request)
    def from_consumer_and_token(oauth_consumer, token=None,
            callback=None, verifier=None, http_method=HTTP_METHOD,
            http_url=None, parameters=None):
        # Build a client-side request with the standard oauth_* defaults
        # filled in; caller-supplied parameters override the defaults.
        if not parameters:
            parameters = {}
        defaults = {
            'oauth_consumer_key': oauth_consumer.key,
            'oauth_timestamp': generate_timestamp(),
            'oauth_nonce': generate_nonce(),
            'oauth_version': OAuthRequest.version,
        }
        defaults.update(parameters)
        parameters = defaults
        if token:
            parameters['oauth_token'] = token.key
            parameters['oauth_callback'] = token.callback
            # 1.0a support for verifier.
            parameters['oauth_verifier'] = verifier
        elif callback:
            # 1.0a support for callback in the request token request.
            parameters['oauth_callback'] = callback
        return OAuthRequest(http_method, http_url, parameters)
    from_consumer_and_token = staticmethod(from_consumer_and_token)
    def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD,
            http_url=None, parameters=None):
        # Minimal request used for the user-authorization redirect step.
        if not parameters:
            parameters = {}
        parameters['oauth_token'] = token.key
        if callback:
            parameters['oauth_callback'] = callback
        return OAuthRequest(http_method, http_url, parameters)
    from_token_and_callback = staticmethod(from_token_and_callback)
    def _split_header(header):
        """Turn Authorization: header into parameters."""
        params = {}
        parts = header.split(',')
        for param in parts:
            # Ignore realm parameter.
            if param.find('realm') > -1:
                continue
            # Remove whitespace.
            param = param.strip()
            # Split key-value.
            param_parts = param.split('=', 1)
            # Remove quotes and unescape the value.
            params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
        return params
    _split_header = staticmethod(_split_header)
    def _split_url_string(param_str):
        """Turn URL string into parameters."""
        parameters = cgi.parse_qs(param_str, keep_blank_values=False)
        for k, v in parameters.iteritems():
            # parse_qs yields lists; keep only the first value of each key.
            parameters[k] = urllib.unquote(v[0])
        return parameters
    _split_url_string = staticmethod(_split_url_string)
class OAuthServer(object):
    """A worker to check the validity of a request against a data store."""
    timestamp_threshold = 300 # In seconds, five minutes.
    version = VERSION
    signature_methods = None
    data_store = None
    def __init__(self, data_store=None, signature_methods=None):
        self.data_store = data_store
        self.signature_methods = signature_methods or {}
    def set_data_store(self, data_store):
        self.data_store = data_store
    def get_data_store(self):
        return self.data_store
    def add_signature_method(self, signature_method):
        # Register a signature method under its own name ('HMAC-SHA1', ...).
        self.signature_methods[signature_method.get_name()] = signature_method
        return self.signature_methods
    def fetch_request_token(self, oauth_request):
        """Processes a request_token request and returns the
        request token on success.
        """
        try:
            # Get the request token for authorization.
            token = self._get_token(oauth_request, 'request')
        except OAuthError:
            # No token required for the initial token request.
            # NOTE(review): ``version`` is assigned but unused here; the
            # call is kept for its validation side effect.
            version = self._get_version(oauth_request)
            consumer = self._get_consumer(oauth_request)
            try:
                callback = self.get_callback(oauth_request)
            except OAuthError:
                callback = None # 1.0, no callback specified.
            self._check_signature(oauth_request, consumer, None)
            # Fetch a new token.
            token = self.data_store.fetch_request_token(consumer, callback)
        return token
    def fetch_access_token(self, oauth_request):
        """Processes an access_token request and returns the
        access token on success.
        """
        # NOTE(review): ``version`` assigned but unused (validation only).
        version = self._get_version(oauth_request)
        consumer = self._get_consumer(oauth_request)
        verifier = self._get_verifier(oauth_request)
        # Get the request token.
        token = self._get_token(oauth_request, 'request')
        self._check_signature(oauth_request, consumer, token)
        new_token = self.data_store.fetch_access_token(consumer, token, verifier)
        return new_token
    def verify_request(self, oauth_request):
        """Verifies an api call and checks all the parameters."""
        # -> consumer and token
        version = self._get_version(oauth_request)
        consumer = self._get_consumer(oauth_request)
        # Get the access token.
        token = self._get_token(oauth_request, 'access')
        self._check_signature(oauth_request, consumer, token)
        parameters = oauth_request.get_nonoauth_parameters()
        return consumer, token, parameters
    def authorize_token(self, token, user):
        """Authorize a request token."""
        return self.data_store.authorize_request_token(token, user)
    def get_callback(self, oauth_request):
        """Get the callback URL."""
        return oauth_request.get_parameter('oauth_callback')
    def build_authenticate_header(self, realm=''):
        """Optional support for the authenticate header."""
        return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
    def _get_version(self, oauth_request):
        """Verify the correct version request for this server."""
        try:
            version = oauth_request.get_parameter('oauth_version')
        except:
            version = VERSION
        if version and version != self.version:
            raise OAuthError('OAuth version %s not supported.' % str(version))
        return version
    def _get_signature_method(self, oauth_request):
        """Figure out the signature with some defaults."""
        try:
            signature_method = oauth_request.get_parameter(
                'oauth_signature_method')
        except:
            signature_method = SIGNATURE_METHOD
        try:
            # Get the signature method object.
            signature_method = self.signature_methods[signature_method]
        except:
            signature_method_names = ', '.join(self.signature_methods.keys())
            raise OAuthError('Signature method %s not supported try one of the '
                'following: %s' % (signature_method, signature_method_names))
        return signature_method
    def _get_consumer(self, oauth_request):
        consumer_key = oauth_request.get_parameter('oauth_consumer_key')
        consumer = self.data_store.lookup_consumer(consumer_key)
        if not consumer:
            raise OAuthError('Invalid consumer.')
        return consumer
    def _get_token(self, oauth_request, token_type='access'):
        """Try to find the token for the provided request token key."""
        # NOTE(review): lookup_token is called with (token_type, key) here
        # while OAuthDataStore.lookup_token declares three parameters —
        # concrete data stores appear to use this two-argument form.
        token_field = oauth_request.get_parameter('oauth_token')
        token = self.data_store.lookup_token(token_type, token_field)
        if not token:
            raise OAuthError('Invalid %s token: %s' % (token_type, token_field))
        return token
    def _get_verifier(self, oauth_request):
        return oauth_request.get_parameter('oauth_verifier')
    def _check_signature(self, oauth_request, consumer, token):
        # Validate timestamp and nonce before doing signature math.
        timestamp, nonce = oauth_request._get_timestamp_nonce()
        self._check_timestamp(timestamp)
        self._check_nonce(consumer, token, nonce)
        signature_method = self._get_signature_method(oauth_request)
        try:
            signature = oauth_request.get_parameter('oauth_signature')
        except:
            raise OAuthError('Missing signature.')
        # Validate the signature.
        valid_sig = signature_method.check_signature(oauth_request, consumer,
            token, signature)
        if not valid_sig:
            key, base = signature_method.build_signature_base_string(
                oauth_request, consumer, token)
            raise OAuthError('Invalid signature. Expected signature base '
                'string: %s' % base)
        # NOTE(review): the result of this call is never used (dead code).
        built = signature_method.build_signature(oauth_request, consumer, token)
    def _check_timestamp(self, timestamp):
        """Verify that timestamp is recentish."""
        timestamp = int(timestamp)
        now = int(time.time())
        lapsed = now - timestamp
        if lapsed > self.timestamp_threshold:
            raise OAuthError('Expired timestamp: given %d and now %s has a '
                'greater difference than threshold %d' %
                (timestamp, now, self.timestamp_threshold))
    def _check_nonce(self, consumer, token, nonce):
        """Verify that the nonce is uniqueish."""
        nonce = self.data_store.lookup_nonce(consumer, token, nonce)
        if nonce:
            raise OAuthError('Nonce already used: %s' % str(nonce))
class OAuthClient(object):
    """OAuthClient is a worker to attempt to execute a request."""
    # Class-level defaults mirror the public attribute names.
    consumer = None
    token = None
    def __init__(self, oauth_consumer, oauth_token):
        self.consumer, self.token = oauth_consumer, oauth_token
    def get_consumer(self):
        """Return the consumer identity this client acts for."""
        return self.consumer
    def get_token(self):
        """Return the token this client holds."""
        return self.token
    def fetch_request_token(self, oauth_request):
        """-> OAuthToken."""
        raise NotImplementedError
    def fetch_access_token(self, oauth_request):
        """-> OAuthToken."""
        raise NotImplementedError
    def access_resource(self, oauth_request):
        """-> Some protected resource."""
        raise NotImplementedError
class OAuthDataStore(object):
    """A database abstraction used to lookup consumers and tokens."""
    # Abstract interface: every method must be overridden by a concrete
    # store.  OAuthServer calls these during token issuance/verification.
    def lookup_consumer(self, key):
        """-> OAuthConsumer."""
        raise NotImplementedError
    def lookup_token(self, oauth_consumer, token_type, token_token):
        """-> OAuthToken."""
        # NOTE(review): OAuthServer._get_token calls this with only
        # (token_type, key) — confirm which signature implementations use.
        raise NotImplementedError
    def lookup_nonce(self, oauth_consumer, oauth_token, nonce):
        """-> OAuthToken."""
        # A truthy return value means "nonce already used" to the server.
        raise NotImplementedError
    def fetch_request_token(self, oauth_consumer, oauth_callback):
        """-> OAuthToken."""
        raise NotImplementedError
    def fetch_access_token(self, oauth_consumer, oauth_token, oauth_verifier):
        """-> OAuthToken."""
        raise NotImplementedError
    def authorize_request_token(self, oauth_token, user):
        """-> OAuthToken."""
        raise NotImplementedError
class OAuthSignatureMethod(object):
    """A strategy class that implements a signature method."""
    def get_name(self):
        """-> str."""
        raise NotImplementedError
    def build_signature_base_string(self, oauth_request, oauth_consumer, oauth_token):
        """-> str key, str raw."""
        raise NotImplementedError
    def build_signature(self, oauth_request, oauth_consumer, oauth_token):
        """-> str."""
        raise NotImplementedError
    def check_signature(self, oauth_request, consumer, token, signature):
        """Compare a presented signature against a freshly built one."""
        expected = self.build_signature(oauth_request, consumer, token)
        return expected == signature
class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod):
    # HMAC-SHA1 per OAuth 1.0; Python 2 code (str keys passed to hmac.new).
    def get_name(self):
        return 'HMAC-SHA1'
    def build_signature_base_string(self, oauth_request, consumer, token):
        # Base string: METHOD&URL&PARAMS, each component percent-escaped.
        sig = (
            escape(oauth_request.get_normalized_http_method()),
            escape(oauth_request.get_normalized_http_url()),
            escape(oauth_request.get_normalized_parameters()),
        )
        # Key: consumer_secret&token_secret (token part optional).
        key = '%s&' % escape(consumer.secret)
        if token:
            key += escape(token.secret)
        raw = '&'.join(sig)
        return key, raw
    def build_signature(self, oauth_request, consumer, token):
        """Builds the base signature string."""
        key, raw = self.build_signature_base_string(oauth_request, consumer,
            token)
        # HMAC object.
        try:
            import hashlib # 2.5
            hashed = hmac.new(key, raw, hashlib.sha1)
        except:
            import sha # Deprecated
            hashed = hmac.new(key, raw, sha)
        # Calculate the digest base 64.
        # [:-1] strips the trailing newline that b2a_base64 appends.
        return binascii.b2a_base64(hashed.digest())[:-1]
class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod):
    # PLAINTEXT signature method: the "signature" is simply the escaped
    # secrets themselves, so it should only be used over a secure channel.
    def get_name(self):
        # Value of the oauth_signature_method protocol parameter.
        return 'PLAINTEXT'
    def build_signature_base_string(self, oauth_request, consumer, token):
        """Return "<escaped consumer secret>&[<escaped token secret>]".

        The same string is returned as both key and raw so that
        build_signature can reuse it directly.
        """
        sig = '%s&' % escape(consumer.secret)
        if token:
            sig = sig + escape(token.secret)
        return sig, sig
    def build_signature(self, oauth_request, consumer, token):
        # For PLAINTEXT the signature *is* the key itself.
        key, raw = self.build_signature_base_string(oauth_request, consumer,
            token)
        return key | bsd-3-clause | a4d95a65fe905496e369a063c0491194 | 34.406154 | 86 | 0.616087 | 4.345355 | false | false | false | false |
mozilla/inventory | vendor-local/src/django-tastypie/tests/core/tests/authorization.py | 14 | 5561 | from django.test import TestCase
from django.http import HttpRequest
from django.contrib.auth.models import User, Permission
from core.models import Note
from tastypie.authorization import Authorization, ReadOnlyAuthorization, DjangoAuthorization
from tastypie import fields
from tastypie.resources import Resource, ModelResource
class NoRulesNoteResource(ModelResource):
    # Resource guarded by the base Authorization class, which (per the
    # tests below) allows every HTTP method through.
    class Meta:
        resource_name = 'notes'
        queryset = Note.objects.filter(is_active=True)
        authorization = Authorization()
class ReadOnlyNoteResource(ModelResource):
    # Resource guarded by ReadOnlyAuthorization: GET passes, writes fail.
    class Meta:
        resource_name = 'notes'
        queryset = Note.objects.filter(is_active=True)
        authorization = ReadOnlyAuthorization()
class DjangoNoteResource(ModelResource):
    # Resource guarded by DjangoAuthorization, which consults the user's
    # add/change/delete model permissions.
    class Meta:
        resource_name = 'notes'
        queryset = Note.objects.filter(is_active=True)
        authorization = DjangoAuthorization()
class NotAModel(object):
    # Plain object (deliberately not a Django model) used to verify that
    # DjangoAuthorization copes with non-model object_class resources.
    name = 'Foo'
class NotAModelResource(Resource):
    # Non-ORM resource wrapping NotAModel, still using DjangoAuthorization.
    name = fields.CharField(attribute='name')

    class Meta:
        resource_name = 'notamodel'
        object_class = NotAModel
        authorization = DjangoAuthorization()
class AuthorizationTestCase(TestCase):
    # Covers the permissive base Authorization and ReadOnlyAuthorization.
    fixtures = ['note_testdata']

    def test_no_rules(self):
        # The default Authorization lets every HTTP method through.
        request = HttpRequest()

        for method in ('GET', 'POST', 'PUT', 'DELETE'):
            request.method = method
            self.assertTrue(NoRulesNoteResource()._meta.authorization.is_authorized(request))

    def test_read_only(self):
        # ReadOnlyAuthorization accepts GET but rejects all write methods.
        request = HttpRequest()
        request.method = 'GET'
        self.assertTrue(ReadOnlyNoteResource()._meta.authorization.is_authorized(request))

        for method in ('POST', 'PUT', 'DELETE'):
            request = HttpRequest()
            request.method = method
            self.assertFalse(ReadOnlyNoteResource()._meta.authorization.is_authorized(request))
class DjangoAuthorizationTestCase(TestCase):
    # Exercises DjangoAuthorization, which maps HTTP methods onto Django's
    # add/change/delete model permissions.
    fixtures = ['note_testdata']

    def setUp(self):
        # Every test starts from a user with an empty permission set.
        self.add = Permission.objects.get_by_natural_key('add_note', 'core', 'note')
        self.change = Permission.objects.get_by_natural_key('change_note', 'core', 'note')
        self.delete = Permission.objects.get_by_natural_key('delete_note', 'core', 'note')
        self.user = User.objects.all()[0]
        self.user.user_permissions.clear()

    def test_no_perms(self):
        # sanity check: user has no permissions
        self.assertFalse(self.user.get_all_permissions())
        request = HttpRequest()
        request.method = 'GET'
        request.user = self.user
        # with no permissions, api is read-only
        self.assertTrue(DjangoNoteResource()._meta.authorization.is_authorized(request))

        for method in ('POST', 'PUT', 'DELETE'):
            request.method = method
            self.assertFalse(DjangoNoteResource()._meta.authorization.is_authorized(request))

    def test_add_perm(self):
        # add_note permission unlocks POST.
        request = HttpRequest()
        request.user = self.user

        # give add permission
        request.user.user_permissions.add(self.add)
        request.method = 'POST'
        self.assertTrue(DjangoNoteResource()._meta.authorization.is_authorized(request))

    def test_change_perm(self):
        # change_note permission unlocks PUT.
        request = HttpRequest()
        request.user = self.user

        # give change permission
        request.user.user_permissions.add(self.change)
        request.method = 'PUT'
        self.assertTrue(DjangoNoteResource()._meta.authorization.is_authorized(request))

    def test_delete_perm(self):
        # delete_note permission unlocks DELETE.
        request = HttpRequest()
        request.user = self.user

        # give delete permission
        request.user.user_permissions.add(self.delete)
        request.method = 'DELETE'
        self.assertTrue(DjangoNoteResource()._meta.authorization.is_authorized(request))

    def test_all(self):
        # With all three permissions, every known method is authorized.
        request = HttpRequest()
        request.user = self.user

        request.user.user_permissions.add(self.add)
        request.user.user_permissions.add(self.change)
        request.user.user_permissions.add(self.delete)

        for method in ('GET', 'OPTIONS', 'HEAD', 'POST', 'PUT', 'DELETE', 'PATCH'):
            request.method = method
            self.assertTrue(DjangoNoteResource()._meta.authorization.is_authorized(request))

    def test_not_a_model(self):
        # Non-model resources fall through DjangoAuthorization as allowed.
        request = HttpRequest()
        request.user = self.user

        # give add permission
        request.user.user_permissions.add(self.add)
        request.method = 'POST'
        self.assertTrue(NotAModelResource()._meta.authorization.is_authorized(request))

    def test_patch_perms(self):
        # PATCH requires add AND change AND delete permissions together.
        request = HttpRequest()
        request.user = self.user
        request.method = 'PATCH'

        # Not enough.
        request.user.user_permissions.add(self.add)
        self.assertFalse(DjangoNoteResource()._meta.authorization.is_authorized(request))

        # Still not enough.
        request.user.user_permissions.add(self.change)
        self.assertFalse(DjangoNoteResource()._meta.authorization.is_authorized(request))

        # Much better.
        request.user.user_permissions.add(self.delete)
        # Nuke the perm cache. :/
        del request.user._perm_cache
        self.assertTrue(DjangoNoteResource()._meta.authorization.is_authorized(request))

    def test_unrecognized_method(self):
        # Methods outside the known set are rejected outright.
        request = HttpRequest()
        request.user = self.user

        # Check a non-existent HTTP method.
        request.method = 'EXPLODE'
        self.assertFalse(DjangoNoteResource()._meta.authorization.is_authorized(request))
| bsd-3-clause | b12e023b71dd592e428f55d2b63acdaa | 33.75625 | 95 | 0.664449 | 4.2418 | false | true | false | false |
mozilla/inventory | vendor-local/src/django-extensions/django_extensions/utils/dia2django.py | 44 | 10246 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
##Author Igor Támara igor@tamarapatino.org
##Use this little program as you wish, if you
#include it in your work, let others know you
#are using it preserving this note, you have
#the right to make derivative works, Use it
#at your own risk.
#Tested to work on(etch testing 13-08-2007):
# Python 2.4.4 (#2, Jul 17 2007, 11:56:54)
# [GCC 4.1.3 20070629 (prerelease) (Debian 4.1.2-13)] on linux2
# Classes treated as external dependencies (seeded with django.contrib.auth
# models); dia2django appends further externally-referenced classes here and
# skips generating model code for them.
dependclasses = ["User", "Group", "Permission", "Message"]
import codecs
import sys
import gzip
from xml.dom.minidom import *
import re
#Type dictionary translation types SQL -> Django.  The previous literal
#repeated the "date" and "int" keys (with identical values); later
#duplicates silently win in a dict literal, so removing them does not
#change behaviour.
tsd = {
    "text": "TextField",
    "date": "DateField",
    "varchar": "CharField",
    "int": "IntegerField",
    "float": "FloatField",
    "serial": "AutoField",
    "boolean": "BooleanField",
    "numeric": "FloatField",
    "timestamp": "DateTimeField",
    "bigint": "IntegerField",
    "datetime": "DateTimeField",
    "time": "TimeField",
    "bool": "BooleanField",
}
#convert varchar -> CharField; raw string so the \( and \d escapes are
#passed to the regex engine verbatim (non-raw '\d' is a deprecated escape).
v2c = re.compile(r'varchar\((\d+)\)')
def index(fks, id):
    """Looks for the id on fks, fks is an array of arrays, each array has on [1]
    the id of the class in a dia diagram. When not present returns None, else
    it returns the position of the class with id on fks"""
    for class_name, entry in fks.items():
        if entry[1] == id:
            return class_name
    return None
def addparentstofks(rels, fks):
    """Rewrite ``fks`` so UML generalizations become Django inheritance.

    ``rels`` is a list of (parent_id, son_id) pairs taken from the dia
    generalization arrows; ``fks`` maps class names to the arrays built by
    dia2django.  For each relation the son's class declaration is rewritten
    to inherit from the parent instead of models.Model, and the parent is
    recorded as a dependency so classes can later be emitted in a valid
    order.
    """
    for relation in rels:
        son = index(fks, relation[1])
        parent = index(fks, relation[0])
        son_entry = fks[son]
        son_entry[2] = son_entry[2].replace("models.Model", parent)
        if parent not in son_entry[0]:
            son_entry[0].append(parent)
def dia2django(archivo):
    """Parse a gzipped .dia UML diagram and return Django model source code.

    Walks the XML looking for UML - Class objects (turned into model
    classes), UML - Generalization arrows (turned into inheritance) and
    UML - SmallPackage objects (turned into imports), then orders the
    generated classes so that ForeignKey targets are emitted first.
    """
    models_txt = ''
    f = codecs.open(archivo, "rb")
    #dia files are gzipped
    data = gzip.GzipFile(fileobj=f).read()
    ppal = parseString(data)
    #diagram -> layer -> object -> UML - Class -> name, (attribs : composite -> name,type)
    datos = ppal.getElementsByTagName("dia:diagram")[0].getElementsByTagName("dia:layer")[0].getElementsByTagName("dia:object")
    clases = {}
    herit = []
    imports = u""
    for i in datos:
        #Look for the classes
        if i.getAttribute("type") == "UML - Class":
            myid = i.getAttribute("id")
            for j in i.childNodes:
                if j.nodeType == Node.ELEMENT_NODE and j.hasAttributes():
                    if j.getAttribute("name") == "name":
                        # dia wraps strings in '#...#'; [1:-1] strips them.
                        actclas = j.getElementsByTagName("dia:string")[0].childNodes[0].data[1:-1]
                        myname = "\nclass %s(models.Model) :\n" % actclas
                        # clases entry layout: [deps, dia id, source text, refcount]
                        clases[actclas] = [[], myid, myname, 0]
                    if j.getAttribute("name") == "attributes":
                        for l in j.getElementsByTagName("dia:composite"):
                            if l.getAttribute("type") == "umlattribute":
                                #Look for the attribute name and type
                                for k in l.getElementsByTagName("dia:attribute"):
                                    if k.getAttribute("name") == "name":
                                        nc = k.getElementsByTagName("dia:string")[0].childNodes[0].data[1:-1]
                                    elif k.getAttribute("name") == "type":
                                        tc = k.getElementsByTagName("dia:string")[0].childNodes[0].data[1:-1]
                                    elif k.getAttribute("name") == "value":
                                        val = k.getElementsByTagName("dia:string")[0].childNodes[0].data[1:-1]
                                        if val == '##':
                                            val = ''
                                    elif k.getAttribute("name") == "visibility" and k.getElementsByTagName("dia:enum")[0].getAttribute("val") == "2":
                                        if tc.replace(" ", "").lower().startswith("manytomanyfield("):
                                            #If we find a class not in our model that is marked as being to another model
                                            newc = tc.replace(" ", "")[16:-1]
                                            if dependclasses.count(newc) == 0:
                                                dependclasses.append(newc)
                                        if tc.replace(" ", "").lower().startswith("foreignkey("):
                                            #If we find a class not in our model that is marked as being to another model
                                            newc = tc.replace(" ", "")[11:-1]
                                            if dependclasses.count(newc) == 0:
                                                dependclasses.append(newc)
                                #Mapping SQL types to Django
                                varch = v2c.search(tc)
                                if tc.replace(" ", "").startswith("ManyToManyField("):
                                    myfor = tc.replace(" ", "")[16:-1]
                                    if actclas == myfor:
                                        #In case of a recursive type, we use 'self'
                                        tc = tc.replace(myfor, "'self'")
                                    elif clases[actclas][0].count(myfor) == 0:
                                        #Adding related class
                                        if myfor not in dependclasses:
                                            #In case we are using Auth classes or external via protected dia visibility
                                            clases[actclas][0].append(myfor)
                                    tc = "models." + tc
                                    if len(val) > 0:
                                        tc = tc.replace(")", "," + val + ")")
                                elif tc.find("Field") != -1:
                                    if tc.count("()") > 0 and len(val) > 0:
                                        tc = "models.%s" % tc.replace(")", "," + val + ")")
                                    else:
                                        tc = "models.%s(%s)" % (tc, val)
                                elif tc.replace(" ", "").startswith("ForeignKey("):
                                    myfor = tc.replace(" ", "")[11:-1]
                                    if actclas == myfor:
                                        #In case of a recursive type, we use 'self'
                                        tc = tc.replace(myfor, "'self'")
                                    elif clases[actclas][0].count(myfor) == 0:
                                        #Adding foreign classes
                                        if myfor not in dependclasses:
                                            #In case we are using Auth classes
                                            clases[actclas][0].append(myfor)
                                    tc = "models." + tc
                                    if len(val) > 0:
                                        tc = tc.replace(")", "," + val + ")")
                                elif varch == None:
                                    tc = "models." + tsd[tc.strip().lower()] + "(" + val + ")"
                                else:
                                    tc = "models.CharField(max_length=" + varch.group(1) + ")"
                                    if len(val) > 0:
                                        tc = tc.replace(")", ", " + val + " )")
                                if not (nc == "id" and tc == "AutoField()"):
                                    clases[actclas][2] = clases[actclas][2] + (" %s = %s\n" % (nc, tc))
        elif i.getAttribute("type") == "UML - Generalization":
            # Generalization arrows carry two connection handles:
            # handle 0 -> parent, handle 1 -> son.
            mycons = ['A', 'A']
            a = i.getElementsByTagName("dia:connection")
            for j in a:
                if len(j.getAttribute("to")):
                    mycons[int(j.getAttribute("handle"))] = j.getAttribute("to")
            print mycons
            if not 'A' in mycons:
                herit.append(mycons)
        elif i.getAttribute("type") == "UML - SmallPackage":
            a = i.getElementsByTagName("dia:string")
            for j in a:
                if len(j.childNodes[0].data[1:-1]):
                    imports += u"from %s.models import *" % j.childNodes[0].data[1:-1]
    addparentstofks(herit, clases)
    #Ordering the appearance of classes
    #First we make a list of the classes each classs is related to.
    ordered = []
    for j, k in clases.iteritems():
        k[2] = k[2] + "\n def __unicode__(self):\n return u\"\"\n"
        for fk in k[0]:
            if fk not in dependclasses:
                clases[fk][3] += 1
        ordered.append([j] + k)
    # Bubble classes with unsatisfied dependencies towards the end, then
    # reverse so that dependencies are defined before their dependents.
    i = 0
    while i < len(ordered):
        mark = i
        j = i + 1
        while j < len(ordered):
            if ordered[i][0] in ordered[j][1]:
                mark = j
            j += 1
        if mark == i:
            i += 1
        else:
            # swap %s in %s" % ( ordered[i] , ordered[mark]) to make ordered[i] to be at the end
            if ordered[i][0] in ordered[mark][1] and ordered[mark][0] in ordered[i][1]:
                #Resolving simplistic circular ForeignKeys
                print "Not able to resolve circular ForeignKeys between %s and %s" % (ordered[i][1], ordered[mark][0])
                break
            a = ordered[i]
            ordered[i] = ordered[mark]
            ordered[mark] = a
        if i == len(ordered) - 1:
            break
    ordered.reverse()
    if imports:
        models_txt = str(imports)
    for i in ordered:
        models_txt += '%s\n' % str(i[3])
    return models_txt
if __name__ == '__main__':
    # Command-line entry point: expects exactly one argument, the path to a
    # .dia diagram file; otherwise prints usage.
    if len(sys.argv) == 2:
        dia2django(sys.argv[1])
    else:
        print " Use:\n \n " + sys.argv[0] + " diagram.dia\n\n"
| bsd-3-clause | 4f3699a0734ce05b021fe2e66e3ef834 | 46.873832 | 149 | 0.445193 | 4.342942 | false | false | false | false |
mozilla/inventory | vendor-local/src/django-tastypie/tastypie/authentication.py | 14 | 16908 | import base64
import hmac
import time
import uuid
from django.conf import settings
from django.contrib.auth import authenticate
from django.core.exceptions import ImproperlyConfigured
from django.middleware.csrf import _sanitize_token, constant_time_compare
from django.utils.http import same_origin
from django.utils.translation import ugettext as _
from tastypie.http import HttpUnauthorized
try:
from hashlib import sha1
except ImportError:
import sha
sha1 = sha.sha
try:
import python_digest
except ImportError:
python_digest = None
try:
import oauth2
except ImportError:
oauth2 = None
try:
import oauth_provider
except ImportError:
oauth_provider = None
class Authentication(object):
    """
    A simple base class to establish the protocol for auth.

    By default, this indicates the user is always authenticated.
    """
    def __init__(self, require_active=True):
        # When True, check_active consults ``user.is_active``.
        self.require_active = require_active

    def is_authenticated(self, request, **kwargs):
        """
        Identifies if the user is authenticated to continue or not.

        The base implementation allows everyone through.  Subclasses return
        either ``True`` if allowed, ``False`` if not, or an ``HttpResponse``
        for something custom.
        """
        return True

    def get_identifier(self, request):
        """
        Provides a unique string identifier for the requestor.

        This implementation returns a combination of IP address and hostname.
        """
        remote_addr = request.META.get('REMOTE_ADDR', 'noaddr')
        remote_host = request.META.get('REMOTE_HOST', 'nohost')
        return "%s_%s" % (remote_addr, remote_host)

    def check_active(self, user):
        """
        Ensures the user has an active account.

        Optimized for the ``django.contrib.auth.models.User`` case.
        """
        # When activity checks are disabled, everyone counts as active.
        return user.is_active if self.require_active else True
class BasicAuthentication(Authentication):
    """
    Handles HTTP Basic auth against a specific auth backend if provided,
    or against all configured authentication backends using the
    ``authenticate`` method from ``django.contrib.auth``.

    Optional keyword arguments:

    ``backend``
        If specified, use a specific ``django.contrib.auth`` backend instead
        of checking all backends specified in the ``AUTHENTICATION_BACKENDS``
        setting.
    ``realm``
        The realm to use in the ``HttpUnauthorized`` response.  Default:
        ``django-tastypie``.
    """
    def __init__(self, backend=None, realm='django-tastypie', **kwargs):
        super(BasicAuthentication, self).__init__(**kwargs)
        self.backend = backend
        self.realm = realm

    def _unauthorized(self):
        # 401 challenge asking the client to retry with Basic credentials.
        response = HttpUnauthorized()
        # FIXME: Sanitize realm.
        response['WWW-Authenticate'] = 'Basic Realm="%s"' % self.realm
        return response

    def is_authenticated(self, request, **kwargs):
        """
        Checks a user's basic auth credentials against the current
        Django auth backend.

        Should return either ``True`` if allowed, ``False`` if not or an
        ``HttpResponse`` if you need something custom.
        """
        if not request.META.get('HTTP_AUTHORIZATION'):
            return self._unauthorized()

        try:
            (auth_type, data) = request.META['HTTP_AUTHORIZATION'].split()

            if auth_type.lower() != 'basic':
                return self._unauthorized()

            user_pass = base64.b64decode(data)
        except (ValueError, TypeError):
            # ValueError: header did not split into exactly two parts (or,
            # on Python 3, the payload was invalid base64 -- binascii.Error
            # subclasses ValueError); TypeError: invalid base64 on Python 2.
            # The previous bare ``except:`` also swallowed KeyboardInterrupt
            # and genuine programming errors.
            return self._unauthorized()

        bits = user_pass.split(':', 1)

        if len(bits) != 2:
            return self._unauthorized()

        if self.backend:
            user = self.backend.authenticate(username=bits[0], password=bits[1])
        else:
            user = authenticate(username=bits[0], password=bits[1])

        if user is None:
            return self._unauthorized()

        if not self.check_active(user):
            return False

        request.user = user
        return True

    def get_identifier(self, request):
        """
        Provides a unique string identifier for the requestor.

        This implementation returns the user's basic auth username.
        """
        return request.META.get('REMOTE_USER', 'nouser')
class ApiKeyAuthentication(Authentication):
    """
    Handles API key auth, in which a user provides a username & API key.

    Uses the ``ApiKey`` model that ships with tastypie. If you wish to use
    a different model, override the ``get_key`` method to perform the key check
    as suits your needs.
    """
    def _unauthorized(self):
        # Plain 401; API keys have no challenge header to send back.
        return HttpUnauthorized()

    def extract_credentials(self, request):
        # Credentials come either from an "Authorization: ApiKey user:key"
        # header or from username/api_key GET/POST parameters.  Raises
        # ValueError when the header is present but malformed.
        if request.META.get('HTTP_AUTHORIZATION') and request.META['HTTP_AUTHORIZATION'].lower().startswith('apikey '):
            (auth_type, data) = request.META['HTTP_AUTHORIZATION'].split()

            if auth_type.lower() != 'apikey':
                raise ValueError("Incorrect authorization header.")

            username, api_key = data.split(':', 1)
        else:
            username = request.GET.get('username') or request.POST.get('username')
            api_key = request.GET.get('api_key') or request.POST.get('api_key')

        return username, api_key

    def is_authenticated(self, request, **kwargs):
        """
        Finds the user and checks their API key.

        Should return either ``True`` if allowed, ``False`` if not or an
        ``HttpResponse`` if you need something custom.
        """
        from django.contrib.auth.models import User

        try:
            username, api_key = self.extract_credentials(request)
        except ValueError:
            return self._unauthorized()

        if not username or not api_key:
            return self._unauthorized()

        try:
            user = User.objects.get(username=username)
        except (User.DoesNotExist, User.MultipleObjectsReturned):
            return self._unauthorized()

        if not self.check_active(user):
            return False

        request.user = user
        # get_key returns True or an HttpUnauthorized response.
        return self.get_key(user, api_key)

    def get_key(self, user, api_key):
        """
        Attempts to find the API key for the user. Uses ``ApiKey`` by default
        but can be overridden.
        """
        from tastypie.models import ApiKey

        try:
            ApiKey.objects.get(user=user, key=api_key)
        except ApiKey.DoesNotExist:
            return self._unauthorized()

        return True

    def get_identifier(self, request):
        """
        Provides a unique string identifier for the requestor.

        This implementation returns the user's username.
        """
        username, api_key = self.extract_credentials(request)
        return username or 'nouser'
class SessionAuthentication(Authentication):
    """
    An authentication mechanism that piggy-backs on Django sessions.

    This is useful when the API is talking to Javascript on the same site.
    Relies on the user being logged in through the standard Django login
    setup.

    Requires a valid CSRF token.
    """
    def is_authenticated(self, request, **kwargs):
        """
        Checks to make sure the user is logged in & has a Django session.
        """
        # Cargo-culted from Django 1.3/1.4's ``django/middleware/csrf.py``.
        # We can't just use what's there, since the return values will be
        # wrong.
        # We also can't risk accessing ``request.POST``, which will break with
        # the serialized bodies.
        if request.method in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
            # Safe methods skip the CSRF check entirely.
            return request.user.is_authenticated()

        if getattr(request, '_dont_enforce_csrf_checks', False):
            return request.user.is_authenticated()

        csrf_token = _sanitize_token(request.COOKIES.get(settings.CSRF_COOKIE_NAME, ''))

        if request.is_secure():
            # Over HTTPS additionally require a same-origin Referer, as
            # Django's own CSRF middleware does.
            referer = request.META.get('HTTP_REFERER')

            if referer is None:
                return False

            good_referer = 'https://%s/' % request.get_host()

            if not same_origin(referer, good_referer):
                return False

        # The token must arrive via the X-CSRFToken header (request bodies
        # are deliberately not consulted; see note above).
        request_csrf_token = request.META.get('HTTP_X_CSRFTOKEN', '')

        if not constant_time_compare(request_csrf_token, csrf_token):
            return False

        return request.user.is_authenticated()

    def get_identifier(self, request):
        """
        Provides a unique string identifier for the requestor.

        This implementation returns the user's username.
        """
        return request.user.username
class DigestAuthentication(Authentication):
    """
    Handles HTTP Digest auth against a specific auth backend if provided,
    or against all configured authentication backends using the
    ``authenticate`` method from ``django.contrib.auth``. However, instead of
    the user's password, their API key should be used.

    Optional keyword arguments:

    ``backend``
        If specified, use a specific ``django.contrib.auth`` backend instead
        of checking all backends specified in the ``AUTHENTICATION_BACKENDS``
        setting.
    ``realm``
        The realm to use in the ``HttpUnauthorized`` response.  Default:
        ``django-tastypie``.
    """
    def __init__(self, backend=None, realm='django-tastypie', **kwargs):
        super(DigestAuthentication, self).__init__(**kwargs)
        self.backend = backend
        self.realm = realm

        if python_digest is None:
            raise ImproperlyConfigured("The 'python_digest' package could not be imported. It is required for use with the 'DigestAuthentication' class.")

    def _unauthorized(self):
        # 401 challenge carrying a fresh nonce/opaque pair for the client.
        response = HttpUnauthorized()
        new_uuid = uuid.uuid4()
        opaque = hmac.new(str(new_uuid), digestmod=sha1).hexdigest()
        response['WWW-Authenticate'] = python_digest.build_digest_challenge(time.time(), getattr(settings, 'SECRET_KEY', ''), self.realm, opaque, False)
        return response

    def is_authenticated(self, request, **kwargs):
        """
        Finds the user and checks their API key.

        Should return either ``True`` if allowed, ``False`` if not or an
        ``HttpResponse`` if you need something custom.
        """
        if not request.META.get('HTTP_AUTHORIZATION'):
            return self._unauthorized()

        try:
            (auth_type, data) = request.META['HTTP_AUTHORIZATION'].split(' ', 1)

            if auth_type.lower() != 'digest':
                return self._unauthorized()
        except ValueError:
            # The header had no space to split on, i.e. malformed
            # credentials.  The previous bare ``except:`` also swallowed
            # KeyboardInterrupt/SystemExit and real bugs.
            return self._unauthorized()

        digest_response = python_digest.parse_digest_credentials(request.META['HTTP_AUTHORIZATION'])

        # FIXME: Should the nonce be per-user?
        if not python_digest.validate_nonce(digest_response.nonce, getattr(settings, 'SECRET_KEY', '')):
            return self._unauthorized()

        user = self.get_user(digest_response.username)
        api_key = self.get_key(user)

        if user is False or api_key is False:
            return self._unauthorized()

        expected = python_digest.calculate_request_digest(
            request.method,
            python_digest.calculate_partial_digest(digest_response.username, self.realm, api_key),
            digest_response)

        if not digest_response.response == expected:
            return self._unauthorized()

        if not self.check_active(user):
            return False

        request.user = user
        return True

    def get_user(self, username):
        # Returns the matching Django user or False (not None) on failure,
        # mirroring get_key's failure convention.
        from django.contrib.auth.models import User

        try:
            user = User.objects.get(username=username)
        except (User.DoesNotExist, User.MultipleObjectsReturned):
            return False

        return user

    def get_key(self, user):
        """
        Attempts to find the API key for the user. Uses ``ApiKey`` by default
        but can be overridden.

        Note that this behaves differently than the ``ApiKeyAuthentication``
        method of the same name.
        """
        from tastypie.models import ApiKey

        try:
            key = ApiKey.objects.get(user=user)
        except ApiKey.DoesNotExist:
            return False

        return key.key

    def get_identifier(self, request):
        """
        Provides a unique string identifier for the requestor.

        This implementation returns the user's username.
        """
        if hasattr(request, 'user'):
            if hasattr(request.user, 'username'):
                return request.user.username

        return 'nouser'
class OAuthAuthentication(Authentication):
    """
    Handles OAuth, which checks a user's credentials against a separate service.
    Currently verifies against OAuth 1.0a services.

    This does *NOT* provide OAuth authentication in your API, strictly
    consumption.
    """
    def __init__(self, **kwargs):
        super(OAuthAuthentication, self).__init__(**kwargs)

        if oauth2 is None:
            raise ImproperlyConfigured("The 'python-oauth2' package could not be imported. It is required for use with the 'OAuthAuthentication' class.")

        if oauth_provider is None:
            raise ImproperlyConfigured("The 'django-oauth-plus' package could not be imported. It is required for use with the 'OAuthAuthentication' class.")

    def is_authenticated(self, request, **kwargs):
        # Imported lazily; availability of the optional oauth_provider
        # package is verified in __init__.
        from oauth_provider.store import store, InvalidTokenError

        if self.is_valid_request(request):
            oauth_request = oauth_provider.utils.get_oauth_request(request)
            consumer = store.get_consumer(request, oauth_request, oauth_request.get_parameter('oauth_consumer_key'))

            try:
                token = store.get_access_token(request, oauth_request, consumer, oauth_request.get_parameter('oauth_token'))
            except oauth_provider.store.InvalidTokenError:
                return oauth_provider.utils.send_oauth_error(oauth2.Error(_('Invalid access token: %s') % oauth_request.get_parameter('oauth_token')))

            try:
                self.validate_token(request, consumer, token)
            except oauth2.Error, e:
                return oauth_provider.utils.send_oauth_error(e)

            if consumer and token:
                if not self.check_active(token.user):
                    return False

                request.user = token.user
                return True

            return oauth_provider.utils.send_oauth_error(oauth2.Error(_('You are not allowed to access this resource.')))

        return oauth_provider.utils.send_oauth_error(oauth2.Error(_('Invalid request parameters.')))

    def is_in(self, params):
        """
        Checks to ensure that all the OAuth parameter names are in the
        provided ``params``.
        """
        from oauth_provider.consts import OAUTH_PARAMETERS_NAMES

        for param_name in OAUTH_PARAMETERS_NAMES:
            if param_name not in params:
                return False

        return True

    def is_valid_request(self, request):
        """
        Checks whether the required parameters are either in the HTTP
        ``Authorization`` header sent by some clients (the preferred method
        according to OAuth spec) or fall back to ``GET/POST``.
        """
        auth_params = request.META.get("HTTP_AUTHORIZATION", [])
        return self.is_in(auth_params) or self.is_in(request.REQUEST)

    def validate_token(self, request, consumer, token):
        # Delegates nonce/signature verification to django-oauth-plus's
        # OAuth server implementation.
        oauth_server, oauth_request = oauth_provider.utils.initialize_server_request(request)
        return oauth_server.verify_request(oauth_request, consumer, token)
class MultiAuthentication(object):
    """
    An authentication backend that tries a number of backends in order.

    The first backend to authenticate wins and is remembered on the request
    so ``get_identifier`` can delegate to it; a backend answering with an
    ``HttpUnauthorized`` is kept as the fallback result.
    """
    def __init__(self, *backends, **kwargs):
        super(MultiAuthentication, self).__init__(**kwargs)
        self.backends = backends

    def is_authenticated(self, request, **kwargs):
        """
        Identifies if the user is authenticated to continue or not.

        Should return either ``True`` if allowed, ``False`` if not or an
        ``HttpResponse`` if you need something custom.
        """
        unauthorized = False

        for candidate in self.backends:
            verdict = candidate.is_authenticated(request, **kwargs)

            if not verdict:
                continue

            if isinstance(verdict, HttpUnauthorized):
                # Keep the first 401 response seen so it can be returned
                # if no backend ultimately succeeds.
                if not unauthorized:
                    unauthorized = verdict
            else:
                # Record the winning backend for get_identifier.
                request._authentication_backend = candidate
                return verdict

        return unauthorized

    def get_identifier(self, request):
        """
        Provides a unique string identifier for the requestor.

        This implementation returns a combination of IP address and hostname.
        """
        try:
            return request._authentication_backend.get_identifier(request)
        except AttributeError:
            return 'nouser'
| bsd-3-clause | ef6d748fb8311677ab42b885878ed689 | 32.41502 | 157 | 0.625148 | 4.559871 | false | false | false | false |
mozilla/inventory | mozdns/views.py | 2 | 2820 | from mozdns.utils import slim_form
from base.views import BaseListView, BaseDetailView, BaseCreateView
from base.views import BaseUpdateView, BaseDeleteView
class MozdnsListView(BaseListView):
    """List view for mozdns objects; renders the shared list template."""
    template_name = 'mozdns/mozdns_list.html'
class MozdnsDetailView(BaseDetailView):
    """Detail view for mozdns objects; renders the shared detail template."""
    template_name = 'mozdns/mozdns_detail.html'
class MozdnsCreateView(BaseCreateView):
    """Create view for mozdns objects; narrows form choices via URL kwargs."""
    template_name = 'mozdns/mozdns_form.html'

    def get_form(self, form_class):
        form = super(MozdnsCreateView, self).get_form(form_class)
        domain_pk = self.kwargs.get('domain', False)

        # The use of slim_form makes my eyes bleed and stomach churn.
        if domain_pk:
            form = slim_form(domain_pk=domain_pk, form=form)

        reverse_domain_pk = self.kwargs.get('reverse_domain', False)
        if reverse_domain_pk:
            # NOTE(review): unlike the domain branch above, the return value
            # of slim_form() is discarded here; if slim_form does not mutate
            # ``form`` in place, this call has no effect -- confirm.
            slim_form(reverse_domain_pk=reverse_domain_pk, form=form)

        # This is where filtering domain selection should take place.
        # form.fields['domain'].queryset = Domain.objects.filter(name =
        # 'foo.com') This ^ line will change the query set to something
        # controllable find user credentials in self.request
        """This removes the unhelpful "Hold down the...." help texts for the
        specified fields for a form."""
        remove_message = unicode(' Hold down "Control", or "Command" on a Mac,'
                                 'to select more than one.')
        for field in form.fields:
            if field in form.base_fields:
                if form.base_fields[field].help_text:
                    new_text = form.base_fields[field].help_text.replace(
                        remove_message, '')
                    new_text = new_text.strip()
                    form.base_fields[field].help_text = new_text
        return form
class MozdnsUpdateView(BaseUpdateView):
    """Update view for mozdns objects."""
    template_name = 'mozdns/mozdns_form.html'

    def get_form(self, form_class):
        form = super(MozdnsUpdateView, self).get_form(form_class)
        """This removes the unhelpful "Hold down the...." help texts for the
        specified fields for a form."""
        # NOTE(review): this duplicates the help-text stripping loop in
        # MozdnsCreateView.get_form; a shared helper would avoid drift.
        remove_message = unicode(' Hold down "Control", or "Command" on a Mac,'
                                 'to select more than one.')
        for field in form.fields:
            if field in form.base_fields:
                if form.base_fields[field].help_text:
                    new_text = form.base_fields[field].help_text.replace(
                        remove_message, '')
                    new_text = new_text.strip()
                    form.base_fields[field].help_text = new_text
        return form
class MozdnsDeleteView(BaseDeleteView):
    """Confirmation + delete view for mozdns objects."""
    template_name = 'mozdns/mozdns_confirm_delete.html'
    # Bug fix: this attribute was previously misspelled ``succcess_url``,
    # which Django's delete view machinery never reads, so the intended
    # post-delete redirect target was silently ignored.
    success_url = '/mozdns/'
mozilla/inventory | migrate_dns/inventory_build.py | 3 | 3602 | from migrate_dns.build_nics import *
from migrate_dns.utils import *
import ipaddr
import pprint
pp = pprint.PrettyPrinter(indent=2)
def generate_hostname(nic, site_name):
    """
    Determine the correct fqdn for a nic's A/PTR record from its hostname,
    the site the name is going into, and the nic's dns_auto_hostname flag.

    :param nic: The nic whose hostname and flags are inspected.
    :type nic: nic object (uses hostname, dns_auto_hostname, system)
    :param site_name: The site_name that this nic's ip is in.
    :type site_name: str
    :return: The hostname to use in the A/PTR record, or None when the
        nic's name fails validation.
    :rtype: str or None
    """
    # NOTE(review): this validates the nic object itself, not nic.hostname;
    # confirm that validate_name accepts a nic.
    try:
        validate_name(nic)
    except ValidationError, e:
        return None
    # Hey look Values are stored as strings.
    # dns_auto_hostname False means "use the hostname exactly as entered".
    if nic.dns_auto_hostname is False:
        return nic.hostname
    # Bare labels get the site qualifier; dotted names only get the suffix.
    if len(nic.hostname.split('.')) == 1:
        hostname = "{0}.{1}.mozilla.com".format(nic.hostname, site_name)
    else:
        hostname = "{0}.mozilla.com".format(nic.hostname)
    # These are checks for common mistakes. For now just print. TODO.
    # Send an email or something.
    if hostname.count('mozilla.com') >= 2:
        log("nic might have incorrect hostname "
            "{0} (https://inventory.mozilla.org/en-US/systems/edit/{1}/). "
            "Use the 'dns_auto_hostname' key to override."
            .format(hostname, nic.system.pk), WARNING)
    return hostname
def inventory_build_sites(sites):
    """
    For each site in sites partition data into logical site groups. Later
    these groups will be used to print BIND zone files.

    Sites have the form::

        ('<vlan-name>.<site-name>', '<network-mask>', '<file_path_to_site>')

    For example::

        ('dmz.scl3', '10.22.1.0/24', <path>)

    :param sites: The sites that data should be aggregated for.
    :type sites: tuple
    :return: (agg_sites, agg_reverse) -- forward entries keyed by site name
        and reverse entries keyed by /24 reverse-file name.
    """
    # Aggregated data goes in these dicts.
    agg_sites = {}
    agg_reverse = {}
    for site in sites:
        # Pre-calculate the regular expressions so we can do this in one pass.
        # NOTE(review): site_path is unpacked but never used below.
        name, network, site_path = site
        # Add address family logic here if need be.
        try:
            agg_sites[name] = (ipaddr.IPv4Network(network), [])
        except ipaddr.AddressValueError, e:
            # Eventually send an email or do something more serious about an
            # error here.
            log(str(e), ERROR)
    for intr in get_nic_objs():
        if intr.dns_auto_build is False:
            continue
        for site_name, data in agg_sites.items():
            network, entries = data
            for ip in intr.ips:
                if ipaddr.IPv4Network(ip).overlaps(network):
                    hostname = generate_hostname(intr, site_name)
                    if hostname is None:
                        continue
                    intr.hostname = hostname
                    entries.append(intr)
                    # Reverse file name is the first three octets of the ip.
                    rev_file_name = '.'.join(ip.split('.')[:3])
                    reverse_data = agg_reverse.setdefault(rev_file_name, [])
                    reverse_data.append(intr)
                    # TODO Should we add all hosts to reverse zones? Even if it
                    # doesn't belong to a site? Or should we only add to the
                    # reverse when there is a corresponding record A in forward?
                    # For now only add if we find a forward match. I'm pretty sure
                    # there is an RFC that says A records should always have a PTR.
                    # Is that the same for PTR's?
    return (agg_sites, agg_reverse)
| bsd-3-clause | 74bbc5f427614c8f56138945d645df98 | 34.663366 | 79 | 0.598279 | 4.038117 | false | false | false | false |
mozilla/inventory | scripts/output_d3_dns_tree.py | 2 | 1091 | __import__('inventory_context')
from mozdns.domain.models import Domain
import pprint
import simplejson as json
pp = pprint.PrettyPrinter(depth=4)
def emit_node(domain):
    """Recursively build one d3.js tree node for ``domain``.

    Child domains come first, followed by one leaf per DNS record that
    lives directly in this domain (rendered via ``bind_render_record``).
    """
    kids = [emit_node(child) for child in domain.domain_set.all()]
    record_managers = [
        domain.addressrecord_set,
        domain.cname_set,
        domain.mx_set,
        domain.srv_set,
        domain.sshfp_set,
        domain.staticreg_set,
        domain.reverse_staticreg_set,
        domain.txt_set,
        domain.ptr_set,
        domain.nameserver_set
    ]
    for manager in record_managers:
        kids.extend(
            {'name': record.bind_render_record()} for record in manager.all()
        )
    node = {'name': domain.name}
    if kids:
        node['children'] = kids
    return node
# Script entry point: dump the entire DNS domain hierarchy as a d3.js
# compatible JSON tree rooted at the synthetic node '.'.
# NOTE(review): ``net`` is fetched but never used below — presumably a
# leftover or a deliberate existence check for the 'net' domain; confirm
# before removing (removal would also remove the DoesNotExist side effect).
net = Domain.objects.get(name='net')
children = []
# Domains without a master domain are TLD roots; each becomes a child of '.'.
for domain in Domain.objects.filter(master_domain=None):
    children.append(emit_node(domain))
tree = {'name': '.', 'children': children}
print json.dumps(tree)
| bsd-3-clause | ef736df9efc6a6338ec421f1d7065694 | 21.729167 | 56 | 0.611366 | 3.624585 | false | false | false | false |
mozilla/inventory | mozdns/search_utils.py | 2 | 5530 | from django.db.models import Q
from django.core.exceptions import ObjectDoesNotExist
import mozdns
import core
def fqdn_search(fqdn, *args, **kwargs):
    """
    Find any records that have a name that is the provided fqdn. This
    name would show up on the left hand side of a zone file and in a PTR's
    case the right side.

    Extra positional/keyword arguments are forwarded verbatim to
    :func:`_build_queries` (e.g. per-record-type toggles like ``mx=False``
    or ``search_operator='__icontains'``).

    :param fqdn: The name to search for.
    :type fqdn: str
    :return: (type, Querysets) tuples containing all the objects that matched
        during the search are returned.
    """
    return _build_queries(fqdn, *args, **kwargs)
def _smart_fqdn_first_hit(label, domain, **kwargs):
    """Return the first non-empty queryset for (label, domain), else None.

    Shared by both lookup strategies of :func:`smart_fqdn_exists`; keyword
    arguments are the record-type toggles of
    :func:`_build_label_domain_queries`.
    """
    for type_, qset in _build_label_domain_queries(label, domain, **kwargs):
        if qset.exists():
            return qset
    return None


def smart_fqdn_exists(fqdn, *args, **kwargs):
    """
    Searching for a fqdn by actually looking at a fqdn is very inefficient.
    Instead we should:

    1) Look for a domain with the name of fqdn.

    2) Look for a label = fqdn.split('.')[0]
        and domain = fqdn.split('.')[1:]

    Returns the first non-empty queryset found, or None when nothing
    matches.
    """
    # Approach 1: the fqdn is itself a domain (records with an empty label).
    try:
        search_domain = mozdns.domain.models.Domain.objects.get(name=fqdn)
    except ObjectDoesNotExist:
        search_domain = None
    if search_domain:
        qset = _smart_fqdn_first_hit('', search_domain, **kwargs)
        if qset is not None:
            return qset

    # Approach 2: split into a label and a parent domain.
    if len(fqdn.split('.')) == 1:
        return None
    label = fqdn.split('.')[0]
    domain_name = '.'.join(fqdn.split('.')[1:])
    try:
        search_domain = mozdns.domain.models.Domain.objects.get(
            name=domain_name)
    except ObjectDoesNotExist:
        return None
    return _smart_fqdn_first_hit(label, search_domain, **kwargs)
def _build_label_domain_queries(label, domain, mx=True, sr=True, tx=True,
                                cn=True, ar=True, sreg=True, ns=True, ss=True):
    """Return (type-name, queryset) pairs for records at (label, domain).

    Each boolean keyword toggles one record type. Nameservers are only
    matched for the bare domain (empty label) since they have no label.
    """
    # We import this way to make it easier to import this file without
    # getting cyclic imports.
    qsets = []
    if mx:
        qsets.append(('MX', mozdns.mx.models.MX.objects.
                      filter(**{'label': label, 'domain': domain})))
    if ns:
        if label == '':
            qsets.append(('NS', mozdns.nameserver.models.Nameserver.objects.
                          filter(**{'domain': domain})))
    if sr:
        qsets.append(('SRV', mozdns.srv.models.SRV.objects.
                      filter(**{'label': label, 'domain': domain})))
    if tx:
        qsets.append(('TXT', mozdns.txt.models.TXT.objects.
                      filter(**{'label': label, 'domain': domain})))
    if ss:
        qsets.append(('SSHFP', mozdns.sshfp.models.SSHFP.objects.
                      filter(**{'label': label, 'domain': domain})))
    if cn:
        qsets.append(('CNAME', mozdns.cname.models.CNAME.objects.
                      filter(**{'label': label, 'domain': domain})))
    if ar:
        AddressRecord = mozdns.address_record.models.AddressRecord
        ars = AddressRecord.objects.filter(
            **{'label': label, 'domain': domain})
        qsets.append(('AddressRecord', ars))
    if sreg:
        StaticReg = core.registration.static.models.StaticReg
        sregs = StaticReg.objects.filter(
            **{'label': label, 'domain': domain})
        # Bug fix: this tuple was mislabeled 'AddressRecord' (copy/paste);
        # _build_queries labels the same queryset 'StaticReg'.
        qsets.append(('StaticReg', sregs))
    return qsets
def fqdn_exists(fqdn, **kwargs):
    """Return a Queryset or False depending on whether an object exists
    with the fqdn.

    :param fqdn: The name to search for.
    :type fqdn: str
    :return: the first non-empty queryset found, otherwise False
    """
    for _record_type, candidate in _build_queries(fqdn, **kwargs):
        if candidate.exists():
            return candidate
    return False
def _build_queries(fqdn, dn=True, mx=True, sr=True, tx=True,
                   cn=True, ar=True, pt=True, ip=False, sreg=True,
                   search_operator=''):
    """Assemble (type-name, queryset) pairs matching ``fqdn``.

    Each boolean keyword toggles one record type; ``search_operator`` is a
    Django lookup suffix (e.g. ``'__icontains'``) appended to the filtered
    field name. ``ip`` optionally matches ip_str for PTR/A/StaticReg records.
    """
    # We import this way to make it easier to import this file without
    # getting cyclic imports.
    qsets = []
    if dn:
        qsets.append(('Domain', mozdns.domain.models.Domain.objects.
                      filter(**{'name{0}'.format(search_operator): fqdn})))
    if mx:
        qsets.append(('MX', mozdns.mx.models.MX.objects.
                      filter(**{'fqdn{0}'.format(search_operator): fqdn})))
    if sr:
        qsets.append(('SRV', mozdns.srv.models.SRV.objects.
                      filter(**{'fqdn{0}'.format(search_operator): fqdn})))
    if tx:
        qsets.append(('TXT', mozdns.txt.models.TXT.objects.
                      filter(**{'fqdn{0}'.format(search_operator): fqdn})))
    if cn:
        qsets.append(('CNAME', mozdns.cname.models.CNAME.objects.
                      filter(**{'fqdn{0}'.format(search_operator): fqdn})))
    if ar:
        AddressRecord = mozdns.address_record.models.AddressRecord
        # Consistency fix: honor search_operator like the other branches
        # (previously the AR branch always used exact-match lookups).
        ars = AddressRecord.objects.filter(
            Q(**{'fqdn{0}'.format(search_operator): fqdn}) |
            Q(**{'ip_str{0}'.format(search_operator): ip}))
        qsets.append(('AddressRecord', ars))
    if pt:
        # Bug fix: managers have no .Q attribute; the Q expressions must be
        # passed through .filter(). The old code raised AttributeError.
        qsets.append(('PTR', mozdns.ptr.models.PTR.objects.filter(
            Q(**{'name{0}'.format(search_operator): fqdn}) |
            Q(**{'ip_str{0}'.format(search_operator): ip}))))
    if sreg:
        StaticReg = core.registration.static.models.StaticReg
        qsets.append(('StaticReg', StaticReg.objects.filter(
            Q(**{'fqdn{0}'.format(search_operator): fqdn}) |
            Q(**{'ip_str{0}'.format(search_operator): ip}))))
    return qsets
| bsd-3-clause | 75bb37d4fe5ec20f9227759ac2a155c1 | 36.364865 | 79 | 0.566727 | 3.859037 | false | false | false | false |
mozilla/inventory | mozdns/address_record/views.py | 3 | 2416 | from django.core.exceptions import ObjectDoesNotExist
from mozdns.address_record.forms import AddressRecordForm
from mozdns.address_record.models import AddressRecord
from mozdns.domain.models import Domain
from mozdns.views import MozdnsDeleteView, MozdnsDetailView
from mozdns.views import MozdnsCreateView, MozdnsUpdateView, MozdnsListView
from core.network.utils import calc_parent_str
class AddressRecordView(object):
    # Mixin shared by the AddressRecord CRUD views below; pins the model,
    # form class and base queryset that the generic Mozdns views expect.
    model = AddressRecord
    form_class = AddressRecordForm
    queryset = AddressRecord.objects.all()
class AddressRecordDeleteView(AddressRecordView, MozdnsDeleteView):
    """Confirm-and-delete view for a single AddressRecord."""
class AddressRecordDetailView(AddressRecordView, MozdnsDetailView):
    """Read-only detail page for a single AddressRecord."""
    def get_context_data(self, **kwargs):
        """Add a human-readable form title and merge any extra context."""
        context = super(AddressRecordDetailView, self).get_context_data(
            **kwargs)
        context['form_title'] = "{0} Details".format(
            self.form_class.Meta.model.__name__
        )
        # extra_context takes precedence over original values in context
        # NOTE(review): concatenating dict.items() lists is Python-2-only.
        if self.extra_context:
            context = dict(context.items() + self.extra_context.items())
        return context
    # Class attribute (placed after the method in the original layout).
    template_name = 'address_record/addressrecord_detail.html'
class AddressRecordCreateView(AddressRecordView, MozdnsCreateView):
    """Create view that can pre-fill the form from ?ip_str=&ip_type=."""
    def get_form(self, *args, **kwargs):
        """Build the creation form, seeding initial values from the query
        string.

        When both 'ip_str' and 'ip_type' are present in request.GET, the
        containing network is looked up; if that network has a vlan and a
        site, the conventional '<vlan>.<site-path>.mozilla.com' Domain is
        suggested as the record's domain (only if such a Domain exists).
        """
        initial = self.get_form_kwargs()
        if 'ip_type' in self.request.GET and 'ip_str' in self.request.GET:
            ip_str = self.request.GET['ip_str']
            ip_type = self.request.GET['ip_type']
            network = calc_parent_str(ip_str, ip_type)
            if network and network.vlan and network.site:
                expected_name = "{0}.{1}.mozilla.com".format(
                    network.vlan.name,
                    network.site.get_site_path())
                try:
                    domain = Domain.objects.get(name=expected_name)
                except ObjectDoesNotExist:
                    domain = None
                if domain:
                    initial['initial'] = {'ip_str': ip_str, 'domain': domain,
                                          'ip_type': ip_type}
                else:
                    initial['initial'] = {'ip_str': ip_str, 'ip_type': ip_type}
        return AddressRecordForm(**initial)
class AddressRecordUpdateView(AddressRecordView, MozdnsUpdateView):
    """Edit view for an existing AddressRecord."""
class AddressRecordListView(AddressRecordView, MozdnsListView):
    """Paginated list of all AddressRecords."""
mozilla/inventory | core/registration/static/migrations/0001_initial.py | 2 | 16955 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration replacing StaticInterface with StaticReg.

    Drops the legacy static_interface tables (and their content types),
    then creates the static_reg / static_reg_views / static_key_value
    tables with their unique constraints. The ``models`` dict below is
    South's auto-generated frozen ORM snapshot — do not hand-edit it.
    """

    def forwards(self, orm):
        """Apply the migration: drop legacy tables, create StaticReg ones."""
        # Remove StaticInterface
        # XXX PLEASE uncomment this in production
        from django.contrib.contenttypes.models import ContentType
        try:
            db.delete_table('static_interface')
            db.delete_table('static_inter_key_value')
            db.delete_table('static_interface_views')
        # NOTE(review): bare except deliberately swallows failures so the
        # migration also runs on databases where the legacy tables are gone.
        except:
            pass
        for content_type in ContentType.objects.filter(app_label='static_intr'):
            content_type.delete()

        # Adding model 'StaticReg'
        db.create_table('static_reg', (
            ('domain', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['domain.Domain'])),
            ('label', self.gf('django.db.models.fields.CharField')(max_length=63, null=True, blank=True)),
            ('fqdn', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=255, null=True, blank=True)),
            ('ttl', self.gf('django.db.models.fields.PositiveIntegerField')(default=3600, null=True, blank=True)),
            ('description', self.gf('django.db.models.fields.CharField')(max_length=1000, null=True, blank=True)),
            ('ip_str', self.gf('django.db.models.fields.CharField')(max_length=39)),
            ('ip_upper', self.gf('django.db.models.fields.BigIntegerField')(null=True, blank=True)),
            ('ip_lower', self.gf('django.db.models.fields.BigIntegerField')(null=True, blank=True)),
            ('ip_type', self.gf('django.db.models.fields.CharField')(max_length=1)),
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('reverse_domain', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='reverse_staticreg_set', null=True, to=orm['domain.Domain'])),
            ('system', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['systems.System'], null=True, blank=True)),
        ))
        db.send_create_signal('static', ['StaticReg'])

        # Adding unique constraint on 'StaticReg', fields ['ip_upper', 'ip_lower', 'label', 'domain']
        db.create_unique('static_reg', ['ip_upper', 'ip_lower', 'label', 'domain_id'])

        # Adding M2M table for field views on 'StaticReg'
        m2m_table_name = db.shorten_name('static_reg_views')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('staticreg', models.ForeignKey(orm['static.staticreg'], null=False)),
            ('view', models.ForeignKey(orm['view.view'], null=False))
        ))
        db.create_unique(m2m_table_name, ['staticreg_id', 'view_id'])

        # Adding model 'StaticRegKeyValue'
        db.create_table('static_key_value', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('key', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('value', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('is_option', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('is_statement', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('has_validator', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('obj', self.gf('django.db.models.fields.related.ForeignKey')(related_name='keyvalue_set', to=orm['static.StaticReg'])),
        ))
        db.send_create_signal('static', ['StaticRegKeyValue'])

        # Adding unique constraint on 'StaticRegKeyValue', fields ['key', 'value', 'obj']
        db.create_unique('static_key_value', ['key', 'value', 'obj_id'])

    def backwards(self, orm):
        """Reverse the migration: drop the StaticReg tables/constraints.

        Note: the legacy static_interface tables dropped by forwards() are
        NOT recreated here, so this migration is only partially reversible.
        """
        # Removing unique constraint on 'StaticRegKeyValue', fields ['key', 'value', 'obj']
        db.delete_unique('static_key_value', ['key', 'value', 'obj_id'])

        # Removing unique constraint on 'StaticReg', fields ['ip_upper', 'ip_lower', 'label', 'domain']
        db.delete_unique('static_reg', ['ip_upper', 'ip_lower', 'label', 'domain_id'])

        # Deleting model 'StaticReg'
        db.delete_table('static_reg')

        # Removing M2M table for field views on 'StaticReg'
        db.delete_table(db.shorten_name('static_reg_views'))

        # Deleting model 'StaticRegKeyValue'
        db.delete_table('static_key_value')

    # Auto-generated frozen ORM snapshot used by South at migration time.
    models = {
        'domain.domain': {
            'Meta': {'object_name': 'Domain', 'db_table': "'domain'"},
            'delegated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_reverse': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'master_domain': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['domain.Domain']", 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'purgeable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'soa': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['soa.SOA']", 'null': 'True', 'blank': 'True'})
        },
        'soa.soa': {
            'Meta': {'unique_together': "(('primary', 'contact', 'description'),)", 'object_name': 'SOA', 'db_table': "'soa'"},
            'contact': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'expire': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1209600'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_signed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'minimum': ('django.db.models.fields.PositiveIntegerField', [], {'default': '180'}),
            'primary': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'refresh': ('django.db.models.fields.PositiveIntegerField', [], {'default': '180'}),
            'retry': ('django.db.models.fields.PositiveIntegerField', [], {'default': '86400'}),
            'serial': ('django.db.models.fields.PositiveIntegerField', [], {'default': '2013062501'}),
            'ttl': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3600', 'null': 'True', 'blank': 'True'})
        },
        'static.staticreg': {
            'Meta': {'unique_together': "(('ip_upper', 'ip_lower', 'label', 'domain'),)", 'object_name': 'StaticReg', 'db_table': "'static_reg'"},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['domain.Domain']"}),
            'fqdn': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip_lower': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'ip_str': ('django.db.models.fields.CharField', [], {'max_length': '39'}),
            'ip_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
            'ip_upper': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '63', 'null': 'True', 'blank': 'True'}),
            'reverse_domain': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'reverse_staticreg_set'", 'null': 'True', 'to': "orm['domain.Domain']"}),
            'system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.System']", 'null': 'True', 'blank': 'True'}),
            'ttl': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3600', 'null': 'True', 'blank': 'True'}),
            'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['view.View']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'static.staticregkeyvalue': {
            'Meta': {'unique_together': "(('key', 'value', 'obj'),)", 'object_name': 'StaticRegKeyValue', 'db_table': "'static_key_value'"},
            'has_validator': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_option': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_statement': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'obj': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'keyvalue_set'", 'to': "orm['static.StaticReg']"}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'systems.allocation': {
            'Meta': {'ordering': "['name']", 'object_name': 'Allocation', 'db_table': "u'allocations'"},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'systems.location': {
            'Meta': {'ordering': "['name']", 'object_name': 'Location', 'db_table': "u'locations'"},
            'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'blank': 'True'}),
            'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
        },
        'systems.operatingsystem': {
            'Meta': {'ordering': "['name', 'version']", 'object_name': 'OperatingSystem', 'db_table': "u'operating_systems'"},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'version': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
        },
        'systems.servermodel': {
            'Meta': {'ordering': "['vendor', 'model']", 'object_name': 'ServerModel', 'db_table': "u'server_models'"},
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'part_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'vendor': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
        },
        'systems.system': {
            'Meta': {'object_name': 'System', 'db_table': "u'systems'"},
            'allocation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.Allocation']", 'null': 'True', 'blank': 'True'}),
            'asset_tag': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'change_password': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'created_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_dhcp_server': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'is_dns_server': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'is_nagios_server': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'is_puppet_server': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'is_switch': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'licenses': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'oob_ip': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
            'oob_switch_port': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'operating_system': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.OperatingSystem']", 'null': 'True', 'blank': 'True'}),
            'patch_panel_port': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'purchase_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'purchase_price': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'rack_order': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
            'ram': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'serial': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'server_model': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.ServerModel']", 'null': 'True', 'blank': 'True'}),
            'switch_ports': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'system_rack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.SystemRack']", 'null': 'True', 'blank': 'True'}),
            'system_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.SystemStatus']", 'null': 'True', 'blank': 'True'}),
            'system_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.SystemType']", 'null': 'True', 'blank': 'True'}),
            'updated_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'warranty_end': ('django.db.models.fields.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'warranty_start': ('django.db.models.fields.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
        },
        'systems.systemrack': {
            'Meta': {'ordering': "['name']", 'object_name': 'SystemRack', 'db_table': "u'system_racks'"},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['systems.Location']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
        },
        'systems.systemstatus': {
            'Meta': {'ordering': "['status']", 'object_name': 'SystemStatus', 'db_table': "u'system_statuses'"},
            'color': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'color_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
        },
        'systems.systemtype': {
            'Meta': {'object_name': 'SystemType', 'db_table': "u'system_types'"},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'type_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
        },
        'view.view': {
            'Meta': {'unique_together': "(('name',),)", 'object_name': 'View', 'db_table': "'view'"},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        }
    }

    complete_apps = ['static']
| bsd-3-clause | cd56f2be463388f5e5a177d22e5bdeec | 75.373874 | 189 | 0.561781 | 3.593684 | false | false | false | false |
mozilla/inventory | migrate_dns/import_utils.py | 2 | 2950 | from migrate_dns.zone_migrate import populate_forward_dns, populate_reverse_dns
from dns import zone
from iscpy.iscpy_dns.named_importer_lib import MakeNamedDict
from mozdns.view.models import View
import settings
import os
# Add zones that should not be imported here
# (zones listed here are skipped by do_import even if they appear as
# orphan zones in the BIND configs)
black_list = (
    'svc.mozilla.com',
    'services.mozilla.com',
)

# Paths to the private/public BIND zone configs under settings.ZONE_PATH.
PRIVATE = os.path.join(settings.ZONE_PATH, "config/zones.private")
PUBLIC = os.path.join(settings.ZONE_PATH, "config/zones.public")
def show_possible_imports(zones_file, view):
CONFIG = os.path.join(settings.ZONE_PATH, zones_file)
zones = MakeNamedDict(open(CONFIG).read())
m_c = ('python manage.py dns_migrate_single_zone {view} {zone_name} '
'$ZONES_PREFIX/{fname}')
for zone_name, zone_meta in zones['orphan_zones'].iteritems():
print m_c.format(
view=view, zone_name=zone_name, fname=zone_meta['file']
)
def do_import():
    """Import every orphan zone from the private and public BIND configs,
    skipping anything listed in ``black_list``. Both DNS views are created
    first if they do not already exist.
    """
    # (zones dict, goes-in-public-view, goes-in-private-view)
    configs = (
        (MakeNamedDict(open(PRIVATE).read()), False, True),
        (MakeNamedDict(open(PUBLIC).read()), True, False),
    )
    View.objects.get_or_create(name='public')
    View.objects.get_or_create(name='private')
    for zones, public, private in configs:
        for zone_name, zone_meta in zones['orphan_zones'].iteritems():
            if zone_name in black_list:
                continue
            handle_zone(zone_name, zone_meta, public, private)
def migrate_single_zone(view_name, zone_name, zone_file):
if view_name not in ('public', 'private', 'both'):
print "view must be 'public' or 'private'"
return
zone_meta = {'file': zone_file}
if view_name == 'private':
handle_zone(zone_name, zone_meta, False, True)
elif view_name == 'public':
handle_zone(zone_name, zone_meta, True, False)
elif view_name == 'both':
handle_zone(zone_name, zone_meta, True, True)
def get_zone_data(zone_name, filepath, dirpath):
    """Parse ``filepath`` (relative to ``dirpath``) as zone ``zone_name``.

    Temporarily chdirs into ``dirpath`` so relative $INCLUDE paths inside
    the zone file resolve correctly.

    :return: a dnspython zone object with non-relativized names.
    """
    cwd = os.getcwd()
    os.chdir(dirpath)
    try:
        # Bug fix: previously a parse error left the process stranded in
        # ``dirpath``; always restore the original working directory.
        return zone.from_file(filepath, zone_name, relativize=False)
    finally:
        os.chdir(cwd)
def handle_zone(zone_name, zone_meta, public, private):
    """Parse one zone file and populate forward or reverse DNS from it.

    :param zone_meta: dict with a 'file' key (path relative to ZONE_PATH).
    :param public: add the zone to the 'public' view when True.
    :param private: add the zone to the 'private' view when True.
    """
    if not zone_meta['file']:
        print "No zone file for {0}".format(zone_name)
        return
    # NOTE(review): when both views are requested this message still says
    # 'public' only; it is informational and does not affect behavior.
    print "Importing {0}. View: {1}".format(zone_name,
                                            'public' if public else 'private')
    mzone = get_zone_data(zone_name, zone_meta['file'], settings.ZONE_PATH)
    views = []
    if public:
        views.append(View.objects.get(name='public'))
    if private:
        views.append(View.objects.get(name='private'))
    # Reverse zones are identified by their arpa suffix.
    if zone_name.endswith(('in-addr.arpa', 'ip6.arpa')):
        direction = 'reverse'
    else:
        direction = 'forward'
    if direction == 'reverse':
        populate_reverse_dns(mzone, zone_name, views)
    else:
        populate_forward_dns(mzone, zone_name, views)
| bsd-3-clause | 7bd9df41033d39782a0f3b4a34231e14 | 33.705882 | 79 | 0.646441 | 3.398618 | false | false | false | false |
amperser/proselint | proselint/tools.py | 1 | 14182 | """General-purpose tools shared across linting checks."""
import copy
import dbm
import functools
import hashlib
import importlib
import inspect
import json
import os
import re
import shelve
import sys
import traceback
from warnings import showwarning as warn
from . import config
# Open shelve caches keyed by file path, so repeated memoized calls within
# one process reuse the same handle (closed via close_cache_shelves()).
_cache_shelves = dict()
# Directory containing this package; appended to sys.path by get_checks().
proselint_path = os.path.dirname(os.path.realpath(__file__))
home_dir = os.path.expanduser("~")
cwd = os.getcwd()
def close_cache_shelves():
    """Close every cache shelf opened so far and forget their handles."""
    for shelf in list(_cache_shelves.values()):
        shelf.close()
    _cache_shelves.clear()
def close_cache_shelves_after(f):
    """Decorate a function to ensure cache shelves are closed after call.

    Improvements over the previous version: the wrapped function's return
    value is propagated instead of being discarded, and the shelves are
    closed even when ``f`` raises (try/finally).
    """
    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        finally:
            close_cache_shelves()
    return wrapped
def _get_xdg_path(variable_name, default_path):
path = os.environ.get(variable_name)
if path is None or path == '':
return default_path
else:
return path
def _get_xdg_config_home():
    """Return $XDG_CONFIG_HOME, defaulting to ~/.config."""
    return _get_xdg_path('XDG_CONFIG_HOME', os.path.join(home_dir, '.config'))
def _get_xdg_cache_home():
    """Return $XDG_CACHE_HOME, defaulting to ~/.cache."""
    return _get_xdg_path('XDG_CACHE_HOME', os.path.join(home_dir, '.cache'))
def _get_cache(cachepath):
    """Open (or reuse) the shelve cache stored at ``cachepath``.

    A corrupt dbm file is deleted and re-opened; on any other failure an
    in-memory shelf is returned so caching problems never abort linting.
    The opened shelf is memoized in ``_cache_shelves``.
    """
    if cachepath in _cache_shelves:
        return _cache_shelves[cachepath]

    try:
        cache = shelve.open(cachepath, protocol=2)
    except dbm.error:
        # dbm error on open - delete and retry
        print('Error (%s) opening %s - will attempt to delete and re-open.' %
              (sys.exc_info()[1], cachepath))
        try:
            os.remove(cachepath)
            cache = shelve.open(cachepath, protocol=2)
        except Exception:
            print('Error on re-open: %s' % sys.exc_info()[1])
            cache = None
    except Exception:
        # unknown error
        print('Could not open cache file %s, maybe name collision. '
              'Error: %s' % (cachepath, traceback.format_exc()))
        cache = None

    # Don't fail on bad caches
    if cache is None:
        print('Using in-memory shelf for cache file %s' % cachepath)
        cache = shelve.Shelf(dict())

    _cache_shelves[cachepath] = cache
    return cache
def memoize(f):
    """Cache results of computations on disk.

    Results are stored in a per-function shelve file under the XDG cache
    directory. Note that only the ``text`` argument contributes to the
    cache key, so this decorator is intended for check functions whose
    result depends solely on the text.
    """
    # Determine the location of the cache.
    cache_dirname = os.path.join(_get_xdg_cache_home(), 'proselint')
    legacy_cache_dirname = os.path.join(home_dir, ".proselint")

    if not os.path.isdir(cache_dirname):
        # Migrate the cache from the legacy path to XDG compliant location.
        if os.path.isdir(legacy_cache_dirname):
            os.rename(legacy_cache_dirname, cache_dirname)
        # Create the cache if it does not already exist.
        else:
            os.makedirs(cache_dirname)

    cache_filename = f.__module__ + "." + f.__name__
    cachepath = os.path.join(cache_dirname, cache_filename)

    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        # handle instance methods
        if hasattr(f, '__self__'):
            args = args[1:]

        signature = cache_filename.encode("utf-8")
        tempargdict = inspect.getcallargs(f, *args, **kwargs)

        # Only the "text" argument is folded into the cache key.
        for item in list(tempargdict.items()):
            if item[0] == "text":
                signature += item[1].encode("utf-8")

        key = hashlib.sha256(signature).hexdigest()
        cache = _get_cache(cachepath)
        try:
            return cache[key]
        except KeyError:
            # Miss: compute, store and flush to disk.
            value = f(*args, **kwargs)
            cache[key] = value
            cache.sync()
            return value
        except TypeError:
            # Unpicklable/unhashable data: fall back to calling f directly.
            call_to = f.__module__ + '.' + f.__name__
            print('Warning: could not disk cache call to %s;'
                  'it probably has unhashable args. Error: %s' %
                  (call_to, traceback.format_exc()))
            return f(*args, **kwargs)
    return wrapped
def get_checks(options):
    """Return the check functions enabled in ``options["checks"]``.

    Each enabled entry names a module under the package's ``checks``
    directory; every attribute whose name starts with 'check' is collected.
    """
    sys.path.append(proselint_path)
    enabled = [name for name, on in options["checks"].items() if on]
    checks = []
    for name in enabled:
        module = importlib.import_module("checks." + name)
        checks.extend(
            getattr(module, attr) for attr in dir(module)
            if re.match("check", attr))
    return checks
def deepmerge_dicts(dict1, dict2):
    """Deep merge dictionaries, second dict will take priority.

    Returns a new dict; neither input is mutated. Values that are dicts in
    ``dict2`` are merged recursively; any other value simply replaces the
    corresponding value from ``dict1``.
    """
    result = copy.deepcopy(dict1)
    for key, value in dict2.items():
        if isinstance(value, dict):
            # Bug fix: use .get() so a nested key that exists only in dict2
            # no longer raises KeyError.
            result[key] = deepmerge_dicts(result.get(key) or {}, value)
        else:
            result[key] = value
    return result
def load_options(config_file_path=None, conf_default=None):
    """Read various proselintrc files, allowing user overrides.

    Precedence (first existing file wins): an explicit ``config_file_path``,
    then ``./.proselintrc.json``, ``$XDG_CONFIG_HOME/proselint/config.json``
    and ``~/.proselintrc.json`` (each with a deprecated non-.json fallback).
    The result is deep-merged over ``/etc/proselintrc`` / ``conf_default``.

    :raises FileNotFoundError: if ``config_file_path`` is given but missing.
    """
    conf_default = conf_default or {}
    if os.path.isfile("/etc/proselintrc"):
        # Bug fix: open config files via a context manager so the file
        # handles are always closed (previously they were leaked).
        with open("/etc/proselintrc") as f:
            conf_default = json.load(f)

    user_config_paths = [
        os.path.join(cwd, '.proselintrc.json'),
        os.path.join(_get_xdg_config_home(), 'proselint', 'config.json'),
        os.path.join(home_dir, '.proselintrc.json')
    ]
    if config_file_path:
        if not os.path.isfile(config_file_path):
            raise FileNotFoundError(
                f"Config file {config_file_path} does not exist")
        user_config_paths.insert(0, config_file_path)

    user_options = {}
    for path in user_config_paths:
        if os.path.isfile(path):
            with open(path) as f:
                user_options = json.load(f)
            break
        # Legacy rc files without the .json suffix still work but warn.
        oldpath = path.replace(".json", "")
        if os.path.isfile(oldpath):
            warn(f"{oldpath} was found instead of a JSON file."
                 f" Rename to {path}.", DeprecationWarning, "", 0)
            with open(oldpath) as f:
                user_options = json.load(f)
            break

    return deepmerge_dicts(conf_default, user_options)
def errors_to_json(errors):
    """Convert the errors to JSON.

    Line/column/start/end are converted from 0-based to 1-based indices.
    """
    payload = [
        {
            "check": check,
            "message": message,
            "line": line + 1,
            "column": column + 1,
            "start": start + 1,
            "end": end + 1,
            "extent": extent,
            "severity": severity,
            "replacements": replacements,
        }
        for (check, message, line, column,
             start, end, extent, severity, replacements) in errors
    ]
    return json.dumps(
        {"status": "success", "data": {"errors": payload}}, sort_keys=True)
def line_and_column(text, position):
    """Return the line number and column of a position in a string.

    Both values are 0-based; a position past a line's trailing whitespace
    is attributed to the following line.
    """
    consumed = 0
    lines = text.splitlines(True)
    for line_no, line in enumerate(lines):
        # Compare against the line's content length (trailing whitespace
        # and the newline excluded), matching the original semantics.
        if consumed + len(line.rstrip()) >= position:
            return (line_no, position - consumed)
        consumed += len(line)
    return (len(lines), position - consumed)
def lint(input_file, debug=False, config=config.default):
    """Run the linter on the input file.

    :param input_file: the text itself (str) or a file-like object to read.
    :param debug: accepted for API compatibility; unused in this function.
    :param config: options dict; must provide "checks" and "max_errors".
    :return: list of (check, message, line, column, start, end, extent,
        severity, replacements) tuples, sorted by line then column.
    """
    # NOTE(review): the parameter ``config`` shadows the imported ``config``
    # module; only the default expression uses the module.
    if isinstance(input_file, str):
        text = input_file
    else:
        text = input_file.read()

    # Get the checks.
    checks = get_checks(config)

    # Apply all the checks.
    errors = []
    for check in checks:
        result = check(text)

        for error in result:
            # NOTE(review): this unpack rebinds the loop variable ``check``
            # to the check's name string — intentional-looking but fragile.
            (start, end, check, message, replacements) = error
            (line, column) = line_and_column(text, start)
            # Skip hits inside quoted text (is_quoted is defined elsewhere
            # in this module).
            if not is_quoted(start, text):
                errors += [(check, message, line, column, start, end,
                            end - start, "warning", replacements)]

        if len(errors) > config["max_errors"]:
            break

    # Sort the errors by line and column number.
    errors = sorted(errors[:config["max_errors"]], key=lambda e: (e[2], e[3]))

    return errors
def assert_error(text, check, n=1):
    """Assert that text has n errors of type check."""
    # NOTE(review): ``n`` is accepted but never used — only the presence of
    # at least one error of type ``check`` is verified.
    assert_error.description = f"No {check} error for '{text}'"
    assert(check in [error[0] for error in lint(text)])
def consistency_check(text, word_pairs, err, msg, offset=0):
    """Build a consistency checker for the given word_pairs.

    For each (a, b) pair, if both forms occur in *text*, every use of
    the minority form is flagged, suggesting the majority form.  On a
    tie, occurrences of the first form are flagged and the second form
    is suggested (matching the original behavior).
    """
    results = []
    msg = " ".join(msg.split())
    for first, second in word_pairs:
        first_hits = list(re.finditer(first, text))
        second_hits = list(re.finditer(second, text))
        if first_hits and second_hits:
            if len(first_hits) > len(second_hits):
                preferred, flagged = first, second_hits
            else:
                preferred, flagged = second, first_hits
            for match in flagged:
                results.append((
                    match.start() + offset,
                    match.end() + offset,
                    err,
                    msg.format(preferred, match.group(0)),
                    preferred))
    return results
def preferred_forms_check(text, list, err, msg, ignore_case=True, offset=0):
    """Build a checker that suggests the preferred form.

    *list* is a sequence of [preferred, [variants...]] entries (the
    parameter name shadows the builtin but is kept for callers that pass
    it by keyword).
    """
    flags = re.IGNORECASE if ignore_case else 0
    msg = " ".join(msg.split())
    results = []
    # NOTE(review): inside a character class '^' is a literal caret, not
    # an anchor, so a variant at the very start of the text is never
    # matched.  Preserved as-is for behavioral compatibility; compare the
    # "(?:^|\W)" padding used by existence_check.
    padded = r"[\W^]{}[\W$]"
    for preferred, variants in list:
        for variant in variants:
            for match in re.finditer(padded.format(variant), text,
                                     flags=flags):
                matched_text = match.group(0).strip()
                results.append((
                    match.start() + 1 + offset,
                    match.end() + offset,
                    err,
                    msg.format(preferred, matched_text),
                    preferred))
    return results
def existence_check(text, list, err, msg, ignore_case=True, str=False,
                    offset=0, require_padding=True, dotall=False,
                    excluded_topics=None, exceptions=(), join=False):
    """Build a checker that prohibits certain words or phrases.

    The ``list``/``str``/``join`` parameter names shadow builtins but are
    kept because callers may pass them by keyword (``join`` is currently
    unused).  Matches whose stripped text matches any regex in
    *exceptions* are skipped.
    """
    msg = " ".join(msg.split())
    flags = 0
    if ignore_case:
        flags |= re.IGNORECASE
    if str:
        flags |= re.UNICODE
    if dotall:
        flags |= re.DOTALL
    template = r"(?:^|\W){}[\W$]" if require_padding else r"{}"
    errors = []
    # Bail out early when the text's detected topics are excluded.
    if excluded_topics:
        if any(topic in excluded_topics for topic in topics(text)):
            return errors
    pattern = "|".join(template.format(w) for w in list)
    for match in re.finditer(pattern, text, flags=flags):
        matched_text = match.group(0).strip()
        if any(re.search(exc, matched_text) for exc in exceptions):
            continue
        errors.append((
            match.start() + 1 + offset,
            match.end() + offset,
            err,
            msg.format(matched_text),
            None))
    return errors
def max_errors(limit):
    """Decorate a check to truncate error output to a specified limit."""
    def decorator(check):
        @functools.wraps(check)
        def limited(*args, **kwargs):
            # Run the check, then cap its error list at *limit*.
            return truncate_errors(check(*args, **kwargs), limit)
        return limited
    return decorator
def truncate_errors(errors, limit=float("inf")):
    """If limit was specified, truncate the list of errors.

    When truncating, the first error's message is annotated with the
    total number of occurrences found.
    """
    if len(errors) <= limit:
        return errors
    start, end, err, msg, replacements = errors[0]
    if len(errors) == limit + 1:
        note = " Found once elsewhere."
    else:
        note = f" Found {len(errors)} times elsewhere."
    return [(start, end, err, msg + note, replacements)] + errors[1:limit]
def ppm_threshold(threshold):
    """Decorate a check to error if the PPM threshold is surpassed."""
    def decorator(check):
        @functools.wraps(check)
        def guarded(*args, **kwargs):
            # args[0] is the text being linted; its length scales the PPM.
            return threshold_check(check(*args, **kwargs), threshold,
                                   len(args[0]))
        return guarded
    return decorator
def threshold_check(errors, threshold, length):
    """Check that returns an error if the PPM threshold is surpassed.

    Returns [first error] when the error rate (parts per million of
    *length*) meets *threshold*, otherwise an empty list.
    """
    if length <= 0:
        return []
    if not errors:
        return []
    ppm = (len(errors) / length) * 1e6
    return [errors[0]] if ppm >= threshold else []
def is_quoted(position, text):
    """Determine if the position in the text falls within a quote."""
    def matching(quotemark1, quotemark2):
        # Two marks "match" when both are straight (" or ') or both curly.
        straight = '\"\''
        curly = '“”'
        if quotemark1 in straight and quotemark2 in straight:
            return True
        if quotemark1 in curly and quotemark2 in curly:
            return True
        else:
            return False
    def find_ranges(text):
        # Single-pass scanner producing (start, end) spans of quoted text.
        # State s: 0 = outside a quote, 1 = inside a quote,
        # 2 = saw a candidate closing mark (confirmed only if a separator
        # follows; otherwise it was e.g. an in-word apostrophe).
        s = 0
        q = pc = ''
        start = None
        ranges = []
        seps = " .,:;-\r\n"
        quotes = ['\"', '“', '”', "'"]
        # A trailing newline is appended so a quote ending at EOF closes.
        for i, c in enumerate(text + "\n"):
            if s == 0 and c in quotes and pc in seps:
                # Opening mark: must follow a separator.  On the first
                # character pc == '' and '' is "in" any string, so a
                # quote at position 0 also opens.
                start = i
                s = 1
                q = c
            elif s == 1 and matching(c, q):
                s = 2
            elif s == 2:
                if c in seps:
                    # Close confirmed; record the inner (unquoted) span.
                    ranges.append((start+1, i-1))
                    start = None
                    s = 0
                else:
                    # Not a real close (mark was inside a word); resume.
                    s = 1
            pc = c
        return ranges
    def position_in_ranges(ranges, position):
        # True when *position* lies inside any recorded quoted span.
        for start, end in ranges:
            if start <= position < end:
                return True
        return False
    return position_in_ranges(find_ranges(text), position)
def detector_50_Cent(text):
    """Determine whether 50 Cent is a topic.

    Returns ("50 Cent", confidence) where confidence is 1.0 when more
    than two keywords occur as substrings of *text*, else 0.0.
    """
    keywords = (
        "50 Cent",
        "rap",
        "hip hop",
        "Curtis James Jackson III",
        "Curtis Jackson",
        "Eminem",
        "Dre",
        "Get Rich or Die Tryin'",
        "G-Unit",
        "Street King Immortal",
        "In da Club",
        "Interscope",
    )
    hits = sum(1 for keyword in keywords if keyword in text)
    return ("50 Cent", float(hits > 2))
def topics(text):
    """Return a list of topics."""
    # Each detector returns a (name, confidence) pair; keep the names of
    # those with confidence above 0.95.
    detectors = [
        detector_50_Cent,
    ]
    scores = [detector(text) for detector in detectors]
    return [name for name, confidence in scores if confidence > 0.95]
def context(text, position, level="paragraph"):
    """Get sentence or paragraph that surrounds the given position.

    NOTE(review): both the "sentence" and "paragraph" levels are
    unimplemented stubs, so the function currently always returns the
    empty string.
    """
    return ""
| bsd-3-clause | c1e2b84a91f0ae83f881fb00ee799f60 | 27.291417 | 79 | 0.55496 | 3.78276 | false | false | false | false |
amperser/proselint | proselint/checks/misc/capitalization.py | 1 | 2517 | """Incorrect capitalization.
---
layout: post
source: Garner's Modern American Usage
source_url: http://bit.ly/1T4alrY
title: incorrect captalization
date: 2014-06-10 12:31:19
categories: writing
---
Incorrect capitalization.
"""
from proselint.tools import memoize, preferred_forms_check
@memoize
def check(text):
    """Suggest the preferred forms.

    Flags miscapitalized fixed phrases (e.g. "stone age" -> "Stone Age").
    Returns proselint error tuples produced by preferred_forms_check.
    """
    # NOTE(review): the check id below misspells "capitalization"; it is
    # kept as-is because the id is the check's published name.
    err = "misc.captalization"
    msg = "Incorrect capitalization. '{}' is the preferred form."
    # Renamed from `list` to avoid shadowing the builtin.
    preferences = [
        ["Stone Age", ["stone age"]],
        ["space age", ["Space Age"]],
        ["the American West", ["the American west"]],
        ["Mother Nature", ["mother nature"]],
    ]
    return preferred_forms_check(text, preferences, err, msg, ignore_case=False)
# @memoize
# def check_seasons(text):
# """Suggest the preferred forms."""
# err = "MAU102"
# msg = "Seasons shouldn't be capitalized. '{}' is the preferred form."
# list = [
# # ["winter", ["Winter"]],
# # ["fall", ["Fall"]],
# # ["summer", ["Summer"]],
# # ["spring", ["Spring"]],
# ]
# return preferred_forms_check(text, list, err, msg, ignore_case=False)
@memoize
def check_months(text):
    """Suggest the preferred forms.

    Months should be capitalized; flags lowercased month names.
    """
    err = "MAU102"
    msg = "Months should be capitalized. '{}' is the preferred form."
    # Renamed from `list` to avoid shadowing the builtin.  "march" and
    # "may" stay commented out, presumably because they collide with
    # common English words.
    preferences = [
        ["January", ["january"]],
        ["February", ["february"]],
        # ["March", ["march"]],
        ["April", ["april"]],
        # ["May", ["may"]],
        ["June", ["june"]],
        ["July", ["july"]],
        ["August", ["august"]],
        ["September", ["september"]],
        ["October", ["october"]],
        ["November", ["november"]],
        ["December", ["december"]],
    ]
    return preferred_forms_check(text, preferences, err, msg, ignore_case=False)
@memoize
def check_days(text):
    """Suggest the preferred forms.

    Days of the week should be capitalized; flags lowercased day names.
    """
    err = "MAU102"
    msg = "Days of the week should be capitalized. '{}' is the preferred form."
    # Renamed from `list` to avoid shadowing the builtin.
    preferences = [
        ["Monday", ["monday"]],
        ["Tuesday", ["tuesday"]],
        ["Wednesday", ["wednesday"]],
        ["Thursday", ["thursday"]],
        ["Friday", ["friday"]],
        ["Saturday", ["saturday"]],
        ["Sunday", ["sunday"]],
    ]
    return preferred_forms_check(text, preferences, err, msg, ignore_case=False)
| bsd-3-clause | 0ba58ffba33fa19888cddc90d6e147ad | 26.358696 | 79 | 0.501788 | 3.500695 | false | false | false | false |
ros-infrastructure/rosdep | src/rosdep2/platforms/openembedded.py | 1 | 3382 | # Copyright (c) 2019, LG Electronics, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Andre Rosa/andre.rosa@lge.com
import subprocess
from rospkg.os_detect import OS_OPENEMBEDDED, OsDetect
from ..installers import PackageManagerInstaller
OPKG_INSTALLER = 'opkg'
def register_installers(context):
    """Register the opkg installer with the rosdep installer *context*."""
    context.set_installer(OPKG_INSTALLER, OpkgInstaller())
def register_platforms(context):
    """Register all OpenEmbedded-based platforms with *context*."""
    register_oe(context)
def register_oe(context):
    """Associate OpenEmbedded with opkg (as the default installer).

    OS versions for OE are identified by codename.
    """
    context.add_os_installer_key(OS_OPENEMBEDDED, OPKG_INSTALLER)
    context.set_default_os_installer_key(OS_OPENEMBEDDED, lambda self: OPKG_INSTALLER)
    context.set_os_version_type(OS_OPENEMBEDDED, OsDetect.get_codename)
def opkg_detect(pkgs, exec_fn=None):
    """Return the subset of *pkgs* that is installed on the system.

    NOTE: This is a stub; it will be filled in once the opkg detection
    semantics are fully defined.

    :param pkgs: list of package names, optionally with a fixed version
        suffix (``foo=3.0``)
    :param exec_fn: function used to execute Popen and read stdout
        (injectable for testing)
    :return: elements of *pkgs* found installed on the system
    """
    raise NotImplementedError("opkg_detect is not implemented yet")
class OpkgInstaller(PackageManagerInstaller):
    """Installer implementation for OpenEmbedded systems using opkg.

    NOTE: detection/install are stubs until the semantics are defined.
    """

    def __init__(self):
        # Wire the (stub) opkg detection function into the base class.
        super(OpkgInstaller, self).__init__(opkg_detect)

    def get_version_strings(self):
        """Return a one-element list describing the local opkg version."""
        stdout = subprocess.check_output(['opkg', '--version'])
        # First line looks like b"opkg version X.Y.Z ..."; field 2 is the
        # version number.
        version = stdout.splitlines()[0].split(b' ')[2].decode()
        return ['opkg {}'.format(version)]

    def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
        raise NotImplementedError('get_install_command is not implemented yet')
ros-infrastructure/rosdep | src/rosdep2/__init__.py | 1 | 5238 | # Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Tully Foote/tfoote@willowgarage.com, Ken Conley/kwc@willowgarage.com
"""
rosdep library and command-line tool
"""
from __future__ import print_function
from ._version import __version__
import sys
from .installers import InstallerContext, Installer, \
PackageManagerInstaller
from .core import RosdepInternalError, InstallFailed, UnsupportedOs, \
InvalidData, DownloadFailure
from .model import RosdepDatabase, RosdepDatabaseEntry
from .lookup import RosdepDefinition, RosdepView, RosdepLookup, \
ResolutionError
from .loader import RosdepLoader
# don't let import error take down code as when attempting to compute version number
try:
from .rospkg_loader import RosPkgLoader
except ImportError:
print('Cannot import rospkg, rosdep will not function properly',
file=sys.stderr)
def create_default_installer_context(verbose=False):
    """Build an InstallerContext with all built-in installers/platforms.

    Installers (source, pip, gem, npm and the per-platform package
    managers) are registered first, then each platform module registers
    its OS keys.
    """
    from .platforms import alpine
    from .platforms import arch
    from .platforms import cygwin
    from .platforms import debian
    from .platforms import gentoo
    from .platforms import nix
    from .platforms import openembedded
    from .platforms import opensuse
    from .platforms import osx
    from .platforms import pip
    from .platforms import npm
    from .platforms import gem
    from .platforms import redhat
    from .platforms import freebsd
    from .platforms import slackware
    from .platforms import source
    platform_mods = [alpine, arch, cygwin, debian, gentoo, nix, openembedded, opensuse, osx, redhat, slackware, freebsd]
    installer_mods = [source, pip, gem, npm] + platform_mods
    context = InstallerContext()
    context.set_verbose(verbose)
    # Installers first...
    for module in installer_mods:
        if verbose:
            print('registering installers for %s' % (module.__name__))
        module.register_installers(context)
    # ...then platform/OS associations.
    for module in platform_mods:
        if verbose:
            print('registering platforms for %s' % (module.__name__))
        module.register_platforms(context)
    return context
from . import gbpdistro_support  # noqa
# Hand the factory to gbpdistro_support as a module attribute
# (presumably to avoid a circular import -- TODO confirm).
gbpdistro_support.create_default_installer_context = create_default_installer_context
# TODO: this was partially abstracted from main() for another library,
# but it turned out to be unnecessary. Not sure it's worth maintaining
# separately, especially in the top-level module.
def get_default_installer(installer_context=None, verbose=False):
    """
    Based on the active OS and installer context configuration, get
    the installer to use and the necessary configuration state
    (installer keys, OS name/version).

    :param installer_context: an existing InstallerContext, or None to
        create a default one
    :raises: :exc:`UnsupportedOs` if the detected OS has no installers
    :returns: installer, installer_keys, default_key, os_name, os_version.
    """
    context = installer_context
    if context is None:
        context = create_default_installer_context(verbose=verbose)
    os_name, os_version = context.get_os_name_and_version()
    try:
        installer_keys = context.get_os_installer_keys(os_name)
        default_key = context.get_default_os_installer_key(os_name)
    except KeyError:
        # Unknown OS: surface the list of supported OS keys.
        raise UnsupportedOs(os_name, context.get_os_keys())
    return (context.get_installer(default_key), installer_keys,
            default_key, os_name, os_version)
# Explicit public API of the rosdep2 package.
__all__ = [
    'InstallerContext', 'Installer', 'PackageManagerInstaller',
    'RosdepInternalError', 'InstallFailed', 'UnsupportedOs', 'InvalidData',
    'DownloadFailure',
    'RosdepDatabase', 'RosdepDatabaseEntry',
    'RosdepDefinition', 'RosdepView', 'RosdepLookup', 'ResolutionError',
    'RosdepLoader', 'RosPkgLoader',
    'get_default_installer',
    'create_default_installer_context',
]
| bsd-3-clause | c29849f2f35ad8fd169ee7aeec0d9617 | 38.089552 | 120 | 0.737495 | 4.092188 | false | false | false | false |
ros-infrastructure/rosdep | src/rosdep2/platforms/gentoo.py | 2 | 4726 | # Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author Murph Finnicum/murph@murph.cc
# A word on atoms
# We'll be using 'atoms' instead of 'packages' for the majority of the gentoo installer.
# Atoms can specify a package version (either exactly, or min/max version), flags it has
# to be built with, and even repositories it has to come from
#
# Here are some valid atoms and their meanings:
# sed // A package named 'sed'
# sys-apps/sed // sed from the category 'sys-apps'. There can be collisions otherwise.
# sys-apps/sed::gentoo // sed from the category 'sys-apps' and the repository 'gentoo' (the default).
# >=sys-apps/sed-4 // sed of at least version 4
# sed[static,-nls] // sed built the static USE flag and withou the nls one
import os
from rospkg.os_detect import OS_GENTOO
from .source import SOURCE_INSTALLER
from ..installers import PackageManagerInstaller
from ..shell_utils import read_stdout
PORTAGE_INSTALLER = 'portage'
def register_installers(context):
    """Register the portage installer with the rosdep installer *context*."""
    context.set_installer(PORTAGE_INSTALLER, PortageInstaller())
def register_platforms(context):
    """Associate Gentoo with the portage (default) and source installers."""
    context.add_os_installer_key(OS_GENTOO, PORTAGE_INSTALLER)
    context.add_os_installer_key(OS_GENTOO, SOURCE_INSTALLER)
    context.set_default_os_installer_key(OS_GENTOO, lambda self: PORTAGE_INSTALLER)
# Determine whether an atom is already satisfied
def portage_detect_single(atom, exec_fn=read_stdout):
    """
    Check if a given atom is installed.

    :param exec_fn: function to execute Popen and read stdout (for testing)
    :return: True when ``portageq match`` reports at least one installed
        package satisfying *atom*
    """
    matches = exec_fn(['portageq', 'match', '/', atom])
    # TODO: consider checking the name of the package returned.
    # TODO: figure out if returning True for multiple matched packages
    # is acceptable.
    return len(matches) >= 1
def portage_detect(atoms, exec_fn=read_stdout):
    """
    Given a list of atoms, return a list of which are already installed.

    :param exec_fn: function to execute Popen and read stdout (for testing)
    """
    # Sorting keeps tests deterministic; NOTE(review): this mutates the
    # caller's list in place.  TODO: make testing better so this is not
    # needed.
    if isinstance(atoms, list):
        atoms.sort()
    return [atom for atom in atoms if portage_detect_single(atom, exec_fn)]
# Check portage and needed tools for existence and compatibility
def portage_available():
    """Return True when the portage tooling this installer needs exists."""
    for tool in ('/usr/bin/portageq', '/usr/bin/emerge'):
        if not os.path.exists(tool):
            return False
    # Only standard, long-stable portage features are used, so any
    # released portage should work... but TODO: check versions.
    return True
class PortageInstaller(PackageManagerInstaller):
    """Installer that resolves rosdep keys to Gentoo portage atoms."""

    def __init__(self):
        # Wire portage detection into the generic package-manager base.
        super(PortageInstaller, self).__init__(portage_detect)

    def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
        """Return the emerge command (as a list of one command) needed to
        install the not-yet-installed atoms, or [] when nothing is missing."""
        atoms = self.get_packages_to_install(resolved, reinstall=reinstall)
        base_cmd = self.elevate_priv(['emerge'])
        if not atoms:
            return []
        # '-a' makes emerge ask for confirmation.
        flags = ['-a'] if interactive else []
        return [base_cmd + flags + atoms]
ros-infrastructure/rosdep | src/rosdep2/rep3.py | 1 | 3213 | # Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import yaml
import warnings
from .core import DownloadFailure
from .rosdistrohelper import PreRep137Warning
from .url_utils import urlopen_gzip
# location of targets file for processing gbpdistro files
REP3_TARGETS_URL = 'https://raw.githubusercontent.com/ros/rosdistro/master/releases/targets.yaml'
# seconds to wait before aborting download of gbpdistro data
DOWNLOAD_TIMEOUT = 15.0
def download_targets_data(targets_url=None):
    """
    Download REP 3 targets file and unmarshal from YAML.
    DEPRECATED: this function is deprecated. List of targets should be obtained
    from the rosdistro module.
    The body of this function is an example.
    :param targets_url: override URL of platform targets file. Defaults
      to ``REP3_TARGETS_URL``.
    :raises: :exc:`DownloadFailure`
    :raises: :exc:`InvalidData` If targets file does not pass cursory validation checks.
    """
    warnings.warn('deprecated, use rosdistro instead', PreRep137Warning)
    if targets_url is None:
        targets_url = REP3_TARGETS_URL
    try:
        f = urlopen_gzip(targets_url, timeout=DOWNLOAD_TIMEOUT)
        try:
            text = f.read()
        finally:
            # Close the handle even if the read fails (was leaked before).
            f.close()
        targets_data = yaml.safe_load(text)
    except Exception as e:
        raise DownloadFailure('Failed to download target platform data for gbpdistro:\n\t%s' % (str(e)))
    # The file may be a list of single-key mappings; normalize it into one
    # dictionary keyed by platform name.  (isinstance replaces the old
    # `type(...) == list` comparison.)
    if isinstance(targets_data, list):
        new_targets_data = {}
        for t in targets_data:
            platform = next(iter(t))
            new_targets_data[platform] = t[platform]
        targets_data = new_targets_data
    return targets_data
| bsd-3-clause | aef17b0d4769f393dd271b2f11bfb3f5 | 43.625 | 104 | 0.724556 | 4.28972 | false | false | false | false |
mozilla/make.mozilla.org | vendor-local/lib/python/chardet/constants.py | 238 | 1484 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Debug flag (0 = disabled).
_debug = 0
# Detection outcome states.
eDetecting = 0
eFoundIt = 1
eNotMe = 2
# Coding state machine states.
eStart = 0
eError = 1
eItsMe = 2
# Confidence threshold -- presumably allows detection to stop early once
# a prober reaches this confidence; confirm against the probers using it.
SHORTCUT_THRESHOLD = 0.95
# NOTE: Python 2-only compatibility shim.  ``__builtin__`` exists only on
# Python 2, and assigning to ``False``/``True`` is a syntax error on
# Python 3 (where they are keywords).  On older interpreters that lack
# the boolean builtins entirely, plain ints are substituted.
import __builtin__
if not hasattr(__builtin__, 'False'):
    False = 0
    True = 1
else:
    False = __builtin__.False
    True = __builtin__.True
| bsd-3-clause | acd5503abf95a1a9db4812507159a89b | 30.574468 | 69 | 0.68531 | 4.156863 | false | false | false | false |
mozilla/make.mozilla.org | vendor-local/lib/python/celery/utils/compat.py | 14 | 8928 | # -*- coding: utf-8 -*-
"""
celery.utils.compat
~~~~~~~~~~~~~~~~~~~
Backward compatible implementations of features
only available in newer Python versions.
:copyright: (c) 2009 - 2012 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
############## py3k #########################################################
import sys
try:
reload = reload # noqa
except NameError:
from imp import reload # noqa
try:
from UserList import UserList # noqa
except ImportError:
from collections import UserList # noqa
try:
from UserDict import UserDict # noqa
except ImportError:
from collections import UserDict # noqa
if sys.version_info >= (3, 0):
    from io import StringIO, BytesIO
    from .encoding import bytes_to_str
    # On Python 3, WhateverIO accepts both text and bytes writes by
    # coercing the data to str before delegating to StringIO.
    class WhateverIO(StringIO):
        def write(self, data):
            StringIO.write(self, bytes_to_str(data))
else:
    # On Python 2 the (byte-oriented) StringIO already accepts both, so
    # one class serves all three names.  cStringIO is preferred when
    # available for speed.
    try:
        from cStringIO import StringIO  # noqa
    except ImportError:
        from StringIO import StringIO  # noqa
    BytesIO = WhateverIO = StringIO  # noqa
############## collections.OrderedDict ######################################
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict # noqa
############## logging.LoggerAdapter ########################################
import logging
try:
import multiprocessing
except ImportError:
multiprocessing = None # noqa
import sys
def _checkLevel(level):
if isinstance(level, int):
rv = level
elif str(level) == level:
if level not in logging._levelNames:
raise ValueError("Unknown level: %r" % level)
rv = logging._levelNames[level]
else:
raise TypeError("Level not an integer or a valid string: %r" % level)
return rv
class _CompatLoggerAdapter(object):
    """Fallback adapter used when ``logging.LoggerAdapter`` is missing.

    Wraps a logger and injects ``extra`` context into every record.
    """
    def __init__(self, logger, extra):
        self.logger = logger
        self.extra = extra
    def setLevel(self, level):
        self.logger.level = _checkLevel(level)
    def process(self, msg, kwargs):
        # Inject the adapter's extra context into the log call kwargs.
        kwargs["extra"] = self.extra
        return msg, kwargs
    def debug(self, msg, *args, **kwargs):
        self.log(logging.DEBUG, msg, *args, **kwargs)
    def info(self, msg, *args, **kwargs):
        self.log(logging.INFO, msg, *args, **kwargs)
    def warning(self, msg, *args, **kwargs):
        self.log(logging.WARNING, msg, *args, **kwargs)
    # Alias kept for callers using the older spelling.
    warn = warning
    def error(self, msg, *args, **kwargs):
        self.log(logging.ERROR, msg, *args, **kwargs)
    def exception(self, msg, *args, **kwargs):
        # Log at ERROR level with exception info attached.
        kwargs.setdefault("exc_info", 1)
        self.error(msg, *args, **kwargs)
    def critical(self, msg, *args, **kwargs):
        self.log(logging.CRITICAL, msg, *args, **kwargs)
    # Alias kept for callers using the older spelling.
    fatal = critical
    def log(self, level, msg, *args, **kwargs):
        if self.logger.isEnabledFor(level):
            msg, kwargs = self.process(msg, kwargs)
            self._log(level, msg, args, **kwargs)
    def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
                   func=None, extra=None):
        # Build a LogRecord, merging in *extra* fields and tagging the
        # record with the current multiprocessing process name.
        rv = logging.LogRecord(name, level, fn, lno, msg, args, exc_info, func)
        if extra is not None:
            for key, value in extra.items():
                # Refuse to clobber reserved/computed record attributes.
                if key in ("message", "asctime") or key in rv.__dict__:
                    raise KeyError(
                        "Attempt to override %r in LogRecord" % key)
                rv.__dict__[key] = value
        if multiprocessing is not None:
            rv.processName = multiprocessing.current_process()._name
        else:
            rv.processName = ""
        return rv
    def _log(self, level, msg, args, exc_info=None, extra=None):
        # Locate the caller (file, line, function) when the logging
        # machinery supports it, then emit via the wrapped logger.
        defcaller = "(unknown file)", 0, "(unknown function)"
        if logging._srcfile:
            # IronPython doesn't track Python frames, so findCaller
            # throws an exception on some versions of IronPython.
            # We trap it here so that IronPython can use logging.
            try:
                fn, lno, func = self.logger.findCaller()
            except ValueError:
                fn, lno, func = defcaller
        else:
            fn, lno, func = defcaller
        if exc_info:
            # A truthy non-tuple exc_info means "use the active exception".
            if not isinstance(exc_info, tuple):
                exc_info = sys.exc_info()
        record = self.makeRecord(self.logger.name, level, fn, lno, msg,
                                 args, exc_info, func, extra)
        self.logger.handle(record)
    def isEnabledFor(self, level):
        return self.logger.isEnabledFor(level)
    def addHandler(self, hdlr):
        self.logger.addHandler(hdlr)
    def removeHandler(self, hdlr):
        self.logger.removeHandler(hdlr)
    @property
    def level(self):
        return self.logger.level
try:
from logging import LoggerAdapter
except ImportError:
LoggerAdapter = _CompatLoggerAdapter # noqa
############## itertools.zip_longest #######################################
try:
    from itertools import izip_longest as zip_longest
except ImportError:
    # Python 2.5 fallback (izip_longest appeared in 2.6): pad each
    # iterator with fillvalue forever, and stop via a sentinel that
    # raises IndexError after len(args) - 1 fills -- i.e. once every
    # input is exhausted.
    import itertools
    def zip_longest(*args, **kwds):  # noqa
        fillvalue = kwds.get("fillvalue")
        def sentinel(counter=([fillvalue] * (len(args) - 1)).pop):
            # pop() on the shared default list yields fillvalue while
            # entries remain, then raises IndexError to end iteration.
            yield counter()  # yields the fillvalue, or raises IndexError
        fillers = itertools.repeat(fillvalue)
        iters = [itertools.chain(it, sentinel(), fillers)
                 for it in args]
        try:
            for tup in itertools.izip(*iters):
                yield tup
        except IndexError:
            pass
############## itertools.chain.from_iterable ################################
from itertools import chain
def _compat_chain_from_iterable(iterables):
for it in iterables:
for element in it:
yield element
try:
chain_from_iterable = getattr(chain, "from_iterable")
except AttributeError:
chain_from_iterable = _compat_chain_from_iterable
############## logging.handlers.WatchedFileHandler ##########################
import os
from stat import ST_DEV, ST_INO
import platform as _platform
if _platform.system() == "Windows":
    # since windows doesn't go with WatchedFileHandler use FileHandler instead
    WatchedFileHandler = logging.FileHandler
else:
    try:
        from logging.handlers import WatchedFileHandler
    except ImportError:
        # Backport for Python versions whose logging.handlers lacks it.
        class WatchedFileHandler(logging.FileHandler):  # noqa
            """
            A handler for logging to a file, which watches the file
            to see if it has changed while in use. This can happen because of
            usage of programs such as newsyslog and logrotate which perform
            log file rotation. This handler, intended for use under Unix,
            watches the file to see if it has changed since the last emit.
            (A file has changed if its device or inode have changed.)
            If it has changed, the old file stream is closed, and the file
            opened to get a new stream.
            This handler is not appropriate for use under Windows, because
            under Windows open files cannot be moved or renamed - logging
            opens the files with exclusive locks - and so there is no need
            for such a handler. Furthermore, ST_INO is not supported under
            Windows; stat always returns zero for this value.
            This handler is based on a suggestion and patch by Chad J.
            Schroeder.
            """
            def __init__(self, *args, **kwargs):
                logging.FileHandler.__init__(self, *args, **kwargs)
                # Remember the (device, inode) pair of the open file so
                # rotation can be detected later; (-1, -1) = not yet known.
                if not os.path.exists(self.baseFilename):
                    self.dev, self.ino = -1, -1
                else:
                    stat = os.stat(self.baseFilename)
                    self.dev, self.ino = stat[ST_DEV], stat[ST_INO]
            def emit(self, record):
                """
                Emit a record.
                First check if the underlying file has changed, and if it
                has, close the old stream and reopen the file to get the
                current stream.
                """
                # Missing file counts as changed (it was rotated away).
                if not os.path.exists(self.baseFilename):
                    stat = None
                    changed = 1
                else:
                    stat = os.stat(self.baseFilename)
                    changed = ((stat[ST_DEV] != self.dev) or
                               (stat[ST_INO] != self.ino))
                if changed and self.stream is not None:
                    # Flush/close the stale stream and reopen the path,
                    # then re-record the new (device, inode).
                    self.stream.flush()
                    self.stream.close()
                    self.stream = self._open()
                    if stat is None:
                        stat = os.stat(self.baseFilename)
                    self.dev, self.ino = stat[ST_DEV], stat[ST_INO]
                logging.FileHandler.emit(self, record)
| bsd-3-clause | b90847f02bac3ada1577d096877413f9 | 32.189591 | 79 | 0.566756 | 4.376471 | false | false | false | false |
mozilla/make.mozilla.org | vendor-local/lib/python/requests/packages/urllib3/filepost.py | 41 | 2663 | # urllib3/filepost.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import codecs
import mimetypes
try:
from mimetools import choose_boundary
except ImportError:
from .packages.mimetools_choose_boundary import choose_boundary
from io import BytesIO
from .packages import six
from .packages.six import b
writer = codecs.lookup('utf-8')[3]
def get_content_type(filename):
    """Guess a MIME type from *filename*, defaulting to octet-stream."""
    guessed, _encoding = mimetypes.guess_type(filename)
    return guessed or 'application/octet-stream'
def iter_fields(fields):
    """
    Iterate over fields as ``(name, value)`` pairs.

    Accepts either a dict or an iterable of two-tuples.
    """
    if isinstance(fields, dict):
        fields = six.iteritems(fields)
    return ((name, value) for name, value in fields)
def encode_multipart_formdata(fields, boundary=None):
    """
    Encode a dictionary of ``fields`` using the multipart/form-data mime format.
    :param fields:
        Dictionary of fields or list of (key, value) field tuples. The key is
        treated as the field name, and the value as the body of the form-data
        bytes. If the value is a tuple of two elements, then the first element
        is treated as the filename of the form-data section.
        Field names and filenames must be unicode.
    :param boundary:
        If not specified, then a random boundary will be generated using
        :func:`mimetools.choose_boundary`.
    :returns: a ``(body_bytes, content_type_bytes)`` pair.
    """
    body = BytesIO()
    if boundary is None:
        boundary = choose_boundary()
    for fieldname, value in iter_fields(fields):
        # Each part starts with the boundary separator line.
        body.write(b('--%s\r\n' % (boundary)))
        if isinstance(value, tuple):
            # (filename, data) tuple: emit a file part with a guessed type.
            filename, data = value
            writer(body).write('Content-Disposition: form-data; name="%s"; '
                               'filename="%s"\r\n' % (fieldname, filename))
            body.write(b('Content-Type: %s\r\n\r\n' %
                       (get_content_type(filename))))
        else:
            # Plain value: emit a text/plain part.
            data = value
            writer(body).write('Content-Disposition: form-data; name="%s"\r\n'
                               % (fieldname))
            body.write(b'Content-Type: text/plain\r\n\r\n')
        if isinstance(data, int):
            data = str(data)  # Backwards compatibility
        if isinstance(data, six.text_type):
            # Text goes through the utf-8 codec writer; bytes are written raw.
            writer(body).write(data)
        else:
            body.write(data)
        body.write(b'\r\n')
    # Closing boundary has a trailing '--'.
    body.write(b('--%s--\r\n' % (boundary)))
    content_type = b('multipart/form-data; boundary=%s' % boundary)
    return body.getvalue(), content_type
| bsd-3-clause | 1702de564debac710ef2ac6cad6689a3 | 29.261364 | 80 | 0.623357 | 3.887591 | false | false | false | false |
mozilla/make.mozilla.org | vendor-local/lib/python/south/modelsinspector.py | 1 | 17995 | """
Like the old south.modelsparser, but using introspection where possible
rather than direct inspection of models.py.
"""
import datetime
import re
import decimal
from south.utils import get_attribute, auto_through
from django.db import models
from django.db.models.base import ModelBase, Model
from django.db.models.fields import NOT_PROVIDED
from django.db.models import CASCADE, PROTECT, SET, SET_NULL, SET_DEFAULT, DO_NOTHING
from django.conf import settings
from django.utils.functional import Promise
from django.contrib.contenttypes import generic
from django.utils.datastructures import SortedDict
from django.utils import datetime_safe
NOISY = False
try:
from django.utils import timezone
except ImportError:
timezone = False
def convert_on_delete_handler(value):
    """
    Serialize a Django ``on_delete`` handler into source text for a frozen
    migration (e.g. ``models.CASCADE`` or ``models.SET_NULL``).

    :raises ValueError: for ``SET(...)`` handlers that cannot be serialized
        and for unrecognized handler values.
    """
    django_db_models_module = 'models' # relative to standard import 'django.db'
    if django_db_models_module:
        if value in (CASCADE, PROTECT, DO_NOTHING, SET_DEFAULT):
            # straightforward functions
            return '%s.%s' % (django_db_models_module, value.__name__)
        else:
            # This is totally dependent on the implementation of django.db.models.deletion.SET
            func_name = getattr(value, '__name__', None)
            if func_name == 'set_on_delete':
                # we must inspect the function closure to see what parameters were passed in
                # (Python 2 ``func_closure``; the single cell holds SET's argument)
                closure_contents = value.func_closure[0].cell_contents
                if closure_contents is None:
                    return "%s.SET_NULL" % (django_db_models_module)
                # simple function we can perhaps cope with:
                elif hasattr(closure_contents, '__call__'):
                    # module_name = getattr(closure_contents, '__module__', None)
                    # inner_func_name = getattr(closure_contents, '__name__', None)
                    # if inner_func_name:
                    # TODO there is no way of checking that module_name matches the
                    # model file, which is the only code that will be imported in
                    # the Fake ORM. Any other functions won't be available.
                    # TODO this doesn't work anyway yet as even the app's models
                    # file is not imported, contrary to the coments in
                    # orm.LazyFakeORM.eval_in_context, which implies that
                    # migrations are expected to import that.
                    # return "%s.SET(%s)" % (django_db_models_module, inner_func_name)
                    raise ValueError("Function for on_delete could not be serialized.")
                else:
                    # an actual value rather than a sentinel function - insanity
                    raise ValueError("on_delete=SET with a model instance is not supported.")
    raise ValueError("%s was not recognized as a valid model deletion handler. Possible values: %s." % (value, ', '.join(f.__name__ for f in (CASCADE, PROTECT, SET, SET_NULL, SET_DEFAULT, DO_NOTHING))))
# Gives information about how to introspect certain fields.
# This is a list of triples; the first item is a list of fields it applies to,
# (note that isinstance is used, so superclasses are perfectly valid here)
# the second is a list of positional argument descriptors, and the third
# is a list of keyword argument descriptors.
# Descriptors are of the form:
# [attrname, options]
# Where attrname is the attribute on the field to get the value from, and options
# is an optional dict.
#
# The introspector uses the combination of all matching entries, in order.
introspection_details = [
    # Options shared by every Field subclass.
    (
        (models.Field, ),
        [],
        {
            "null": ["null", {"default": False}],
            "blank": ["blank", {"default": False, "ignore_if":"primary_key"}],
            "primary_key": ["primary_key", {"default": False}],
            "max_length": ["max_length", {"default": None}],
            "unique": ["_unique", {"default": False}],
            "db_index": ["db_index", {"default": False}],
            "default": ["default", {"default": NOT_PROVIDED, "ignore_dynamics": True}],
            "db_column": ["db_column", {"default": None}],
            "db_tablespace": ["db_tablespace", {"default": settings.DEFAULT_INDEX_TABLESPACE}],
        },
    ),
    # Single-valued relations (FK / one-to-one).
    (
        (models.ForeignKey, models.OneToOneField),
        [],
        {
            "to": ["rel.to", {}],
            "to_field": ["rel.field_name", {"default_attr": "rel.to._meta.pk.name"}],
            "related_name": ["rel.related_name", {"default": None}],
            "db_index": ["db_index", {"default": True}],
            "on_delete": ["rel.on_delete", {"default": CASCADE, "is_django_function": True, "converter": convert_on_delete_handler, }],
        },
    ),
    # Many-to-many relations.
    (
        (models.ManyToManyField,),
        [],
        {
            "to": ["rel.to", {}],
            "symmetrical": ["rel.symmetrical", {"default": True}],
            "related_name": ["rel.related_name", {"default": None}],
            "db_table": ["db_table", {"default": None}],
            # TODO: Kind of ugly to add this one-time-only option
            "through": ["rel.through", {"ignore_if_auto_through": True}],
        },
    ),
    # Date/time auto-update flags.
    (
        (models.DateField, models.TimeField),
        [],
        {
            "auto_now": ["auto_now", {"default": False}],
            "auto_now_add": ["auto_now_add", {"default": False}],
        },
    ),
    (
        (models.DecimalField, ),
        [],
        {
            "max_digits": ["max_digits", {"default": None}],
            "decimal_places": ["decimal_places", {"default": None}],
        },
    ),
    # SlugField is indexed by default, unlike plain Field.
    (
        (models.SlugField, ),
        [],
        {
            "db_index": ["db_index", {"default": True}],
        },
    ),
    (
        (models.BooleanField, ),
        [],
        {
            "default": ["default", {"default": NOT_PROVIDED, "converter": bool}],
            "blank": ["blank", {"default": True, "ignore_if":"primary_key"}],
        },
    ),
    (
        (models.FilePathField, ),
        [],
        {
            "path": ["path", {"default": ''}],
            "match": ["match", {"default": None}],
            "recursive": ["recursive", {"default": False}],
        },
    ),
    # Generic (contenttypes) relations.
    (
        (generic.GenericRelation, ),
        [],
        {
            "to": ["rel.to", {}],
            "symmetrical": ["rel.symmetrical", {"default": True}],
            "object_id_field": ["object_id_field_name", {"default": "object_id"}],
            "content_type_field": ["content_type_field_name", {"default": "content_type"}],
            "blank": ["blank", {"default": True}],
        },
    ),
]
# Regexes of allowed field full paths.
# Written as raw strings so the ``\.`` escapes are explicit (non-raw
# ``"\."`` produces the same value but is an invalid-escape on Python 3).
allowed_fields = [
    r"^django\.db",
    r"^django\.contrib\.contenttypes\.generic",
    r"^django\.contrib\.localflavor",
]
# Regexes of ignored fields (custom fields which look like fields, but have no column behind them)
ignored_fields = [
    r"^django\.contrib\.contenttypes\.generic\.GenericRelation",
    r"^django\.contrib\.contenttypes\.generic\.GenericForeignKey",
]
# Similar, but for Meta, so just the inner level (kwds).
# Each entry maps a frozen keyword to an [attrname, options] descriptor
# evaluated against ``model._meta`` (see get_model_meta / get_value).
meta_details = {
    "db_table": ["db_table", {"default_attr_concat": ["%s_%s", "app_label", "module_name"]}],
    "db_tablespace": ["db_tablespace", {"default": settings.DEFAULT_TABLESPACE}],
    "unique_together": ["unique_together", {"default": []}],
    "ordering": ["ordering", {"default": []}],
    "proxy": ["proxy", {"default": False, "ignore_missing": True}],
}
# Python 2.4 compatibility: any() became a builtin in 2.5. Only fall back
# to the reduce() emulation when the builtin is genuinely missing, rather
# than unconditionally shadowing it (the builtin also short-circuits).
try:
    any
except NameError:
    any = lambda x: reduce(lambda y, z: y or z, x, False)
def add_introspection_rules(rules=(), patterns=()):
    """
    Allows you to add some introspection rules at runtime, e.g. for 3rd party apps.

    :param rules: iterable of (classes, arg_descriptors, kwarg_descriptors)
        triples appended to ``introspection_details``.
    :param patterns: iterable of regex strings appended to ``allowed_fields``.

    The defaults were previously mutable ``[]`` literals; empty tuples avoid
    the shared-mutable-default pitfall while remaining valid arguments.
    """
    assert isinstance(rules, (list, tuple))
    assert isinstance(patterns, (list, tuple))
    allowed_fields.extend(patterns)
    introspection_details.extend(rules)
def add_ignored_fields(patterns):
    """Register extra regex patterns of field classes South should ignore."""
    assert isinstance(patterns, (list, tuple))
    ignored_fields.extend(patterns)
def can_ignore(field):
    """
    Returns True if we know for certain that we can ignore this field, False
    otherwise (i.e. its full class path matches an ``ignored_fields`` regex).
    """
    klass = field.__class__
    full_name = "%s.%s" % (klass.__module__, klass.__name__)
    for pattern in ignored_fields:
        if re.match(pattern, full_name):
            return True
    return False
def can_introspect(field):
    """
    Returns True if we are allowed to introspect this field, False otherwise.
    ('allowed' means 'in core'. Custom fields can declare they are
    introspectable by the default South rules by adding the attribute
    _south_introspects = True.)
    """
    # Fields may opt in explicitly via the special attribute.
    if getattr(field, "_south_introspects", False):
        return True
    # Otherwise the field's full class path must match a whitelisted regex.
    klass = field.__class__
    full_name = "%s.%s" % (klass.__module__, klass.__name__)
    for pattern in allowed_fields:
        if re.match(pattern, full_name):
            return True
    return False
def matching_details(field):
    """
    Returns the union of all matching entries in introspection_details for
    the field, as an ``(args, kwargs)`` descriptor pair. Later entries
    override earlier keyword descriptors.
    """
    combined_args = []
    combined_kwargs = {}
    for classes, args, kwargs in introspection_details:
        if isinstance(field, tuple(classes)):
            combined_args.extend(args)
            combined_kwargs.update(kwargs)
    return combined_args, combined_kwargs
class IsDefault(Exception):
    """
    Raised by the descriptor helpers (get_value) when a field attribute
    holds its default value and should therefore be omitted from the
    frozen definition.
    """
def get_value(field, descriptor):
    """
    Gets an attribute value from a Field instance and formats it.

    ``descriptor`` is an ``[attrname, options]`` pair as stored in
    ``introspection_details``.

    :raises IsDefault: when the value should be omitted from the frozen
        definition (it equals its declared default, or one of the
        ``ignore_*`` options applies).
    """
    attrname, options = descriptor
    # If the options say it's not a attribute name but a real value, use that.
    if options.get('is_value', False):
        value = attrname
    else:
        try:
            value = get_attribute(field, attrname)
        except AttributeError:
            # Missing attribute either means "use the default" or is a
            # genuine error, depending on the descriptor's options.
            if options.get("ignore_missing", False):
                raise IsDefault
            else:
                raise
    # Lazy-eval functions get eval'd.
    if isinstance(value, Promise):
        value = unicode(value)
    # If the value is the same as the default, omit it for clarity
    if "default" in options and value == options['default']:
        raise IsDefault
    # If there's an ignore_if, use it
    if "ignore_if" in options:
        if get_attribute(field, options['ignore_if']):
            raise IsDefault
    # If there's an ignore_if_auto_through which is True, use it
    if options.get("ignore_if_auto_through", False):
        if auto_through(field):
            raise IsDefault
    # Some default values need to be gotten from an attribute too.
    if "default_attr" in options:
        default_value = get_attribute(field, options['default_attr'])
        if value == default_value:
            raise IsDefault
    # Some are made from a formatting string and several attrs (e.g. db_table)
    if "default_attr_concat" in options:
        format, attrs = options['default_attr_concat'][0], options['default_attr_concat'][1:]
        default_value = format % tuple(map(lambda x: get_attribute(field, x), attrs))
        if value == default_value:
            raise IsDefault
    # Clean and return the value
    return value_clean(value, options)
def value_clean(value, options=None):
    """
    Takes a value and cleans it up for freezing (e.g. so timezones work
    right), returning source text (a repr string or an ``orm[...]``
    expression) unless ``is_django_function`` is set, in which case the
    converter's output is returned as-is.
    """
    # Fix: the default used to be a shared mutable ``{}`` literal; use the
    # None-sentinel idiom instead so the default can never be mutated.
    if options is None:
        options = {}
    # Lazy-eval functions get eval'd.
    if isinstance(value, Promise):
        value = unicode(value)
    # Callables get called.
    if not options.get('is_django_function', False) and callable(value) and not isinstance(value, ModelBase):
        # Datetime.datetime.now is special, as we can access it from the eval
        # context (and because it changes all the time; people will file bugs otherwise).
        if value == datetime.datetime.now:
            return "datetime.datetime.now"
        elif value == datetime.datetime.utcnow:
            return "datetime.datetime.utcnow"
        elif value == datetime.date.today:
            return "datetime.date.today"
        # In case we use Django's own now function, revert to datetime's
        # original one since we'll deal with timezones on our own.
        elif timezone and value == timezone.now:
            return "datetime.datetime.now"
        # All other callables get called.
        value = value()
    # Models get their own special repr()
    if isinstance(value, ModelBase):
        # If it's a proxy model, follow it back to its non-proxy parent
        if getattr(value._meta, "proxy", False):
            value = value._meta.proxy_for_model
        return "orm['%s.%s']" % (value._meta.app_label, value._meta.object_name)
    # As do model instances
    if isinstance(value, Model):
        if options.get("ignore_dynamics", False):
            raise IsDefault
        return "orm['%s.%s'].objects.get(pk=%r)" % (value.__class__._meta.app_label, value.__class__._meta.object_name, value.pk)
    # Make sure Decimal is converted down into a string
    if isinstance(value, decimal.Decimal):
        value = str(value)
    # in case the value is timezone aware
    datetime_types = (
        datetime.datetime,
        datetime.time,
        datetime_safe.datetime,
    )
    if (timezone and isinstance(value, datetime_types) and
            getattr(settings, 'USE_TZ', False) and
            value is not None and timezone.is_aware(value)):
        default_timezone = timezone.get_default_timezone()
        value = timezone.make_naive(value, default_timezone)
    # datetime_safe has an improper repr value
    if isinstance(value, datetime_safe.datetime):
        value = datetime.datetime(*value.utctimetuple()[:7])
    # converting a date value to a datetime to be able to handle
    # timezones later gracefully
    elif isinstance(value, (datetime.date, datetime_safe.date)):
        value = datetime.datetime(*value.timetuple()[:3])
    # Now, apply the converter func if there is one
    if "converter" in options:
        value = options['converter'](value)
    # Return the final value
    if options.get('is_django_function', False):
        return value
    else:
        return repr(value)
def introspector(field):
    """
    Given a field, introspects its definition triple: returns the
    ``(args, kwargs)`` pair, omitting anything still holding its default.
    """
    arg_descriptors, kwarg_descriptors = matching_details(field)
    args = []
    kwargs = {}
    # Each descriptor resolves to a value unless IsDefault says "omit it".
    for descriptor in arg_descriptors:
        try:
            args.append(get_value(field, descriptor))
        except IsDefault:
            pass
    for keyword, descriptor in kwarg_descriptors.items():
        try:
            kwargs[keyword] = get_value(field, descriptor)
        except IsDefault:
            pass
    return args, kwargs
def get_model_fields(model, m2m=False):
    """
    Given a model class, returns a dict of {field_name: field_triple} defs.

    :param m2m: when True, local many-to-many fields are included as well.

    A value of ``None`` in the result marks a field that could not be
    introspected.
    """
    field_defs = SortedDict()
    # NOTE(review): inherited_fields is collected below but never read
    # afterwards -- confirm whether the recursion is still needed.
    inherited_fields = {}
    # Go through all bases (that are themselves models, but not Model)
    for base in model.__bases__:
        if hasattr(base, '_meta') and issubclass(base, models.Model):
            if not base._meta.abstract:
                # Looks like we need their fields, Ma.
                inherited_fields.update(get_model_fields(base))
    # Now, go through all the fields and try to get their definition
    source = model._meta.local_fields[:]
    if m2m:
        source += model._meta.local_many_to_many
    for field in source:
        # Can we ignore it completely?
        if can_ignore(field):
            continue
        # Does it define a south_field_triple method?
        if hasattr(field, "south_field_triple"):
            if NOISY:
                print " ( Nativing field: %s" % field.name
            field_defs[field.name] = field.south_field_triple()
        # Can we introspect it?
        elif can_introspect(field):
            # Get the full field class path.
            field_class = field.__class__.__module__ + "." + field.__class__.__name__
            # Run this field through the introspector
            args, kwargs = introspector(field)
            # Workaround for Django bug #13987
            if model._meta.pk.column == field.column and 'primary_key' not in kwargs:
                kwargs['primary_key'] = True
            # That's our definition!
            field_defs[field.name] = (field_class, args, kwargs)
        # Shucks, no definition!
        else:
            if NOISY:
                print " ( Nodefing field: %s" % field.name
            field_defs[field.name] = None
    # If they've used the horrific hack that is order_with_respect_to, deal with
    # it.
    if model._meta.order_with_respect_to:
        field_defs['_order'] = ("django.db.models.fields.IntegerField", [], {"default": "0"})
    return field_defs
def get_model_meta(model):
    """
    Given a model class, will return the dict representing the Meta class,
    plus an ``_ormbases`` list naming any concrete (non-abstract) model
    base classes.
    """
    meta_def = {}
    # Introspect the declared Meta attributes; defaults are omitted.
    for keyword, descriptor in meta_details.items():
        try:
            meta_def[keyword] = get_value(model._meta, descriptor)
        except IsDefault:
            pass
    # Record concrete model bases. The key is '_ormbases' because '_bases'
    # was previously used for a list of full class paths to bases, so we
    # can't conflict with it.
    for base in model.__bases__:
        if hasattr(base, '_meta') and issubclass(base, models.Model):
            if not base._meta.abstract:
                meta_def.setdefault('_ormbases', []).append("%s.%s" % (
                    base._meta.app_label,
                    base._meta.object_name,
                ))
    return meta_def
# Now, load the built-in South introspection plugins
import south.introspection_plugins
| bsd-3-clause | ff2080481c17d1e4c795d015fb893b33 | 37.698925 | 202 | 0.593443 | 4.091633 | false | false | false | false |
mozilla/make.mozilla.org | make_mozilla/projects/admin.py | 1 | 1204 | from django.contrib.gis import admin
from make_mozilla.projects import models
class ProjectStepInline(admin.TabularInline):
    # Tabular inline editor for a project's steps (referenced by the
    # commented-out ``inlines`` option on ProjectAdmin).
    model = models.ProjectStep
class ProjectAdmin(admin.ModelAdmin):
    # Changelist columns; 'featured' is editable directly in the list view.
    list_display = ('name', 'featured', 'contributor', )
    list_editable = ('featured', )
    list_filter = ('contributor', 'difficulties', 'topics', 'tools', 'skills', )
    # url_hash is kept out of the edit form entirely; presumably it is
    # generated elsewhere -- confirm before exposing it.
    exclude = ('url_hash',)
    prepopulated_fields = {'slug': ('name',),}
    # inlines = [ProjectStepInline,]
class TagAdmin(admin.ModelAdmin):
    # 'value' is auto-filled from 'label' as the user types in the admin.
    prepopulated_fields = {'value':('label',),}
    fieldsets = (
        (None, {
            'fields': ('label', 'value', ),
        }),
        # 'index' lives in a collapsed "Advanced" section.
        ('Advanced', {
            'classes': ('collapse', ),
            'fields': ('index', ),
        })
    )
# The tag variants reuse TagAdmin's configuration unchanged.
class TopicAdmin(TagAdmin):
    pass
class DifficultyAdmin(TagAdmin):
    pass
class SkillAdmin(TagAdmin):
    pass
class ContributorAdmin(admin.ModelAdmin):
    pass
# Hook everything up to the default admin site.
admin.site.register(models.Project, ProjectAdmin)
admin.site.register(models.Topic, TopicAdmin)
admin.site.register(models.Difficulty, DifficultyAdmin)
admin.site.register(models.Skill, SkillAdmin)
admin.site.register(models.Contributor, ContributorAdmin)
| bsd-3-clause | 3ad123b71d2ad5228ad1bc2d00ad1038 | 22.607843 | 80 | 0.655316 | 3.716049 | false | false | false | false |
mozilla/make.mozilla.org | vendor-local/lib/python/celery/task/http.py | 14 | 6477 | # -*- coding: utf-8 -*-
"""
celery.task.http
~~~~~~~~~~~~~~~~
Task webhooks implementation.
:copyright: (c) 2009 - 2012 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import sys
import urllib2
from urllib import urlencode
from urlparse import urlparse
try:
from urlparse import parse_qsl
except ImportError: # pragma: no cover
from cgi import parse_qsl # noqa
from anyjson import deserialize
from .. import __version__ as celery_version
from .base import Task as BaseTask
# Methods whose task kwargs must travel in the query string (no body).
GET_METHODS = frozenset(["GET", "HEAD"])
class InvalidResponseError(Exception):
    """The remote server gave an invalid response."""
class RemoteExecuteError(Exception):
    """The remote task gave a custom error."""
class UnknownStatusError(InvalidResponseError):
    """The remote server gave an unknown status."""
def maybe_utf8(value):
    """Encode to utf-8, only if the value is Unicode."""
    if not isinstance(value, unicode):
        return value
    return value.encode("utf-8")
if sys.version_info >= (3, 0):
    # Python 3: all strings are already unicode, so just coerce to a dict.
    def utf8dict(tup):
        if not isinstance(tup, dict):
            return dict(tup)
        return tup
else:
    def utf8dict(tup):  # noqa
        """With a dict's items() tuple return a new dict with any utf-8
        keys/values encoded."""
        return dict((key.encode("utf-8"), maybe_utf8(value))
                        for key, value in tup)
def extract_response(raw_response):
    """Extract the response text from a raw JSON response.

    :raises InvalidResponseError: if the payload is empty or not valid JSON.
    :raises RemoteExecuteError: if the remote task reported ``failure``.
    :raises UnknownStatusError: for any other ``status`` value.
    """
    if not raw_response:
        raise InvalidResponseError("Empty response")
    try:
        payload = deserialize(raw_response)
    except ValueError, exc:
        # Python 2 three-arg raise: keep the original traceback attached.
        raise InvalidResponseError, InvalidResponseError(
            str(exc)), sys.exc_info()[2]
    status = payload["status"]
    if status == "success":
        return payload["retval"]
    elif status == "failure":
        raise RemoteExecuteError(payload.get("reason"))
    else:
        raise UnknownStatusError(str(status))
class MutableURL(object):
    """Object wrapping a Uniform Resource Locator.
    Supports editing the query parameter list.
    You can convert the object back to a string, the query will be
    properly urlencoded.
    Examples
    >>> url = MutableURL("http://www.google.com:6580/foo/bar?x=3&y=4#foo")
    >>> url.query
    {'x': '3', 'y': '4'}
    >>> str(url)
    'http://www.google.com:6580/foo/bar?y=4&x=3#foo'
    >>> url.query["x"] = 10
    >>> url.query.update({"George": "Costanza"})
    >>> str(url)
    'http://www.google.com:6580/foo/bar?y=4&x=10&George=Costanza#foo'
    """
    def __init__(self, url):
        # Keep the parsed 6-tuple plus a mutable dict of query parameters
        # (parts[4] is the raw query string).
        self.parts = urlparse(url)
        self.query = dict(parse_qsl(self.parts[4]))
    def __str__(self):
        scheme, netloc, path, params, query, fragment = self.parts
        # Re-encode the (possibly edited) query dict; keys/values go
        # through utf8dict so urlencode never sees unencoded unicode.
        query = urlencode(utf8dict(self.query.items()))
        components = [scheme + "://", netloc, path or "/",
                      ";%s" % params if params else "",
                      "?%s" % query if query else "",
                      "#%s" % fragment if fragment else ""]
        return "".join(filter(None, components))
    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, str(self))
class HttpDispatch(object):
    """Make task HTTP request and collect the task result.

    :param url: The URL to request.
    :param method: HTTP method used. Currently supported methods are `GET`
        and `POST`.
    :param task_kwargs: Task keyword arguments.
    :param logger: Logger used for user/system feedback.
    """
    user_agent = "celery/%s" % celery_version
    timeout = 5

    def __init__(self, url, method, task_kwargs, logger):
        self.url = url
        self.method = method
        self.task_kwargs = task_kwargs
        self.logger = logger

    def make_request(self, url, method, params):
        """Perform the HTTP request and return the raw response body."""
        request = urllib2.Request(url, params)
        for header, value in self.http_headers.items():
            request.add_header(header, value)
        # URL errors are deliberately left for the caller to handle.
        return urllib2.urlopen(request).read()

    def dispatch(self):
        """Dispatch callback and return result."""
        url = MutableURL(self.url)
        params = None
        if self.method in GET_METHODS:
            # GET/HEAD: pass the task kwargs in the query string.
            url.query.update(self.task_kwargs)
        else:
            # Everything else sends them as a urlencoded request body.
            params = urlencode(utf8dict(self.task_kwargs.items()))
        raw_response = self.make_request(str(url), self.method, params)
        return extract_response(raw_response)

    @property
    def http_headers(self):
        return {"User-Agent": self.user_agent}
class HttpDispatchTask(BaseTask):
    """Task dispatching to an URL.
    :keyword url: The URL location of the HTTP callback task.
    :keyword method: Method to use when dispatching the callback. Usually
        `GET` or `POST`.
    :keyword \*\*kwargs: Keyword arguments to pass on to the HTTP callback.
    .. attribute:: url
        If this is set, this is used as the default URL for requests.
        Default is to require the user of the task to supply the url as an
        argument, as this attribute is intended for subclasses.
    .. attribute:: method
        If this is set, this is the default method used for requests.
        Default is to require the user of the task to supply the method as an
        argument, as this attribute is intended for subclasses.
    """
    url = None
    method = None
    def run(self, url=None, method="GET", **kwargs):
        # Explicit arguments win; otherwise fall back to the class-level
        # defaults. Note self.method is only consulted when the caller
        # passes a falsy method explicitly, since run's default is "GET".
        url = url or self.url
        method = method or self.method
        logger = self.get_logger(**kwargs)
        return HttpDispatch(url, method, kwargs, logger).dispatch()
class URL(MutableURL):
    """HTTP Callback URL

    Supports requesting an URL asynchronously.

    :param url: URL to request.
    :keyword dispatcher: Class used to dispatch the request.
        By default this is :class:`HttpDispatchTask`.
    """
    dispatcher = HttpDispatchTask

    def __init__(self, url, dispatcher=None):
        super(URL, self).__init__(url)
        # Falls back to the class-level dispatcher when none is supplied.
        self.dispatcher = dispatcher or self.dispatcher

    def get_async(self, **kwargs):
        """Schedule an asynchronous GET request to this URL."""
        return self.dispatcher.delay(str(self), "GET", **kwargs)

    def post_async(self, **kwargs):
        """Schedule an asynchronous POST request to this URL."""
        return self.dispatcher.delay(str(self), "POST", **kwargs)
mozilla/make.mozilla.org | vendor-local/lib/python/requests/packages/urllib3/connectionpool.py | 8 | 18316 | # urllib3/connectionpool.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import logging
import socket
from socket import error as SocketError, timeout as SocketTimeout
try: # Python 3
from http.client import HTTPConnection, HTTPException
from http.client import HTTP_PORT, HTTPS_PORT
except ImportError:
from httplib import HTTPConnection, HTTPException
from httplib import HTTP_PORT, HTTPS_PORT
try: # Python 3
from queue import LifoQueue, Empty, Full
except ImportError:
from Queue import LifoQueue, Empty, Full
try: # Compiled with SSL?
HTTPSConnection = object
BaseSSLError = None
ssl = None
try: # Python 3
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
import ssl
BaseSSLError = ssl.SSLError
except (ImportError, AttributeError):
pass
from .request import RequestMethods
from .response import HTTPResponse
from .util import get_host, is_connection_dropped
from .exceptions import (
EmptyPoolError,
HostChangedError,
MaxRetryError,
SSLError,
TimeoutError,
)
from .packages.ssl_match_hostname import match_hostname, CertificateError
from .packages import six
xrange = six.moves.xrange
log = logging.getLogger(__name__)
# Sentinel distinguishing "argument not passed" from an explicit None.
_Default = object()
# Default port for each supported scheme, used when a URL omits the port.
port_by_scheme = {
    'http': HTTP_PORT,
    'https': HTTPS_PORT,
}
## Connection objects (extension of httplib)
class VerifiedHTTPSConnection(HTTPSConnection):
    """
    Based on httplib.HTTPSConnection but wraps the socket with
    SSL certification.
    """
    cert_reqs = None
    ca_certs = None
    def set_cert(self, key_file=None, cert_file=None,
                 cert_reqs='CERT_NONE', ca_certs=None):
        # Map the string names onto the ssl module's constants.
        ssl_req_scheme = {
            'CERT_NONE': ssl.CERT_NONE,
            'CERT_OPTIONAL': ssl.CERT_OPTIONAL,
            'CERT_REQUIRED': ssl.CERT_REQUIRED
        }
        self.key_file = key_file
        self.cert_file = cert_file
        # NOTE(review): an unrecognised cert_reqs string silently falls
        # back to CERT_NONE (no verification) -- confirm this is intended.
        self.cert_reqs = ssl_req_scheme.get(cert_reqs) or ssl.CERT_NONE
        self.ca_certs = ca_certs
    def connect(self):
        # Add certificate verification
        sock = socket.create_connection((self.host, self.port), self.timeout)
        # Wrap socket using verification with the root certs in
        # trusted_root_certs
        self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
                                    cert_reqs=self.cert_reqs,
                                    ca_certs=self.ca_certs)
        if self.ca_certs:
            # Hostname matching only runs when CA certs were supplied.
            match_hostname(self.sock.getpeercert(), self.host)
## Pool objects
class ConnectionPool(object):
    """
    Base class for all connection pools, such as
    :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
    """

    scheme = None          # overridden by subclasses ('http' / 'https')
    QueueCls = LifoQueue   # container type used for idle connections

    def __init__(self, host, port=None):
        self.host = host
        self.port = port

    def __str__(self):
        cls_name = type(self).__name__
        return '%s(host=%r, port=%r)' % (cls_name, self.host, self.port)
class HTTPConnectionPool(ConnectionPool, RequestMethods):
"""
Thread-safe connection pool for one host.
:param host:
Host used for this HTTP Connection (e.g. "localhost"), passed into
:class:`httplib.HTTPConnection`.
:param port:
Port used for this HTTP Connection (None is equivalent to 80), passed
into :class:`httplib.HTTPConnection`.
:param strict:
Causes BadStatusLine to be raised if the status line can't be parsed
as a valid HTTP/1.0 or 1.1 status line, passed into
:class:`httplib.HTTPConnection`.
:param timeout:
Socket timeout for each individual connection, can be a float. None
disables timeout.
:param maxsize:
Number of connections to save that can be reused. More than 1 is useful
in multithreaded situations. If ``block`` is set to false, more
connections will be created but they will not be saved once they've
been used.
:param block:
If set to True, no more than ``maxsize`` connections will be used at
a time. When no free connections are available, the call will block
until a connection has been released. This is a useful side effect for
particular multithreaded situations where one does not want to use more
than maxsize connections per host to prevent flooding.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
"""
scheme = 'http'
    def __init__(self, host, port=None, strict=False, timeout=None, maxsize=1,
                 block=False, headers=None):
        super(HTTPConnectionPool, self).__init__(host, port)
        self.strict = strict
        self.timeout = timeout
        self.pool = self.QueueCls(maxsize)
        self.block = block
        self.headers = headers or {}
        # Fill the queue up so that doing get() on it will block properly.
        # The None placeholders mean "slot free, create a fresh connection".
        for _ in xrange(maxsize):
            self.pool.put(None)
        # These are mostly for testing and debugging purposes.
        self.num_connections = 0
        self.num_requests = 0
def _new_conn(self):
"""
Return a fresh :class:`httplib.HTTPConnection`.
"""
self.num_connections += 1
log.info("Starting new HTTP connection (%d): %s" %
(self.num_connections, self.host))
return HTTPConnection(host=self.host, port=self.port)
    def _get_conn(self, timeout=None):
        """
        Get a connection. Will return a pooled connection if one is available.
        If no connections are available and :prop:`.block` is ``False``, then a
        fresh connection is returned.
        :param timeout:
            Seconds to wait before giving up and raising
            :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
            :prop:`.block` is ``True``.
        """
        conn = None
        try:
            conn = self.pool.get(block=self.block, timeout=timeout)
            # If this is a persistent connection, check if it got disconnected
            if conn and is_connection_dropped(conn):
                log.info("Resetting dropped connection: %s" % self.host)
                conn.close()
        except Empty:
            if self.block:
                raise EmptyPoolError(self,
                                     "Pool reached maximum size and no more "
                                     "connections are allowed.")
            pass # Oh well, we'll create a new connection then
        # The queue is pre-filled with None placeholders, so conn may be
        # None here; in that case a fresh connection is created.
        return conn or self._new_conn()
def _put_conn(self, conn):
"""
Put a connection back into the pool.
:param conn:
Connection object for the current host and port as returned by
:meth:`._new_conn` or :meth:`._get_conn`.
If the pool is already full, the connection is discarded because we
exceeded maxsize. If connections are discarded frequently, then maxsize
should be increased.
"""
try:
self.pool.put(conn, block=False)
except Full:
# This should never happen if self.block == True
log.warning("HttpConnectionPool is full, discarding connection: %s"
% self.host)
    def _make_request(self, conn, method, url, timeout=_Default,
                      **httplib_request_kw):
        """
        Perform a request on a given httplib connection object taken from our
        pool.

        :param timeout: per-request socket timeout; the ``_Default`` sentinel
            means "use the pool's configured timeout".
        """
        self.num_requests += 1
        if timeout is _Default:
            timeout = self.timeout
        conn.timeout = timeout # This only does anything in Py26+
        conn.request(method, url, **httplib_request_kw)
        # Set timeout
        sock = getattr(conn, 'sock', False) # AppEngine doesn't have sock attr.
        if sock:
            sock.settimeout(timeout)
        httplib_response = conn.getresponse()
        log.debug("\"%s %s %s\" %s %s" %
                  (method, url,
                   conn._http_vsn_str, # pylint: disable-msg=W0212
                   httplib_response.status, httplib_response.length))
        return httplib_response
def is_same_host(self, url):
"""
Check if the given ``url`` is a member of the same host as this
connection pool.
"""
# TODO: Add optional support for socket.gethostbyname checking.
scheme, host, port = get_host(url)
if self.port and not port:
# Use explicit default port for comparison when none is given.
port = port_by_scheme.get(scheme)
return (url.startswith('/') or
(scheme, host, port) == (self.scheme, self.host, self.port))
def urlopen(self, method, url, body=None, headers=None, retries=3,
            redirect=True, assert_same_host=True, timeout=_Default,
            pool_timeout=None, release_conn=None, **response_kw):
    """
    Get a connection from the pool and perform an HTTP request. This is the
    lowest level call for making a request, so you'll need to specify all
    the raw details.

    .. note::

       More commonly, it's appropriate to use a convenience method provided
       by :class:`.RequestMethods`, such as :meth:`request`.

    .. note::

       `release_conn` will only behave as expected if
       `preload_content=False` because we want to make
       `preload_content=False` the default behaviour someday soon without
       breaking backwards compatibility.

    :param method:
        HTTP request method (such as GET, POST, PUT, etc.)

    :param body:
        Data to send in the request body (useful for creating POST
        requests).

    :param headers:
        Dictionary of custom headers to send, such as User-Agent,
        If-None-Match, etc. If None, pool headers are used. If provided,
        these headers completely replace any pool-specific headers.

    :param retries:
        Number of retries to allow before raising a MaxRetryError exception.

    :param redirect:
        Automatically handle redirects (status codes 301, 302, 303, 307),
        each redirect counts as a retry.

    :param assert_same_host:
        If ``True``, will make sure that the host of the pool requests is
        consistent else will raise HostChangedError. When False, you can
        use the pool on an HTTP proxy and request foreign hosts.

    :param timeout:
        If specified, overrides the default timeout for this one request.

    :param pool_timeout:
        If set and the pool is set to block=True, then this method will
        block for ``pool_timeout`` seconds and raise EmptyPoolError if no
        connection is available within the time period.

    :param release_conn:
        If False, then the urlopen call will not release the connection
        back into the pool once a response is received (but will release if
        you read the entire contents of the response such as when
        `preload_content=True`). You will need to call
        ``r.release_conn()`` on the response ``r`` to return the connection
        back into the pool. If None, it takes the value of
        ``response_kw.get('preload_content', True)``.

    :param \\**response_kw:
        Additional parameters are passed to
        :meth:`urllib3.response.HTTPResponse.from_httplib`
    """
    if headers is None:
        headers = self.headers

    if retries < 0:
        raise MaxRetryError(self, url)

    if timeout is _Default:
        timeout = self.timeout

    if release_conn is None:
        release_conn = response_kw.get('preload_content', True)

    # Check host
    if assert_same_host and not self.is_same_host(url):
        raise HostChangedError(self, url, retries - 1)

    conn = None

    try:
        # Request a connection from the queue
        # (Could raise SocketError: Bad file descriptor)
        conn = self._get_conn(timeout=pool_timeout)

        # Make the request on the httplib connection object
        httplib_response = self._make_request(conn, method, url,
                                              timeout=timeout,
                                              body=body, headers=headers)

        # If we're going to release the connection in ``finally:``, then
        # the request doesn't need to know about the connection. Otherwise
        # it will also try to release it and we'll have a double-release
        # mess.
        response_conn = not release_conn and conn

        # Import httplib's response into our own wrapper object
        response = HTTPResponse.from_httplib(httplib_response,
                                             pool=self,
                                             connection=response_conn,
                                             **response_kw)

        # else:
        #     The connection will be put back into the pool when
        #     ``response.release_conn()`` is called (implicitly by
        #     ``response.read()``)

    except Empty:
        # Timed out by queue
        raise TimeoutError(self, "Request timed out. (pool_timeout=%s)" %
                           pool_timeout)

    except SocketTimeout:
        # Timed out by socket
        raise TimeoutError(self, "Request timed out. (timeout=%s)" %
                           timeout)

    except BaseSSLError as e:
        # SSL certificate error
        raise SSLError(e)

    except CertificateError as e:
        # Name mismatch
        raise SSLError(e)

    except (HTTPException, SocketError) as e:
        # Connection broken, discard. It will be replaced next _get_conn().
        conn = None
        # This is necessary so we can access e below
        err = e

    finally:
        if conn and release_conn:
            # Put the connection back to be reused
            self._put_conn(conn)

    if not conn:
        log.warn("Retrying (%d attempts remain) after connection "
                 "broken by '%r': %s" % (retries, err, url))
        # BUGFIX: propagate timeout/pool_timeout/release_conn and the
        # response keyword arguments on retry; previously they silently
        # reverted to their defaults after the first broken connection.
        return self.urlopen(method, url, body, headers, retries - 1,
                            redirect, assert_same_host, timeout=timeout,
                            pool_timeout=pool_timeout,
                            release_conn=release_conn,
                            **response_kw)  # Try again

    # Handle redirect?
    redirect_location = redirect and response.get_redirect_location()
    if redirect_location:
        log.info("Redirecting %s -> %s" % (url, redirect_location))
        # BUGFIX: same propagation fix as above for the redirect path.
        return self.urlopen(method, redirect_location, body, headers,
                            retries - 1, redirect, assert_same_host,
                            timeout=timeout, pool_timeout=pool_timeout,
                            release_conn=release_conn, **response_kw)

    return response
class HTTPSConnectionPool(HTTPConnectionPool):
    """
    Same as :class:`.HTTPConnectionPool`, but HTTPS.

    When Python is compiled with the :mod:`ssl` module, then
    :class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
    instead of :class:httplib.HTTPSConnection`.

    The ``key_file``, ``cert_file``, ``cert_reqs``, and ``ca_certs`` parameters
    are only used if :mod:`ssl` is available and are fed into
    :meth:`ssl.wrap_socket` to upgrade the connection socket into an SSL socket.
    """

    scheme = 'https'

    def __init__(self, host, port=None,
                 strict=False, timeout=None, maxsize=1,
                 block=False, headers=None,
                 key_file=None, cert_file=None,
                 cert_reqs='CERT_NONE', ca_certs=None):
        # Generic pooling arguments go to HTTPConnectionPool; only the
        # TLS-specific material is stored here.
        super(HTTPSConnectionPool, self).__init__(host, port,
                                                  strict, timeout, maxsize,
                                                  block, headers)
        self.key_file = key_file
        self.cert_file = cert_file
        self.cert_reqs = cert_reqs
        self.ca_certs = ca_certs

    def _new_conn(self):
        """
        Return a fresh :class:`httplib.HTTPSConnection`.
        """
        self.num_connections += 1
        log.info("Starting new HTTPS connection (%d): %s"
                 % (self.num_connections, self.host))

        if not ssl:  # Platform-specific: Python compiled without +ssl
            # Fall back to an *unverified* HTTPS connection; raise if even
            # the plain httplib HTTPSConnection class is unavailable.
            if not HTTPSConnection or HTTPSConnection is object:
                raise SSLError("Can't connect to HTTPS URL because the SSL "
                               "module is not available.")
            return HTTPSConnection(host=self.host, port=self.port)

        # ssl is available: build a connection that can verify certificates
        # using the pool's stored key/cert configuration.
        connection = VerifiedHTTPSConnection(host=self.host, port=self.port)
        connection.set_cert(key_file=self.key_file, cert_file=self.cert_file,
                            cert_reqs=self.cert_reqs, ca_certs=self.ca_certs)
        return connection
def connection_from_url(url, **kw):
    """
    Given a url, return an :class:`.ConnectionPool` instance of its host.

    This is a shortcut for not having to parse out the scheme, host, and port
    of the url before creating an :class:`.ConnectionPool` instance.

    :param url:
        Absolute URL string that must include the scheme. Port is optional.

    :param \\**kw:
        Passes additional parameters to the constructor of the appropriate
        :class:`.ConnectionPool`. Useful for specifying things like
        timeout, maxsize, headers, etc.

    Example: ::

        >>> conn = connection_from_url('http://google.com/')
        >>> r = conn.request('GET', '/')
    """
    scheme, host, port = get_host(url)
    pool_cls = HTTPSConnectionPool if scheme == 'https' else HTTPConnectionPool
    return pool_cls(host, port=port, **kw)
| bsd-3-clause | 10912bd082cf8cf77538b09652e82bc5 | 34.15547 | 80 | 0.593197 | 4.552821 | false | false | false | false |
mozilla/make.mozilla.org | vendor-local/lib/python/celery/security/__init__.py | 14 | 2610 | from __future__ import absolute_import
from __future__ import with_statement
from kombu.serialization import registry
from .. import current_app
from ..exceptions import ImproperlyConfigured
from .serialization import register_auth
SSL_NOT_INSTALLED = """\
You need to install the pyOpenSSL library to use the auth serializer.
Please install by:
$ pip install pyOpenSSL
"""
SETTING_MISSING = """\
Sorry, but you have to configure the
* CELERY_SECURITY_KEY
* CELERY_SECURITY_CERTIFICATE, and the
* CELERY_SECURITY_CERT_STORE
configuration settings to use the auth serializer.
Please see the configuration reference for more information.
"""
def disable_untrusted_serializers(whitelist=None):
    """Disable every registered serializer that is not in ``whitelist``.

    ``whitelist`` may list serializer names or content types; ``None``
    means nothing is trusted and every decoder is disabled.
    """
    trusted = set(whitelist or [])
    for name in set(registry._decoders) - trusted:
        registry.disable(name)
def setup_security(allowed_serializers=None, key=None, cert=None, store=None,
                   digest="sha1", serializer="json"):
    """Setup the message-signing serializer.

    Disables untrusted serializers and if configured to use the ``auth``
    serializer will register the auth serializer with the provided settings
    into the Kombu serializer registry.

    :keyword allowed_serializers: List of serializer names, or content_types
        that should be exempt from being disabled.

    :keyword key: Name of private key file to use.
        Defaults to the :setting:`CELERY_SECURITY_KEY` setting.

    :keyword cert: Name of certificate file to use.
        Defaults to the :setting:`CELERY_SECURITY_CERTIFICATE` setting.

    :keyword store: Directory containing certificates.
        Defaults to the :setting:`CELERY_SECURITY_CERT_STORE` setting.

    :keyword digest: Digest algorithm used when signing messages.
        Default is ``sha1``.

    :keyword serializer: Serializer used to encode messages after
        they have been signed.  See :setting:`CELERY_TASK_SERIALIZER` for
        the serializers supported.
        Default is ``json``.

    """
    # NOTE(review): ``digest`` and ``serializer`` are accepted but never
    # referenced in this body -- confirm whether they should be forwarded
    # to register_auth or removed from the signature.
    disable_untrusted_serializers(allowed_serializers)
    conf = current_app.conf
    # Registration is only needed when the app is configured to sign tasks.
    if conf.CELERY_TASK_SERIALIZER != "auth":
        return

    # pyOpenSSL is an optional dependency; fail with a helpful message.
    try:
        from OpenSSL import crypto  # noqa
    except ImportError:
        raise ImproperlyConfigured(SSL_NOT_INSTALLED)

    # Explicit arguments win over the app configuration.
    key = key or conf.CELERY_SECURITY_KEY
    cert = cert or conf.CELERY_SECURITY_CERTIFICATE
    store = store or conf.CELERY_SECURITY_CERT_STORE

    if any(not v for v in (key, cert, store)):
        raise ImproperlyConfigured(SETTING_MISSING)

    # Nested with-statements kept for Python 2.6 compatibility
    # (module uses ``from __future__ import with_statement``).
    with open(key) as kf:
        with open(cert) as cf:
            register_auth(kf.read(), cf.read(), store)
| bsd-3-clause | e18db53abe8d1702b6ad4ce6e5dc54b7 | 32.037975 | 77 | 0.709579 | 4.278689 | false | true | false | false |
ets-labs/python-dependency-injector | setup.py | 1 | 4134 | """`Dependency injector` setup script."""
import os
import re
from setuptools import setup, Extension
# Defining setup variables:
defined_macros = dict()
defined_macros["CYTHON_CLINE_IN_TRACEBACK"] = 0
# Getting description:
with open("README.rst") as readme_file:
description = readme_file.read()
# Getting requirements:
with open("requirements.txt") as requirements_file:
requirements = requirements_file.readlines()
# Getting version:
with open("src/dependency_injector/__init__.py") as init_file:
version = re.search("__version__ = \"(.*?)\"", init_file.read()).group(1)
# Adding debug options:
if os.environ.get("DEPENDENCY_INJECTOR_DEBUG_MODE") == "1":
defined_macros["CYTHON_TRACE"] = 1
defined_macros["CYTHON_TRACE_NOGIL"] = 1
defined_macros["CYTHON_CLINE_IN_TRACEBACK"] = 1
setup(name="dependency-injector",
version=version,
description="Dependency injection framework for Python",
long_description=description,
author="Roman Mogylatov",
author_email="rmogilatov@gmail.com",
maintainer="Roman Mogylatov",
maintainer_email="rmogilatov@gmail.com",
url="https://github.com/ets-labs/python-dependency-injector",
download_url="https://pypi.python.org/pypi/dependency_injector",
packages=[
"dependency_injector",
"dependency_injector.ext",
],
package_dir={
"": "src",
},
package_data={
"dependency_injector": ["*.pxd", "*.pyi", "py.typed"],
},
ext_modules=[
Extension("dependency_injector.containers",
["src/dependency_injector/containers.c"],
define_macros=list(defined_macros.items()),
extra_compile_args=["-O2"]),
Extension("dependency_injector.providers",
["src/dependency_injector/providers.c"],
define_macros=list(defined_macros.items()),
extra_compile_args=["-O2"]),
Extension("dependency_injector._cwiring",
["src/dependency_injector/_cwiring.c"],
define_macros=list(defined_macros.items()),
extra_compile_args=["-O2"]),
],
install_requires=requirements,
extras_require={
"yaml": [
"pyyaml",
],
"pydantic": [
"pydantic",
],
"flask": [
"flask",
],
"aiohttp": [
"aiohttp",
],
},
zip_safe=True,
license="BSD New",
platforms=["any"],
keywords=[
"Dependency injection",
"DI",
"Inversion of Control",
"IoC",
"Factory",
"Singleton",
"Design patterns",
"Flask",
],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Framework :: AsyncIO",
"Framework :: Bottle",
"Framework :: Django",
"Framework :: Flask",
"Framework :: Pylons",
"Framework :: Pyramid",
"Framework :: Pytest",
"Framework :: TurboGears",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
])
| bsd-3-clause | 5f75ac7c179e44d6625af614eac36184 | 32.885246 | 77 | 0.552008 | 4.283938 | false | false | false | false |
ets-labs/python-dependency-injector | tests/unit/containers/test_traversal_py3.py | 3 | 1606 | """Container traversing tests."""
from dependency_injector import containers, providers
class Container(containers.DeclarativeContainer):
    # Fixture container for the traversal tests: one delegated factory whose
    # two keyword arguments are Resource providers, giving a small nested
    # provider graph (1 factory + 2 resources = 3 providers in total).
    obj_factory = providers.DelegatedFactory(
        dict,
        foo=providers.Resource(
            dict,
            foo="bar"
        ),
        bar=providers.Resource(
            dict,
            foo="bar"
        )
    )
def test_nested_providers():
    """Instance traversal yields the factory plus both resource kwargs."""
    container = Container()
    found = list(container.traverse())
    assert len(found) == 3
    factory = container.obj_factory
    for provider in (factory, factory.kwargs["foo"], factory.kwargs["bar"]):
        assert provider in found
def test_nested_providers_with_filtering():
    """Filtering by Resource keeps only the two resource providers."""
    container = Container()
    resources = list(container.traverse(types=[providers.Resource]))
    assert len(resources) == 2
    kwargs = container.obj_factory.kwargs
    assert kwargs["foo"] in resources
    assert kwargs["bar"] in resources
def test_container_cls_nested_providers():
    """Class-level traversal behaves like instance-level traversal."""
    found = list(Container.traverse())
    assert len(found) == 3
    factory = Container.obj_factory
    for provider in (factory, factory.kwargs["foo"], factory.kwargs["bar"]):
        assert provider in found
def test_container_cls_nested_providers_with_filtering():
    """Class-level traversal honours the ``types`` filter."""
    resources = list(Container.traverse(types=[providers.Resource]))
    assert len(resources) == 2
    kwargs = Container.obj_factory.kwargs
    assert kwargs["foo"] in resources
    assert kwargs["bar"] in resources
| bsd-3-clause | 0b1b217fded7b9906ecac22ae51471d2 | 29.301887 | 72 | 0.693649 | 4.128535 | false | true | false | false |
pbrod/numdifftools | src/numdifftools/tests/test_fornberg.py | 1 | 4538 | from __future__ import absolute_import, print_function
import numpy as np
from numpy.testing import assert_allclose # @UnresolvedImport
from hypothesis import given, note, settings, strategies as st #, reproduce_failure
from numdifftools.example_functions import function_names, get_function
from numdifftools.fornberg import (fd_weights, fd_weights_all, derivative,
fd_derivative,
CENTRAL_WEIGHTS_AND_POINTS)
# @reproduce_failure('5.36.1', b'AAJRcYUpZHQ=')
# @reproduce_failure('4.32.2', b'AAJRcYUpZHQ=')
@settings(deadline=800.0)
@given(st.floats(min_value=1e-1, max_value=0.98))
def test_high_order_derivative(x):
    """Property test: derivatives up to order 20 match analytic values.

    For a hypothesis-drawn x in (0.1, 0.98), compute all derivatives of each
    example function at once with the Fornberg method and compare order n
    against the analytic n-th derivative, using the method's own error
    estimate (capped by a relative tolerance) as the allowed deviation.
    """
    # small_radius = ['sqrt', 'log', 'log2', 'log10', 'arccos', 'log1p',
    #                 'arcsin', 'arctan', 'arcsinh', 'tan', 'tanh',
    #                 'arctanh', 'arccosh']
    r = 0.0059
    n_max = 20
    y = x
    for name in function_names + ['arccosh', 'arctanh']:
        f, true_df = get_function(name, n=1)
        if name == 'arccosh':
            # arccosh is only defined for arguments >= 1; shift into domain.
            y = y + 1

        vals, info = derivative(f, y, r=r, n=n_max, full_output=True, step_ratio=1.6)
        for n in range(1, n_max):
            f, true_df = get_function(name, n=n)
            if true_df is None:
                # No analytic derivative of this order available; skip.
                continue

            tval = true_df(y)

            # Tolerance: the method's own error estimate, but never larger
            # than a relative 1e-6 (floored at 1e-8 absolute).
            aerr0 = info.error_estimate[n] + 1e-15
            aerr = min(aerr0, max(np.abs(tval) * 1e-6, 1e-8))
            try:
                assert_allclose(np.real(vals[n]), tval, rtol=1e-6, atol=aerr)
            except AssertionError as error:
                # Emit diagnostics both to stdout and hypothesis' notes
                # before re-raising, to make shrunk failures debuggable.
                print(n, name, y, vals[n], tval, info.iterations, aerr0, aerr)
                note("{}, {}, {}, {}, {}, {}, {}, {}".format(
                    n, name, y, vals[n], tval, info.iterations, aerr0, aerr))
                raise error
def test_all_weights():
    """fd_weights_all on the 5-point stencil [-2..2] reproduces the classic
    central-difference weights for derivative orders 0 through 4."""
    w = fd_weights_all(range(-2, 3), n=4)
    print(w)
    # Rows are derivative orders 0..4; columns are stencil points -2..2.
    true_w = [[0., 0., 1., 0., 0.],
              [0.0833333333333, -0.6666666666667, 0., 0.6666666666667,
               -0.0833333333333],
              [-0.0833333333333, 1.333333333333, -2.5, 1.333333333333,
               -0.083333333333],
              [-0.5, 1., 0., -1., 0.5],
              [1., -4., 6., -4., 1.]]
    assert_allclose(w, true_w, atol=1e-12)
def test_weights():
    """fd_weights reproduces every tabulated central weight set.

    Keys of CENTRAL_WEIGHTS_AND_POINTS are (derivative_order, num_points);
    values are the (weights, points) pairs the computed weights must match.
    """
    for name in CENTRAL_WEIGHTS_AND_POINTS:
        # print(name)
        n, m = name
        w, x = CENTRAL_WEIGHTS_AND_POINTS[name]
        assert len(w) == m
        weights = fd_weights(np.array(x, dtype=float), 0.0, n=n)
        assert_allclose(weights, w, atol=1e-15)
def test_fd_derivative():
    """fd_derivative of exp(x) is exp(x) for derivative orders 1..6.

    exp is its own derivative, so the interior, left-edge and right-edge
    portions of the computed derivative are each compared against fx.
    """
    x = np.linspace(-1, 1, 25)
    fx = np.exp(x)
    for n in range(1, 7):
        df = fd_derivative(fx, x, n=n)
        # Edge regions use one-sided stencils and get a looser tolerance.
        m = n // 2 + 2
        assert_allclose(df[m:-m], fx[m:-m], atol=1e-5)
        assert_allclose(df[-m:], fx[-m:], atol=1e-4)
        assert_allclose(df[:m], fx[:m], atol=1e-4)
class ExampleFunctions(object):
    """Analytic example functions used to exercise the derivative routines.

    Every member is a static function of a (possibly complex) scalar or
    array argument ``z``; several have poles or removable singularities
    at ``z = 0``.
    """

    @staticmethod
    def fun0(z):
        """exp(z)"""
        return np.exp(z)

    @staticmethod
    def fun1(z):
        """exp(z) / (sin(z)**3 + cos(z)**3)"""
        return np.exp(z) / (np.cos(z) ** 3 + np.sin(z) ** 3)

    @staticmethod
    def fun2(z):
        """exp(i*z)"""
        return np.exp(1.0j * z)

    @staticmethod
    def fun3(z):
        """z**6"""
        return z**6

    @staticmethod
    def fun4(z):
        """z * (0.5 + 1/(exp(z) - 1))"""
        return z * (0.5 + 1.0 / np.expm1(z))

    @staticmethod
    def fun5(z):
        """tan(z)"""
        return np.tan(z)

    @staticmethod
    def fun6(z):
        """i + z + i*z**2"""
        return 1.0j + z + 1.0j * z**2

    @staticmethod
    def fun7(z):
        """1 / (1 - z), simple pole at z = 1."""
        return 1.0 / (1.0 - z)

    @staticmethod
    def fun8(z):
        """(1 + z)**10 * log(1 + z)"""
        return (1 + z) ** 10 * np.log1p(z)

    @staticmethod
    def fun9(z):
        """50 + 1/(1 - z)"""
        return 50 + 1.0 / (1 - z)

    @staticmethod
    def fun10(z):
        """1 / (1 - z)"""
        return 1.0 / (1 - z)

    @staticmethod
    def fun11(z):
        """sqrt(z), branch point at z = 0."""
        return np.sqrt(z)

    @staticmethod
    def fun12(z):
        """arcsinh(z)"""
        return np.arcsinh(z)

    @staticmethod
    def fun13(z):
        """cos(z)"""
        return np.cos(z)

    @staticmethod
    def fun14(z):
        """log(1 + z)"""
        return np.log1p(z)
def test_low_order_derivative_on_example_functions():
    """Smoke test: derivative() runs on every ExampleFunctions member.

    No values are asserted here -- the per-order results and the method's
    error estimates are printed for manual inspection.
    """
    for j in range(15):
        fun = getattr(ExampleFunctions, 'fun{}'.format(j))
        der, info = derivative(fun, z0=0., r=0.06, n=10, max_iter=30,
                               full_output=True, step_ratio=1.6)
        print(info)
        print('answer:')
        msg = '{0:3d}: {1:24.18f} + {2:24.18f}j ({3:g})'
        print(info.function_count)
        for i, der_i in enumerate(der):
            err = info.error_estimate[i]
            print(msg.format(i, der_i.real, der_i.imag, err))
| bsd-3-clause | e6f9f1e511715c2a71a4f692b9087954 | 28.089744 | 85 | 0.524901 | 3.043595 | false | false | false | false |
pbrod/numdifftools | src/numdifftools/limits.py | 1 | 18529 | """
Created on 27. aug. 2015
@author: pab
Author: John D'Errico
e-mail: woodchips@rochester.rr.com
Release: 1.0
Release date: 5/23/2008
"""
from __future__ import absolute_import, division, print_function
from collections import namedtuple
from functools import partial
import warnings
import numpy as np
from numdifftools.step_generators import MinStepGenerator
from numdifftools.extrapolation import Richardson, dea3
def _assert(cond, msg):
if not cond:
raise ValueError(msg)
class CStepGenerator(MinStepGenerator):
    """
    Generates a sequence of steps

    where
        steps = base_step * step_nom * (exp(1j*dtheta) * step_ratio) ** (i + offset)
    for i = 0, 1, ..., num_steps-1

    Parameters
    ----------
    base_step : float, array-like, default None
        Defines the minimum step, if None, the value is set to EPS**(1/scale)
    step_ratio : real scalar, optional, default 4.0
        Ratio between sequential steps generated.
    num_steps : scalar integer, optional,
        defines number of steps generated.
        If None the value is 2 * int(round(16.0/log(abs(step_ratio)))) + 1
    step_nom :  default maximum(log(exp(1)+|x|), 1)
        Nominal step where x is supplied at runtime through the __call__ method.
    offset : real scalar, optional, default 0
        offset to the base step
    use_exact_steps : boolean, default True.
        If true make sure exact steps are generated.
    scale : real scalar, default 1.2
        scale used in base step.
    path : 'radial' or 'spiral'
        Specifies the type of path to take the limit along. Default 'radial'.
    dtheta: real scalar, default pi/8
        If the path is 'spiral' it will follow an exponential spiral into the
        limit, with angular steps at dtheta radians.

    """

    def __init__(self, base_step=None, step_ratio=4.0, num_steps=None, step_nom=None,
                 offset=0, scale=1.2, **options):
        # 'path' and 'dtheta' are specific to CStepGenerator; pop them from
        # options before delegating the remaining keywords to the parent.
        self.path = options.pop('path', 'radial')
        self.dtheta = options.pop('dtheta', np.pi / 8)
        super(CStepGenerator,
              self).__init__(base_step=base_step, step_ratio=step_ratio,
                             num_steps=num_steps, step_nom=step_nom, offset=offset, scale=scale,
                             **options)
        self._check_path()

    def _check_path(self):
        # Fail fast on unsupported path specifications.
        _assert(self.path in ['spiral', 'radial'], 'Invalid Path: {}'.format(str(self.path)))

    @property
    def step_ratio(self):
        """Ratio between sequential steps generated.

        For a spiral path (dtheta != 0) the ratio is rotated into the
        complex plane by exp(1j * dtheta)."""
        dtheta = self.dtheta
        _step_ratio = float(self._step_ratio)  # radial path
        if dtheta != 0:
            _step_ratio = np.exp(1j * dtheta) * _step_ratio  # a spiral path
        return _step_ratio

    @step_ratio.setter
    def step_ratio(self, step_ratio):
        self._step_ratio = step_ratio

    @property
    def dtheta(self):
        """Angular steps in radians used for the exponential spiral path."""
        # A radial path forces dtheta to zero regardless of the stored value.
        radial_path = self.path[0].lower() == 'r'
        return 0 if radial_path else self._dtheta

    @dtheta.setter
    def dtheta(self, dtheta):
        self._dtheta = dtheta

    @property
    def num_steps(self):
        """The number of steps generated"""
        if self._num_steps is None:
            # Default: choose enough steps that the sequence spans roughly
            # e**16 in magnitude for the given |step_ratio|.
            return 2 * int(np.round(16.0 / np.log(np.abs(self.step_ratio)))) + 1
        return self._num_steps

    @num_steps.setter
    def num_steps(self, num_steps):
        self._num_steps = num_steps
class _Limit(object):

    """Common methods and member variables"""

    # Result metadata returned alongside estimates by subclasses.
    info = namedtuple('info', ['error_estimate', 'final_step', 'index'])

    def __init__(self, step=None, **options):
        # The step setter below turns (step, options) into a step generator.
        self.step = step, options
        self.richardson = Richardson(step_ratio=1.6, step=1, order=1, num_terms=2)

    @staticmethod
    def _parse_step_options(step):
        # ``step`` may arrive as a bare value or as a (step, options) tuple;
        # split the two apart.
        options = {}
        if isinstance(step, tuple) and isinstance(step[-1], dict):
            step, options = step
        return step, options

    @staticmethod
    def _step_generator(step, options):
        # A callable step is used as-is; anything else is wrapped in a
        # CStepGenerator.
        if hasattr(step, '__call__'):
            return step
        step_nom = None if step is None else 1
        return CStepGenerator(base_step=step, step_nom=step_nom, **options)

    @property
    def step(self):
        """The step spacing(s) used in the approximation"""
        return self._step

    @step.setter
    def step(self, step_options):
        step, options = self._parse_step_options(step_options)
        self._step = self._step_generator(step, options)

    @staticmethod
    def _get_arg_min(errors):
        # Return the flat indices of the column-wise minimum error.  When
        # several rows tie for the minimum, pick the middle one.
        shape = errors.shape
        try:
            arg_mins = np.nanargmin(errors, axis=0)
            min_errors = np.nanmin(errors, axis=0)
        except ValueError as msg:
            warnings.warn(str(msg))
            return np.arange(shape[1])

        for i, min_error in enumerate(min_errors):
            idx = np.flatnonzero(errors[:, i] == min_error)
            arg_mins[i] = idx[idx.size // 2]
        return np.ravel_multi_index((arg_mins, np.arange(shape[1])), shape)

    @staticmethod
    def _add_error_to_outliers(der, trim_fact=10):
        """
        discard any estimate that differs wildly from the
        median of all estimates. A factor of 10 to 1 in either
        direction is probably wild enough here. The actual
        trimming factor is defined as a parameter.
        """
        try:
            if np.any(np.isnan(der)):
                p25, median, p75 = np.nanpercentile(der, [25, 50, 75], axis=0)
            else:
                p25, median, p75 = np.percentile(der, [25, 50, 75], axis=0)
            iqr = np.abs(p75 - p25)
        except ValueError as msg:
            warnings.warn(str(msg))
            return 0 * der

        a_median = np.abs(median)
        # An estimate is an outlier if it is a factor trim_fact away from the
        # median (when the median is non-negligible), or outside the usual
        # 1.5*IQR whiskers.
        outliers = (((abs(der) < (a_median / trim_fact)) +
                    (abs(der) > (a_median * trim_fact))) * (a_median > 1e-8) +
                    ((der < p25 - 1.5 * iqr) + (p75 + 1.5 * iqr < der)))
        errors = outliers * np.abs(der - median)
        return errors

    @staticmethod
    def _get_best_estimate(der, errors, steps, shape):
        # Penalise outliers, then pick, per column, the estimate with the
        # smallest total error.
        errors += _Limit._add_error_to_outliers(der)
        idx = _Limit._get_arg_min(errors)
        final_step = steps.flat[idx].reshape(shape)
        err = errors.flat[idx].reshape(shape)
        return der.flat[idx].reshape(shape), _Limit.info(err, final_step, idx)

    @staticmethod
    def _wynn_extrapolate(der, steps):
        # Wynn's epsilon algorithm (dea3) consumes triples of successive
        # estimates; the first two steps have no corresponding output.
        der, errors = dea3(der[0:-2], der[1:-1], der[2:], symmetric=False)
        return der, errors, steps[2:]

    def _extrapolate(self, results, steps, shape):
        # Richardson extrapolation first, then (when enough estimates remain)
        # a Wynn epsilon pass to accelerate convergence further.
        # if len(results) > 2:
        #     der0, errors0, steps0 = self._wynn_extrapolate(results, steps)
        #     if len(der0) > 0:
        #         der2, errors2, steps2 = self._wynn_extrapolate(der0, steps0)
        # else:
        der1, errors1, steps = self.richardson(results, steps)
        if len(der1) > 2:
            der1, errors1, steps = self._wynn_extrapolate(der1, steps)
        der, info = self._get_best_estimate(der1, errors1, steps, shape)
        return der, info

    @staticmethod
    def _vstack(sequence, steps):
        # Stack per-step function values (and the matching broadcast steps)
        # into 2-D arrays: one row per step, one column per element of x.
        original_shape = np.shape(sequence[0])
        f_del = np.vstack([np.ravel(r) for r in sequence])
        one = np.ones(original_shape)
        h = np.vstack([np.ravel(one * step) for step in steps])
        _assert(f_del.size == h.size, 'fun did not return data of correct '
                'size (it must be vectorized)')
        return f_del, h, original_shape
class Limit(_Limit):

    """
    Compute limit of a function at a given point

    Parameters
    ----------
    fun : callable
        function fun(z, `*args`, `**kwds`) to compute the limit for z->z0.
        The function, fun, is assumed to return a result of the same shape and
        size as its input, `z`.
    step: float, complex, array-like or StepGenerator object, optional
        Defines the spacing used in the approximation.
        Default is CStepGenerator(base_step=step, **options)
    method : {'above', 'below'}
        defines if the limit is taken from `above` or `below`
    order: positive scalar integer, optional.
        defines the order of approximation used to find the specified limit.
        The order must be member of [1 2 3 4 5 6 7 8]. 4 is a good compromise.
    full_output: bool
        If true return additional info.
    options:
        options to pass on to CStepGenerator

    Returns
    -------
    limit_fz: array like
        estimated limit of f(z) as z --> z0
    info:
        Only given if full_output is True and contains the following:

        error estimate: ndarray
            95 % uncertainty estimate around the limit, such that
            abs(limit_fz - lim z->z0 f(z)) < error_estimate
        final_step: ndarray
            final step used in approximation

    Notes
    -----
    `Limit` computes the limit of a given function at a specified
    point, z0. When the function is evaluable at the point in question,
    this is a simple task. But when the function cannot be evaluated
    at that location due to a singularity, you may need a tool to
    compute the limit. `Limit` does this, as well as produce an
    uncertainty estimate in the final result.

    The methods used by `Limit` are Richardson extrapolation in a combination
    with Wynn's epsilon algorithm which also yield an error estimate.
    The user can specify the method order, as well as the path into
    z0. z0 may be real or complex. `Limit` uses a proportionally cascaded
    series of function evaluations, moving away from your point of evaluation
    along a path along the real line (or in the complex plane for complex z0 or
    step.) The `step_ratio` is the ratio used between sequential steps. The
    sign of step allows you to specify a limit from above or below. Negative
    values of step will cause the limit to be taken approaching z0 from below.

    A smaller `step_ratio` means that `Limit` will take more function
    evaluations to evaluate the limit, but the result will potentially be less
    accurate. The `step_ratio` MUST be a scalar larger than 1. A value in the
    range [2,100] is recommended. 4 seems a good compromise.

    Examples
    --------
    Compute the limit of sin(x)./x, at x == 0. The limit is 1.

    >>> import numpy as np
    >>> from numdifftools.limits import Limit
    >>> def f(x): return np.sin(x)/x
    >>> lim_f0, err = Limit(f, full_output=True)(0)
    >>> np.allclose(lim_f0, 1)
    True
    >>> np.allclose(err.error_estimate, 1.77249444610966e-15)
    True

    Compute the derivative of cos(x) at x == pi/2. It should
    be -1. The limit will be taken as a function of the
    differential parameter, dx.

    >>> x0 = np.pi/2;
    >>> def g(x): return (np.cos(x0+x)-np.cos(x0))/x
    >>> lim_g0, err = Limit(g, full_output=True)(0)
    >>> np.allclose(lim_g0, -1)
    True
    >>> err.error_estimate < 1e-14
    True

    Compute the residue at a first order pole at z = 0
    The function 1./(1-exp(2*z)) has a pole at z == 0.
    The residue is given by the limit of z*fun(z) as z --> 0.
    Here, that residue should be -0.5.

    >>> def h(z): return -z/(np.expm1(2*z))
    >>> lim_h0, err = Limit(h, full_output=True)(0)
    >>> np.allclose(lim_h0, -0.5)
    True
    >>> err.error_estimate < 1e-14
    True

    Compute the residue of function 1./sin(z)**2 at z = 0.
    This pole is of second order thus the residue is given by the limit of
    z**2*fun(z) as z --> 0.

    >>> def g(z): return z**2/(np.sin(z)**2)
    >>> lim_gpi, err = Limit(g, full_output=True)(0)
    >>> np.allclose(lim_gpi, 1)
    True
    >>> err.error_estimate < 1e-14
    True

    A more difficult limit is one where there is significant
    subtractive cancellation at the limit point. In the following
    example, the cancellation is second order. The true limit
    should be 0.5.

    >>> def k(x): return (x*np.exp(x)-np.expm1(x))/x**2
    >>> lim_k0,err = Limit(k, full_output=True)(0)
    >>> np.allclose(lim_k0, 0.5)
    True
    >>> err.error_estimate < 1.0e-8
    True

    >>> def h(x): return (x-np.sin(x))/x**3
    >>> lim_h0, err = Limit(h, full_output=True)(0)
    >>> np.allclose(lim_h0, 1./6)
    True
    >>> err.error_estimate < 1e-8
    True

    """

    def __init__(self, fun, step=None, method='above', order=4, full_output=False, **options):
        super(Limit, self).__init__(step=step, **options)
        self.fun = fun
        self.method = method
        self.order = order
        self.full_output = full_output

    def _fun(self, z, d_z, args, kwds):
        # Evaluate the user function at the perturbed point z + d_z.
        return self.fun(z + d_z, *args, **kwds)

    def _get_steps(self, x_i):
        return list(self.step(x_i))  # pylint: disable=not-callable

    def _set_richardson_rule(self, step_ratio, num_terms=2):
        # Rebuild the Richardson extrapolator to match the current step
        # generator's ratio and the requested number of terms.
        self.richardson = Richardson(step_ratio=step_ratio, step=1, order=1,
                                     num_terms=num_terms)

    def _lim(self, f, z):
        # Evaluate f along the sequence of shrinking steps (signed according
        # to the method) and extrapolate the results towards step -> 0.
        sign = dict(forward=1, above=1, backward=-1, below=-1)[self.method]
        steps = [sign * step for step in self.step(z)]  # pylint: disable=not-callable

        # pylint: disable=no-member
        self._set_richardson_rule(self.step.step_ratio, self.order + 1)
        sequence = [f(z, h) for h in steps]
        results = self._vstack(sequence, steps)
        lim_fz, info = self._extrapolate(*results)
        return lim_fz, info

    def limit(self, x, *args, **kwds):
        """Return lim f(z) as z-> x"""
        z = np.asarray(x)
        f = partial(self._fun, args=args, kwds=kwds)
        f_z, info = self._lim(f, z)
        if self.full_output:
            return f_z, info
        return f_z

    def _call_lim(self, f_z, z, f):
        # Direct evaluation succeeded everywhere except where f_z is NaN;
        # only those entries need the (expensive) limit machinery.
        err = np.zeros_like(f_z, dtype=float)
        final_step = np.zeros_like(f_z)
        index = np.zeros_like(f_z, dtype=int)
        k = np.flatnonzero(np.isnan(f_z))
        if k.size > 0:
            lim_fz, info1 = self._lim(f, z.flat[k])
            zero = np.zeros(1, dtype=np.result_type(lim_fz))
            f_z = np.where(np.isnan(f_z), zero, f_z)
            np.put(f_z, k, lim_fz)
            if self.full_output:
                final_step = np.where(np.isnan(f_z), zero, final_step)
                np.put(final_step, k, info1.final_step)
                np.put(index, k, info1.index)
                np.put(err, k, info1.error_estimate)
        return f_z, self.info(err, final_step, index)

    def __call__(self, x, *args, **kwds):
        z = np.asarray(x)
        f = partial(self._fun, args=args, kwds=kwds)
        # Try plain evaluation first; divide/invalid warnings are silenced
        # because NaN results are expected at singularities and handled by
        # _call_lim.
        with np.errstate(divide='ignore', invalid='ignore'):
            f_z = f(z, 0)
            f_z, info = self._call_lim(f_z, z, f)

        if self.full_output:
            return f_z, info
        return f_z
class Residue(Limit):
"""
Compute residue of a function at a given point
Parameters
----------
fun : callable
function fun(z, `*args`, `**kwds`) to compute the Residue at z=z0.
The function, fun, is assumed to return a result of the same shape and
size as its input, `z`.
step: float, complex, array-like or StepGenerator object, optional
Defines the spacing used in the approximation.
Default is CStepGenerator(base_step=step, **options)
method : {'above', 'below'}
defines if the limit is taken from `above` or `below`
order: positive scalar integer, optional.
defines the order of approximation used to find the specified limit.
The order must be member of [1 2 3 4 5 6 7 8]. 4 is a good compromise.
pole_order : scalar integer
specifies the order of the pole at z0.
full_output: bool
If true return additional info.
options:
options to pass on to CStepGenerator
Returns
-------
res_fz: array like
estimated residue, i.e., limit of f(z)*(z-z0)**pole_order as z --> z0
When the residue is estimated as approximately zero,
the wrong order pole may have been specified.
info: namedtuple,
Only given if full_output is True and contains the following:
error estimate: ndarray
95 % uncertainty estimate around the residue, such that
abs(res_fz - lim z->z0 f(z)*(z-z0)**pole_order) < error_estimate
Large uncertainties here suggest that the wrong order
pole was specified for f(z0).
final_step: ndarray
final step used in approximation
Notes
-----
Residue computes the residue of a given function at a simple first order
pole, or at a second order pole.
The methods used by residue are polynomial extrapolants, which also yield
an error estimate. The user can specify the method order, as well as the
order of the pole.
z0 - scalar point at which to compute the residue. z0 may be
real or complex.
See the document DERIVEST.pdf for more explanation of the
algorithms behind the parameters of Residue. In most cases,
the user should never need to specify anything other than possibly
the PoleOrder.
Examples
--------
A first order pole at z = 0
>>> import numpy as np
>>> from numdifftools.limits import Residue
>>> def f(z): return -1./(np.expm1(2*z))
>>> res_f, info = Residue(f, full_output=True)(0)
>>> np.allclose(res_f, -0.5)
True
>>> info.error_estimate < 1e-14
True
A second order pole around z = 0 and z = pi
>>> def h(z): return 1.0/np.sin(z)**2
>>> res_h, info = Residue(h, full_output=True, pole_order=2)([0, np.pi])
>>> np.allclose(res_h, 1)
True
>>> (info.error_estimate < 1e-10).all()
True
"""
def __init__(self, f, step=None, method='above', order=None, pole_order=1,
full_output=False, **options):
if order is None:
# MethodOrder will always = pole_order + 2
order = pole_order + 2
_assert(pole_order < order, 'order must be at least pole_order+1.')
self.pole_order = pole_order
super(Residue, self).__init__(f, step=step, method=method, order=order,
full_output=full_output, **options)
def _fun(self, z, d_z, args, kwds):
return self.fun(z + d_z, *args, **kwds) * (d_z ** self.pole_order)
def __call__(self, x, *args, **kwds):
return self.limit(x, *args, **kwds)
if __name__ == '__main__':
from numdifftools.testing import test_docstrings
test_docstrings(__file__)
| bsd-3-clause | 572b97d43cb38b9b7d1b0467a4cda417 | 34.632692 | 96 | 0.601166 | 3.507952 | false | false | false | false |
ets-labs/python-dependency-injector | examples/providers/provided_instance_complex.py | 3 | 1080 | """Complex example of the injecting of provided instance attributes and items."""
from dependency_injector import containers, providers
class Service:
def __init__(self, value):
self.value = value
def get_value(self):
return self.value
class Container(containers.DeclarativeContainer):
service = providers.Singleton(Service, value=42)
dependency = providers.Object(
{
"foo": {
"bar": 10,
"baz": lambda arg: {"arg": arg}
},
},
)
demo_list = providers.List(
dependency.provided["foo"]["bar"],
dependency.provided["foo"]["baz"].call(22)["arg"],
dependency.provided["foo"]["baz"].call(service)["arg"],
dependency.provided["foo"]["baz"].call(service)["arg"].value,
dependency.provided["foo"]["baz"].call(service)["arg"].get_value.call(),
)
if __name__ == "__main__":
container = Container()
assert container.demo_list() == [
10,
22,
container.service(),
42,
42,
]
| bsd-3-clause | c3dee473225e6745db54eeefa9fa6d5f | 22.478261 | 81 | 0.556481 | 4.268775 | false | false | false | false |
ets-labs/python-dependency-injector | examples/providers/dict_non_string_keys.py | 3 | 1243 | """`Dict` provider with non-string keys example."""
import dataclasses
from typing import Dict
from dependency_injector import containers, providers
class Command:
...
class CommandA(Command):
...
class CommandB(Command):
...
class Handler:
...
class HandlerA(Handler):
...
class HandlerB(Handler):
...
@dataclasses.dataclass
class Dispatcher:
command_handlers: Dict[Command, Handler]
class Container(containers.DeclarativeContainer):
dispatcher_factory = providers.Factory(
Dispatcher,
command_handlers=providers.Dict({
CommandA: providers.Factory(HandlerA),
CommandB: providers.Factory(HandlerB),
}),
)
if __name__ == "__main__":
container = Container()
dispatcher = container.dispatcher_factory()
assert isinstance(dispatcher.command_handlers, dict)
assert isinstance(dispatcher.command_handlers[CommandA], HandlerA)
assert isinstance(dispatcher.command_handlers[CommandB], HandlerB)
# Call "dispatcher = container.dispatcher_factory()" is equivalent to:
# dispatcher = Dispatcher(
# command_handlers={
# CommandA: HandlerA(),
# CommandB: HandlerB(),
# },
# )
| bsd-3-clause | b1730d2e2af8cc7eb83ec7cef650aecf | 18.421875 | 74 | 0.655672 | 4.227891 | false | false | false | false |
ets-labs/python-dependency-injector | tests/unit/providers/test_aggregate_py2_py3.py | 2 | 8430 | """Aggregate provider tests."""
from dependency_injector import providers, errors
from pytest import fixture, mark, raises
class Example:
def __init__(self, init_arg1=None, init_arg2=None, init_arg3=None, init_arg4=None):
self.init_arg1 = init_arg1
self.init_arg2 = init_arg2
self.init_arg3 = init_arg3
self.init_arg4 = init_arg4
self.attribute1 = None
self.attribute2 = None
class ExampleA(Example):
pass
class ExampleB(Example):
pass
@fixture
def factory_a():
return providers.Factory(ExampleA)
@fixture
def factory_b():
return providers.Factory(ExampleB)
@fixture
def aggregate_type():
return "default"
@fixture
def aggregate(aggregate_type, factory_a, factory_b):
if aggregate_type == "empty":
return providers.Aggregate()
elif aggregate_type == "non-string-keys":
return providers.Aggregate({
ExampleA: factory_a,
ExampleB: factory_b,
})
elif aggregate_type == "default":
return providers.Aggregate(
example_a=factory_a,
example_b=factory_b,
)
else:
raise ValueError("Unknown factory type \"{0}\"".format(aggregate_type))
def test_is_provider(aggregate):
assert providers.is_provider(aggregate) is True
def test_is_delegated_provider(aggregate):
assert providers.is_delegated(aggregate) is True
@mark.parametrize("aggregate_type", ["non-string-keys"])
def test_init_with_non_string_keys(aggregate, factory_a, factory_b):
object_a = aggregate(ExampleA, 1, 2, init_arg3=3, init_arg4=4)
object_b = aggregate(ExampleB, 11, 22, init_arg3=33, init_arg4=44)
assert isinstance(object_a, ExampleA)
assert object_a.init_arg1 == 1
assert object_a.init_arg2 == 2
assert object_a.init_arg3 == 3
assert object_a.init_arg4 == 4
assert isinstance(object_b, ExampleB)
assert object_b.init_arg1 == 11
assert object_b.init_arg2 == 22
assert object_b.init_arg3 == 33
assert object_b.init_arg4 == 44
assert aggregate.providers == {
ExampleA: factory_a,
ExampleB: factory_b,
}
def test_init_with_not_a_factory():
with raises(errors.Error):
providers.Aggregate(
example_a=providers.Factory(ExampleA),
example_b=object(),
)
@mark.parametrize("aggregate_type", ["empty"])
def test_init_optional_providers(aggregate, factory_a, factory_b):
aggregate.set_providers(
example_a=factory_a,
example_b=factory_b,
)
assert aggregate.providers == {
"example_a": factory_a,
"example_b": factory_b,
}
assert isinstance(aggregate("example_a"), ExampleA)
assert isinstance(aggregate("example_b"), ExampleB)
@mark.parametrize("aggregate_type", ["non-string-keys"])
def test_set_providers_with_non_string_keys(aggregate, factory_a, factory_b):
aggregate.set_providers({
ExampleA: factory_a,
ExampleB: factory_b,
})
object_a = aggregate(ExampleA, 1, 2, init_arg3=3, init_arg4=4)
object_b = aggregate(ExampleB, 11, 22, init_arg3=33, init_arg4=44)
assert isinstance(object_a, ExampleA)
assert object_a.init_arg1 == 1
assert object_a.init_arg2 == 2
assert object_a.init_arg3 == 3
assert object_a.init_arg4 == 4
assert isinstance(object_b, ExampleB)
assert object_b.init_arg1 == 11
assert object_b.init_arg2 == 22
assert object_b.init_arg3 == 33
assert object_b.init_arg4 == 44
assert aggregate.providers == {
ExampleA: factory_a,
ExampleB: factory_b,
}
def test_set_providers_returns_self(aggregate, factory_a):
assert aggregate.set_providers(example_a=factory_a) is aggregate
@mark.parametrize("aggregate_type", ["empty"])
def test_init_optional_providers(aggregate, factory_a, factory_b):
aggregate.set_providers(
example_a=factory_a,
example_b=factory_b,
)
assert aggregate.providers == {
"example_a": factory_a,
"example_b": factory_b,
}
assert isinstance(aggregate("example_a"), ExampleA)
assert isinstance(aggregate("example_b"), ExampleB)
@mark.parametrize("aggregate_type", ["non-string-keys"])
def test_set_providers_with_non_string_keys(aggregate, factory_a, factory_b):
aggregate.set_providers({
ExampleA: factory_a,
ExampleB: factory_b,
})
object_a = aggregate(ExampleA, 1, 2, init_arg3=3, init_arg4=4)
object_b = aggregate(ExampleB, 11, 22, init_arg3=33, init_arg4=44)
assert isinstance(object_a, ExampleA)
assert object_a.init_arg1 == 1
assert object_a.init_arg2 == 2
assert object_a.init_arg3 == 3
assert object_a.init_arg4 == 4
assert isinstance(object_b, ExampleB)
assert object_b.init_arg1 == 11
assert object_b.init_arg2 == 22
assert object_b.init_arg3 == 33
assert object_b.init_arg4 == 44
assert aggregate.providers == {
ExampleA: factory_a,
ExampleB: factory_b,
}
def test_set_providers_returns_self(aggregate, factory_a):
assert aggregate.set_providers(example_a=factory_a) is aggregate
def test_call(aggregate):
object_a = aggregate("example_a", 1, 2, init_arg3=3, init_arg4=4)
object_b = aggregate("example_b", 11, 22, init_arg3=33, init_arg4=44)
assert isinstance(object_a, ExampleA)
assert object_a.init_arg1 == 1
assert object_a.init_arg2 == 2
assert object_a.init_arg3 == 3
assert object_a.init_arg4 == 4
assert isinstance(object_b, ExampleB)
assert object_b.init_arg1 == 11
assert object_b.init_arg2 == 22
assert object_b.init_arg3 == 33
assert object_b.init_arg4 == 44
def test_call_factory_name_as_kwarg(aggregate):
object_a = aggregate(
factory_name="example_a",
init_arg1=1,
init_arg2=2,
init_arg3=3,
init_arg4=4,
)
assert isinstance(object_a, ExampleA)
assert object_a.init_arg1 == 1
assert object_a.init_arg2 == 2
assert object_a.init_arg3 == 3
assert object_a.init_arg4 == 4
def test_call_no_factory_name(aggregate):
with raises(TypeError):
aggregate()
def test_call_no_such_provider(aggregate):
with raises(errors.NoSuchProviderError):
aggregate("unknown")
def test_overridden(aggregate):
with raises(errors.Error):
aggregate.override(providers.Object(object()))
def test_getattr(aggregate, factory_a, factory_b):
assert aggregate.example_a is factory_a
assert aggregate.example_b is factory_b
def test_getattr_no_such_provider(aggregate):
with raises(errors.NoSuchProviderError):
aggregate.unknown
def test_providers(aggregate, factory_a, factory_b):
assert aggregate.providers == dict(
example_a=factory_a,
example_b=factory_b,
)
def test_deepcopy(aggregate):
provider_copy = providers.deepcopy(aggregate)
assert aggregate is not provider_copy
assert isinstance(provider_copy, type(aggregate))
assert aggregate.example_a is not provider_copy.example_a
assert isinstance(aggregate.example_a, type(provider_copy.example_a))
assert aggregate.example_a.cls is provider_copy.example_a.cls
assert aggregate.example_b is not provider_copy.example_b
assert isinstance(aggregate.example_b, type(provider_copy.example_b))
assert aggregate.example_b.cls is provider_copy.example_b.cls
@mark.parametrize("aggregate_type", ["non-string-keys"])
def test_deepcopy_with_non_string_keys(aggregate):
provider_copy = providers.deepcopy(aggregate)
assert aggregate is not provider_copy
assert isinstance(provider_copy, type(aggregate))
assert aggregate.providers[ExampleA] is not provider_copy.providers[ExampleA]
assert isinstance(aggregate.providers[ExampleA], type(provider_copy.providers[ExampleA]))
assert aggregate.providers[ExampleA].provides is provider_copy.providers[ExampleA].provides
assert aggregate.providers[ExampleB] is not provider_copy.providers[ExampleB]
assert isinstance(aggregate.providers[ExampleB], type(provider_copy.providers[ExampleB]))
assert aggregate.providers[ExampleB].provides is provider_copy.providers[ExampleB].provides
def test_repr(aggregate):
assert repr(aggregate) == (
"<dependency_injector.providers."
"Aggregate({0}) at {1}>".format(
repr(aggregate.providers),
hex(id(aggregate)),
)
)
| bsd-3-clause | 7acf4cd872059600907a2ec254ba2172 | 27.869863 | 95 | 0.672716 | 3.490683 | false | true | false | false |
ets-labs/python-dependency-injector | examples/miniapps/movie-lister/movies/finders.py | 3 | 1326 | """Movie finders module."""
import csv
import sqlite3
from typing import Callable, List
from .entities import Movie
class MovieFinder:
def __init__(self, movie_factory: Callable[..., Movie]) -> None:
self._movie_factory = movie_factory
def find_all(self) -> List[Movie]:
raise NotImplementedError()
class CsvMovieFinder(MovieFinder):
def __init__(
self,
movie_factory: Callable[..., Movie],
path: str,
delimiter: str,
) -> None:
self._csv_file_path = path
self._delimiter = delimiter
super().__init__(movie_factory)
def find_all(self) -> List[Movie]:
with open(self._csv_file_path) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=self._delimiter)
return [self._movie_factory(*row) for row in csv_reader]
class SqliteMovieFinder(MovieFinder):
def __init__(
self,
movie_factory: Callable[..., Movie],
path: str,
) -> None:
self._database = sqlite3.connect(path)
super().__init__(movie_factory)
def find_all(self) -> List[Movie]:
with self._database as db:
rows = db.execute("SELECT title, year, director FROM movies")
return [self._movie_factory(*row) for row in rows]
| bsd-3-clause | 4c847e86359c9512628b5a02c88923a0 | 25.52 | 73 | 0.58371 | 4.030395 | false | false | false | false |
sphinx-gallery/sphinx-gallery | examples/plot_1_exp.py | 3 | 1109 | # -*- coding: utf-8 -*-
"""
Plotting the exponential function
=================================
This example demonstrates how to import a local module and how images are
stacked when two plots are created in one code block. The variable ``N`` from
the example 'Local module' (file ``local_module.py``) is imported in the code
below. Further, note that when there is only one code block in an example, the
output appears before the code block.
"""
# Code source: Óscar Nájera
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
# You can use modules local to the example being run, here we import
# N from local_module
from local_module import N # = 100
def main():
x = np.linspace(-1, 2, N)
y = np.exp(x)
plt.figure()
plt.plot(x, y)
plt.xlabel('$x$')
plt.ylabel('$\exp(x)$')
plt.title('Exponential function')
plt.figure()
plt.plot(x, -np.exp(-x))
plt.xlabel('$x$')
plt.ylabel('$-\exp(-x)$')
plt.title('Negative exponential\nfunction')
# To avoid matplotlib text output
plt.show()
if __name__ == '__main__':
main()
| bsd-3-clause | 2fd76bcd194e9c73f1e745434cd3e539 | 24.744186 | 78 | 0.64047 | 3.548077 | false | false | false | false |
rlpy/rlpy | tests/gibbs_policy.py | 1 | 1578 | from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import range
from rlpy.Domains import GridWorld
from rlpy.Representations import Tabular
from scipy.optimize import check_grad, approx_fprime
from rlpy.Policies.gibbs import GibbsPolicy
import numpy as np
def test_fdcheck_dlogpi():
"""Finite differences check for the dlogpi of the gibbs policy"""
logger = Logger()
domain = GridWorld()
representation = Tabular(logger=logger, domain=domain, discretization=20)
policy = GibbsPolicy(representation=representation, logger=logger)
def f(theta, s, a):
policy.representation.theta = theta
return np.log(policy.prob(s, a))
def df(theta, s, a):
policy.representation.theta = theta
return policy.dlogpi(s, a)
def df_approx(theta, s, a):
return approx_fprime(theta, f, 1e-10, s, a)
thetas = np.random.rand(10, len(representation.theta))
for i in range(10):
s = np.array([np.random.randint(4), np.random.randint(5)])
a = np.random.choice(domain.possibleActions(s))
for theta in thetas:
# print "s", s
# print "a", a
# print "f", f(theta, s, a)
# print "df", df(theta, s, a)
# print "df_approx", df_approx(theta, s, a)
error = check_grad(f, df, theta, s, a)
print(error)
assert np.abs(error) < 1e-6
| bsd-3-clause | 7b3bfb55526350ea5b11a45d3f134497 | 34.066667 | 77 | 0.647655 | 3.522321 | false | false | false | false |
rlpy/rlpy | examples/blocksworld/ggq-indep.py | 1 | 1623 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from rlpy.Domains import BlocksWorld
from rlpy.Agents import Greedy_GQ
from rlpy.Representations import *
from rlpy.Policies import eGreedy
from rlpy.Experiments import Experiment
import numpy as np
from hyperopt import hp
param_space = {'boyan_N0': hp.loguniform("boyan_N0", np.log(1e1), np.log(1e5)),
'initial_learn_rate': hp.loguniform("initial_learn_rate", np.log(5e-2), np.log(1))}
def make_experiment(
exp_id=1, path="./Results/Temp/{domain}/{agent}/{representation}/",
lambda_=0.,
boyan_N0=10.09,
initial_learn_rate=.47):
opt = {}
opt["exp_id"] = exp_id
opt["path"] = path
opt["max_steps"] = 100000
opt["num_policy_checks"] = 20
opt["checks_per_policy"] = 5
sparsify = 1
ifddeps = 1e-7
domain = BlocksWorld(blocks=6, noise=0.3)
opt["domain"] = domain
representation = IndependentDiscretization(domain)
policy = eGreedy(representation, epsilon=0.1)
opt["agent"] = Greedy_GQ(
policy, representation, discount_factor=domain.discount_factor,
lambda_=lambda_, initial_learn_rate=initial_learn_rate,
learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
experiment = Experiment(**opt)
return experiment
if __name__ == '__main__':
# run_profiled(make_experiment)
experiment = make_experiment(1)
experiment.run()
# experiment.plot()
# experiment.save()
| bsd-3-clause | 8e8a575513b4d1ab5bfcbedf66f74cbc | 32.8125 | 98 | 0.672212 | 3.305499 | false | false | false | false |
rlpy/rlpy | examples/cartpole_orig/kifdd_gauss.py | 1 | 3023 | """
Cart-pole balancing with continuous / Kernelized iFDD
"""
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from past.utils import old_div
from rlpy.Domains.FiniteTrackCartPole import FiniteCartPoleBalanceOriginal, FiniteCartPoleBalanceModern
from rlpy.Agents import SARSA, Q_LEARNING
from rlpy.Representations import *
from rlpy.Policies import eGreedy
from rlpy.Experiments import Experiment
import numpy as np
from hyperopt import hp
from rlpy.Representations import KernelizediFDD
param_space = {
'kernel_resolution':
hp.loguniform("kernel_resolution", np.log(5), np.log(50)),
'discover_threshold':
hp.loguniform(
"discover_threshold",
np.log(1e-1),
np.log(1e3)),
'lambda_': hp.uniform("lambda_", 0., 1.),
'boyan_N0': hp.loguniform("boyan_N0", np.log(1e1), np.log(1e5)),
'initial_learn_rate': hp.loguniform("initial_learn_rate", np.log(5e-2), np.log(1))}
def make_experiment(
exp_id=1, path="./Results/Temp/{domain}/{agent}/{representation}/",
discover_threshold=.21,
boyan_N0=37.,
lambda_=.9,
initial_learn_rate=.07,
kernel_resolution=13.14):
opt = {}
opt["path"] = path
opt["exp_id"] = exp_id
opt["max_steps"] = 30000
opt["num_policy_checks"] = 20
opt["checks_per_policy"] = 10
active_threshold = 0.01
max_base_feat_sim = 0.5
sparsify = 1
domain = FiniteCartPoleBalanceOriginal(good_reward=0.)
opt["domain"] = domain
# domain = FiniteCartPoleBalanceModern()
kernel_width = old_div((domain.statespace_limits[:, 1] - domain.statespace_limits[:, 0]), kernel_resolution)
representation = KernelizediFDD(domain, sparsify=sparsify,
kernel=gaussian_kernel,
kernel_args=[kernel_width],
active_threshold=active_threshold,
discover_threshold=discover_threshold,
normalization=True,
max_active_base_feat=10,
max_base_feat_sim=max_base_feat_sim)
policy = eGreedy(representation, epsilon=0.1)
# agent = SARSA(representation,policy,domain,initial_learn_rate=initial_learn_rate,
# lambda_=.0, learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
opt["agent"] = Q_LEARNING(
policy, representation, discount_factor=domain.discount_factor,
lambda_=lambda_, initial_learn_rate=initial_learn_rate,
learn_rate_decay_mode="boyan", boyan_N0=boyan_N0)
experiment = Experiment(**opt)
return experiment
if __name__ == '__main__':
from rlpy.Tools.run import run_profiled
# run_profiled(make_experiment)
experiment = make_experiment(1)
experiment.run(visualize_learning=True)
experiment.plot()
# experiment.save()
| bsd-3-clause | 20043d281bcf920b4bddd8a235cab80e | 37.75641 | 112 | 0.643401 | 3.482719 | false | false | false | false |
rlpy/rlpy | examples/gridworld/trajectory_based_value_iteration.py | 1 | 1513 | #!/usr/bin/env python
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
__author__ = "William Dabney"
from rlpy.Domains import GridWorld
from rlpy.MDPSolvers import TrajectoryBasedValueIteration
from rlpy.Representations import Tabular
from rlpy.Policies import GibbsPolicy
from rlpy.Experiments import MDPSolverExperiment
import os
def make_experiment(exp_id=1, path="./Results/Temp", show=False):
"""
Each file specifying an experimental setup should contain a
make_experiment function which returns an instance of the Experiment
class with everything set up.
@param id: number used to seed the random number generators
@param path: output directory where logs and results are stored
"""
# Domain:
# MAZE = '/Domains/GridWorldMaps/1x3.txt'
maze = os.path.join(GridWorld.default_map_dir, '4x5.txt')
domain = GridWorld(maze, noise=0.3)
# Representation
representation = Tabular(domain, discretization=20)
# Agent
agent = TrajectoryBasedValueIteration(
exp_id,
representation,
domain,
project_path=path,
show=show)
return MDPSolverExperiment(agent, domain)
if __name__ == '__main__':
path = "./Results/Temp/{domain}/{agent}/{representation}/"
experiment = make_experiment(1, path=path)
experiment.run()
| bsd-3-clause | a79c95c7fa761e9bc49be8688def6a3a | 29.26 | 72 | 0.709187 | 3.879487 | false | false | false | false |
oauthlib/oauthlib | oauthlib/oauth1/rfc5849/signature.py | 1 | 32076 | """
This module is an implementation of `section 3.4`_ of RFC 5849.
**Usage**
Steps for signing a request:
1. Collect parameters from the request using ``collect_parameters``.
2. Normalize those parameters using ``normalize_parameters``.
3. Create the *base string URI* using ``base_string_uri``.
4. Create the *signature base string* from the above three components
using ``signature_base_string``.
5. Pass the *signature base string* and the client credentials to one of the
sign-with-client functions. The HMAC-based signing functions needs
client credentials with secrets. The RSA-based signing functions needs
client credentials with an RSA private key.
To verify a request, pass the request and credentials to one of the verify
functions. The HMAC-based signing functions needs the shared secrets. The
RSA-based verify functions needs the RSA public key.
**Scope**
All of the functions in this module should be considered internal to OAuthLib,
since they are not imported into the "oauthlib.oauth1" module. Programs using
OAuthLib should not use directly invoke any of the functions in this module.
**Deprecated functions**
The "sign_" methods that are not "_with_client" have been deprecated. They may
be removed in a future release. Since they are all internal functions, this
should have no impact on properly behaving programs.
.. _`section 3.4`: https://tools.ietf.org/html/rfc5849#section-3.4
"""
import binascii
import hashlib
import hmac
import ipaddress
import logging
import urllib.parse as urlparse
import warnings
from oauthlib.common import extract_params, safe_string_equals, urldecode
from . import utils
log = logging.getLogger(__name__)
# ==== Common functions ==========================================
def signature_base_string(
        http_method: str,
        base_str_uri: str,
        normalized_encoded_request_parameters: str) -> str:
    """
    Construct the *signature base string* defined in `section 3.4.1.1`_ of
    RFC 5849.

    The signature base string is the value the client signs, and the value
    the server independently recomputes to verify the signature; both sides
    must therefore produce it byte-for-byte identically.

    Per the RFC, it is the concatenation — separated by "&" (ASCII 38) — of
    three elements, each percent-encoded per `Section 3.6`_:

    1. the HTTP request method in uppercase (e.g. "GET", "POST"; custom
       methods must be encoded),
    2. the base string URI from `Section 3.4.1.2`_, and
    3. the request parameters normalized per `Section 3.4.1.3.2`_.

    :param http_method: the HTTP request method (any case).
    :param base_str_uri: the already-computed base string URI.
    :param normalized_encoded_request_parameters: the already-normalized
        request parameter string.
    :return: the signature base string.

    .. _`section 3.4.1.1`: https://tools.ietf.org/html/rfc5849#section-3.4.1.1
    .. _`Section 3.4.1.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.2
    .. _`Section 3.4.1.3.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.3.2
    .. _`Section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6
    """
    # Each component is individually escaped (Section 3.6) and the escaped
    # pieces are joined with literal "&" separators, in the order mandated
    # by Section 3.4.1.1.
    components = (
        http_method.upper(),
        base_str_uri,
        normalized_encoded_request_parameters,
    )
    return '&'.join(utils.escape(component) for component in components)
def base_string_uri(uri: str, host: str = None) -> str:
    """
    Calculate the *base string URI* per `section 3.4.1.2`_ of RFC 5849.

    The base string URI is one of the three components concatenated into
    the *signature base string*, so it must be computed identically by
    client and server.

    Normalization performed here:

    * the scheme and host are lowercased;
    * an empty path becomes "/" (RFC 2616 section 5.1.2);
    * the port is omitted when it is the scheme's default (80 for "http",
      443 for "https") and included otherwise;
    * the query and fragment are discarded;
    * IPv6 host addresses are bracketed;
    * any literal spaces are percent-encoded as "%20" (RFC 5849 is silent
      on encoding details; this implementation **assumes** only spaces need
      encoding, applied to the whole value — note the result is escaped
      again when concatenated into the signature base string, so a space
      ultimately appears there as "%2520").

    :param uri: the request URI; must be a string and contain a scheme
        and host.
    :param host: optional "hostname" or "hostname:port" value (typically
        from the HTTP "Host" header). When given, it replaces both the
        host and the port parsed from ``uri``; if it carries no port, the
        scheme's default port is used.
    :return: the normalized base string URI.
    :raises ValueError: if ``uri`` is not a string, lacks a scheme or
        host, or carries an out-of-range port.

    .. _`section 3.4.1.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.2
    """
    if not isinstance(uri, str):
        raise ValueError('uri must be a string.')

    # NOTE: urlparse does not support unicode (pre-existing limitation).
    parts = urlparse.urlparse(uri)
    scheme = parts.scheme
    hostname = parts.hostname
    port = parts.port
    path = parts.path
    params = parts.params

    # An "http"/"https" URI representing the resource must be reconstructable,
    # which requires a scheme to be present.
    if not scheme:
        raise ValueError('missing scheme')
    scheme = scheme.lower()

    # RFC 2616 section 5.1.2: the absolute path cannot be empty; default to
    # the server root.
    if not path:
        path = '/'

    # Scheme and host must be lowercase (the override host, if any, is
    # lowercased below).
    if hostname is not None:
        hostname = hostname.lower()

    # The host and port must match the HTTP "Host" header when provided:
    # prepend the scheme so urlparse can split hostname from port.
    if host is not None:
        override = urlparse.urlparse(f"{scheme}://{host.lower()}")
        hostname = override.hostname
        port = override.port

    if hostname is None:
        raise ValueError('missing host')

    # Distinguish IP literals from hostnames so IPv6 addresses can be
    # re-bracketed and IP literals rendered in canonical form.
    try:
        ip = ipaddress.ip_address(hostname)
    except ValueError:
        pass
    else:
        if isinstance(ip, ipaddress.IPv6Address):
            hostname = f"[{ip}]"
        else:
            hostname = f"{ip}"

    if port is not None and not (0 < port <= 65535):
        raise ValueError('port out of range')  # 16-bit unsigned ints

    # Exclude the port when it is the scheme's default (RFC 2616 / RFC 2818);
    # include every other explicit port.
    if (scheme, port) in (('http', 80), ('https', 443)) or not port:
        netloc = hostname
    else:
        netloc = f"{hostname}:{port}"

    rebuilt = urlparse.urlunparse((scheme, netloc, path, params, '', ''))

    # Percent-encode spaces (see docstring for the encoding assumption).
    return rebuilt.replace(' ', '%20')
def collect_parameters(uri_query='', body=None, headers=None,
                       exclude_oauth_signature=True, with_realm=False):
    """Gather request parameters from every source defined by RFC 5849.

    Collects name/value pairs from the URI query string, the OAuth
    "Authorization" header and the request entity-body, in that order,
    per `section 3.4.1.3.1`_ of RFC 5849, for later normalization into
    the signature base string.

    Parameters whose name starts with ``oauth_`` have their values
    unescaped. The body may be supplied as a dict, a list of 2-tuples,
    or a form encoded query string; headers must be a dict.

    .. _`section 3.4.1.3.1`: https://tools.ietf.org/html/rfc5849#section-3.4.1.3.1
    """
    if body is None:
        body = []
    headers = headers or {}
    collected = []

    # Source 1: the query component of the request URI, decoded as an
    # "application/x-www-form-urlencoded" string (RFC3986 section 3.4,
    # W3C.REC-html40-19980424 section 17.13.4).
    if uri_query:
        collected.extend(urldecode(uri_query))

    # Source 2: the OAuth "Authorization" header, if present, parsed per
    # RFC 5849 section 3.5.1. The "realm" parameter is dropped unless the
    # caller explicitly asked to keep it.
    if headers:
        lowered = {name.lower(): value for name, value in headers.items()}
        auth_header = lowered.get('authorization')
        if auth_header is not None:
            collected.extend(
                pair for pair in utils.parse_authorization_header(auth_header)
                if with_realm or pair[0] != 'realm')

    # Source 3: a single-part, form encoded entity-body.
    # TODO: enforce the Content-Type inclusion conditions of 3.4.1.3.1
    collected.extend(extract_params(body) or [])

    # Unescape the values of all oauth_* parameters.
    result = [
        (name, utils.unescape(value) if name.startswith('oauth_') else value)
        for name, value in collected
    ]

    # "oauth_signature" MUST be excluded from the signature base string.
    if exclude_oauth_signature:
        result = [pair for pair in result if pair[0] != 'oauth_signature']
    return result
def normalize_parameters(params) -> str:
    """Calculate the normalized request parameters.

    Produces the *normalized request parameters* component of the
    signature base string, following `section 3.4.1.3.2`_ of RFC 5849:
    escape every name and value (section 3.6), sort the pairs, join each
    pair with "=" and all pairs with "&".

    .. _`section 3.4.1.3.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.3.2
    """
    # 1. Percent-encode each parameter name and value (section 3.6).
    escaped = [(utils.escape(name), utils.escape(value))
               for name, value in params]

    # 2. Sort by name in ascending byte order; parameters sharing a name
    #    sort by value, which tuple comparison gives us for free.
    escaped.sort()

    # 3./4. Join each pair with "=" (ASCII 61) — even for empty values —
    #       and concatenate all pairs with "&" (ASCII 38).
    return '&'.join('{}={}'.format(name, value) for name, value in escaped)
# ==== Common functions for HMAC-based signature methods =========
def _sign_hmac(hash_algorithm_name: str,
               sig_base_str: str,
               client_secret: str,
               resource_owner_secret: str):
    """Calculate an HMAC-based signature, per `section 3.4.2`_ of RFC 5849.

    The signature base string is signed with HMAC using the named hash
    algorithm ("SHA-1", "SHA-256" or "SHA-512"); the key is the escaped
    client secret and escaped token secret joined by "&".

    .. _`section 3.4.2`: https://tools.ietf.org/html/rfc5849#section-3.4.2
    """
    # key = "<escaped client secret>&<escaped token secret>"; the "&" is
    # mandatory even when either secret is empty (section 3.6 escaping).
    key = '&'.join([
        utils.escape(client_secret or ''),
        utils.escape(resource_owner_secret or ''),
    ])

    # Map the OAuth algorithm label onto hashlib's implementation.
    hash_alg = {
        'SHA-1': hashlib.sha1,
        'SHA-256': hashlib.sha256,
        'SHA-512': hashlib.sha512,
    }[hash_algorithm_name]

    # HMAC operates on bytes, so encode both key and text as UTF-8.
    mac = hmac.new(key.encode('utf-8'), sig_base_str.encode('utf-8'), hash_alg)

    # The "oauth_signature" value is the base64 of the digest (RFC2045
    # section 6.8); b2a_base64 appends a newline, hence the [:-1].
    return binascii.b2a_base64(mac.digest())[:-1].decode('utf-8')
def _verify_hmac(hash_algorithm_name: str,
                 request,
                 client_secret=None,
                 resource_owner_secret=None):
    """Verify an HMAC-based signature, per `section 3.4`_ of RFC 5849.

    Recomputes the signature base string from ``request`` and compares
    the resulting signature against ``request.signature`` in constant
    time.

    To satisfy `RFC2616 section 5.2`_ item 1, the request argument's uri
    attribute MUST be an absolute URI whose netloc part identifies the
    origin server or gateway on which the resource resides. Any Host
    item of the request argument's headers dict attribute will be
    ignored.

    .. _`section 3.4`: https://tools.ietf.org/html/rfc5849#section-3.4
    .. _`RFC2616 section 5.2`: https://tools.ietf.org/html/rfc2616#section-5.2
    """
    base = signature_base_string(
        request.http_method,
        base_string_uri(request.uri),
        normalize_parameters(request.params))
    expected = _sign_hmac(hash_algorithm_name, base,
                          client_secret, resource_owner_secret)
    ok = safe_string_equals(expected, request.signature)
    if not ok:
        log.debug('Verify HMAC failed: signature base string: %s', base)
    return ok
# ==== HMAC-SHA1 =================================================
def sign_hmac_sha1_with_client(sig_base_str, client):
    """Sign the signature base string with HMAC-SHA1 using the
    client's secrets."""
    return _sign_hmac(
        'SHA-1', sig_base_str,
        client.client_secret, client.resource_owner_secret)
def verify_hmac_sha1(request, client_secret=None, resource_owner_secret=None):
    """Verify an HMAC-SHA1 signature on ``request``."""
    return _verify_hmac(
        'SHA-1', request, client_secret, resource_owner_secret)
def sign_hmac_sha1(base_string, client_secret, resource_owner_secret):
    """Deprecated: use :func:`sign_hmac_sha1_with_client` instead.

    Kept for backward compatibility with callers that pass the secrets
    directly; it simply delegates to ``_sign_hmac`` with "SHA-1".
    """
    warnings.warn('use sign_hmac_sha1_with_client instead of sign_hmac_sha1',
                  DeprecationWarning)
    # Historically this accepted bytes as well as str; normalize to str,
    # since the signature base string helpers always produce str.
    if isinstance(base_string, bytes):
        base_string = base_string.decode('ascii')
    return _sign_hmac('SHA-1', base_string,
                      client_secret, resource_owner_secret)
# ==== HMAC-SHA256 ===============================================
def sign_hmac_sha256_with_client(sig_base_str, client):
    """Sign the signature base string with HMAC-SHA256 using the
    client's secrets."""
    return _sign_hmac(
        'SHA-256', sig_base_str,
        client.client_secret, client.resource_owner_secret)
def verify_hmac_sha256(request, client_secret=None, resource_owner_secret=None):
    """Verify an HMAC-SHA256 signature on ``request``."""
    return _verify_hmac(
        'SHA-256', request, client_secret, resource_owner_secret)
def sign_hmac_sha256(base_string, client_secret, resource_owner_secret):
    """Deprecated: use :func:`sign_hmac_sha256_with_client` instead.

    Kept for backward compatibility with callers that pass the secrets
    directly; it simply delegates to ``_sign_hmac`` with "SHA-256".
    """
    warnings.warn(
        'use sign_hmac_sha256_with_client instead of sign_hmac_sha256',
        DeprecationWarning)
    # Historically this accepted bytes as well as str; normalize to str,
    # since the signature base string helpers always produce str.
    if isinstance(base_string, bytes):
        base_string = base_string.decode('ascii')
    return _sign_hmac('SHA-256', base_string,
                      client_secret, resource_owner_secret)
# ==== HMAC-SHA512 ===============================================
def sign_hmac_sha512_with_client(sig_base_str: str,
                                 client):
    """Sign the signature base string with HMAC-SHA512 using the
    client's secrets."""
    return _sign_hmac(
        'SHA-512', sig_base_str,
        client.client_secret, client.resource_owner_secret)
def verify_hmac_sha512(request,
                       client_secret: str = None,
                       resource_owner_secret: str = None):
    """Verify an HMAC-SHA512 signature on ``request``."""
    return _verify_hmac(
        'SHA-512', request, client_secret, resource_owner_secret)
# ==== Common functions for RSA-based signature methods ==========
# Module-level cache of RSAAlgorithm objects keyed by hash algorithm
# name ("SHA-1", "SHA-256", "SHA-512"); populated lazily by
# _get_jwt_rsa_algorithm() so PyJWT is only imported/instantiated once
# per algorithm.
_jwt_rsa = {}  # cache of RSA-hash implementations from PyJWT jwt.algorithms
def _get_jwt_rsa_algorithm(hash_algorithm_name: str):
    """Return a ``jwt.algorithms.RSAAlgorithm`` for the named hash.

    Instantiates the PyJWT RSAAlgorithm for ``hash_algorithm_name``
    ("SHA-1", "SHA-256" or "SHA-512") on first use and caches it in the
    module-level ``_jwt_rsa`` dict for subsequent calls.
    """
    try:
        return _jwt_rsa[hash_algorithm_name]
    except KeyError:
        # First request for this algorithm: build and cache it. PyJWT
        # provides some nice pycrypto/cryptography abstractions.
        import jwt.algorithms as jwt_algorithms
        hashes = {
            'SHA-1': jwt_algorithms.hashes.SHA1,
            'SHA-256': jwt_algorithms.hashes.SHA256,
            'SHA-512': jwt_algorithms.hashes.SHA512,
        }
        alg = jwt_algorithms.RSAAlgorithm(hashes[hash_algorithm_name])
        _jwt_rsa[hash_algorithm_name] = alg
        return alg
def _prepare_key_plus(alg, keystr):
"""
Prepare a PEM encoded key (public or private), by invoking the `prepare_key`
method on alg with the keystr.
The keystr should be a string or bytes. If the keystr is bytes, it is
decoded as UTF-8 before being passed to prepare_key. Otherwise, it
is passed directly.
"""
if isinstance(keystr, bytes):
keystr = keystr.decode('utf-8')
return alg.prepare_key(keystr)
def _sign_rsa(hash_algorithm_name: str,
              sig_base_str: str,
              rsa_private_key: str):
    """Calculate the signature for an RSA-based signature method.

    Signs the signature base string with RSASSA-PKCS1-v1_5
    (`RFC3447, Section 8.2`_, a.k.a. PKCS#1), using the named hash for
    EMSA-PKCS1-v1_5. RFC 5849 only defines RSA-SHA1 (`section 3.4.3`_),
    but the same code serves non-standard variants that differ only in
    the digest algorithm. To use this method, the client MUST have
    established client credentials with the server that included its RSA
    public key (in a manner that is beyond the scope of the spec).

    .. _`section 3.4.3`: https://tools.ietf.org/html/rfc5849#section-3.4.3
    .. _`RFC3447, Section 8.2`: https://tools.ietf.org/html/rfc3447#section-8.2
    """
    alg = _get_jwt_rsa_algorithm(hash_algorithm_name)

    if not rsa_private_key:
        raise ValueError('rsa_private_key required for RSA with ' +
                         alg.hash_alg.name + ' signature method')

    # The signature base string is, by definition, printable US-ASCII, so
    # 'ascii' always works; a UnicodeError here would mean the base string
    # was built incorrectly, making the strict encode a free sanity check.
    message = sig_base_str.encode('ascii')

    # S = RSASSA-PKCS1-V1_5-SIGN (K, M)
    key = _prepare_key_plus(alg, rsa_private_key)
    raw_sig = alg.sign(message, key)

    # base64 the result per RFC2045 section 6.8 (b2a_base64's RFC 3548
    # base64 is identical here); b2a_base64 appends a "\n" ([:-1] drops
    # it) and yields bytes, so decode to the str the caller expects.
    return binascii.b2a_base64(raw_sig)[:-1].decode('ascii')
def _verify_rsa(hash_algorithm_name: str,
                request,
                rsa_public_key: str):
    """
    Verify a base64 encoded signature for a RSA-based signature method.

    Recomputes the signature base string from the received ``request``
    and checks its signature with RSASSA-PKCS1-V1_5-VERIFY under the
    given public key. While OAuth 1.0a only defines RSA-SHA1
    (`section 3.4.3`_ of RFC 5849), this function also serves
    non-standard methods that differ from it only by digest algorithm.

    To satisfy `RFC2616 section 5.2`_ item 1, the request argument's uri
    attribute MUST be an absolute URI whose netloc part identifies the
    origin server or gateway on which the resource resides. Any Host
    item of the request argument's headers dict attribute will be
    ignored.

    :returns: True iff the signature verifies; False on bad signatures,
        including ones containing non-ASCII bytes.

    .. _`section 3.4.3`: https://tools.ietf.org/html/rfc5849#section-3.4.3
    .. _`RFC2616 section 5.2`: https://tools.ietf.org/html/rfc2616#section-5.2
    """
    try:
        # Calculate the *signature base string* of the actual received request
        norm_params = normalize_parameters(request.params)
        bs_uri = base_string_uri(request.uri)
        sig_base_str = signature_base_string(
            request.http_method, bs_uri, norm_params)

        # Obtain the signature that was received in the request
        sig = binascii.a2b_base64(request.signature.encode('ascii'))

        # Get the implementation of RSA-with-hash algorithm to use
        alg = _get_jwt_rsa_algorithm(hash_algorithm_name)

        # Verify the received signature was produced by the private key
        # corresponding to the `rsa_public_key`, signing exact same
        # *signature base string*.
        #
        #     RSASSA-PKCS1-V1_5-VERIFY ((n, e), M, S)
        key = _prepare_key_plus(alg, rsa_public_key)

        # The signature base string only contains printable US-ASCII
        # characters, so the strict 'ascii' encode always succeeds for
        # correctly built base strings.
        verify_ok = alg.verify(sig_base_str.encode('ascii'), key, sig)

        if not verify_ok:
            # Bug fix: the message previously did `'...=%s' + sig_base_str`,
            # concatenating the literal placeholder with the value instead
            # of passing it as a lazy logging argument.
            log.debug('Verify failed: RSA with %s: signature base string=%s',
                      alg.hash_alg.name, sig_base_str)
        return verify_ok
    except UnicodeError:
        # A properly encoded signature contains only printable US-ASCII;
        # an incorrect or malicious one may contain other bytes, which the
        # strict 'ascii' encode/decode surfaces as UnicodeError. Treat that
        # exactly as a failed verification rather than an error.
        #
        # Note: switching to 'utf-8' would not remove this case, since a
        # malicious request can contain bytes invalid as UTF-8 as well.
        return False
# ==== RSA-SHA1 ==================================================
def sign_rsa_sha1_with_client(sig_base_str, client):
    """Sign ``sig_base_str`` with RSA-SHA1 using ``client.rsa_key``.

    For historical reasons this accepts both str and bytes; the newer
    SHA-256/SHA-512 variants accept only str, since the function that
    calculates a signature base string always produces a string.
    """
    if isinstance(sig_base_str, bytes):
        sig_base_str = sig_base_str.decode('ascii')
    return _sign_rsa('SHA-1', sig_base_str, client.rsa_key)
def verify_rsa_sha1(request, rsa_public_key: str):
    """Verify an RSA-SHA1 signature on ``request``."""
    return _verify_rsa(
        'SHA-1', request, rsa_public_key)
def sign_rsa_sha1(base_string, rsa_private_key):
    """Deprecated: use ``_sign_rsa("SHA-1", ...)`` instead.

    Kept for backward compatibility with callers that invoke it
    directly; it simply normalizes bytes input to str and delegates.
    """
    warnings.warn('use _sign_rsa("SHA-1", ...) instead of sign_rsa_sha1',
                  DeprecationWarning)
    base_string = base_string.decode('ascii') \
        if isinstance(base_string, bytes) else base_string
    return _sign_rsa('SHA-1', base_string, rsa_private_key)
# ==== RSA-SHA256 ================================================
def sign_rsa_sha256_with_client(sig_base_str: str, client):
    """Sign ``sig_base_str`` with RSA-SHA256 using ``client.rsa_key``."""
    return _sign_rsa(
        'SHA-256', sig_base_str, client.rsa_key)
def verify_rsa_sha256(request, rsa_public_key: str):
    """Verify an RSA-SHA256 signature on ``request``."""
    return _verify_rsa(
        'SHA-256', request, rsa_public_key)
# ==== RSA-SHA512 ================================================
def sign_rsa_sha512_with_client(sig_base_str: str, client):
    """Sign ``sig_base_str`` with RSA-SHA512 using ``client.rsa_key``."""
    return _sign_rsa(
        'SHA-512', sig_base_str, client.rsa_key)
def verify_rsa_sha512(request, rsa_public_key: str):
    """Verify an RSA-SHA512 signature on ``request``."""
    return _verify_rsa(
        'SHA-512', request, rsa_public_key)
# ==== PLAINTEXT =================================================
def sign_plaintext_with_client(_signature_base_string, client):
    """Create a PLAINTEXT "signature" from the client's secrets.

    The signature base string argument is deliberately ignored:
    PLAINTEXT is not a real signature, just the concatenated secrets.
    """
    return sign_plaintext(client.client_secret, client.resource_owner_secret)
def sign_plaintext(client_secret, resource_owner_secret):
    """Sign a request using plaintext, per `section 3.4.4`_ of RFC 5849.

    The "PLAINTEXT" method employs no signature algorithm and does not
    use the signature base string, "oauth_timestamp" or "oauth_nonce";
    it MUST be used with a transport-layer mechanism such as TLS or SSL
    (or sent over a secure channel with equivalent protections).

    .. _`section 3.4.4`: https://tools.ietf.org/html/rfc5849#section-3.4.4
    """
    # "oauth_signature" is "<escaped client secret>&<escaped token
    # secret>" (section 3.6 escaping); the "&" (ASCII 38) is mandatory
    # even when either secret is empty.
    return '&'.join([
        utils.escape(client_secret or ''),
        utils.escape(resource_owner_secret or ''),
    ])
def verify_plaintext(request, client_secret=None, resource_owner_secret=None):
    """Verify a PLAINTEXT signature, per `section 3.4`_ of RFC 5849.

    .. _`section 3.4`: https://tools.ietf.org/html/rfc5849#section-3.4
    """
    expected = sign_plaintext(client_secret, resource_owner_secret)
    valid = safe_string_equals(expected, request.signature)
    if not valid:
        log.debug('Verify PLAINTEXT failed')
    return valid
| bsd-3-clause | 1d2b99f3c82f423dfa422c10658b2aee | 36.647887 | 80 | 0.650549 | 3.769213 | false | false | false | false |
oauthlib/oauthlib | oauthlib/oauth1/rfc5849/utils.py | 5 | 2613 | """
oauthlib.utils
~~~~~~~~~~~~~~
This module contains utility methods used by various parts of the OAuth
spec.
"""
import urllib.request as urllib2
from oauthlib.common import quote, unquote
# 62-character alphanumeric ASCII alphabet (a-z, A-Z, 0-9).
# NOTE(review): presumably used by callers for random string generation
# (tokens/nonces) — confirm against the rest of the package.
UNICODE_ASCII_CHARACTER_SET = ('abcdefghijklmnopqrstuvwxyz'
                               'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                               '0123456789')
def filter_params(target):
    """Decorator that strips non-``oauth_`` parameters.

    The decorated callable must take a params dict or list of 2-tuples
    as its first positional argument; all other arguments pass through
    unchanged. The original docstring is preserved on the wrapper.
    """
    def wrapper(params, *args, **kwargs):
        return target(filter_oauth_params(params), *args, **kwargs)
    wrapper.__doc__ = target.__doc__
    return wrapper
def filter_oauth_params(params):
    """Return only the parameters whose name begins with ``oauth_``.

    ``params`` may be a dict or a sequence of (name, value) pairs; the
    result is always a list of pairs.
    """
    if isinstance(params, dict):
        items = list(params.items())
    else:
        items = params
    return [pair for pair in items if pair[0].startswith("oauth_")]
def escape(u):
    """Escape a unicode string in an OAuth-compatible fashion.

    Per `section 3.6`_ of the spec; only ``str`` is accepted.

    .. _`section 3.6`: https://tools.ietf.org/html/rfc5849#section-3.6
    """
    if not isinstance(u, str):
        raise ValueError('Only unicode objects are escapable. ' +
                         'Got {!r} of type {}.'.format(u, type(u)))
    # urllib.quote already leaves letters, digits and '_.-' unescaped;
    # '~' must also be kept literal for full rfc5849 support.
    return quote(u, safe=b'~')
def unescape(u):
    """Reverse :func:`escape`; only ``str`` is accepted."""
    if not isinstance(u, str):
        raise ValueError('Only unicode objects are unescapable.')
    return unquote(u)
def parse_keqv_list(l):
    """A unicode-safe version of urllib2.parse_keqv_list.

    Turns a list of 'key=value' strings into a dict, stripping quotes
    from quoted values.
    """
    # Since Python 2.6, parse_keqv_list handles unicode fine, so this is
    # a plain delegation kept for backwards-compatible naming.
    return urllib2.parse_keqv_list(l)
def parse_http_list(u):
    """A unicode-safe version of urllib2.parse_http_list.

    Splits a comma-separated HTTP header value, respecting quoted
    strings.
    """
    # Since Python 2.6, parse_http_list handles unicode fine, so this is
    # a plain delegation kept for backwards-compatible naming.
    return urllib2.parse_http_list(u)
def parse_authorization_header(authorization_header):
    """Parse an OAuth authorization header into a list of 2-tuples.

    :raises ValueError: if the header does not use the "OAuth" scheme or
        its parameter list cannot be parsed.
    """
    scheme = 'oauth '
    # Scheme names are case-insensitive; compare only the prefix.
    if authorization_header[:len(scheme)].lower() == scheme:
        items = parse_http_list(authorization_header[len(scheme):])
        try:
            return list(parse_keqv_list(items).items())
        except (IndexError, ValueError):
            pass
    raise ValueError('Malformed authorization header')
| bsd-3-clause | 70a5fc94dc6d8b5133088ee9439eab19 | 30.481928 | 79 | 0.649445 | 3.94713 | false | false | false | false |
oauthlib/oauthlib | setup.py | 1 | 2203 | # Hack because logging + setuptools sucks.
# NOTE(review): importing multiprocessing up front presumably works
# around the known logging/setuptools atexit error when running
# `setup.py test` — confirm; ignore platforms where it is unavailable.
try:
    import multiprocessing
except ImportError:
    pass
from os.path import dirname, join
from setuptools import find_packages, setup
import oauthlib
def fread(fn):
    """Return the text contents of *fn*, resolved relative to this script."""
    with open(join(dirname(__file__), fn), 'r') as handle:
        return handle.read()
# Optional dependency sets exposed below via extras_require, e.g.
# `pip install oauthlib[signedtoken]`.
rsa_require = ['cryptography>=3.0.0']
signedtoken_require = ['cryptography>=3.0.0', 'pyjwt>=2.0.0,<3']
signals_require = ['blinker>=1.4.0']
# Package metadata; the version is read from oauthlib.__version__ so it
# is defined in exactly one place.
setup(
    name='oauthlib',
    version=oauthlib.__version__,
    description='A generic, spec-compliant, thorough implementation of the OAuth request-signing logic',
    long_description=fread('README.rst'),
    long_description_content_type='text/x-rst',
    author='The OAuthlib Community',
    author_email='idan@gazit.me',
    maintainer='Ib Lundgren',
    maintainer_email='ib.lundgren@gmail.com',
    url='https://github.com/oauthlib/oauthlib',
    platforms='any',
    license='BSD-3-Clause',
    packages=find_packages(exclude=('docs', 'tests', 'tests.*')),
    python_requires='>=3.6',
    # Optional feature groups: pip install oauthlib[rsa|signedtoken|signals]
    extras_require={
        'rsa': rsa_require,
        'signedtoken': signedtoken_require,
        'signals': signals_require,
    },
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved',
        'License :: OSI Approved :: BSD License',
        'Operating System :: MacOS',
        'Operating System :: POSIX',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: Implementation',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ]
)
| bsd-3-clause | 82260ee9837d0d28dea11578d2bb746f | 32.892308 | 104 | 0.624603 | 4.027422 | false | false | false | false |
oasis-open/cti-python-stix2 | stix2/test/v20/test_datastore_filters.py | 1 | 19219 | import pytest
from stix2 import parse
from stix2.datastore.filters import Filter, apply_common_filters
from stix2.utils import STIXdatetime, parse_into_datetime
# Sample STIX 2.0 content, as raw dicts, used by the filter tests below:
# [0] malware, [1] indicator, [2] revoked + marked relationship,
# [3] vulnerability with external references, [4] observed-data.
stix_objs = [
    {
        "created": "2017-01-27T13:49:53.997Z",
        "description": "\n\nTITLE:\n\tPoison Ivy",
        "id": "malware--fdd60b30-b67c-41e3-b0b9-f01faf20d111",
        "labels": [
            "remote-access-trojan",
        ],
        "modified": "2017-01-27T13:49:53.997Z",
        "name": "Poison Ivy",
        "type": "malware",
    },
    {
        "created": "2014-05-08T09:00:00.000Z",
        "id": "indicator--a932fcc6-e032-476c-826f-cb970a5a1ade",
        "labels": [
            "compromised",
        ],
        "modified": "2014-05-08T09:00:00.000Z",
        "name": "File hash for Poison Ivy variant",
        "pattern": "[file:hashes.'SHA-256' = 'ef537f25c895bfa782526529a9b63d97aa631564d5d789c2b765448c8635fb6c']",
        "type": "indicator",
        "valid_from": "2014-05-08T09:00:00.000000Z",
    },
    {
        "created": "2014-05-08T09:00:00.000Z",
        "granular_markings": [
            {
                "marking_ref": "marking-definition--5e57c739-391a-4eb3-b6be-7d15ca92d5ed",
                "selectors": [
                    "relationship_type",
                ],
            },
        ],
        "id": "relationship--2f9a9aa9-108a-4333-83e2-4fb25add0463",
        "modified": "2014-05-08T09:00:00.000Z",
        "object_marking_refs": [
            "marking-definition--613f2e26-407d-48c7-9eca-b8e91df99dc9",
        ],
        "relationship_type": "indicates",
        "revoked": True,
        "source_ref": "indicator--a932fcc6-e032-476c-826f-cb970a5a1ade",
        "target_ref": "malware--fdd60b30-b67c-41e3-b0b9-f01faf20d111",
        "type": "relationship",
    },
    {
        "id": "vulnerability--ee916c28-c7a4-4d0d-ad56-a8d357f89fef",
        "created": "2016-02-14T00:00:00.000Z",
        "created_by_ref": "identity--f1350682-3290-4e0d-be58-69e290537647",
        "modified": "2016-02-14T00:00:00.000Z",
        "type": "vulnerability",
        "name": "CVE-2014-0160",
        "description": "The (1) TLS...",
        "external_references": [
            {
                "source_name": "cve",
                "external_id": "CVE-2014-0160",
            },
        ],
        "labels": ["heartbleed", "has-logo"],
    },
    {
        "type": "observed-data",
        "id": "observed-data--b67d30ff-02ac-498a-92f9-32f845f448cf",
        "created_by_ref": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
        "created": "2016-04-06T19:58:16.000Z",
        "modified": "2016-04-06T19:58:16.000Z",
        "first_observed": "2015-12-21T19:00:00Z",
        "last_observed": "2015-12-21T19:00:00Z",
        "number_observed": 1,
        "objects": {
            "0": {
                "type": "file",
                "name": "HAL 9000.exe",
            },
        },
    },
]
# Filters referenced by index in the tests below; indices noted inline.
filters = [
    Filter("type", "!=", "relationship"),  # [0]
    Filter("id", "=", "relationship--2f9a9aa9-108a-4333-83e2-4fb25add0463"),  # [1]
    Filter("labels", "in", "remote-access-trojan"),  # [2]
    Filter("created", ">", "2015-01-01T01:00:00.000Z"),  # [3]
    Filter("revoked", "=", True),  # [4]
    Filter("revoked", "!=", True),  # [5]
    Filter("object_marking_refs", "=", "marking-definition--613f2e26-407d-48c7-9eca-b8e91df99dc9"),  # [6]
    Filter("granular_markings.selectors", "in", "relationship_type"),  # [7]
    Filter("granular_markings.marking_ref", "=", "marking-definition--5e57c739-391a-4eb3-b6be-7d15ca92d5ed"),  # [8]
    Filter("external_references.external_id", "in", "CVE-2014-0160,CVE-2017-6608"),  # [9]
    Filter("created_by_ref", "=", "identity--f1350682-3290-4e0d-be58-69e290537647"),  # [10]
    Filter("object_marking_refs", "=", "marking-definition--613f2e26-0000-4000-8000-b8e91df99dc9"),  # [11] matches nothing
    Filter("granular_markings.selectors", "in", "description"),  # [12] matches nothing
    Filter("external_references.source_name", "=", "CVE"),  # [13] matches nothing (data uses "cve")
    Filter("objects", "=", {"0": {"type": "file", "name": "HAL 9000.exe"}}),  # [14]
    Filter("objects", "contains", {"type": "file", "name": "HAL 9000.exe"}),  # [15]
    Filter("labels", "contains", "heartbleed"),  # [16]
]
# same as above objects but converted to real Python STIX2 objects
# to test filters against true Python STIX2 objects
# (each test runs its filters against both this list and stix_objs)
real_stix_objs = [parse(stix_obj) for stix_obj in stix_objs]
def test_filter_ops_check():
    """Filter() rejects operators outside its supported set."""
    # invalid filters - non supported operators
    with pytest.raises(ValueError) as excinfo:
        # create Filter that has an operator that is not allowed
        Filter('modified', '*', 'not supported operator')
    assert str(excinfo.value) == "Filter operator '*' not supported for specified property: 'modified'"
    with pytest.raises(ValueError) as excinfo:
        Filter("type", "%", "4")
    assert "Filter operator '%' not supported for specified property" in str(excinfo.value)
def test_filter_value_type_check():
    """Filter() rejects mutable / unsupported value types."""
    # invalid filters - non supported value types
    with pytest.raises(TypeError) as excinfo:
        Filter('created', '=', object())
    assert "'<class 'object'>'" in str(excinfo.value)
    assert "is not supported. The type must be a Python immutable type or dictionary" in str(excinfo.value)
    with pytest.raises(TypeError) as excinfo:
        Filter("type", "=", complex(2, -1))
    assert "'<class 'complex'>'" in str(excinfo.value)
    assert "is not supported. The type must be a Python immutable type or dictionary" in str(excinfo.value)
    with pytest.raises(TypeError) as excinfo:
        Filter("type", "=", set([16, 23]))
    assert "'<class 'set'>'" in str(excinfo.value)
    assert "is not supported. The type must be a Python immutable type or dictionary" in str(excinfo.value)
def test_filter_type_underscore_check():
    """Filter() rejects 'type' values containing underscores."""
    # check that Filters where property="type", value (name) doesnt have underscores
    with pytest.raises(ValueError) as excinfo:
        Filter("type", "=", "oh_underscore")
    assert "Filter for property 'type' cannot have its value 'oh_underscore'" in str(excinfo.value)
def test_apply_common_filters0():
    """filters[0]: type != relationship, on both dicts and parsed objects."""
    # "Return any object whose type is not relationship"
    resp = list(apply_common_filters(stix_objs, [filters[0]]))
    ids = [r['id'] for r in resp]
    assert stix_objs[0]['id'] in ids
    assert stix_objs[1]['id'] in ids
    assert stix_objs[3]['id'] in ids
    assert len(ids) == 4
    resp = list(apply_common_filters(real_stix_objs, [filters[0]]))
    ids = [r.id for r in resp]
    assert real_stix_objs[0].id in ids
    assert real_stix_objs[1].id in ids
    assert real_stix_objs[3].id in ids
    assert len(ids) == 4
def test_apply_common_filters1():
    """filters[1]: exact id match returns only the relationship object."""
    # "Return any object that matched id relationship--2f9a9aa9-108a-4333-83e2-4fb25add0463"
    resp = list(apply_common_filters(stix_objs, [filters[1]]))
    assert resp[0]['id'] == stix_objs[2]['id']
    assert len(resp) == 1
    resp = list(apply_common_filters(real_stix_objs, [filters[1]]))
    assert resp[0].id == real_stix_objs[2].id
    assert len(resp) == 1
def test_apply_common_filters2():
    """filters[2]: 'in' against the labels list matches the malware."""
    # "Return any object that contains remote-access-trojan in labels"
    resp = list(apply_common_filters(stix_objs, [filters[2]]))
    assert resp[0]['id'] == stix_objs[0]['id']
    assert len(resp) == 1
    resp = list(apply_common_filters(real_stix_objs, [filters[2]]))
    assert resp[0].id == real_stix_objs[0].id
    assert len(resp) == 1
def test_apply_common_filters3():
    """filters[3]: timestamp comparison with '>' matches three objects."""
    # "Return any object created after 2015-01-01T01:00:00.000Z"
    resp = list(apply_common_filters(stix_objs, [filters[3]]))
    assert resp[0]['id'] == stix_objs[0]['id']
    assert len(resp) == 3
    resp = list(apply_common_filters(real_stix_objs, [filters[3]]))
    assert len(resp) == 3
    assert resp[0].id == real_stix_objs[0].id
def test_apply_common_filters4():
    """filters[4]: revoked = True matches only the relationship."""
    # "Return any revoked object"
    resp = list(apply_common_filters(stix_objs, [filters[4]]))
    assert resp[0]['id'] == stix_objs[2]['id']
    assert len(resp) == 1
    resp = list(apply_common_filters(real_stix_objs, [filters[4]]))
    assert resp[0].id == real_stix_objs[2].id
    assert len(resp) == 1
def test_apply_common_filters5():
# "Return any object whose not revoked"
resp = list(apply_common_filters(stix_objs, [filters[5]]))
assert len(resp) == 0
resp = list(apply_common_filters(real_stix_objs, [filters[5]]))
assert len(resp) == 4
def test_apply_common_filters6():
# "Return any object that matches marking-definition--613f2e26-407d-48c7-9eca-b8e91df99dc9 in object_marking_refs"
resp = list(apply_common_filters(stix_objs, [filters[6]]))
assert resp[0]['id'] == stix_objs[2]['id']
assert len(resp) == 1
resp = list(apply_common_filters(real_stix_objs, [filters[6]]))
assert resp[0].id == real_stix_objs[2].id
assert len(resp) == 1
def test_apply_common_filters7():
# "Return any object that contains relationship_type in their selectors AND
# also has marking-definition--5e57c739-391a-4eb3-b6be-7d15ca92d5ed in marking_ref"
resp = list(apply_common_filters(stix_objs, [filters[7], filters[8]]))
assert resp[0]['id'] == stix_objs[2]['id']
assert len(resp) == 1
resp = list(apply_common_filters(real_stix_objs, [filters[7], filters[8]]))
assert resp[0].id == real_stix_objs[2].id
assert len(resp) == 1
def test_apply_common_filters8():
# "Return any object that contains CVE-2014-0160,CVE-2017-6608 in their external_id"
resp = list(apply_common_filters(stix_objs, [filters[9]]))
assert resp[0]['id'] == stix_objs[3]['id']
assert len(resp) == 1
resp = list(apply_common_filters(real_stix_objs, [filters[9]]))
assert resp[0].id == real_stix_objs[3].id
assert len(resp) == 1
def test_apply_common_filters9():
# "Return any object that matches created_by_ref identity--f1350682-3290-4e0d-be58-69e290537647"
resp = list(apply_common_filters(stix_objs, [filters[10]]))
assert len(resp) == 1
resp = list(apply_common_filters(real_stix_objs, [filters[10]]))
assert len(resp) == 1
def test_apply_common_filters10():
# "Return any object that matches marking-definition--613f2e26-0000-4000-8000-b8e91df99dc9 in object_marking_refs" (None)
resp = list(apply_common_filters(stix_objs, [filters[11]]))
assert len(resp) == 0
resp = list(apply_common_filters(real_stix_objs, [filters[11]]))
assert len(resp) == 0
def test_apply_common_filters11():
# "Return any object that contains description in its selectors" (None)
resp = list(apply_common_filters(stix_objs, [filters[12]]))
assert len(resp) == 0
resp = list(apply_common_filters(real_stix_objs, [filters[12]]))
assert len(resp) == 0
def test_apply_common_filters12():
# "Return any object that matches CVE in source_name" (None, case sensitive)
resp = list(apply_common_filters(stix_objs, [filters[13]]))
assert len(resp) == 0
resp = list(apply_common_filters(real_stix_objs, [filters[13]]))
assert len(resp) == 0
def test_apply_common_filters13():
# Return any object that matches file object in "objects"
resp = list(apply_common_filters(stix_objs, [filters[14]]))
assert resp[0]["id"] == stix_objs[4]["id"]
assert len(resp) == 1
# important additional check to make sure original File dict was
# not converted to File object. (this was a deep bug found)
assert isinstance(resp[0]["objects"]["0"], dict)
resp = list(apply_common_filters(real_stix_objs, [filters[14]]))
assert resp[0].id == real_stix_objs[4].id
assert len(resp) == 1
def test_apply_common_filters14():
# Return any object that contains a specific File Cyber Observable Object
resp = list(apply_common_filters(stix_objs, [filters[15]]))
assert resp[0]['id'] == stix_objs[4]['id']
assert len(resp) == 1
resp = list(apply_common_filters(real_stix_objs, [filters[15]]))
assert resp[0].id == real_stix_objs[4].id
assert len(resp) == 1
def test_apply_common_filters15():
# Return any object that contains 'heartbleed' in "labels"
resp = list(apply_common_filters(stix_objs, [filters[16]]))
assert resp[0]['id'] == stix_objs[3]['id']
assert len(resp) == 1
resp = list(apply_common_filters(real_stix_objs, [filters[16]]))
assert resp[0].id == real_stix_objs[3].id
assert len(resp) == 1
def test_datetime_filter_behavior():
    """If a filter is initialized with its value being a datetime object,
    OR the STIX object property being filtered on is a datetime object, all
    resulting comparisons are executed on the string representations
    of the datetime objects, as the Filter functionality converts
    all datetime objects to their string forms using format_datetime().
    This test makes sure all datetime comparisons are carried out correctly.
    """
    filter_with_dt_obj = Filter("created", "=", parse_into_datetime("2016-02-14T00:00:00.000Z", "millisecond"))
    filter_with_str = Filter("created", "=", "2016-02-14T00:00:00.000Z")
    # compare datetime obj to filter w/ datetime obj
    resp = list(apply_common_filters(real_stix_objs, [filter_with_dt_obj]))
    assert len(resp) == 1
    assert resp[0]["id"] == "vulnerability--ee916c28-c7a4-4d0d-ad56-a8d357f89fef"
    assert isinstance(resp[0].created, STIXdatetime)  # make sure original object not altered
    # compare datetime string to filter w/ str
    resp = list(apply_common_filters(stix_objs, [filter_with_str]))
    assert len(resp) == 1
    assert resp[0]["id"] == "vulnerability--ee916c28-c7a4-4d0d-ad56-a8d357f89fef"
    # compare datetime obj to filter w/ str
    resp = list(apply_common_filters(real_stix_objs, [filter_with_str]))
    assert len(resp) == 1
    assert resp[0]["id"] == "vulnerability--ee916c28-c7a4-4d0d-ad56-a8d357f89fef"
    assert isinstance(resp[0].created, STIXdatetime)  # make sure original object not altered
# The following tests take the ``stix_objs2``/``real_stix_objs2`` fixtures and
# exercise the comparison operators of ``Filter`` (<, >, <=, >=, !=) plus
# filtering on non-common and embedded properties.
def test_filters0(stix_objs2, real_stix_objs2):
    # "Return any object modified before 2017-01-28T13:49:53.935Z"
    resp = list(apply_common_filters(stix_objs2, [Filter("modified", "<", "2017-01-28T13:49:53.935Z")]))
    assert resp[0]['id'] == stix_objs2[1]['id']
    assert len(resp) == 2
    resp = list(apply_common_filters(real_stix_objs2, [Filter("modified", "<", parse_into_datetime("2017-01-28T13:49:53.935Z"))]))
    assert resp[0].id == real_stix_objs2[1].id
    assert len(resp) == 2
def test_filters1(stix_objs2, real_stix_objs2):
    # "Return any object modified after 2017-01-28T13:49:53.935Z"
    resp = list(apply_common_filters(stix_objs2, [Filter("modified", ">", "2017-01-28T13:49:53.935Z")]))
    assert resp[0]['id'] == stix_objs2[0]['id']
    assert len(resp) == 1
    resp = list(apply_common_filters(real_stix_objs2, [Filter("modified", ">", parse_into_datetime("2017-01-28T13:49:53.935Z"))]))
    assert resp[0].id == real_stix_objs2[0].id
    assert len(resp) == 1
def test_filters2(stix_objs2, real_stix_objs2):
    # "Return any object modified after or on 2017-01-28T13:49:53.935Z"
    resp = list(apply_common_filters(stix_objs2, [Filter("modified", ">=", "2017-01-27T13:49:53.935Z")]))
    assert resp[0]['id'] == stix_objs2[0]['id']
    assert len(resp) == 3
    resp = list(apply_common_filters(real_stix_objs2, [Filter("modified", ">=", parse_into_datetime("2017-01-27T13:49:53.935Z"))]))
    assert resp[0].id == real_stix_objs2[0].id
    assert len(resp) == 3
def test_filters3(stix_objs2, real_stix_objs2):
    # "Return any object modified before or on 2017-01-28T13:49:53.935Z"
    resp = list(apply_common_filters(stix_objs2, [Filter("modified", "<=", "2017-01-27T13:49:53.935Z")]))
    assert resp[0]['id'] == stix_objs2[1]['id']
    assert len(resp) == 2
    # "Return any object modified before or on 2017-01-28T13:49:53.935Z"
    fv = Filter("modified", "<=", parse_into_datetime("2017-01-27T13:49:53.935Z"))
    resp = list(apply_common_filters(real_stix_objs2, [fv]))
    assert resp[0].id == real_stix_objs2[1].id
    assert len(resp) == 2
def test_filters4():
    # Assert invalid Filter cannot be created
    with pytest.raises(ValueError) as excinfo:
        Filter("modified", "?", "2017-01-27T13:49:53.935Z")
    assert str(excinfo.value) == (
        "Filter operator '?' not supported "
        "for specified property: 'modified'"
    )
def test_filters5(stix_objs2, real_stix_objs2):
    # "Return any object whose id is not indicator--00000000-0000-4000-8000-000000000002"
    resp = list(apply_common_filters(stix_objs2, [Filter("id", "!=", "indicator--00000000-0000-4000-8000-000000000002")]))
    assert resp[0]['id'] == stix_objs2[0]['id']
    assert len(resp) == 1
    resp = list(apply_common_filters(real_stix_objs2, [Filter("id", "!=", "indicator--00000000-0000-4000-8000-000000000002")]))
    assert resp[0].id == real_stix_objs2[0].id
    assert len(resp) == 1
def test_filters6(stix_objs2, real_stix_objs2):
    # Test filtering on non-common property
    resp = list(apply_common_filters(stix_objs2, [Filter("name", "=", "Malicious site hosting downloader")]))
    assert resp[0]['id'] == stix_objs2[0]['id']
    assert len(resp) == 3
    resp = list(apply_common_filters(real_stix_objs2, [Filter("name", "=", "Malicious site hosting downloader")]))
    assert resp[0].id == real_stix_objs2[0].id
    assert len(resp) == 3
def test_filters7(stix_objs2, real_stix_objs2):
    # Test filtering on embedded property (dotted path into "objects")
    obsvd_data_obj = {
        "type": "observed-data",
        "id": "observed-data--b67d30ff-02ac-498a-92f9-32f845f448cf",
        "created_by_ref": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
        "created": "2016-04-06T19:58:16.000Z",
        "modified": "2016-04-06T19:58:16.000Z",
        "first_observed": "2015-12-21T19:00:00Z",
        "last_observed": "2015-12-21T19:00:00Z",
        "number_observed": 50,
        "objects": {
            "0": {
                "type": "file",
                "hashes": {
                    "SHA-256": "35a01331e9ad96f751278b891b6ea09699806faedfa237d40513d92ad1b7100f",
                },
                "extensions": {
                    "pdf-ext": {
                        "version": "1.7",
                        "document_info_dict": {
                            "Title": "Sample document",
                            "Author": "Adobe Systems Incorporated",
                            "Creator": "Adobe FrameMaker 5.5.3 for Power Macintosh",
                            "Producer": "Acrobat Distiller 3.01 for Power Macintosh",
                            "CreationDate": "20070412090123-02",
                        },
                        "pdfid0": "DFCE52BD827ECF765649852119D",
                        "pdfid1": "57A1E0F9ED2AE523E313C",
                    },
                },
            },
        },
    }
    stix_objects = list(stix_objs2) + [obsvd_data_obj]
    real_stix_objects = list(real_stix_objs2) + [parse(obsvd_data_obj)]
    resp = list(apply_common_filters(stix_objects, [Filter("objects.0.extensions.pdf-ext.version", ">", "1.2")]))
    assert resp[0]['id'] == stix_objects[3]['id']
    assert len(resp) == 1
    resp = list(apply_common_filters(real_stix_objects, [Filter("objects.0.extensions.pdf-ext.version", ">", "1.2")]))
    assert resp[0].id == real_stix_objects[3].id
    assert len(resp) == 1
| bsd-3-clause | 346d9391635c9e518e6a6f8cab1dc58c | 39.039583 | 131 | 0.626984 | 3.056457 | false | true | false | false |
oasis-open/cti-python-stix2 | stix2/test/test_utils_type_checks.py | 1 | 6467 | import pytest
import stix2.utils
###
# Tests using types/behaviors common to STIX 2.0 and 2.1.
###
# Positive/negative pairs for the SDO / SCO / SRO type-class predicates in
# stix2.utils.  Each predicate is checked three ways: bare type string,
# full STIX id string, and via the generic ``is_stix_type`` dispatcher.
@pytest.mark.parametrize("stix_version", ["2.0", "2.1"])
@pytest.mark.parametrize(
    "type_", [
        "attack-pattern",
        "campaign",
        "course-of-action",
        "identity",
        "indicator",
        "intrusion-set",
        "malware",
        "observed-data",
        "report",
        "threat-actor",
        "tool",
        "vulnerability",
    ],
)
def test_is_sdo(type_, stix_version):
    assert stix2.utils.is_sdo(type_, stix_version)
    id_ = type_ + "--a12fa04c-6586-4128-8d1a-cfe0d1c081f5"
    assert stix2.utils.is_sdo(id_, stix_version)
    assert stix2.utils.is_stix_type(
        type_, stix_version, stix2.utils.STIXTypeClass.SDO,
    )
@pytest.mark.parametrize("stix_version", ["2.0", "2.1"])
@pytest.mark.parametrize(
    "type_", [
        "relationship",
        "sighting",
        "marking-definition",
        "bundle",
        "language-content",
        "ipv4-addr",
        "foo",
    ],
)
def test_is_not_sdo(type_, stix_version):
    assert not stix2.utils.is_sdo(type_, stix_version)
    id_ = type_ + "--a12fa04c-6586-4128-8d1a-cfe0d1c081f5"
    assert not stix2.utils.is_sdo(id_, stix_version)
    # A dict carrying only the type must be rejected as well.
    d = {
        "type": type_,
    }
    assert not stix2.utils.is_sdo(d, stix_version)
    assert not stix2.utils.is_stix_type(
        type_, stix_version, stix2.utils.STIXTypeClass.SDO,
    )
@pytest.mark.parametrize("stix_version", ["2.0", "2.1"])
@pytest.mark.parametrize(
    "type_", [
        "artifact",
        "autonomous-system",
        "directory",
        "domain-name",
        "email-addr",
        "email-message",
        "file",
        "ipv4-addr",
        "ipv6-addr",
        "mac-addr",
        "mutex",
        "network-traffic",
        "process",
        "software",
        "url",
        "user-account",
        "windows-registry-key",
        "x509-certificate",
    ],
)
def test_is_sco(type_, stix_version):
    assert stix2.utils.is_sco(type_, stix_version)
    id_ = type_ + "--a12fa04c-6586-4128-8d1a-cfe0d1c081f5"
    assert stix2.utils.is_sco(id_, stix_version)
    assert stix2.utils.is_stix_type(
        type_, stix_version, stix2.utils.STIXTypeClass.SCO,
    )
@pytest.mark.parametrize("stix_version", ["2.0", "2.1"])
@pytest.mark.parametrize(
    "type_", [
        "identity",
        "sighting",
        "marking-definition",
        "bundle",
        "language-content",
        "foo",
    ],
)
def test_is_not_sco(type_, stix_version):
    assert not stix2.utils.is_sco(type_, stix_version)
    id_ = type_ + "--a12fa04c-6586-4128-8d1a-cfe0d1c081f5"
    assert not stix2.utils.is_sco(id_, stix_version)
    d = {
        "type": type_,
    }
    assert not stix2.utils.is_sco(d, stix_version)
    assert not stix2.utils.is_stix_type(
        type_, stix_version, stix2.utils.STIXTypeClass.SCO,
    )
@pytest.mark.parametrize("stix_version", ["2.0", "2.1"])
@pytest.mark.parametrize(
    "type_", [
        "relationship",
        "sighting",
    ],
)
def test_is_sro(type_, stix_version):
    assert stix2.utils.is_sro(type_, stix_version)
    id_ = type_ + "--a12fa04c-6586-4128-8d1a-cfe0d1c081f5"
    assert stix2.utils.is_sro(id_, stix_version)
    assert stix2.utils.is_stix_type(
        type_, stix_version, stix2.utils.STIXTypeClass.SRO,
    )
@pytest.mark.parametrize("stix_version", ["2.0", "2.1"])
@pytest.mark.parametrize(
    "type_", [
        "identity",
        "marking-definition",
        "bundle",
        "language-content",
        "ipv4-addr",
        "foo",
    ],
)
def test_is_not_sro(type_, stix_version):
    assert not stix2.utils.is_sro(type_, stix_version)
    id_ = type_ + "--a12fa04c-6586-4128-8d1a-cfe0d1c081f5"
    assert not stix2.utils.is_sro(id_, stix_version)
    d = {
        "type": type_,
    }
    assert not stix2.utils.is_sro(d, stix_version)
    assert not stix2.utils.is_stix_type(
        type_, stix_version, stix2.utils.STIXTypeClass.SRO,
    )
# Marking-definition and generic object predicates, plus the variadic
# ``is_stix_type`` dispatcher (mixes STIXTypeClass enums and type strings).
@pytest.mark.parametrize("stix_version", ["2.0", "2.1"])
def test_is_marking(stix_version):
    assert stix2.utils.is_marking("marking-definition", stix_version)
    id_ = "marking-definition--a12fa04c-6586-4128-8d1a-cfe0d1c081f5"
    assert stix2.utils.is_marking(id_, stix_version)
    assert stix2.utils.is_stix_type(
        "marking-definition", stix_version, "marking-definition",
    )
@pytest.mark.parametrize("stix_version", ["2.0", "2.1"])
@pytest.mark.parametrize(
    "type_", [
        "identity",
        "bundle",
        "language-content",
        "ipv4-addr",
        "foo",
    ],
)
def test_is_not_marking(type_, stix_version):
    assert not stix2.utils.is_marking(type_, stix_version)
    id_ = type_ + "--a12fa04c-6586-4128-8d1a-cfe0d1c081f5"
    assert not stix2.utils.is_marking(id_, stix_version)
    d = {
        "type": type_,
    }
    assert not stix2.utils.is_marking(d, stix_version)
    assert not stix2.utils.is_stix_type(
        type_, stix_version, "marking-definition",
    )
@pytest.mark.parametrize("stix_version", ["2.0", "2.1"])
@pytest.mark.parametrize(
    "type_", [
        "identity",
        "relationship",
        "sighting",
        "marking-definition",
        "bundle",
        "ipv4-addr",
    ],
)
def test_is_object(type_, stix_version):
    assert stix2.utils.is_object(type_, stix_version)
    id_ = type_ + "--a12fa04c-6586-4128-8d1a-cfe0d1c081f5"
    assert stix2.utils.is_object(id_, stix_version)
@pytest.mark.parametrize("stix_version", ["2.0", "2.1"])
def test_is_not_object(stix_version):
    assert not stix2.utils.is_object("foo", stix_version)
    id_ = "foo--a12fa04c-6586-4128-8d1a-cfe0d1c081f5"
    assert not stix2.utils.is_object(id_, stix_version)
    d = {
        "type": "foo",
    }
    assert not stix2.utils.is_object(d, stix_version)
@pytest.mark.parametrize("stix_version", ["2.0", "2.1"])
def test_is_stix_type(stix_version):
    # Unknown type, even when several candidate classes are given.
    assert not stix2.utils.is_stix_type(
        "foo", stix_version, stix2.utils.STIXTypeClass.SDO, "foo",
    )
    # An explicit type string in the candidate list matches.
    assert stix2.utils.is_stix_type(
        "bundle", stix_version, "foo", "bundle",
    )
    assert stix2.utils.is_stix_type(
        "identity", stix_version,
        stix2.utils.STIXTypeClass.SDO,
        stix2.utils.STIXTypeClass.SRO,
    )
    assert stix2.utils.is_stix_type(
        "software", stix_version,
        stix2.utils.STIXTypeClass.SDO,
        stix2.utils.STIXTypeClass.SCO,
    )
| bsd-3-clause | aee3f62f34e9df22f521abf529503315 | 23.683206 | 69 | 0.592392 | 2.753086 | false | true | false | false |
dask/dask | dask/dataframe/rolling.py | 2 | 21493 | import datetime
import inspect
from numbers import Integral
import pandas as pd
from pandas.api.types import is_datetime64_any_dtype
from pandas.core.window import Rolling as pd_Rolling
from dask.array.core import normalize_arg
from dask.base import tokenize
from dask.blockwise import BlockwiseDepDict
from dask.dataframe import methods
from dask.dataframe.core import (
Scalar,
_Frame,
_get_divisions_map_partitions,
_get_meta_map_partitions,
_maybe_from_pandas,
apply_and_enforce,
new_dd_object,
no_default,
partitionwise_graph,
)
from dask.dataframe.multi import _maybe_align_partitions
from dask.dataframe.utils import insert_meta_param_description
from dask.delayed import unpack_collections
from dask.highlevelgraph import HighLevelGraph
from dask.utils import M, apply, derived_from, funcname, has_keyword
# Marker subclass of ``tuple`` used to tag the output of ``_combined_parts``
# so that ``overlap_chunk`` can tell which positional arguments carry the
# (combined frame, prev-overlap length, next-overlap length) triple.
CombinedOutput = type("CombinedOutput", (tuple,), {})
def _combined_parts(prev_part, current_part, next_part, before, after):
    """Concatenate a partition with the pieces borrowed from its neighbours.

    Returns a ``CombinedOutput`` triple ``(combined, prev_len, next_len)``
    where the lengths are ``None`` when the corresponding neighbour piece is
    absent.  Raises ``NotImplementedError`` when an integral overlap could
    not be fully satisfied by the neighbouring partition.
    """
    msg = (
        "Partition size is less than overlapping "
        "window size. Try using ``df.repartition`` "
        "to increase the partition size."
    )
    # For integral windows the neighbour piece must contain exactly the
    # requested number of rows; a short piece means the source partition was
    # smaller than the window.  Check prev first, then next (same order and
    # same exception as before).
    for piece, requested in ((prev_part, before), (next_part, after)):
        if piece is not None and isinstance(requested, Integral):
            if piece.shape[0] != requested:
                raise NotImplementedError(msg)
    pieces = [part for part in (prev_part, current_part, next_part) if part is not None]
    prev_len = len(prev_part) if prev_part is not None else None
    next_len = len(next_part) if next_part is not None else None
    return CombinedOutput((methods.concat(pieces), prev_len, next_len))
def overlap_chunk(func, before, after, *args, **kwargs):
    """Apply ``func`` to an overlap-extended partition and trim the borrowed
    rows back off the result.

    ``args`` contains at least one ``CombinedOutput`` produced by
    ``_combined_parts``; its prev/next lengths say how many rows came from
    the neighbouring partitions (``None`` when there was no neighbour).
    """
    dfs = [df for df in args if isinstance(df, CombinedOutput)]
    combined, prev_part_length, next_part_length = dfs[0]
    # Unwrap every CombinedOutput back to its plain frame before calling func.
    args = [arg[0] if isinstance(arg, CombinedOutput) else arg for arg in args]
    out = func(*args, **kwargs)
    if prev_part_length is None:
        # Nothing was prepended (first partition): trim nothing at the front.
        before = None
    if isinstance(before, datetime.timedelta):
        # Time-based overlap prepends a variable number of rows; trim exactly
        # the rows that were borrowed from the previous partition.
        before = prev_part_length
    expansion = None
    if combined.shape[0] != 0:
        # If ``func`` changed the row count (e.g. produced several output rows
        # per input row), scale the trim amounts by the same factor.
        expansion = out.shape[0] // combined.shape[0]
    if before and expansion:
        before *= expansion
    if next_part_length is None:
        return out.iloc[before:]
    if isinstance(after, datetime.timedelta):
        after = next_part_length
    if after and expansion:
        after *= expansion
    return out.iloc[before:-after]
@insert_meta_param_description
def map_overlap(
    func,
    df,
    before,
    after,
    *args,
    meta=no_default,
    enforce_metadata=True,
    transform_divisions=True,
    align_dataframes=True,
    **kwargs,
):
    """Apply a function to each partition, sharing rows with adjacent partitions.
    Parameters
    ----------
    func : function
        The function applied to each partition. If this function accepts
        the special ``partition_info`` keyword argument, it will receive
        information on the partition's relative location within the
        dataframe.
    df: dd.DataFrame, dd.Series
    args, kwargs :
        Positional and keyword arguments to pass to the function.
        Positional arguments are computed on a per-partition basis, while
        keyword arguments are shared across all partitions. The partition
        itself will be the first positional argument, with all other
        arguments passed *after*. Arguments can be ``Scalar``, ``Delayed``,
        or regular Python objects. DataFrame-like args (both dask and
        pandas) will be repartitioned to align (if necessary) before
        applying the function; see ``align_dataframes`` to control this
        behavior.
    enforce_metadata : bool, default True
        Whether to enforce at runtime that the structure of the DataFrame
        produced by ``func`` actually matches the structure of ``meta``.
        This will rename and reorder columns for each partition,
        and will raise an error if this doesn't work,
        but it won't raise if dtypes don't match.
    before : int, timedelta or string timedelta
        The rows to prepend to partition ``i`` from the end of
        partition ``i - 1``.
    after : int, timedelta or string timedelta
        The rows to append to partition ``i`` from the beginning
        of partition ``i + 1``.
    transform_divisions : bool, default True
        Whether to apply the function onto the divisions and apply those
        transformed divisions to the output.
    align_dataframes : bool, default True
        Whether to repartition DataFrame- or Series-like args
        (both dask and pandas) so their divisions align before applying
        the function. This requires all inputs to have known divisions.
        Single-partition inputs will be split into multiple partitions.
        If False, all inputs must have either the same number of partitions
        or a single partition. Single-partition inputs will be broadcast to
        every partition of multi-partition inputs.
    $META
    See Also
    --------
    dd.DataFrame.map_overlap
    """
    args = (df,) + args
    dfs = [df for df in args if isinstance(df, _Frame)]
    # Normalize string offsets ("2s") to timedeltas and validate the window
    # arguments: timedeltas require a datetime index, integers must be >= 0.
    if isinstance(before, str):
        before = pd.to_timedelta(before)
    if isinstance(after, str):
        after = pd.to_timedelta(after)
    if isinstance(before, datetime.timedelta) or isinstance(after, datetime.timedelta):
        if not is_datetime64_any_dtype(dfs[0].index._meta_nonempty.inferred_type):
            raise TypeError(
                "Must have a `DatetimeIndex` when using string offset "
                "for `before` and `after`"
            )
    else:
        if not (
            isinstance(before, Integral)
            and before >= 0
            and isinstance(after, Integral)
            and after >= 0
        ):
            raise ValueError("before and after must be positive integers")
    # Build a deterministic task name, honoring an explicit ``token`` kwarg.
    name = kwargs.pop("token", None)
    parent_meta = kwargs.pop("parent_meta", None)
    assert callable(func)
    if name is not None:
        token = tokenize(meta, before, after, *args, **kwargs)
    else:
        name = "overlap-" + funcname(func)
        token = tokenize(func, meta, before, after, *args, **kwargs)
    name = f"{name}-{token}"
    if align_dataframes:
        args = _maybe_from_pandas(args)
        try:
            args = _maybe_align_partitions(args)
        except ValueError as e:
            raise ValueError(
                f"{e}. If you don't want the partitions to be aligned, and are "
                "calling `map_overlap` directly, pass `align_dataframes=False`."
            ) from e
    meta = _get_meta_map_partitions(args, dfs, func, kwargs, meta, parent_meta)
    if all(isinstance(arg, Scalar) for arg in args):
        # Degenerate case: no frames at all, just apply func to the scalars.
        layer = {
            (name, 0): (
                apply,
                func,
                (tuple, [(arg._name, 0) for arg in args]),
                kwargs,
            )
        }
        graph = HighLevelGraph.from_collections(name, layer, dependencies=args)
        return Scalar(graph, name, meta)
    args2 = []
    dependencies = []
    divisions = _get_divisions_map_partitions(
        align_dataframes, transform_divisions, dfs, func, args, kwargs
    )
    def _handle_frame_argument(arg):
        # Replace a frame argument with a derived collection whose partitions
        # are CombinedOutput triples carrying the overlap pieces.
        dsk = {}
        prevs_parts_dsk, prevs = _get_previous_partitions(arg, before)
        dsk.update(prevs_parts_dsk)
        nexts_parts_dsk, nexts = _get_nexts_partitions(arg, after)
        dsk.update(nexts_parts_dsk)
        name_a = "overlap-concat-" + tokenize(arg)
        for i, (prev, current, next) in enumerate(
            zip(prevs, arg.__dask_keys__(), nexts)
        ):
            key = (name_a, i)
            dsk[key] = (_combined_parts, prev, current, next, before, after)
        graph = HighLevelGraph.from_collections(name_a, dsk, dependencies=[arg])
        return new_dd_object(graph, name_a, meta, divisions)
    for arg in args:
        if isinstance(arg, _Frame):
            arg = _handle_frame_argument(arg)
            args2.append(arg)
            dependencies.append(arg)
            continue
        arg = normalize_arg(arg)
        arg2, collections = unpack_collections(arg)
        if collections:
            args2.append(arg2)
            dependencies.extend(collections)
        else:
            args2.append(arg)
    # Normalize keyword arguments, tracking any embedded dask collections;
    # ``simple`` records whether kwargs could be passed through unchanged.
    kwargs3 = {}
    simple = True
    for k, v in kwargs.items():
        v = normalize_arg(v)
        v, collections = unpack_collections(v)
        dependencies.extend(collections)
        kwargs3[k] = v
        if collections:
            simple = False
    if has_keyword(func, "partition_info"):
        # Inject per-partition metadata as an extra leading blockwise input
        # and wrap func so it receives it as the ``partition_info`` kwarg.
        partition_info = {
            (i,): {"number": i, "division": division}
            for i, division in enumerate(divisions[:-1])
        }
        args2.insert(0, BlockwiseDepDict(partition_info))
        orig_func = func
        def func(partition_info, *args, **kwargs):
            return orig_func(*args, **kwargs, partition_info=partition_info)
    if enforce_metadata:
        dsk = partitionwise_graph(
            apply_and_enforce,
            name,
            func,
            before,
            after,
            *args2,
            dependencies=dependencies,
            _func=overlap_chunk,
            _meta=meta,
            **kwargs3,
        )
    else:
        kwargs4 = kwargs if simple else kwargs3
        dsk = partitionwise_graph(
            overlap_chunk,
            name,
            func,
            before,
            after,
            *args2,
            **kwargs4,
            dependencies=dependencies,
        )
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=dependencies)
    return new_dd_object(graph, name, meta, divisions)
def _get_nexts_partitions(df, after):
    """Build the graph tasks that extract the rows each partition borrows
    from its successor.

    Returns ``(dsk, nexts)`` where ``nexts[i]`` is the task key of the piece
    appended to partition ``i`` (``None`` when nothing is appended; the last
    partition never borrows).
    """
    dsk = {}
    df_name = df._name
    timedelta_partition_message = (
        "Partition size is less than specified window. "
        "Try using ``df.repartition`` to increase the partition size"
    )
    name_b = "overlap-append-" + tokenize(df, after)
    if after and isinstance(after, Integral):
        nexts = []
        for i in range(1, df.npartitions):
            key = (name_b, i)
            # Head of partition i becomes the appended piece of partition i-1.
            dsk[key] = (M.head, (df_name, i), after)
            nexts.append(key)
        nexts.append(None)
    elif isinstance(after, datetime.timedelta):
        # TODO: Do we have a use-case for this? Pandas doesn't allow negative rolling windows
        deltas = pd.Series(df.divisions).diff().iloc[1:-1]
        if (after > deltas).any():
            raise ValueError(timedelta_partition_message)
        nexts = []
        for i in range(1, df.npartitions):
            key = (name_b, i)
            # NOTE(review): ``i - 0`` passes partition i as both ``current``
            # and ``next_`` to _head_timedelta, which keeps the *entire* next
            # partition (overlap_chunk later trims it all back off).
            # Presumably ``i - 1`` was intended so only the in-window rows are
            # carried over — confirm before changing.
            dsk[key] = (_head_timedelta, (df_name, i - 0), (df_name, i), after)
            nexts.append(key)
        nexts.append(None)
    else:
        nexts = [None] * df.npartitions
    return dsk, nexts
def _get_previous_partitions(df, before):
    """Build the graph tasks that extract the rows each partition borrows
    from its predecessor(s).

    Returns ``(dsk, prevs)`` where ``prevs[i]`` is the task key of the piece
    prepended to partition ``i`` (``None`` when nothing is prepended; the
    first partition never borrows).
    """
    dsk = {}
    df_name = df._name
    name_a = "overlap-prepend-" + tokenize(df, before)
    if before and isinstance(before, Integral):
        prevs = [None]
        for i in range(df.npartitions - 1):
            key = (name_a, i)
            # Tail of partition i becomes the prepended piece of partition i+1.
            dsk[key] = (M.tail, (df_name, i), before)
            prevs.append(key)
    elif isinstance(before, datetime.timedelta):
        # Assumes monotonic (increasing?) index
        divs = pd.Series(df.divisions)
        deltas = divs.diff().iloc[1:-1]
        # In the first case window-size is larger than at least one partition, thus it is
        # necessary to calculate how many partitions must be used for each rolling task.
        # Otherwise, these calculations can be skipped (faster)
        if (before > deltas).any():
            pt_z = divs[0]
            prevs = [None]
            for i in range(df.npartitions - 1):
                # Select all indexes of relevant partitions between the current partition and
                # the partition with the highest division outside the rolling window (before)
                pt_i = divs[i + 1]
                # lower-bound the search to the first division
                lb = max(pt_i - before, pt_z)
                # Walk backwards over divisions until the window start is
                # covered; j ends up as the earliest partition index needed.
                first, j = divs[i], i
                while first > lb and j > 0:
                    first = first - deltas[j]
                    j = j - 1
                key = (name_a, i)
                dsk[key] = (
                    _tail_timedelta,
                    [(df_name, k) for k in range(j, i + 1)],
                    (df_name, i + 1),
                    before,
                )
                prevs.append(key)
        else:
            # Window fits within a single preceding partition.
            prevs = [None]
            for i in range(df.npartitions - 1):
                key = (name_a, i)
                dsk[key] = (
                    _tail_timedelta,
                    [(df_name, i)],
                    (df_name, i + 1),
                    before,
                )
                prevs.append(key)
    else:
        prevs = [None] * df.npartitions
    return dsk, prevs
def _head_timedelta(current, next_, after):
"""Return rows of ``next_`` whose index is before the last
observation in ``current`` + ``after``.
Parameters
----------
current : DataFrame
next_ : DataFrame
after : timedelta
Returns
-------
overlapped : DataFrame
"""
return next_[next_.index < (current.index.max() + after)]
def _tail_timedelta(prevs, current, before):
    """Concatenate the trailing rows of each frame in ``prevs`` that lie
    within ``before`` of the first observation in ``current``.

    Parameters
    ----------
    prevs : list of DataFrame objects
    current : DataFrame
    before : timedelta

    Returns
    -------
    overlapped : DataFrame
        Rows with index strictly after ``current.index.min() - before``.
    """
    cutoff = current.index.min() - before
    return methods.concat([prev[prev.index > cutoff] for prev in prevs])
class Rolling:
    """Provides rolling window calculations.

    Thin dask wrapper around pandas' ``Rolling``: every aggregation is
    delegated to pandas per partition, using ``map_overlap`` to share the
    boundary rows between adjacent partitions whenever the window can span
    a partition boundary.
    """

    def __init__(
        self, obj, window=None, min_periods=None, center=False, win_type=None, axis=0
    ):
        self.obj = obj  # dataframe or series
        self.window = window
        self.min_periods = min_periods
        self.center = center
        self.axis = axis
        self.win_type = win_type
        # Allow pandas to raise if appropriate
        obj._meta.rolling(**self._rolling_kwargs())
        # Using .rolling(window='2s'), pandas will convert the
        # offset str to a window in nanoseconds. But pandas doesn't
        # accept the integer window with win_type='freq', so we store
        # that information here.
        # See https://github.com/pandas-dev/pandas/issues/15969
        self._win_type = None if isinstance(self.window, int) else "freq"

    def _rolling_kwargs(self):
        """Keyword arguments forwarded verbatim to ``DataFrame.rolling``."""
        return {
            "window": self.window,
            "min_periods": self.min_periods,
            "center": self.center,
            "win_type": self.win_type,
            "axis": self.axis,
        }

    @property
    def _has_single_partition(self):
        """
        Indicator for whether the object has a single partition (True)
        or multiple (False).
        """
        return (
            self.axis in (1, "columns")
            or (isinstance(self.window, Integral) and self.window <= 1)
            or self.obj.npartitions == 1
        )

    @staticmethod
    def pandas_rolling_method(df, rolling_kwargs, name, *args, **kwargs):
        # Executed per (possibly overlap-extended) partition.
        rolling = df.rolling(**rolling_kwargs)
        return getattr(rolling, name)(*args, **kwargs)

    def _call_method(self, method_name, *args, **kwargs):
        """Dispatch ``method_name`` to pandas, inserting partition overlap
        when the window can cross partition boundaries.
        """
        rolling_kwargs = self._rolling_kwargs()
        meta = self.pandas_rolling_method(
            self.obj._meta_nonempty, rolling_kwargs, method_name, *args, **kwargs
        )
        if self._has_single_partition:
            # There's no overlap just use map_partitions
            return self.obj.map_partitions(
                self.pandas_rolling_method,
                rolling_kwargs,
                method_name,
                *args,
                token=method_name,
                meta=meta,
                **kwargs,
            )
        # Convert window to overlap
        if self.center:
            before = self.window // 2
            after = self.window - before - 1
        elif self._win_type == "freq":
            before = pd.Timedelta(self.window)
            after = 0
        else:
            before = self.window - 1
            after = 0
        return map_overlap(
            self.pandas_rolling_method,
            self.obj,
            before,
            after,
            rolling_kwargs,
            method_name,
            *args,
            token=method_name,
            meta=meta,
            **kwargs,
        )

    @derived_from(pd_Rolling)
    def count(self):
        return self._call_method("count")

    @derived_from(pd_Rolling)
    def cov(self):
        return self._call_method("cov")

    @derived_from(pd_Rolling)
    def sum(self):
        return self._call_method("sum")

    @derived_from(pd_Rolling)
    def mean(self):
        return self._call_method("mean")

    @derived_from(pd_Rolling)
    def median(self):
        return self._call_method("median")

    @derived_from(pd_Rolling)
    def min(self):
        return self._call_method("min")

    @derived_from(pd_Rolling)
    def max(self):
        return self._call_method("max")

    @derived_from(pd_Rolling)
    def std(self, ddof=1):
        # Fixed: the caller-supplied ``ddof`` was previously ignored —
        # the literal ``ddof=1`` was always forwarded to pandas.
        return self._call_method("std", ddof=ddof)

    @derived_from(pd_Rolling)
    def var(self, ddof=1):
        # Fixed: forward the caller-supplied ``ddof`` (was hard-coded to 1).
        return self._call_method("var", ddof=ddof)

    @derived_from(pd_Rolling)
    def skew(self):
        return self._call_method("skew")

    @derived_from(pd_Rolling)
    def kurt(self):
        return self._call_method("kurt")

    @derived_from(pd_Rolling)
    def quantile(self, quantile):
        return self._call_method("quantile", quantile)

    @derived_from(pd_Rolling)
    def apply(
        self,
        func,
        raw=None,
        engine="cython",
        engine_kwargs=None,
        args=None,
        kwargs=None,
    ):
        compat_kwargs = {}
        kwargs = kwargs or {}
        args = args or ()
        meta = self.obj._meta.rolling(0)
        if has_keyword(meta.apply, "engine"):
            # PANDAS_GT_100
            compat_kwargs = dict(engine=engine, engine_kwargs=engine_kwargs)
        if raw is None:
            # PANDAS_GT_100: The default changed from None to False
            raw = inspect.signature(meta.apply).parameters["raw"]
        return self._call_method(
            "apply", func, raw=raw, args=args, kwargs=kwargs, **compat_kwargs
        )

    @derived_from(pd_Rolling)
    def aggregate(self, func, args=(), kwargs=None, **kwds):
        if kwargs is None:
            kwargs = {}
        return self._call_method("agg", func, args=args, kwargs=kwargs, **kwds)

    agg = aggregate

    def __repr__(self):
        def order(item):
            # Fixed display order of the rolling kwargs.
            k, v = item
            _order = {
                "window": 0,
                "min_periods": 1,
                "center": 2,
                "win_type": 3,
                "axis": 4,
            }
            return _order[k]

        rolling_kwargs = self._rolling_kwargs()
        rolling_kwargs["window"] = self.window
        rolling_kwargs["win_type"] = self._win_type
        return "Rolling [{}]".format(
            ",".join(
                f"{k}={v}"
                for k, v in sorted(rolling_kwargs.items(), key=order)
                if v is not None
            )
        )
class RollingGroupby(Rolling):
    """Rolling window calculations evaluated within groupby groups."""
    def __init__(
        self,
        groupby,
        window=None,
        min_periods=None,
        center=False,
        win_type=None,
        axis=0,
    ):
        self._groupby_kwargs = groupby._groupby_kwargs
        self._groupby_slice = groupby._slice
        obj = groupby.obj
        if self._groupby_slice is not None:
            # Keep the grouping column(s) alongside the selected column(s) so
            # the per-partition groupby can still see them.
            if isinstance(self._groupby_slice, str):
                sliced_plus = [self._groupby_slice]
            else:
                sliced_plus = list(self._groupby_slice)
            if isinstance(groupby.by, str):
                sliced_plus.append(groupby.by)
            else:
                sliced_plus.extend(groupby.by)
            obj = obj[sliced_plus]
        super().__init__(
            obj,
            window=window,
            min_periods=min_periods,
            center=center,
            win_type=win_type,
            axis=axis,
        )
    @staticmethod
    def pandas_rolling_method(
        df,
        rolling_kwargs,
        name,
        *args,
        groupby_kwargs=None,
        groupby_slice=None,
        **kwargs,
    ):
        # Re-create the groupby (and optional column selection) inside each
        # partition before applying the rolling aggregation.
        groupby = df.groupby(**groupby_kwargs)
        if groupby_slice:
            groupby = groupby[groupby_slice]
        rolling = groupby.rolling(**rolling_kwargs)
        # sort_index(level=-1) sorts by the innermost index level — presumably
        # the original row index under the group key(s); confirm against
        # pandas' groupby-rolling MultiIndex layout.
        return getattr(rolling, name)(*args, **kwargs).sort_index(level=-1)
    def _call_method(self, method_name, *args, **kwargs):
        # Thread the stored groupby configuration through to
        # ``pandas_rolling_method`` on every dispatch.
        return super()._call_method(
            method_name,
            *args,
            groupby_kwargs=self._groupby_kwargs,
            groupby_slice=self._groupby_slice,
            **kwargs,
        )
| bsd-3-clause | 1492f6f373b82aa7a6f9210fcf0795a1 | 30.149275 | 93 | 0.56972 | 4.049934 | false | false | false | false |
dask/dask | dask/array/wrap.py | 2 | 6870 | from functools import partial
from itertools import product
import numpy as np
from tlz import curry
from dask.array.backends import array_creation_dispatch
from dask.array.core import Array, normalize_chunks
from dask.array.utils import meta_from_array
from dask.base import tokenize
from dask.blockwise import blockwise as core_blockwise
from dask.layers import ArrayChunkShapeDep
from dask.utils import funcname
def _parse_wrap_args(func, args, kwargs, shape):
    """Normalize shape/chunks/dtype/name for a blocked creation function.

    Pops ``name``, ``chunks`` and ``dtype`` out of ``kwargs`` and returns a
    dict with keys ``shape``, ``dtype``, ``kwargs`` (the remaining user
    kwargs), ``chunks`` (normalized) and ``name`` (tokenized if not given).
    """
    if isinstance(shape, np.ndarray):
        shape = shape.tolist()
    if not isinstance(shape, (tuple, list)):
        shape = (shape,)
    name = kwargs.pop("name", None)
    chunks = kwargs.pop("chunks", "auto")
    dtype = kwargs.pop("dtype", None)
    if dtype is None:
        # Infer dtype by actually calling the creation function.
        # NOTE(review): this calls ``func`` with the *full* shape -- presumably
        # acceptable for the creation functions wrapped here; confirm.
        dtype = func(shape, *args, **kwargs).dtype
    dtype = np.dtype(dtype)
    chunks = normalize_chunks(chunks, shape, dtype=dtype)
    name = name or funcname(func) + "-" + tokenize(
        func, shape, chunks, dtype, args, kwargs
    )
    return {
        "shape": shape,
        "dtype": dtype,
        "kwargs": kwargs,
        "chunks": chunks,
        "name": name,
    }
def wrap_func_shape_as_first_arg(func, *args, **kwargs):
    """
    Transform np creation function into blocked version.

    ``func`` takes the output shape as its first argument (e.g. ``np.ones``);
    the result is a dask Array built from one ``func(chunk_shape, ...)`` task
    per block.
    """
    if "shape" not in kwargs:
        shape, args = args[0], args[1:]
    else:
        shape = kwargs.pop("shape")
    if isinstance(shape, Array):
        raise TypeError(
            "Dask array input not supported. "
            "Please use tuple, list, or a 1D numpy array instead."
        )
    parsed = _parse_wrap_args(func, args, kwargs, shape)
    shape = parsed["shape"]
    dtype = parsed["dtype"]
    chunks = parsed["chunks"]
    name = parsed["name"]
    kwargs = parsed["kwargs"]
    func = partial(func, dtype=dtype, **kwargs)
    # One index per output dimension; the Blockwise "dependency" is a
    # synthetic mapping from block index to that block's chunk shape, which
    # becomes the first argument of ``func``.
    out_ind = dep_ind = tuple(range(len(shape)))
    graph = core_blockwise(
        func,
        name,
        out_ind,
        ArrayChunkShapeDep(chunks),
        dep_ind,
        numblocks={},
    )
    return Array(graph, name, chunks, dtype=dtype, meta=kwargs.get("meta", None))
def wrap_func_like(func, *args, **kwargs):
    """
    Transform np creation function into blocked version.

    Used for the ``*_like`` family (e.g. ``np.full_like``): the first
    positional argument is a template array whose type and shape provide
    defaults for the result.
    """
    x = args[0]
    meta = meta_from_array(x)
    shape = kwargs.get("shape", x.shape)
    parsed = _parse_wrap_args(func, args, kwargs, shape)
    shape = parsed["shape"]
    dtype = parsed["dtype"]
    chunks = parsed["chunks"]
    name = parsed["name"]
    kwargs = parsed["kwargs"]
    keys = product([name], *[range(len(bd)) for bd in chunks])
    shapes = list(product(*chunks))
    # BUG FIX: each task needs its *own* kwargs dict carrying that block's
    # shape.  The previous code built a list of references to one shared
    # dict (``[kwargs for _ in shapes]``) and then mutated it in place, so
    # every task ended up with the shape of the last block (and the caller's
    # kwargs dict was mutated as a side effect).
    kw = [dict(kwargs, shape=s) for s in shapes]
    vals = ((partial(func, dtype=dtype, **k),) + args for k in kw)
    dsk = dict(zip(keys, vals))
    return Array(dsk, name, chunks, meta=meta.astype(dtype))
@curry
def wrap(wrap_func, func, func_like=None, **kwargs):
    # Bind ``func`` (or ``func_like``) plus default kwargs into the chosen
    # chunk-level wrapper and dress the result up with a name and docstring
    # so it presents like the wrapped numpy function.
    if func_like is None:
        f = partial(wrap_func, func, **kwargs)
    else:
        f = partial(wrap_func, func_like, **kwargs)
    template = """
    Blocked variant of %(name)s
    Follows the signature of %(name)s exactly except that it also features
    optional keyword arguments ``chunks: int, tuple, or dict`` and ``name: str``.
    Original signature follows below.
    """
    if func.__doc__ is not None:
        f.__doc__ = template % {"name": func.__name__} + func.__doc__
    f.__name__ = "blocked_" + func.__name__
    return f
w = wrap(wrap_func_shape_as_first_arg)
@curry
def _broadcast_trick_inner(func, shape, meta=(), *args, **kwargs):
    # cupy-specific hack. numpy is happy with hardcoded shape=().
    null_shape = () if shape == () else 1
    # Create a single value and broadcast it to ``shape``: the chunk is a
    # read-only view over one element, so a uniform chunk costs O(1) memory.
    return np.broadcast_to(func(meta, shape=null_shape, *args, **kwargs), shape)
def broadcast_trick(func):
    """
    Provide a decorator to wrap common numpy function with a broadcast trick.

    Dask arrays are currently immutable; thus when we know an array is uniform,
    we can replace the actual data by a single value and have all elements point
    to it, thus reducing the size.

    >>> x = np.broadcast_to(1, (100,100,100))
    >>> x.base.nbytes
    8

    Those array are not only more efficient locally, but dask serialisation is
    aware of the _real_ size of those array and thus can send them around
    efficiently and schedule accordingly.

    Note that those array are read-only and numpy will refuse to assign to them,
    so should be safe.
    """
    wrapped = _broadcast_trick_inner(func)
    # Present the wrapper as the numpy function it replaces.
    wrapped.__doc__ = func.__doc__
    wrapped.__name__ = func.__name__
    return wrapped
# Blocked creation functions, registered on the numpy backend of the
# array-creation dispatcher.  ``broadcast_trick`` makes each uniform chunk a
# broadcast view over a single value.
ones = array_creation_dispatch.register_inplace(
    backend="numpy",
    name="ones",
)(w(broadcast_trick(np.ones_like), dtype="f8"))
zeros = array_creation_dispatch.register_inplace(
    backend="numpy",
    name="zeros",
)(w(broadcast_trick(np.zeros_like), dtype="f8"))
empty = array_creation_dispatch.register_inplace(
    backend="numpy",
    name="empty",
)(w(broadcast_trick(np.empty_like), dtype="f8"))
# ``*_like`` variants take a template array as their first argument.
w_like = wrap(wrap_func_like)
empty_like = w_like(np.empty, func_like=np.empty_like)
# full and full_like require special casing due to argument check on fill_value
# Generate wrapped functions only once
_full = array_creation_dispatch.register_inplace(
    backend="numpy",
    name="full",
)(w(broadcast_trick(np.full_like)))
_full_like = w_like(np.full, func_like=np.full_like)
# workaround for numpy doctest failure: https://github.com/numpy/numpy/pull/17472
if _full.__doc__ is not None:
    # NOTE(review): both arguments to this replace() are identical, making it
    # a no-op; the upstream version presumably normalized a double-spaced
    # numpy repr ("0.1,  0.1") -- confirm against upstream source.
    _full.__doc__ = _full.__doc__.replace(
        "array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])",
        "array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])",
    )
    _full.__doc__ = _full.__doc__.replace(
        ">>> np.full_like(y, [0, 0, 255])",
        ">>> np.full_like(y, [0, 0, 255]) # doctest: +NORMALIZE_WHITESPACE",
    )
def full(shape, fill_value, *args, **kwargs):
    # np.isscalar has somewhat strange behavior, so use np.ndim instead:
    # https://docs.scipy.org/doc/numpy/reference/generated/numpy.isscalar.html
    if np.ndim(fill_value) != 0:
        raise ValueError(
            f"fill_value must be scalar. Received {type(fill_value).__name__} instead."
        )
    # Infer the output dtype from the fill value when the caller gave none.
    if kwargs.get("dtype", None) is None:
        kwargs["dtype"] = (
            fill_value.dtype if hasattr(fill_value, "dtype") else type(fill_value)
        )
    return _full(shape=shape, fill_value=fill_value, *args, **kwargs)
def full_like(a, fill_value, *args, **kwargs):
    # Reject array-like fill values, mirroring the check in ``full`` above.
    if np.ndim(fill_value) != 0:
        raise ValueError(
            f"fill_value must be scalar. Received {type(fill_value).__name__} instead."
        )
    return _full_like(a=a, fill_value=fill_value, *args, **kwargs)
# Re-attach the numpy-derived (and doctest-patched) docstrings to the public
# wrappers defined above.
full.__doc__ = _full.__doc__
full_like.__doc__ = _full_like.__doc__
| bsd-3-clause | 9b52abed3dcdf3753b02d41b0137049f | 27.625 | 87 | 0.619068 | 3.448795 | false | false | false | false |
dask/dask | dask/array/core.py | 2 | 190420 | from __future__ import annotations
import contextlib
import math
import operator
import os
import pickle
import re
import sys
import traceback
import uuid
import warnings
from bisect import bisect
from collections.abc import (
Collection,
Hashable,
Iterable,
Iterator,
Mapping,
MutableMapping,
)
from functools import partial, reduce, wraps
from itertools import product, zip_longest
from numbers import Integral, Number
from operator import add, mul
from threading import Lock
from typing import Any, TypeVar, Union, cast
import numpy as np
from fsspec import get_mapper
from tlz import accumulate, concat, first, frequencies, groupby, partition
from tlz.curried import pluck
from dask import compute, config, core
from dask.array import chunk
from dask.array.chunk import getitem
from dask.array.chunk_types import is_valid_array_chunk, is_valid_chunk_type
# Keep einsum_lookup and tensordot_lookup here for backwards compatibility
from dask.array.dispatch import ( # noqa: F401
concatenate_lookup,
einsum_lookup,
tensordot_lookup,
)
from dask.array.numpy_compat import ArrayLike, _numpy_120, _Recurser
from dask.array.slicing import replace_ellipsis, setitem_array, slice_array
from dask.base import (
DaskMethodsMixin,
compute_as_if_collection,
dont_optimize,
is_dask_collection,
named_schedulers,
persist,
tokenize,
)
from dask.blockwise import blockwise as core_blockwise
from dask.blockwise import broadcast_dimensions
from dask.context import globalmethod
from dask.core import quote
from dask.delayed import Delayed, delayed
from dask.highlevelgraph import HighLevelGraph, MaterializedLayer
from dask.layers import ArraySliceDep, reshapelist
from dask.sizeof import sizeof
from dask.utils import (
IndexCallable,
SerializableLock,
cached_cumsum,
cached_property,
concrete,
derived_from,
format_bytes,
funcname,
has_keyword,
is_arraylike,
is_dataframe_like,
is_index_like,
is_integer,
is_series_like,
maybe_pluralize,
ndeepmap,
ndimlist,
parse_bytes,
typename,
)
from dask.widgets import get_template
# Size type that may be the sentinel ``np.nan`` for unknown chunk sizes.
T_IntOrNaN = Union[int, float]  # Should be Union[int, Literal[np.nan]]
# Prefer the threaded scheduler; fall back to synchronous when unavailable.
DEFAULT_GET = named_schedulers.get("threads", named_schedulers["sync"])
config.update_defaults({"array": {"chunk-size": "128MiB", "rechunk-threshold": 4}})
# Appended to errors raised when an operation needs concrete chunk sizes.
unknown_chunk_message = (
    "\n\n"
    "A possible solution: "
    "https://docs.dask.org/en/latest/array-chunks.html#unknown-chunks\n"
    "Summary: to compute chunks sizes, use\n\n"
    " x.compute_chunk_sizes() # for Dask Array `x`\n"
    " ddf.to_dask_array(lengths=True) # for Dask DataFrame `ddf`"
)
class PerformanceWarning(Warning):
    """A warning given when bad chunking may cause poor performance.

    A distinct ``Warning`` subclass so users can silence or escalate these
    messages specifically via ``warnings.filterwarnings``.
    """
def getter(a, b, asarray=True, lock=None):
    """Slice ``a`` with index ``b``, optionally holding ``lock`` and
    optionally coercing the result to a numpy array."""
    if isinstance(b, tuple) and any(idx is None for idx in b):
        # ``None`` entries are np.newaxis: slice without them first, then
        # reinsert the new axes with a second, purely-dimensional index.
        without_newaxes = tuple(idx for idx in b if idx is not None)
        newaxis_index = tuple(
            None if idx is None else slice(None, None)
            for idx in b
            if not isinstance(idx, Integral)
        )
        return getter(a, without_newaxes, asarray=asarray, lock=lock)[newaxis_index]
    if lock:
        lock.acquire()
    try:
        result = a[b]
        # ``np.matrix`` is special-cased: although it is array-like (so
        # ``is_arraylike`` is True for it), we force conversion to a plain
        # ``np.ndarray`` to preserve historical getter behavior.
        if asarray and (not is_arraylike(result) or isinstance(result, np.matrix)):
            result = np.asarray(result)
    finally:
        if lock:
            lock.release()
    return result
def getter_nofancy(a, b, asarray=True, lock=None):
    """A simple wrapper around ``getter``.

    Used to indicate to the optimization passes that the backend doesn't
    support fancy indexing.
    """
    # Behavior is identical to ``getter``; only the function identity differs.
    return getter(a, b, asarray=asarray, lock=lock)
def getter_inline(a, b, asarray=True, lock=None):
    """A getter function that optimizations feel comfortable inlining

    Slicing operations with this function may be inlined into a graph, such as
    in the following rewrite

    **Before**

    >>> a = x[:10] # doctest: +SKIP
    >>> b = a + 1 # doctest: +SKIP
    >>> c = a * 2 # doctest: +SKIP

    **After**

    >>> b = x[:10] + 1 # doctest: +SKIP
    >>> c = x[:10] * 2 # doctest: +SKIP

    This inlining can be relevant to operations when running off of disk.
    """
    # Behavior is identical to ``getter``; only the function identity differs.
    return getter(a, b, asarray=asarray, lock=lock)
# Imported mid-module rather than at the top, presumably to avoid a circular
# import with dask.array.optimization -- confirm before moving.
from dask.array.optimization import fuse_slice, optimize
# __array_function__ dict for mapping aliases and mismatching names
_HANDLED_FUNCTIONS = {}
def implements(*numpy_functions):
    """Register an __array_function__ implementation for dask.array.Array

    Register that a function implements the API of a NumPy function (or several
    NumPy functions in case of aliases) which is handled with
    ``__array_function__``.

    Parameters
    ----------
    \\*numpy_functions : callables
        One or more NumPy functions that are handled by ``__array_function__``
        and will be mapped by `implements` to a `dask.array` function.
    """
    def decorator(dask_func):
        # Record the same dask implementation under every given numpy name.
        _HANDLED_FUNCTIONS.update(dict.fromkeys(numpy_functions, dask_func))
        return dask_func
    return decorator
def _should_delegate(self, other) -> bool:
    """Check whether Dask should delegate to the other.

    This implementation follows NEP-13:
    https://numpy.org/neps/nep-0013-ufunc-overrides.html#behavior-in-combination-with-python-s-binary-operations
    """
    # ``__array_ufunc__ = None`` is the NEP-13 opt-out: the other operand
    # refuses ufunc handling, so our binary op must return NotImplemented.
    if hasattr(other, "__array_ufunc__") and other.__array_ufunc__ is None:
        return True
    elif (
        hasattr(other, "__array_ufunc__")
        and not is_valid_array_chunk(other)
        # don't delegate to our own parent classes
        and not isinstance(self, type(other))
        # NOTE(review): redundant -- if the types were identical, the
        # isinstance() check above would already have failed this branch.
        and type(self) is not type(other)
    ):
        return True
    return False
def check_if_handled_given_other(f):
    """Decorator: defer to ``other`` by returning ``NotImplemented`` when needed.

    Ensures proper deferral to upcast types in dunder operations without
    assuming unknown types are automatically downcast types.
    """
    @wraps(f)
    def wrapper(self, other):
        return NotImplemented if _should_delegate(self, other) else f(self, other)
    return wrapper
def slices_from_chunks(chunks):
    """Translate chunks tuple to a set of slices in product order

    >>> slices_from_chunks(((2, 2), (3, 3, 3)))  # doctest: +NORMALIZE_WHITESPACE
    [(slice(0, 2, None), slice(0, 3, None)),
     (slice(0, 2, None), slice(3, 6, None)),
     (slice(0, 2, None), slice(6, 9, None)),
     (slice(2, 4, None), slice(0, 3, None)),
     (slice(2, 4, None), slice(3, 6, None)),
     (slice(2, 4, None), slice(6, 9, None))]
    """
    # Per dimension: cumulative start offsets, then a slice per chunk.
    starts_per_dim = [cached_cumsum(dim, initial_zero=True) for dim in chunks]
    slices_per_dim = [
        [slice(start, start + size) for start, size in zip(starts, dim)]
        for starts, dim in zip(starts_per_dim, chunks)
    ]
    return list(product(*slices_per_dim))
def graph_from_arraylike(
    arr,  # Any array-like which supports slicing
    chunks,
    shape,
    name,
    getitem=getter,
    lock=False,
    asarray=True,
    dtype=None,
    inline_array=False,
) -> HighLevelGraph:
    """
    HighLevelGraph for slicing chunks from an array-like according to a chunk pattern.

    If ``inline_array`` is True, this makes a Blockwise layer of slicing tasks
    where the array-like is embedded into every task.

    If ``inline_array`` is False, this inserts the array-like as a standalone value in
    a MaterializedLayer, then generates a Blockwise layer of slicing tasks that refer
    to it.

    >>> dict(graph_from_arraylike(arr, chunks=(2, 3), shape=(4, 6), name="X", inline_array=True))  # doctest: +SKIP
    {(arr, 0, 0): (getter, arr, (slice(0, 2), slice(0, 3))),
     (arr, 1, 0): (getter, arr, (slice(2, 4), slice(0, 3))),
     (arr, 1, 1): (getter, arr, (slice(2, 4), slice(3, 6))),
     (arr, 0, 1): (getter, arr, (slice(0, 2), slice(3, 6)))}

    >>> dict(  # doctest: +SKIP
    ...     graph_from_arraylike(arr, chunks=((2, 2), (3, 3)), shape=(4,6), name="X", inline_array=False)
    ... )
    {"original-X": arr,
     ('X', 0, 0): (getter, 'original-X', (slice(0, 2), slice(0, 3))),
     ('X', 1, 0): (getter, 'original-X', (slice(2, 4), slice(0, 3))),
     ('X', 1, 1): (getter, 'original-X', (slice(2, 4), slice(3, 6))),
     ('X', 0, 1): (getter, 'original-X', (slice(0, 2), slice(3, 6)))}
    """
    chunks = normalize_chunks(chunks, shape, dtype=dtype)
    out_ind = tuple(range(len(shape)))
    # Only pass asarray/lock through when the getter supports them AND a
    # non-default value was requested; the common case drops them entirely.
    if (
        has_keyword(getitem, "asarray")
        and has_keyword(getitem, "lock")
        and (not asarray or lock)
    ):
        kwargs = {"asarray": asarray, "lock": lock}
    else:
        # Common case, drop extra parameters
        kwargs = {}
    if inline_array:
        # The array-like itself is embedded in every slicing task.
        layer = core_blockwise(
            getitem,
            name,
            out_ind,
            arr,
            None,
            ArraySliceDep(chunks),
            out_ind,
            numblocks={},
            **kwargs,
        )
        return HighLevelGraph.from_collections(name, layer)
    else:
        # Store the array-like once under "original-<name>" and have every
        # slicing task reference it by key.
        original_name = "original-" + name
        layers = {}
        layers[original_name] = MaterializedLayer({original_name: arr})
        layers[name] = core_blockwise(
            getitem,
            name,
            out_ind,
            original_name,
            None,
            ArraySliceDep(chunks),
            out_ind,
            numblocks={},
            **kwargs,
        )
        deps = {
            original_name: set(),
            name: {original_name},
        }
        return HighLevelGraph(layers, deps)
def dotmany(A, B, leftfunc=None, rightfunc=None, **kwargs):
    """Dot product of many aligned chunks

    >>> x = np.array([[1, 2], [1, 2]])
    >>> y = np.array([[10, 20], [10, 20]])
    >>> dotmany([x, x, x], [y, y, y])
    array([[ 90, 180],
           [ 90, 180]])

    Optionally pass in functions to apply to the left and right chunks

    >>> dotmany([x, x, x], [y, y, y], rightfunc=np.transpose)
    array([[150, 150],
           [150, 150]])
    """
    # Optionally preprocess each side, then sum the pairwise dot products.
    lhs = map(leftfunc, A) if leftfunc else A
    rhs = map(rightfunc, B) if rightfunc else B
    return sum(np.dot(a, b, **kwargs) for a, b in zip(lhs, rhs))
def _concatenate2(arrays, axes=None):
    """Recursively concatenate nested lists of arrays along axes

    Each entry in axes corresponds to each level of the nested list. The
    length of axes should correspond to the level of nesting of arrays.
    If axes is an empty list or tuple, return arrays, or arrays[0] if
    arrays is a list.

    >>> x = np.array([[1, 2], [3, 4]])
    >>> _concatenate2([x, x], axes=[0])
    array([[1, 2],
           [3, 4],
           [1, 2],
           [3, 4]])

    >>> _concatenate2([x, x], axes=[1])
    array([[1, 2, 1, 2],
           [3, 4, 3, 4]])

    >>> _concatenate2([[x, x], [x, x]], axes=[0, 1])
    array([[1, 2, 1, 2],
           [3, 4, 3, 4],
           [1, 2, 1, 2],
           [3, 4, 3, 4]])

    Supports Iterators

    >>> _concatenate2(iter([x, x]), axes=[1])
    array([[1, 2, 1, 2],
           [3, 4, 3, 4]])

    Special Case

    >>> _concatenate2([x, x], axes=())
    array([[1, 2],
           [3, 4]])
    """
    if axes is None:
        axes = []
    if axes == ():
        if isinstance(arrays, list):
            return arrays[0]
        else:
            return arrays
    if isinstance(arrays, Iterator):
        arrays = list(arrays)
    if not isinstance(arrays, (list, tuple)):
        # Leaf reached before axes were exhausted; return it unchanged.
        return arrays
    if len(axes) > 1:
        # Concatenate the inner nesting levels first (depth-first).
        arrays = [_concatenate2(a, axes=axes[1:]) for a in arrays]
    # Dispatch on the array type with the highest __array_priority__ so that
    # e.g. sparse/GPU chunks pick their own concatenate implementation.
    concatenate = concatenate_lookup.dispatch(
        type(max(arrays, key=lambda x: getattr(x, "__array_priority__", 0)))
    )
    if isinstance(arrays[0], dict):
        # Handle concatenation of `dict`s, used as a replacement for structured
        # arrays when that's not supported by the array library (e.g., CuPy).
        keys = list(arrays[0].keys())
        assert all(list(a.keys()) == keys for a in arrays)
        ret = dict()
        for k in keys:
            ret[k] = concatenate(list(a[k] for a in arrays), axis=axes[0])
        return ret
    else:
        return concatenate(arrays, axis=axes[0])
def apply_infer_dtype(func, args, kwargs, funcname, suggest_dtype="dtype", nout=None):
    """
    Tries to infer output dtype of ``func`` for a small set of input arguments.

    Parameters
    ----------
    func: Callable
        Function for which output dtype is to be determined

    args: List of array like
        Arguments to the function, which would usually be used. Only attributes
        ``ndim`` and ``dtype`` are used.

    kwargs: dict
        Additional ``kwargs`` to the ``func``

    funcname: String
        Name of calling function to improve potential error messages

    suggest_dtype: None/False or String
        If not ``None`` adds suggestion to potential error message to specify a dtype
        via the specified kwarg. Defaults to ``'dtype'``.

    nout: None or Int
        ``None`` if function returns single output, integer if many.
        Defaults to ``None``.

    Returns
    -------
    : dtype or List of dtype
        One or many dtypes (depending on ``nout``)
    """
    from dask.array.utils import meta_from_array

    # make sure that every arg is an evaluated array
    # (shape (1, 1, ..., 1) keeps the probe call cheap while preserving ndim)
    args = [
        np.ones_like(meta_from_array(x), shape=((1,) * x.ndim), dtype=x.dtype)
        if is_arraylike(x)
        else x
        for x in args
    ]
    try:
        with np.errstate(all="ignore"):
            o = func(*args, **kwargs)
    except Exception as e:
        # Capture the traceback here so it can be embedded in the error
        # message raised below, outside this except block.
        exc_type, exc_value, exc_traceback = sys.exc_info()
        tb = "".join(traceback.format_tb(exc_traceback))
        suggest = (
            (
                "Please specify the dtype explicitly using the "
                "`{dtype}` kwarg.\n\n".format(dtype=suggest_dtype)
            )
            if suggest_dtype
            else ""
        )
        msg = (
            f"`dtype` inference failed in `{funcname}`.\n\n"
            f"{suggest}"
            "Original error is below:\n"
            "------------------------\n"
            f"{e!r}\n\n"
            "Traceback:\n"
            "---------\n"
            f"{tb}"
        )
    else:
        msg = None
    if msg is not None:
        raise ValueError(msg)
    return getattr(o, "dtype", type(o)) if nout is None else tuple(e.dtype for e in o)
def normalize_arg(x):
    """Normalize user provided arguments to blockwise or map_blocks

    We do a few things:

    1.  If they are string literals that might collide with blockwise_token then we
        quote them
    2.  If they are large (as defined by sizeof) then we put them into the
        graph on their own by using dask.delayed
    """
    if is_dask_collection(x):
        return x
    elif isinstance(x, str) and re.match(r"_\d+", x):
        # Strings like "_0" would collide with blockwise's positional tokens.
        return delayed(x)
    elif isinstance(x, list) and len(x) >= 10:
        return delayed(x)
    elif sizeof(x) > 1e6:
        # Large constants (> ~1 MB) become standalone graph entries rather
        # than being embedded into every task.
        return delayed(x)
    else:
        return x
def _pass_extra_kwargs(func, keys, *args, **kwargs):
"""Helper for :func:`dask.array.map_blocks` to pass `block_info` or `block_id`.
For each element of `keys`, a corresponding element of args is changed
to a keyword argument with that key, before all arguments re passed on
to `func`.
"""
kwargs.update(zip(keys, args))
return func(*args[len(keys) :], **kwargs)
def map_blocks(
func,
*args,
name=None,
token=None,
dtype=None,
chunks=None,
drop_axis=None,
new_axis=None,
enforce_ndim=False,
meta=None,
**kwargs,
):
"""Map a function across all blocks of a dask array.
Note that ``map_blocks`` will attempt to automatically determine the output
array type by calling ``func`` on 0-d versions of the inputs. Please refer to
the ``meta`` keyword argument below if you expect that the function will not
succeed when operating on 0-d arrays.
Parameters
----------
func : callable
Function to apply to every block in the array.
If ``func`` accepts ``block_info=`` or ``block_id=``
as keyword arguments, these will be passed dictionaries
containing information about input and output chunks/arrays
during computation. See examples for details.
args : dask arrays or other objects
dtype : np.dtype, optional
The ``dtype`` of the output array. It is recommended to provide this.
If not provided, will be inferred by applying the function to a small
set of fake data.
chunks : tuple, optional
Chunk shape of resulting blocks if the function does not preserve
shape. If not provided, the resulting array is assumed to have the same
block structure as the first input array.
drop_axis : number or iterable, optional
Dimensions lost by the function.
new_axis : number or iterable, optional
New dimensions created by the function. Note that these are applied
after ``drop_axis`` (if present).
enforce_ndim : bool, default False
Whether to enforce at runtime that the dimensionality of the array
produced by ``func`` actually matches that of the array returned by
``map_blocks``.
If True, this will raise an error when there is a mismatch.
token : string, optional
The key prefix to use for the output array. If not provided, will be
determined from the function name.
name : string, optional
The key name to use for the output array. Note that this fully
specifies the output key name, and must be unique. If not provided,
will be determined by a hash of the arguments.
meta : array-like, optional
The ``meta`` of the output array, when specified is expected to be an
array of the same type and dtype of that returned when calling ``.compute()``
on the array returned by this function. When not provided, ``meta`` will be
inferred by applying the function to a small set of fake data, usually a
0-d array. It's important to ensure that ``func`` can successfully complete
computation without raising exceptions when 0-d is passed to it, providing
``meta`` will be required otherwise. If the output type is known beforehand
(e.g., ``np.ndarray``, ``cupy.ndarray``), an empty array of such type dtype
can be passed, for example: ``meta=np.array((), dtype=np.int32)``.
**kwargs :
Other keyword arguments to pass to function. Values must be constants
(not dask.arrays)
See Also
--------
dask.array.map_overlap : Generalized operation with overlap between neighbors.
dask.array.blockwise : Generalized operation with control over block alignment.
Examples
--------
>>> import dask.array as da
>>> x = da.arange(6, chunks=3)
>>> x.map_blocks(lambda x: x * 2).compute()
array([ 0, 2, 4, 6, 8, 10])
The ``da.map_blocks`` function can also accept multiple arrays.
>>> d = da.arange(5, chunks=2)
>>> e = da.arange(5, chunks=2)
>>> f = da.map_blocks(lambda a, b: a + b**2, d, e)
>>> f.compute()
array([ 0, 2, 6, 12, 20])
If the function changes shape of the blocks then you must provide chunks
explicitly.
>>> y = x.map_blocks(lambda x: x[::2], chunks=((2, 2),))
You have a bit of freedom in specifying chunks. If all of the output chunk
sizes are the same, you can provide just that chunk size as a single tuple.
>>> a = da.arange(18, chunks=(6,))
>>> b = a.map_blocks(lambda x: x[:3], chunks=(3,))
If the function changes the dimension of the blocks you must specify the
created or destroyed dimensions.
>>> b = a.map_blocks(lambda x: x[None, :, None], chunks=(1, 6, 1),
... new_axis=[0, 2])
If ``chunks`` is specified but ``new_axis`` is not, then it is inferred to
add the necessary number of axes on the left.
Note that ``map_blocks()`` will concatenate chunks along axes specified by
the keyword parameter ``drop_axis`` prior to applying the function.
This is illustrated in the figure below:
.. image:: /images/map_blocks_drop_axis.png
Due to memory-size-constraints, it is often not advisable to use ``drop_axis``
on an axis that is chunked. In that case, it is better not to use
``map_blocks`` but rather
``dask.array.reduction(..., axis=dropped_axes, concatenate=False)`` which
maintains a leaner memory footprint while it drops any axis.
Map_blocks aligns blocks by block positions without regard to shape. In the
following example we have two arrays with the same number of blocks but
with different shape and chunk sizes.
>>> x = da.arange(1000, chunks=(100,))
>>> y = da.arange(100, chunks=(10,))
The relevant attribute to match is numblocks.
>>> x.numblocks
(10,)
>>> y.numblocks
(10,)
If these match (up to broadcasting rules) then we can map arbitrary
functions across blocks
>>> def func(a, b):
... return np.array([a.max(), b.max()])
>>> da.map_blocks(func, x, y, chunks=(2,), dtype='i8')
dask.array<func, shape=(20,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>
>>> _.compute()
array([ 99, 9, 199, 19, 299, 29, 399, 39, 499, 49, 599, 59, 699,
69, 799, 79, 899, 89, 999, 99])
Your block function can get information about where it is in the array by
accepting a special ``block_info`` or ``block_id`` keyword argument.
During computation, they will contain information about each of the input
and output chunks (and dask arrays) relevant to each call of ``func``.
>>> def func(block_info=None):
... pass
This will receive the following information:
>>> block_info # doctest: +SKIP
{0: {'shape': (1000,),
'num-chunks': (10,),
'chunk-location': (4,),
'array-location': [(400, 500)]},
None: {'shape': (1000,),
'num-chunks': (10,),
'chunk-location': (4,),
'array-location': [(400, 500)],
'chunk-shape': (100,),
'dtype': dtype('float64')}}
The keys to the ``block_info`` dictionary indicate which is the input and
output Dask array:
- **Input Dask array(s):** ``block_info[0]`` refers to the first input Dask array.
The dictionary key is ``0`` because that is the argument index corresponding
to the first input Dask array.
In cases where multiple Dask arrays have been passed as input to the function,
you can access them with the number corresponding to the input argument,
eg: ``block_info[1]``, ``block_info[2]``, etc.
(Note that if you pass multiple Dask arrays as input to map_blocks,
the arrays must match each other by having matching numbers of chunks,
along corresponding dimensions up to broadcasting rules.)
- **Output Dask array:** ``block_info[None]`` refers to the output Dask array,
and contains information about the output chunks.
The output chunk shape and dtype may may be different than the input chunks.
For each dask array, ``block_info`` describes:
- ``shape``: the shape of the full Dask array,
- ``num-chunks``: the number of chunks of the full array in each dimension,
- ``chunk-location``: the chunk location (for example the fourth chunk over
in the first dimension), and
- ``array-location``: the array location within the full Dask array
(for example the slice corresponding to ``40:50``).
In addition to these, there are two extra parameters described by
``block_info`` for the output array (in ``block_info[None]``):
- ``chunk-shape``: the output chunk shape, and
- ``dtype``: the output dtype.
These features can be combined to synthesize an array from scratch, for
example:
>>> def func(block_info=None):
... loc = block_info[None]['array-location'][0]
... return np.arange(loc[0], loc[1])
>>> da.map_blocks(func, chunks=((4, 4),), dtype=np.float_)
dask.array<func, shape=(8,), dtype=float64, chunksize=(4,), chunktype=numpy.ndarray>
>>> _.compute()
array([0, 1, 2, 3, 4, 5, 6, 7])
``block_id`` is similar to ``block_info`` but contains only the ``chunk_location``:
>>> def func(block_id=None):
... pass
This will receive the following information:
>>> block_id # doctest: +SKIP
(4, 3)
You may specify the key name prefix of the resulting task in the graph with
the optional ``token`` keyword argument.
>>> x.map_blocks(lambda x: x + 1, name='increment')
dask.array<increment, shape=(1000,), dtype=int64, chunksize=(100,), chunktype=numpy.ndarray>
For functions that may not handle 0-d arrays, it's also possible to specify
``meta`` with an empty array matching the type of the expected result. In
the example below, ``func`` will result in an ``IndexError`` when computing
``meta``:
>>> da.map_blocks(lambda x: x[2], da.random.random(5), meta=np.array(()))
dask.array<lambda, shape=(5,), dtype=float64, chunksize=(5,), chunktype=numpy.ndarray>
Similarly, it's possible to specify a non-NumPy array to ``meta``, and provide
a ``dtype``:
>>> import cupy # doctest: +SKIP
>>> rs = da.random.RandomState(RandomState=cupy.random.RandomState) # doctest: +SKIP
>>> dt = np.float32
>>> da.map_blocks(lambda x: x[2], rs.random(5, dtype=dt), meta=cupy.array((), dtype=dt)) # doctest: +SKIP
dask.array<lambda, shape=(5,), dtype=float32, chunksize=(5,), chunktype=cupy.ndarray>
"""
if drop_axis is None:
drop_axis = []
if not callable(func):
msg = (
"First argument must be callable function, not %s\n"
"Usage: da.map_blocks(function, x)\n"
" or: da.map_blocks(function, x, y, z)"
)
raise TypeError(msg % type(func).__name__)
if token:
warnings.warn(
"The `token=` keyword to `map_blocks` has been moved to `name=`. "
"Please use `name=` instead as the `token=` keyword will be removed "
"in a future release.",
category=FutureWarning,
)
name = token
name = f"{name or funcname(func)}-{tokenize(func, dtype, chunks, drop_axis, new_axis, *args, **kwargs)}"
new_axes = {}
if isinstance(drop_axis, Number):
drop_axis = [drop_axis]
if isinstance(new_axis, Number):
new_axis = [new_axis] # TODO: handle new_axis
arrs = [a for a in args if isinstance(a, Array)]
argpairs = [
(a, tuple(range(a.ndim))[::-1]) if isinstance(a, Array) else (a, None)
for a in args
]
if arrs:
out_ind = tuple(range(max(a.ndim for a in arrs)))[::-1]
else:
out_ind = ()
original_kwargs = kwargs
if dtype is None and meta is None:
try:
meta = compute_meta(func, dtype, *args, **kwargs)
except Exception:
pass
dtype = apply_infer_dtype(func, args, original_kwargs, "map_blocks")
if drop_axis:
ndim_out = len(out_ind)
if any(i < -ndim_out or i >= ndim_out for i in drop_axis):
raise ValueError(
f"drop_axis out of range (drop_axis={drop_axis}, "
f"but output is {ndim_out}d)."
)
drop_axis = [i % ndim_out for i in drop_axis]
out_ind = tuple(x for i, x in enumerate(out_ind) if i not in drop_axis)
if new_axis is None and chunks is not None and len(out_ind) < len(chunks):
new_axis = range(len(chunks) - len(out_ind))
if new_axis:
# new_axis = [x + len(drop_axis) for x in new_axis]
out_ind = list(out_ind)
for ax in sorted(new_axis):
n = len(out_ind) + len(drop_axis)
out_ind.insert(ax, n)
if chunks is not None:
new_axes[n] = chunks[ax]
else:
new_axes[n] = 1
out_ind = tuple(out_ind)
if max(new_axis) > max(out_ind):
raise ValueError("New_axis values do not fill in all dimensions")
if chunks is not None:
if len(chunks) != len(out_ind):
raise ValueError(
f"Provided chunks have {len(chunks)} dims; expected {len(out_ind)} dims"
)
adjust_chunks = dict(zip(out_ind, chunks))
else:
adjust_chunks = None
if enforce_ndim:
out = blockwise(
apply_and_enforce,
out_ind,
*concat(argpairs),
expected_ndim=len(out_ind),
_func=func,
name=name,
new_axes=new_axes,
dtype=dtype,
concatenate=True,
align_arrays=False,
adjust_chunks=adjust_chunks,
meta=meta,
**kwargs,
)
else:
out = blockwise(
func,
out_ind,
*concat(argpairs),
name=name,
new_axes=new_axes,
dtype=dtype,
concatenate=True,
align_arrays=False,
adjust_chunks=adjust_chunks,
meta=meta,
**kwargs,
)
extra_argpairs = []
extra_names = []
# If func has block_id as an argument, construct an array of block IDs and
# prepare to inject it.
if has_keyword(func, "block_id"):
block_id_name = "block-id-" + out.name
block_id_dsk = {
(block_id_name,) + block_id: block_id
for block_id in product(*(range(len(c)) for c in out.chunks))
}
block_id_array = Array(
block_id_dsk,
block_id_name,
chunks=tuple((1,) * len(c) for c in out.chunks),
dtype=np.object_,
)
extra_argpairs.append((block_id_array, out_ind))
extra_names.append("block_id")
# If func has block_info as an argument, construct an array of block info
# objects and prepare to inject it.
if has_keyword(func, "block_info"):
starts = {}
num_chunks = {}
shapes = {}
for i, (arg, in_ind) in enumerate(argpairs):
if in_ind is not None:
shapes[i] = arg.shape
if drop_axis:
# We concatenate along dropped axes, so we need to treat them
# as if there is only a single chunk.
starts[i] = [
(
cached_cumsum(arg.chunks[j], initial_zero=True)
if ind in out_ind
else [0, arg.shape[j]]
)
for j, ind in enumerate(in_ind)
]
num_chunks[i] = tuple(len(s) - 1 for s in starts[i])
else:
starts[i] = [
cached_cumsum(c, initial_zero=True) for c in arg.chunks
]
num_chunks[i] = arg.numblocks
out_starts = [cached_cumsum(c, initial_zero=True) for c in out.chunks]
block_info_name = "block-info-" + out.name
block_info_dsk = {}
for block_id in product(*(range(len(c)) for c in out.chunks)):
# Get position of chunk, indexed by axis labels
location = {out_ind[i]: loc for i, loc in enumerate(block_id)}
info = {}
for i, shape in shapes.items():
# Compute chunk key in the array, taking broadcasting into
# account. We don't directly know which dimensions are
# broadcast, but any dimension with only one chunk can be
# treated as broadcast.
arr_k = tuple(
location.get(ind, 0) if num_chunks[i][j] > 1 else 0
for j, ind in enumerate(argpairs[i][1])
)
info[i] = {
"shape": shape,
"num-chunks": num_chunks[i],
"array-location": [
(starts[i][ij][j], starts[i][ij][j + 1])
for ij, j in enumerate(arr_k)
],
"chunk-location": arr_k,
}
info[None] = {
"shape": out.shape,
"num-chunks": out.numblocks,
"array-location": [
(out_starts[ij][j], out_starts[ij][j + 1])
for ij, j in enumerate(block_id)
],
"chunk-location": block_id,
"chunk-shape": tuple(
out.chunks[ij][j] for ij, j in enumerate(block_id)
),
"dtype": dtype,
}
block_info_dsk[(block_info_name,) + block_id] = info
block_info = Array(
block_info_dsk,
block_info_name,
chunks=tuple((1,) * len(c) for c in out.chunks),
dtype=np.object_,
)
extra_argpairs.append((block_info, out_ind))
extra_names.append("block_info")
if extra_argpairs:
# Rewrite the Blockwise layer. It would be nice to find a way to
# avoid doing it twice, but it's currently needed to determine
# out.chunks from the first pass. Since it constructs a Blockwise
# rather than an expanded graph, it shouldn't be too expensive.
out = blockwise(
_pass_extra_kwargs,
out_ind,
func,
None,
tuple(extra_names),
None,
*concat(extra_argpairs),
*concat(argpairs),
name=out.name,
dtype=out.dtype,
concatenate=True,
align_arrays=False,
adjust_chunks=dict(zip(out_ind, out.chunks)),
meta=meta,
**kwargs,
)
return out
def apply_and_enforce(*args, **kwargs):
    """Call a wrapped function and verify the dimensionality of its output.

    The callable is supplied via the ``_func`` keyword and the required
    number of dimensions via ``expected_ndim``; all remaining arguments
    are forwarded untouched. Outputs lacking an ``ndim`` attribute are
    treated as 0-dimensional.

    Raises
    ------
    ValueError
        If the output's ``ndim`` differs from ``expected_ndim``.
    """
    func = kwargs.pop("_func")
    expected_ndim = kwargs.pop("expected_ndim")
    result = func(*args, **kwargs)
    actual_ndim = getattr(result, "ndim", 0)
    if actual_ndim != expected_ndim:
        raise ValueError(
            f"Dimension mismatch: expected output of {func} "
            f"to have dims = {expected_ndim}. Got {actual_ndim} instead."
        )
    return result
def broadcast_chunks(*chunkss):
    """Construct a chunks tuple that broadcasts many chunks tuples

    Shorter inputs are padded on the left with length-1 chunk axes; along
    each dimension, axes whose chunking is ``(1,)`` broadcast against the
    others, which must all agree.

    >>> a = ((5, 5),)
    >>> b = ((5, 5),)
    >>> broadcast_chunks(a, b)
    ((5, 5),)
    >>> a = ((10, 10, 10), (5, 5),)
    >>> b = ((5, 5),)
    >>> broadcast_chunks(a, b)
    ((10, 10, 10), (5, 5))
    >>> a = ((10, 10, 10), (5, 5),)
    >>> b = ((1,), (5, 5),)
    >>> broadcast_chunks(a, b)
    ((10, 10, 10), (5, 5))
    >>> a = ((10, 10, 10), (5, 5),)
    >>> b = ((3, 3,), (5, 5),)
    >>> broadcast_chunks(a, b)
    Traceback (most recent call last):
    ...
    ValueError: Chunks do not align: [(10, 10, 10), (3, 3)]
    """
    if not chunkss:
        return ()
    if len(chunkss) == 1:
        return chunkss[0]
    ndim = max(len(c) for c in chunkss)
    # Left-pad shorter chunk tuples with broadcastable (1,) axes.
    padded = [((1,),) * (ndim - len(c)) + c for c in chunkss]
    out = []
    for per_dim in zip(*padded):
        # Ignore broadcast (1,) axes unless every input is (1,).
        candidates = [c for c in per_dim if c != (1,)] or list(per_dim)
        if len(set(candidates)) != 1:
            raise ValueError("Chunks do not align: %s" % str(candidates))
        out.append(candidates[0])
    return tuple(out)
def store(
    sources: Array | Collection[Array],
    targets: ArrayLike | Delayed | Collection[ArrayLike | Delayed],
    lock: bool | Lock = True,
    regions: tuple[slice, ...] | Collection[tuple[slice, ...]] | None = None,
    compute: bool = True,
    return_stored: bool = False,
    **kwargs,
):
    """Store dask arrays in array-like objects, overwrite data in target
    This stores dask arrays into object that supports numpy-style setitem
    indexing. It stores values chunk by chunk so that it does not have to
    fill up memory. For best performance you can align the block size of
    the storage target with the block size of your array.
    If your data fits in memory then you may prefer calling
    ``np.array(myarray)`` instead.
    Parameters
    ----------
    sources: Array or collection of Arrays
    targets: array-like or Delayed or collection of array-likes and/or Delayeds
        These should support setitem syntax ``target[10:20] = ...``.
        If sources is a single item, targets must be a single item; if sources is a
        collection of arrays, targets must be a matching collection.
    lock: boolean or threading.Lock, optional
        Whether or not to lock the data stores while storing.
        Pass True (lock each file individually), False (don't lock) or a
        particular :class:`threading.Lock` object to be shared among all writes.
    regions: tuple of slices or collection of tuples of slices, optional
        Each ``region`` tuple in ``regions`` should be such that
        ``target[region].shape = source.shape``
        for the corresponding source and target in sources and targets,
        respectively. If this is a tuple, the contents will be assumed to be
        slices, so do not provide a tuple of tuples.
    compute: boolean, optional
        If true compute immediately; return :class:`dask.delayed.Delayed` otherwise.
    return_stored: boolean, optional
        Optionally return the stored result (default False).
    kwargs:
        Parameters passed to compute/persist (only used if compute=True)
    Returns
    -------
    If return_stored=True
        tuple of Arrays
    If return_stored=False and compute=True
        None
    If return_stored=False and compute=False
        Delayed
    Examples
    --------
    >>> import h5py  # doctest: +SKIP
    >>> f = h5py.File('myfile.hdf5', mode='a')  # doctest: +SKIP
    >>> dset = f.create_dataset('/data', shape=x.shape,
    ...                                  chunks=x.chunks,
    ...                                  dtype='f8')  # doctest: +SKIP
    >>> store(x, dset)  # doctest: +SKIP
    Alternatively store many arrays at the same time
    >>> store([x, y, z], [dset1, dset2, dset3])  # doctest: +SKIP
    """
    # Normalize the single-source/single-target form to one-element lists.
    if isinstance(sources, Array):
        sources = [sources]
        # There's no way to test that targets is a single array-like.
        # We need to trust the user.
        targets = [targets]  # type: ignore
    targets = cast("Collection[ArrayLike | Delayed]", targets)
    if any(not isinstance(s, Array) for s in sources):
        raise ValueError("All sources must be dask array objects")
    if len(sources) != len(targets):
        raise ValueError(
            "Different number of sources [%d] and targets [%d]"
            % (len(sources), len(targets))
        )
    # Broadcast a single region (or None) across every source/target pair;
    # otherwise require one region per pair.
    if isinstance(regions, tuple) or regions is None:
        regions_list = [regions] * len(sources)
    else:
        regions_list = list(regions)
        if len(sources) != len(regions_list):
            raise ValueError(
                f"Different number of sources [{len(sources)}] and "
                f"targets [{len(targets)}] than regions [{len(regions_list)}]"
            )
    del regions
    # Optimize all sources together
    sources_hlg = HighLevelGraph.merge(*[e.__dask_graph__() for e in sources])
    sources_layer = Array.__dask_optimize__(
        sources_hlg, list(core.flatten([e.__dask_keys__() for e in sources]))
    )
    sources_name = "store-sources-" + tokenize(sources)
    layers = {sources_name: sources_layer}
    dependencies: dict[str, set[str]] = {sources_name: set()}
    # Optimize all targets together
    targets_keys = []
    targets_dsks = []
    for t in targets:
        if isinstance(t, Delayed):
            targets_keys.append(t.key)
            targets_dsks.append(t.__dask_graph__())
        elif is_dask_collection(t):
            raise TypeError("Targets must be either Delayed objects or array-likes")
    if targets_dsks:
        targets_hlg = HighLevelGraph.merge(*targets_dsks)
        targets_layer = Delayed.__dask_optimize__(targets_hlg, targets_keys)
        targets_name = "store-targets-" + tokenize(targets_keys)
        layers[targets_name] = targets_layer
        dependencies[targets_name] = set()
    # Only load values back lazily when the caller wants stored results but
    # has deferred computation.
    load_stored = return_stored and not compute
    map_names = [
        "store-map-" + tokenize(s, t if isinstance(t, Delayed) else id(t), r)
        for s, t, r in zip(sources, targets, regions_list)
    ]
    map_keys: list[tuple] = []
    # One insert_to_ooc layer per source/target pair performs the actual
    # chunk-by-chunk writes.
    for s, t, n, r in zip(sources, targets, map_names, regions_list):
        map_layer = insert_to_ooc(
            keys=s.__dask_keys__(),
            chunks=s.chunks,
            out=t.key if isinstance(t, Delayed) else t,
            name=n,
            lock=lock,
            region=r,
            return_stored=return_stored,
            load_stored=load_stored,
        )
        layers[n] = map_layer
        if isinstance(t, Delayed):
            dependencies[n] = {sources_name, targets_name}
        else:
            dependencies[n] = {sources_name}
        map_keys += map_layer.keys()
    if return_stored:
        store_dsk = HighLevelGraph(layers, dependencies)
        load_store_dsk: HighLevelGraph | dict[tuple, Any] = store_dsk
        if compute:
            # Persist the writes now, then wrap graphs that read the
            # freshly-stored chunks back out.
            store_dlyds = [Delayed(k, store_dsk, layer=k[0]) for k in map_keys]
            store_dlyds = persist(*store_dlyds, **kwargs)
            store_dsk_2 = HighLevelGraph.merge(*[e.dask for e in store_dlyds])
            load_store_dsk = retrieve_from_ooc(map_keys, store_dsk, store_dsk_2)
            map_names = ["load-" + n for n in map_names]
        return tuple(
            Array(load_store_dsk, n, s.chunks, meta=s)
            for s, n in zip(sources, map_names)
        )
    elif compute:
        store_dsk = HighLevelGraph(layers, dependencies)
        compute_as_if_collection(Array, store_dsk, map_keys, **kwargs)
        return None
    else:
        # Deferred: bundle every store key under one Delayed.
        key = "store-" + tokenize(map_names)
        layers[key] = {key: map_keys}
        dependencies[key] = set(map_names)
        store_dsk = HighLevelGraph(layers, dependencies)
        return Delayed(key, store_dsk)
def blockdims_from_blockshape(shape, chunks):
    """Expand a uniform block shape into explicit per-axis chunk sizes.

    >>> blockdims_from_blockshape((10, 10), (4, 3))
    ((4, 4, 2), (3, 3, 3, 1))
    >>> blockdims_from_blockshape((10, 0), (4, 0))
    ((4, 4, 2), (0,))
    """
    if chunks is None:
        raise TypeError("Must supply chunks= keyword argument")
    if shape is None:
        raise TypeError("Must supply shape= keyword argument")
    if np.isnan(sum(shape)) or np.isnan(sum(chunks)):
        raise ValueError(
            "Array chunk sizes are unknown. shape: %s, chunks: %s%s"
            % (shape, chunks, unknown_chunk_message)
        )
    if not all(is_integer(bd) for bd in chunks):
        raise ValueError("chunks can only contain integers.")
    if not all(is_integer(d) for d in shape):
        raise ValueError("shape can only contain integers.")
    shape = tuple(int(d) for d in shape)
    chunks = tuple(int(bd) for bd in chunks)
    out = []
    for d, bd in zip(shape, chunks):
        if not d:
            # Zero-length axes get a single empty chunk.
            out.append((0,))
        elif d % bd:
            # Full-size chunks plus one remainder chunk.
            out.append((bd,) * (d // bd) + (d % bd,))
        else:
            out.append((bd,) * (d // bd))
    return tuple(out)
def finalize(results):
    """Assemble computed chunk results into a single concrete value.

    Nested singleton lists collapse to ``unpack_singleton``; anything with
    more than one element at some nesting level is concatenated.
    """
    if not results:
        return concatenate3(results)
    node = results
    while isinstance(node, (list, tuple)):
        if len(node) > 1:
            return concatenate3(results)
        node = node[0]
    return unpack_singleton(results)
# Error text raised when an Array ends up with ``chunks=None``
# (see the check in ``Array.__new__``).
CHUNKS_NONE_ERROR_MESSAGE = """
You must specify a chunks= keyword argument.
This specifies the chunksize of your array blocks.
See the following documentation page for details:
https://docs.dask.org/en/latest/array-creation.html#chunks
""".strip()
class Array(DaskMethodsMixin):
    """Parallel Dask Array
    A parallel nd-array comprised of many numpy arrays arranged in a grid.
    This constructor is for advanced uses only.  For normal use see the
    :func:`dask.array.from_array` function.
    Parameters
    ----------
    dask : dict
        Task dependency graph
    name : string
        Name of array in dask
    shape : tuple of ints
        Shape of the entire array
    chunks: iterable of tuples
        block sizes along each dimension
    dtype : str or dtype
        Typecode or data-type for the new Dask Array
    meta : empty ndarray
        empty ndarray created with same NumPy backend, ndim and dtype as the
        Dask Array being created (overrides dtype)
    See Also
    --------
    dask.array.from_array
    """
    # ``__dict__`` is kept alongside the slots so @cached_property values
    # can still be stored per-instance (and cleared via ``_reset_cache``).
    __slots__ = "dask", "__name", "_cached_keys", "__chunks", "_meta", "__dict__"
    def __new__(cls, dask, name, chunks, dtype=None, meta=None, shape=None):
        # Construction happens in __new__ (not __init__) because array
        # plugins below may replace ``self`` with an arbitrary object.
        self = super().__new__(cls)
        assert isinstance(dask, Mapping)
        if not isinstance(dask, HighLevelGraph):
            dask = HighLevelGraph.from_collections(name, dask, dependencies=())
        self.dask = dask
        self._name = str(name)
        meta = meta_from_array(meta, dtype=dtype)
        # String chunk specs (e.g. "auto") need the dtype to translate byte
        # targets into element counts; purely numeric chunks do not.
        if (
            isinstance(chunks, str)
            or isinstance(chunks, tuple)
            and chunks
            and any(isinstance(c, str) for c in chunks)
        ):
            dt = meta.dtype
        else:
            dt = None
        self._chunks = normalize_chunks(chunks, shape, dtype=dt)
        if self.chunks is None:
            raise ValueError(CHUNKS_NONE_ERROR_MESSAGE)
        self._meta = meta_from_array(meta, ndim=self.ndim, dtype=dtype)
        # Give registered plugins a chance to observe or replace the array.
        for plugin in config.get("array_plugins", ()):
            result = plugin(self)
            if result is not None:
                self = result
        try:
            layer = self.dask.layers[name]
        except (AttributeError, KeyError):
            # self is no longer an Array after applying the plugins, OR
            # a plugin replaced the HighLevelGraph with a plain dict, OR
            # name is not the top layer's name (this can happen after the layer is
            # manipulated, to avoid a collision)
            pass
        else:
            # Record array metadata on the top layer for visualization and
            # scheduler introspection.
            if layer.collection_annotations is None:
                layer.collection_annotations = {
                    "shape": self.shape,
                    "dtype": self.dtype,
                    "chunksize": self.chunksize,
                    "chunks": self.chunks,
                    "type": typename(type(self)),
                    "chunk_type": typename(type(self._meta)),
                }
            else:
                layer.collection_annotations.update(
                    {
                        "shape": self.shape,
                        "dtype": self.dtype,
                        "chunksize": self.chunksize,
                        "chunks": self.chunks,
                        "type": typename(type(self)),
                        "chunk_type": typename(type(self._meta)),
                    }
                )
        return self
    def __reduce__(self):
        # Pickle support: rebuild through the public constructor arguments.
        return (Array, (self.dask, self.name, self.chunks, self.dtype, self._meta))
    def __dask_graph__(self):
        # Dask collection protocol: expose the underlying task graph.
        return self.dask
    def __dask_layers__(self):
        # Dask collection protocol: this array owns one top-level layer.
        return (self.name,)
def __dask_keys__(self):
if self._cached_keys is not None:
return self._cached_keys
name, chunks, numblocks = self.name, self.chunks, self.numblocks
def keys(*args):
if not chunks:
return [(name,)]
ind = len(args)
if ind + 1 == len(numblocks):
result = [(name,) + args + (i,) for i in range(numblocks[ind])]
else:
result = [keys(*(args + (i,))) for i in range(numblocks[ind])]
return result
self._cached_keys = result = keys()
return result
    def __dask_tokenize__(self):
        # Deterministic token: the name already encodes the graph contents.
        return self.name
    # Graph optimization hook, user-configurable via "array_optimize".
    __dask_optimize__ = globalmethod(
        optimize, key="array_optimize", falsey=dont_optimize
    )
    # Default scheduler used by .compute() on arrays.
    __dask_scheduler__ = staticmethod(DEFAULT_GET)
    def __dask_postcompute__(self):
        # After compute: chunk results are merged via ``finalize``.
        return finalize, ()
    def __dask_postpersist__(self):
        # After persist: rebuild an equivalent Array around the new graph.
        return self._rebuild, ()
def _rebuild(self, dsk, *, rename=None):
name = self._name
if rename:
name = rename.get(name, name)
return Array(dsk, name, self.chunks, self.dtype, self._meta)
def _reset_cache(self, key=None):
"""
Reset cached properties.
Parameters
----------
key : str, optional
Remove specified key. The default removes all items.
"""
if key is None:
self.__dict__.clear()
else:
self.__dict__.pop(key, None)
    @cached_property
    def _key_array(self):
        # Object ndarray of task keys; enables numpy-style block indexing.
        return np.array(self.__dask_keys__(), dtype=object)
    @cached_property
    def numblocks(self):
        # Number of chunks along each axis.
        return tuple(map(len, self.chunks))
    @cached_property
    def npartitions(self):
        # Total number of chunks across all axes.
        return reduce(mul, self.numblocks, 1)
    def compute_chunk_sizes(self):
        """
        Compute the chunk sizes for a Dask array. This is especially useful
        when the chunk sizes are unknown (e.g., when indexing one Dask array
        with another).
        Notes
        -----
        This function modifies the Dask array in-place.
        Examples
        --------
        >>> import dask.array as da
        >>> import numpy as np
        >>> x = da.from_array([-2, -1, 0, 1, 2], chunks=2)
        >>> x.chunks
        ((2, 2, 1),)
        >>> y = x[x <= 0]
        >>> y.chunks
        ((nan, nan, nan),)
        >>> y.compute_chunk_sizes()  # in-place computation
        dask.array<getitem, shape=(3,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>
        >>> y.chunks
        ((2, 1, 0),)
        """
        x = self
        # Map each block to its own shape: the result has one length-1 chunk
        # per input chunk plus a trailing axis of length ndim.
        chunk_shapes = x.map_blocks(
            _get_chunk_shape,
            dtype=int,
            chunks=tuple(len(c) * (1,) for c in x.chunks) + ((x.ndim,),),
            new_axis=x.ndim,
        )
        c = []
        for i in range(x.ndim):
            # Slice out the extents of axis ``i`` across every chunk.
            s = x.ndim * [0] + [i]
            s[i] = slice(None)
            s = tuple(s)
            c.append(tuple(chunk_shapes[s]))
        # `map_blocks` assigns numpy dtypes
        # cast chunk dimensions back to python int before returning
        x._chunks = tuple(
            tuple(int(chunk) for chunk in chunks) for chunks in compute(tuple(c))[0]
        )
        return x
    @cached_property
    def shape(self) -> tuple[T_IntOrNaN, ...]:
        # Sum of chunk sizes per axis (NaN when chunk sizes are unknown).
        return tuple(cached_cumsum(c, initial_zero=True)[-1] for c in self.chunks)
    @property
    def chunksize(self) -> tuple[T_IntOrNaN, ...]:
        # The largest chunk along each axis.
        return tuple(max(c) for c in self.chunks)
    @property
    def dtype(self):
        # The meta may be a tuple for some chunk types; in that case the
        # first element carries the dtype.
        if isinstance(self._meta, tuple):
            dtype = self._meta[0].dtype
        else:
            dtype = self._meta.dtype
        return dtype
    @property
    def _chunks(self):
        """Non-public chunks property. Allows setting a chunk value."""
        return self.__chunks
    @_chunks.setter
    def _chunks(self, chunks):
        self.__chunks = chunks
        # When the chunks change, every cached property derived from them
        # must be invalidated:
        for key in ["numblocks", "npartitions", "shape", "ndim", "size", "_key_array"]:
            self._reset_cache(key)
    @property
    def chunks(self):
        """Chunks property."""
        return self.__chunks
    @chunks.setter
    def chunks(self, chunks):
        # The public property is read-only; rechunking must go through
        # ``rechunk`` so the graph is rewritten consistently.
        raise TypeError(
            "Can not set chunks directly\n\n"
            "Please use the rechunk method instead:\n"
            f"  x.rechunk({chunks})\n\n"
            "If trying to avoid unknown chunks, use\n"
            "  x.compute_chunk_sizes()"
        )
def __len__(self):
if not self.chunks:
raise TypeError("len() of unsized object")
if np.isnan(self.chunks[0]).any():
msg = (
"Cannot call len() on object with unknown chunk size."
f"{unknown_chunk_message}"
)
raise ValueError(msg)
return int(sum(self.chunks[0]))
    def __array_ufunc__(self, numpy_ufunc, method, *inputs, **kwargs):
        # NumPy ufunc override protocol (NEP-13): route ufunc calls on dask
        # arrays to dask's elementwise / gufunc machinery.
        out = kwargs.get("out", ())
        for x in inputs + out:
            # Defer to operands that should take precedence over dask arrays.
            if _should_delegate(self, x):
                return NotImplemented
        if method == "__call__":
            if numpy_ufunc is np.matmul:
                from dask.array.routines import matmul
                # special case until apply_gufunc handles optional dimensions
                return matmul(*inputs, **kwargs)
            if numpy_ufunc.signature is not None:
                from dask.array.gufunc import apply_gufunc
                # Generalized ufuncs carry an explicit core-dimension signature.
                return apply_gufunc(
                    numpy_ufunc, numpy_ufunc.signature, *inputs, **kwargs
                )
            if numpy_ufunc.nout > 1:
                from dask.array import ufunc
                try:
                    da_ufunc = getattr(ufunc, numpy_ufunc.__name__)
                except AttributeError:
                    return NotImplemented
                return da_ufunc(*inputs, **kwargs)
            else:
                return elemwise(numpy_ufunc, *inputs, **kwargs)
        elif method == "outer":
            from dask.array import ufunc
            try:
                da_ufunc = getattr(ufunc, numpy_ufunc.__name__)
            except AttributeError:
                return NotImplemented
            return da_ufunc.outer(*inputs, **kwargs)
        else:
            # reduce/accumulate/at etc. are not supported here.
            return NotImplemented
def __repr__(self):
"""
>>> import dask.array as da
>>> da.ones((10, 10), chunks=(5, 5), dtype='i4')
dask.array<..., shape=(10, 10), dtype=int32, chunksize=(5, 5), chunktype=numpy.ndarray>
"""
chunksize = str(self.chunksize)
name = self.name.rsplit("-", 1)[0]
return (
"dask.array<{}, shape={}, dtype={}, chunksize={}, chunktype={}.{}>".format(
name,
self.shape,
self.dtype,
chunksize,
type(self._meta).__module__.split(".")[0],
type(self._meta).__name__,
)
)
    def _repr_html_(self):
        # Rich Jupyter repr: an SVG chunk grid plus a summary table rendered
        # from the array.html.j2 template.
        try:
            grid = self.to_svg(size=config.get("array.svg.size", 120))
        except NotImplementedError:
            grid = ""
        if "sparse" in typename(type(self._meta)):
            # Dense byte counts are meaningless for sparse chunk types.
            nbytes = None
            cbytes = None
        elif not math.isnan(self.nbytes):
            nbytes = format_bytes(self.nbytes)
            cbytes = format_bytes(math.prod(self.chunksize) * self.dtype.itemsize)
        else:
            # Unknown chunk sizes make the byte totals NaN.
            nbytes = "unknown"
            cbytes = "unknown"
        return get_template("array.html.j2").render(
            array=self,
            grid=grid,
            nbytes=nbytes,
            cbytes=cbytes,
            layers=maybe_pluralize(len(self.dask.layers), "graph layer"),
        )
    @cached_property
    def ndim(self) -> int:
        # Number of dimensions, derived from the (possibly NaN-valued) shape.
        return len(self.shape)
    @cached_property
    def size(self) -> T_IntOrNaN:
        """Number of elements in array"""
        return reduce(mul, self.shape, 1)
    @property
    def nbytes(self) -> T_IntOrNaN:
        """Number of bytes in array"""
        return self.size * self.dtype.itemsize
    @property
    def itemsize(self) -> int:
        """Length of one array element in bytes"""
        return self.dtype.itemsize
    @property
    def _name(self):
        # Non-public name accessor; unlike ``name``, assignment is allowed.
        return self.__name
    @_name.setter
    def _name(self, val):
        self.__name = val
        # Clear the key cache when the name is reset
        self._cached_keys = None
        self._reset_cache("_key_array")
    @property
    def name(self):
        # Public, read-only name of this array in the task graph.
        return self.__name
    @name.setter
    def name(self, val):
        raise TypeError(
            "Cannot set name directly\n\n"
            "Name is used to relate the array to the task graph.\n"
            "It is uncommon to need to change it, but if you do\n"
            "please set ``._name``"
        )
def __iter__(self):
for i in range(len(self)):
yield self[i]
__array_priority__ = 11 # higher than numpy.ndarray and numpy.matrix
def __array__(self, dtype=None, **kwargs):
x = self.compute()
if dtype and x.dtype != dtype:
x = x.astype(dtype)
if not isinstance(x, np.ndarray):
x = np.array(x)
return x
    def __array_function__(self, func, types, args, kwargs):
        # NumPy high-level function protocol (NEP-18): map ``np.<func>``
        # calls onto the matching ``dask.array`` implementation if it exists.
        import dask.array as module
        def handle_nonmatching_names(func, args, kwargs):
            # Fallback for functions with no dask counterpart: use a
            # registered handler, or compute everything and call NumPy.
            if func not in _HANDLED_FUNCTIONS:
                warnings.warn(
                    "The `{}` function is not implemented by Dask array. "
                    "You may want to use the da.map_blocks function "
                    "or something similar to silence this warning. "
                    "Your code may stop working in a future release.".format(
                        func.__module__ + "." + func.__name__
                    ),
                    FutureWarning,
                )
                # Need to convert to array object (e.g. numpy.ndarray or
                # cupy.ndarray) as needed, so we can call the NumPy function
                # again and it gets the chance to dispatch to the right
                # implementation.
                args, kwargs = compute(args, kwargs)
                return func(*args, **kwargs)
            return _HANDLED_FUNCTIONS[func](*args, **kwargs)
        # First, verify that all types are handled by Dask. Otherwise, return NotImplemented.
        if not all(
            # Accept our own superclasses as recommended by NEP-13
            # (https://numpy.org/neps/nep-0013-ufunc-overrides.html#subclass-hierarchies)
            issubclass(type(self), type_) or is_valid_chunk_type(type_)
            for type_ in types
        ):
            return NotImplemented
        # Now try to find a matching function name. If that doesn't work, we may
        # be dealing with an alias or a function that's simply not in the Dask API.
        # Handle aliases via the _HANDLED_FUNCTIONS dict mapping, and warn otherwise.
        for submodule in func.__module__.split(".")[1:]:
            try:
                module = getattr(module, submodule)
            except AttributeError:
                return handle_nonmatching_names(func, args, kwargs)
        if not hasattr(module, func.__name__):
            return handle_nonmatching_names(func, args, kwargs)
        da_func = getattr(module, func.__name__)
        if da_func is func:
            # The "dask" function is NumPy's own; avoid infinite recursion.
            return handle_nonmatching_names(func, args, kwargs)
        # If ``like`` is contained in ``da_func``'s signature, add ``like=self``
        # to the kwargs dictionary.
        if has_keyword(da_func, "like"):
            kwargs["like"] = self
        return da_func(*args, **kwargs)
    @property
    def _elemwise(self):
        # Hook used by dask's ufunc machinery to build elementwise graphs.
        return elemwise
@wraps(store)
def store(self, target, **kwargs):
r = store([self], [target], **kwargs)
if kwargs.get("return_stored", False):
r = r[0]
return r
    def to_svg(self, size=500):
        """Convert chunks from Dask Array into an SVG Image
        Parameters
        ----------
        chunks: tuple
        size: int
            Rough size of the image
        Examples
        --------
        >>> x.to_svg(size=500)  # doctest: +SKIP
        Returns
        -------
        text: An svg string depicting the array as a grid of chunks
        """
        from dask.array.svg import svg
        return svg(self.chunks, size=size)
    def to_hdf5(self, filename, datapath, **kwargs):
        """Store array in HDF5 file
        >>> x.to_hdf5('myfile.hdf5', '/x')  # doctest: +SKIP
        Optionally provide arguments as though to ``h5py.File.create_dataset``
        >>> x.to_hdf5('myfile.hdf5', '/x', compression='lzf', shuffle=True)  # doctest: +SKIP
        See Also
        --------
        dask.array.store
        h5py.File.create_dataset
        """
        # Delegates to the module-level helper, which handles file creation.
        return to_hdf5(filename, datapath, self, **kwargs)
    def to_dask_dataframe(self, columns=None, index=None, meta=None):
        """Convert dask Array to dask Dataframe
        Parameters
        ----------
        columns: list or string
            list of column names if DataFrame, single string if Series
        index : dask.dataframe.Index, optional
            An optional *dask* Index to use for the output Series or DataFrame.
            The default output index depends on whether the array has any unknown
            chunks. If there are any unknown chunks, the output has ``None``
            for all the divisions (one per chunk). If all the chunks are known,
            a default index with known divisions is created.
            Specifying ``index`` can be useful if you're conforming a Dask Array
            to an existing dask Series or DataFrame, and you would like the
            indices to match.
        meta : object, optional
            An optional `meta` parameter can be passed for dask
            to specify the concrete dataframe type to use for partitions of
            the Dask dataframe. By default, pandas DataFrame is used.
        See Also
        --------
        dask.dataframe.from_dask_array
        """
        from dask.dataframe import from_dask_array
        return from_dask_array(self, columns=columns, index=index, meta=meta)
def __bool__(self):
if self.size > 1:
raise ValueError(
f"The truth value of a {self.__class__.__name__} is ambiguous. "
"Use a.any() or a.all()."
)
else:
return bool(self.compute())
__nonzero__ = __bool__ # python 2
def _scalarfunc(self, cast_type):
if self.size > 1:
raise TypeError("Only length-1 arrays can be converted to Python scalars")
else:
return cast_type(self.compute())
    # Python scalar-conversion protocol: all defer to ``_scalarfunc``,
    # which computes the (single-element) array first.
    def __int__(self):
        return self._scalarfunc(int)
    __long__ = __int__  # python 2
    def __float__(self):
        return self._scalarfunc(float)
    def __complex__(self):
        return self._scalarfunc(complex)
    def __index__(self):
        return self._scalarfunc(operator.index)
    def __setitem__(self, key, value):
        # "In-place" assignment: a new graph is built and this object's
        # graph/name/chunks/meta are rebound to it.
        if value is np.ma.masked:
            value = np.ma.masked_all((), dtype=self.dtype)
        # NaN cannot be stored into an integer array.
        if not is_dask_collection(value) and np.isnan(value).any():
            if issubclass(self.dtype.type, Integral):
                raise ValueError("cannot convert float NaN to integer")
        ## Use the "where" method for cases when key is an Array
        if isinstance(key, Array):
            from dask.array.routines import where
            if isinstance(value, Array) and value.ndim > 1:
                raise ValueError("boolean index array should have 1 dimension")
            try:
                y = where(key, value, self)
            except ValueError as e:
                raise ValueError(
                    "Boolean index assignment in Dask "
                    "expects equally shaped arrays.\nExample: da1[da2] = da3 "
                    "where da1.shape == (4,), da2.shape == (4,) "
                    "and da3.shape == (4,).\n"
                    "Alternatively, you can use the extended API that supports"
                    "indexing with tuples.\nExample: da1[(da2,)] = da3."
                ) from e
            self._meta = y._meta
            self.dask = y.dask
            self._name = y.name
            self._chunks = y.chunks
            return
        if np.isnan(self.shape).any():
            raise ValueError(f"Arrays chunk sizes are unknown. {unknown_chunk_message}")
        # Still here? Then apply the assignment to other type of
        # indices via the `setitem_array` function.
        value = asanyarray(value)
        out = "setitem-" + tokenize(self, key, value)
        dsk = setitem_array(out, self, key, value)
        meta = meta_from_array(self._meta)
        if np.isscalar(meta):
            meta = np.array(meta)
        graph = HighLevelGraph.from_collections(out, dsk, dependencies=[self])
        y = Array(graph, out, chunks=self.chunks, dtype=self.dtype, meta=meta)
        # Rebind self to the assignment result.
        self._meta = y._meta
        self.dask = y.dask
        self._name = y.name
        self._chunks = y.chunks
    def __getitem__(self, index):
        # Field access, e.g. x['a'] or x[['a', 'b']]
        if isinstance(index, str) or (
            isinstance(index, list) and index and all(isinstance(i, str) for i in index)
        ):
            if isinstance(index, str):
                dt = self.dtype[index]
            else:
                # Build a view dtype selecting the requested fields.
                dt = np.dtype(
                    {
                        "names": index,
                        "formats": [self.dtype.fields[name][0] for name in index],
                        "offsets": [self.dtype.fields[name][1] for name in index],
                        "itemsize": self.dtype.itemsize,
                    }
                )
            if dt.shape:
                # Sub-array fields add trailing axes, each as a single chunk.
                new_axis = list(range(self.ndim, self.ndim + len(dt.shape)))
                chunks = self.chunks + tuple((i,) for i in dt.shape)
                return self.map_blocks(
                    getitem, index, dtype=dt.base, chunks=chunks, new_axis=new_axis
                )
            else:
                return self.map_blocks(getitem, index, dtype=dt)
        if not isinstance(index, tuple):
            index = (index,)
        from dask.array.slicing import (
            normalize_index,
            slice_with_bool_dask_array,
            slice_with_int_dask_array,
        )
        index2 = normalize_index(index, self.shape)
        dependencies = {self.name}
        for i in index2:
            if isinstance(i, Array):
                dependencies.add(i.name)
        # Dask-array indexers are handled by dedicated slicing routines.
        if any(isinstance(i, Array) and i.dtype.kind in "iu" for i in index2):
            self, index2 = slice_with_int_dask_array(self, index2)
        if any(isinstance(i, Array) and i.dtype == bool for i in index2):
            self, index2 = slice_with_bool_dask_array(self, index2)
        # A full slice on every axis is a no-op.
        if all(isinstance(i, slice) and i == slice(None) for i in index2):
            return self
        out = "getitem-" + tokenize(self, index2)
        dsk, chunks = slice_array(out, self.name, self.chunks, index2, self.itemsize)
        graph = HighLevelGraph.from_collections(out, dsk, dependencies=[self])
        meta = meta_from_array(self._meta, ndim=len(chunks))
        if np.isscalar(meta):
            meta = np.array(meta)
        return Array(graph, out, chunks, meta=meta)
def _vindex(self, key):
if not isinstance(key, tuple):
key = (key,)
if any(k is None for k in key):
raise IndexError(
"vindex does not support indexing with None (np.newaxis), "
"got {}".format(key)
)
if all(isinstance(k, slice) for k in key):
if all(
k.indices(d) == slice(0, d).indices(d) for k, d in zip(key, self.shape)
):
return self
raise IndexError(
"vindex requires at least one non-slice to vectorize over "
"when the slices are not over the entire array (i.e, x[:]). "
"Use normal slicing instead when only using slices. Got: {}".format(key)
)
elif any(is_dask_collection(k) for k in key):
raise IndexError(
"vindex does not support indexing with dask objects. Call compute "
"on the indexer first to get an evalurated array. Got: {}".format(key)
)
return _vindex(self, *key)
    @property
    def vindex(self):
        """Vectorized indexing with broadcasting.
        This is equivalent to numpy's advanced indexing, using arrays that are
        broadcast against each other. This allows for pointwise indexing:
        >>> import dask.array as da
        >>> x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        >>> x = da.from_array(x, chunks=2)
        >>> x.vindex[[0, 1, 2], [0, 1, 2]].compute()
        array([1, 5, 9])
        Mixed basic/advanced indexing with slices/arrays is also supported. The
        order of dimensions in the result follows those proposed for
        `ndarray.vindex <https://github.com/numpy/numpy/pull/6256>`_:
        the subspace spanned by arrays is followed by all slices.
        Note: ``vindex`` provides more general functionality than standard
        indexing, but it also has fewer optimizations and can be significantly
        slower.
        """
        # IndexCallable turns ``x.vindex[key]`` into ``self._vindex(key)``.
        return IndexCallable(self._vindex)
    @property
    def blocks(self):
        """An array-like interface to the blocks of an array.
        This returns a ``Blockview`` object that provides an array-like interface
        to the blocks of a dask array. Numpy-style indexing of a ``Blockview`` object
        returns a selection of blocks as a new dask array.
        You can index ``array.blocks`` like a numpy array of shape
        equal to the number of blocks in each dimension, (available as
        array.blocks.size). The dimensionality of the output array matches
        the dimension of this array, even if integer indices are passed.
        Slicing with ``np.newaxis`` or multiple lists is not supported.
        Examples
        --------
        >>> import dask.array as da
        >>> x = da.arange(8, chunks=2)
        >>> x.blocks.shape  # aliases x.numblocks
        (4,)
        >>> x.blocks[0].compute()
        array([0, 1])
        >>> x.blocks[:3].compute()
        array([0, 1, 2, 3, 4, 5])
        >>> x.blocks[::2].compute()
        array([0, 1, 4, 5])
        >>> x.blocks[[-1, 0]].compute()
        array([6, 7, 0, 1])
        >>> x.blocks.ravel()  # doctest: +NORMALIZE_WHITESPACE
        [dask.array<blocks, shape=(2,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>,
         dask.array<blocks, shape=(2,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>,
         dask.array<blocks, shape=(2,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>,
         dask.array<blocks, shape=(2,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>]
        Returns
        -------
        An instance of ``dask.array.Blockview``
        """
        # BlockView does the actual per-block slicing work.
        return BlockView(self)
    @property
    def partitions(self):
        """Slice an array by partitions. Alias of dask array .blocks attribute.
        This alias allows you to write agnostic code that works with both
        dask arrays and dask dataframes.
        This returns a ``Blockview`` object that provides an array-like interface
        to the blocks of a dask array. Numpy-style indexing of a ``Blockview`` object
        returns a selection of blocks as a new dask array.
        You can index ``array.blocks`` like a numpy array of shape
        equal to the number of blocks in each dimension, (available as
        array.blocks.size). The dimensionality of the output array matches
        the dimension of this array, even if integer indices are passed.
        Slicing with ``np.newaxis`` or multiple lists is not supported.
        Examples
        --------
        >>> import dask.array as da
        >>> x = da.arange(8, chunks=2)
        >>> x.partitions.shape  # aliases x.numblocks
        (4,)
        >>> x.partitions[0].compute()
        array([0, 1])
        >>> x.partitions[:3].compute()
        array([0, 1, 2, 3, 4, 5])
        >>> x.partitions[::2].compute()
        array([0, 1, 4, 5])
        >>> x.partitions[[-1, 0]].compute()
        array([6, 7, 0, 1])
        >>> x.partitions.ravel()  # doctest: +NORMALIZE_WHITESPACE
        [dask.array<blocks, shape=(2,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>,
         dask.array<blocks, shape=(2,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>,
         dask.array<blocks, shape=(2,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>,
         dask.array<blocks, shape=(2,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>]
        Returns
        -------
        An instance of ``dask.array.Blockview``
        """
        return self.blocks
    def dot(self, other):
        """Dot product of self and other.
        Refer to :func:`dask.array.tensordot` for full documentation.
        See Also
        --------
        dask.array.dot : equivalent function
        """
        from dask.array.routines import tensordot
        # Contract the last axis of self with the second-to-last of other,
        # matching numpy.dot semantics for ndim >= 2.
        return tensordot(self, other, axes=((self.ndim - 1,), (other.ndim - 2,)))
    @property
    def A(self):
        # numpy.matrix compatibility: a dask array is already an "array".
        return self
    @property
    def T(self):
        # Transposed view, as an attribute (NumPy compatibility).
        return self.transpose()
    def transpose(self, *axes):
        """Reverse or permute the axes of an array. Return the modified array.
        Refer to :func:`dask.array.transpose` for full documentation.
        See Also
        --------
        dask.array.transpose : equivalent function
        """
        from dask.array.routines import transpose
        if not axes:
            axes = None
        elif len(axes) == 1 and isinstance(axes[0], Iterable):
            # Accept both transpose((1, 0)) and transpose(1, 0).
            axes = axes[0]
        if (axes == tuple(range(self.ndim))) or (axes == tuple(range(-self.ndim, 0))):
            # no transpose necessary
            return self
        else:
            return transpose(self, axes=axes)
    def ravel(self):
        """Return a flattened array.
        Refer to :func:`dask.array.ravel` for full documentation.
        See Also
        --------
        dask.array.ravel : equivalent function
        """
        from dask.array.routines import ravel
        return ravel(self)
    # NumPy-compatible alias for ravel.
    flatten = ravel
    def choose(self, choices):
        """Use an index array to construct a new array from a set of choices.
        Refer to :func:`dask.array.choose` for full documentation.
        See Also
        --------
        dask.array.choose : equivalent function
        """
        from dask.array.routines import choose
        return choose(self, choices)
def reshape(self, *shape, merge_chunks=True, limit=None):
"""Reshape array to new shape
Refer to :func:`dask.array.reshape` for full documentation.
See Also
--------
dask.array.reshape : equivalent function
"""
from dask.array.reshape import reshape
if len(shape) == 1 and not isinstance(shape[0], Number):
shape = shape[0]
return reshape(self, shape, merge_chunks=merge_chunks, limit=limit)
def topk(self, k, axis=-1, split_every=None):
    """The top k elements of an array.

    Refer to :func:`dask.array.topk` for full documentation.

    See Also
    --------
    dask.array.topk : equivalent function
    """
    from dask.array.reductions import topk

    return topk(self, k, axis=axis, split_every=split_every)

def argtopk(self, k, axis=-1, split_every=None):
    """The indices of the top k elements of an array.

    Refer to :func:`dask.array.argtopk` for full documentation.

    See Also
    --------
    dask.array.argtopk : equivalent function
    """
    from dask.array.reductions import argtopk

    return argtopk(self, k, axis=axis, split_every=split_every)
def astype(self, dtype, **kwargs):
    """Copy of the array, cast to a specified type.

    Parameters
    ----------
    dtype : str or dtype
        Typecode or data-type to which the array is cast.
    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
        Controls what kind of data casting may occur. Defaults to 'unsafe'
        for backwards compatibility.

        * 'no' means the data types should not be cast at all.
        * 'equiv' means only byte-order changes are allowed.
        * 'safe' means only casts which can preserve values are allowed.
        * 'same_kind' means only safe casts or casts within a kind,
          like float64 to float32, are allowed.
        * 'unsafe' means any data conversions may be done.
    copy : bool, optional
        By default, astype always returns a newly allocated array. If this
        is set to False and the `dtype` requirement is satisfied, the input
        array is returned instead of a copy.

        .. note::

            Dask does not respect the contiguous memory layout of the array,
            and will ignore the ``order`` keyword argument.
            The default order is 'C' contiguous.
    """
    kwargs.pop("order", None)  # `order` is not respected, so we remove this kwarg
    # Scalars don't take `casting` or `copy` kwargs - as such we only pass
    # them to `map_blocks` if specified by user (different than defaults).
    extra = set(kwargs) - {"casting", "copy"}
    if extra:
        raise TypeError(
            f"astype does not take the following keyword arguments: {list(extra)}"
        )
    casting = kwargs.get("casting", "unsafe")
    dtype = np.dtype(dtype)
    if self.dtype == dtype:
        # Already the requested dtype; dask arrays are immutable, so the
        # input can be returned unchanged.
        return self
    elif not np.can_cast(self.dtype, dtype, casting=casting):
        raise TypeError(
            f"Cannot cast array from {self.dtype!r} to {dtype!r} "
            f"according to the rule {casting!r}"
        )
    # Positional ``dtype`` is forwarded to chunk.astype; ``astype_dtype`` and
    # any user-supplied casting/copy kwargs travel through map_blocks.
    return self.map_blocks(chunk.astype, dtype=dtype, astype_dtype=dtype, **kwargs)
# Arithmetic/comparison operator protocol: each dunder lazily builds an
# elementwise graph via ``elemwise``; ``check_if_handled_given_other``
# returns NotImplemented for operand types dask does not handle so Python
# can fall back to the reflected operation.
def __abs__(self):
    return elemwise(operator.abs, self)

@check_if_handled_given_other
def __add__(self, other):
    return elemwise(operator.add, self, other)

@check_if_handled_given_other
def __radd__(self, other):
    return elemwise(operator.add, other, self)

@check_if_handled_given_other
def __and__(self, other):
    return elemwise(operator.and_, self, other)

@check_if_handled_given_other
def __rand__(self, other):
    return elemwise(operator.and_, other, self)

# NOTE(review): ``operator.div`` does not exist on Python 3 — __div__ and
# __rdiv__ are Python-2-only hooks and would raise AttributeError if ever
# invoked; candidates for removal. TODO confirm against supported versions.
@check_if_handled_given_other
def __div__(self, other):
    return elemwise(operator.div, self, other)

@check_if_handled_given_other
def __rdiv__(self, other):
    return elemwise(operator.div, other, self)

# __eq__/__ne__ return element-wise boolean arrays (numpy semantics), not a
# single bool; consequently Array instances are not usable as dict keys.
@check_if_handled_given_other
def __eq__(self, other):
    return elemwise(operator.eq, self, other)

@check_if_handled_given_other
def __gt__(self, other):
    return elemwise(operator.gt, self, other)

@check_if_handled_given_other
def __ge__(self, other):
    return elemwise(operator.ge, self, other)
def __invert__(self):
    return elemwise(operator.invert, self)

@check_if_handled_given_other
def __lshift__(self, other):
    return elemwise(operator.lshift, self, other)

@check_if_handled_given_other
def __rlshift__(self, other):
    return elemwise(operator.lshift, other, self)

@check_if_handled_given_other
def __lt__(self, other):
    return elemwise(operator.lt, self, other)

@check_if_handled_given_other
def __le__(self, other):
    return elemwise(operator.le, self, other)

@check_if_handled_given_other
def __mod__(self, other):
    return elemwise(operator.mod, self, other)

@check_if_handled_given_other
def __rmod__(self, other):
    return elemwise(operator.mod, other, self)

@check_if_handled_given_other
def __mul__(self, other):
    return elemwise(operator.mul, self, other)

@check_if_handled_given_other
def __rmul__(self, other):
    return elemwise(operator.mul, other, self)

@check_if_handled_given_other
def __ne__(self, other):
    return elemwise(operator.ne, self, other)

def __neg__(self):
    return elemwise(operator.neg, self)

@check_if_handled_given_other
def __or__(self, other):
    return elemwise(operator.or_, self, other)

def __pos__(self):
    # Unary plus is an identity operation; no graph needs to be built.
    return self

@check_if_handled_given_other
def __ror__(self, other):
    return elemwise(operator.or_, other, self)

@check_if_handled_given_other
def __pow__(self, other):
    return elemwise(operator.pow, self, other)

@check_if_handled_given_other
def __rpow__(self, other):
    return elemwise(operator.pow, other, self)

@check_if_handled_given_other
def __rshift__(self, other):
    return elemwise(operator.rshift, self, other)

@check_if_handled_given_other
def __rrshift__(self, other):
    return elemwise(operator.rshift, other, self)

@check_if_handled_given_other
def __sub__(self, other):
    return elemwise(operator.sub, self, other)

@check_if_handled_given_other
def __rsub__(self, other):
    return elemwise(operator.sub, other, self)
@check_if_handled_given_other
def __truediv__(self, other):
    return elemwise(operator.truediv, self, other)

@check_if_handled_given_other
def __rtruediv__(self, other):
    return elemwise(operator.truediv, other, self)

@check_if_handled_given_other
def __floordiv__(self, other):
    return elemwise(operator.floordiv, self, other)

@check_if_handled_given_other
def __rfloordiv__(self, other):
    return elemwise(operator.floordiv, other, self)

@check_if_handled_given_other
def __xor__(self, other):
    return elemwise(operator.xor, self, other)

@check_if_handled_given_other
def __rxor__(self, other):
    return elemwise(operator.xor, other, self)

# Matrix multiplication and divmod are not simple elementwise operations,
# so they delegate to dedicated dask implementations instead of ``elemwise``.
@check_if_handled_given_other
def __matmul__(self, other):
    from dask.array.routines import matmul

    return matmul(self, other)

@check_if_handled_given_other
def __rmatmul__(self, other):
    from dask.array.routines import matmul

    return matmul(other, self)

@check_if_handled_given_other
def __divmod__(self, other):
    from dask.array.ufunc import divmod

    return divmod(self, other)

@check_if_handled_given_other
def __rdivmod__(self, other):
    from dask.array.ufunc import divmod

    return divmod(other, self)
# The following reduction methods are thin delegators to
# dask.array.reductions.  Note the local imports intentionally shadow the
# ``any``/``all``/``min``/``max`` builtins within each method body.
def any(self, axis=None, keepdims=False, split_every=None, out=None):
    """Returns True if any of the elements evaluate to True.

    Refer to :func:`dask.array.any` for full documentation.

    See Also
    --------
    dask.array.any : equivalent function
    """
    from dask.array.reductions import any

    return any(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)

def all(self, axis=None, keepdims=False, split_every=None, out=None):
    """Returns True if all elements evaluate to True.

    Refer to :func:`dask.array.all` for full documentation.

    See Also
    --------
    dask.array.all : equivalent function
    """
    from dask.array.reductions import all

    return all(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)

def min(self, axis=None, keepdims=False, split_every=None, out=None):
    """Return the minimum along a given axis.

    Refer to :func:`dask.array.min` for full documentation.

    See Also
    --------
    dask.array.min : equivalent function
    """
    from dask.array.reductions import min

    return min(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)

def max(self, axis=None, keepdims=False, split_every=None, out=None):
    """Return the maximum along a given axis.

    Refer to :func:`dask.array.max` for full documentation.

    See Also
    --------
    dask.array.max : equivalent function
    """
    from dask.array.reductions import max

    return max(self, axis=axis, keepdims=keepdims, split_every=split_every, out=out)
def argmin(self, axis=None, *, keepdims=False, split_every=None, out=None):
    """Return indices of the minimum values along the given axis.

    Refer to :func:`dask.array.argmin` for full documentation.

    See Also
    --------
    dask.array.argmin : equivalent function
    """
    from dask.array.reductions import argmin

    return argmin(
        self, axis=axis, keepdims=keepdims, split_every=split_every, out=out
    )

def argmax(self, axis=None, *, keepdims=False, split_every=None, out=None):
    """Return indices of the maximum values along the given axis.

    Refer to :func:`dask.array.argmax` for full documentation.

    See Also
    --------
    dask.array.argmax : equivalent function
    """
    from dask.array.reductions import argmax

    return argmax(
        self, axis=axis, keepdims=keepdims, split_every=split_every, out=out
    )
def sum(self, axis=None, dtype=None, keepdims=False, split_every=None, out=None):
    """
    Return the sum of the array elements over the given axis.

    Refer to :func:`dask.array.sum` for full documentation.

    See Also
    --------
    dask.array.sum : equivalent function
    """
    from dask.array.reductions import sum

    return sum(
        self,
        axis=axis,
        dtype=dtype,
        keepdims=keepdims,
        split_every=split_every,
        out=out,
    )

def trace(self, offset=0, axis1=0, axis2=1, dtype=None):
    """Return the sum along diagonals of the array.

    Refer to :func:`dask.array.trace` for full documentation.

    See Also
    --------
    dask.array.trace : equivalent function
    """
    from dask.array.reductions import trace

    return trace(self, offset=offset, axis1=axis1, axis2=axis2, dtype=dtype)
def prod(self, axis=None, dtype=None, keepdims=False, split_every=None, out=None):
    """Return the product of the array elements over the given axis

    Refer to :func:`dask.array.prod` for full documentation.

    See Also
    --------
    dask.array.prod : equivalent function
    """
    from dask.array.reductions import prod

    return prod(
        self,
        axis=axis,
        dtype=dtype,
        keepdims=keepdims,
        split_every=split_every,
        out=out,
    )

def mean(self, axis=None, dtype=None, keepdims=False, split_every=None, out=None):
    """Returns the average of the array elements along given axis.

    Refer to :func:`dask.array.mean` for full documentation.

    See Also
    --------
    dask.array.mean : equivalent function
    """
    from dask.array.reductions import mean

    return mean(
        self,
        axis=axis,
        dtype=dtype,
        keepdims=keepdims,
        split_every=split_every,
        out=out,
    )

def std(
    self, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None
):
    """Returns the standard deviation of the array elements along given axis.

    Refer to :func:`dask.array.std` for full documentation.

    See Also
    --------
    dask.array.std : equivalent function
    """
    from dask.array.reductions import std

    return std(
        self,
        axis=axis,
        dtype=dtype,
        keepdims=keepdims,
        ddof=ddof,
        split_every=split_every,
        out=out,
    )

def var(
    self, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None, out=None
):
    """Returns the variance of the array elements, along given axis.

    Refer to :func:`dask.array.var` for full documentation.

    See Also
    --------
    dask.array.var : equivalent function
    """
    from dask.array.reductions import var

    return var(
        self,
        axis=axis,
        dtype=dtype,
        keepdims=keepdims,
        ddof=ddof,
        split_every=split_every,
        out=out,
    )
def moment(
    self,
    order,
    axis=None,
    dtype=None,
    keepdims=False,
    ddof=0,
    split_every=None,
    out=None,
):
    """Calculate the nth centralized moment.

    Refer to :func:`dask.array.moment` for the full documentation.

    See Also
    --------
    dask.array.moment : equivalent function
    """
    from dask.array.reductions import moment

    return moment(
        self,
        order,
        axis=axis,
        dtype=dtype,
        keepdims=keepdims,
        ddof=ddof,
        split_every=split_every,
        out=out,
    )
@wraps(map_blocks)
def map_blocks(self, func, *args, **kwargs):
    # Method form of the module-level ``map_blocks``: the array itself is
    # inserted as the first array argument after ``func``.
    return map_blocks(func, self, *args, **kwargs)
def map_overlap(self, func, depth, boundary=None, trim=True, **kwargs):
    """Map a function over blocks of the array with some overlap

    Refer to :func:`dask.array.map_overlap` for full documentation.

    See Also
    --------
    dask.array.map_overlap : equivalent function
    """
    from dask.array.overlap import map_overlap

    return map_overlap(
        func, self, depth=depth, boundary=boundary, trim=trim, **kwargs
    )
def cumsum(self, axis, dtype=None, out=None, *, method="sequential"):
    """Return the cumulative sum of the elements along the given axis.

    Refer to :func:`dask.array.cumsum` for full documentation.

    See Also
    --------
    dask.array.cumsum : equivalent function
    """
    from dask.array.reductions import cumsum

    return cumsum(self, axis, dtype, out=out, method=method)

def cumprod(self, axis, dtype=None, out=None, *, method="sequential"):
    """Return the cumulative product of the elements along the given axis.

    Refer to :func:`dask.array.cumprod` for full documentation.

    See Also
    --------
    dask.array.cumprod : equivalent function
    """
    from dask.array.reductions import cumprod

    return cumprod(self, axis, dtype, out=out, method=method)
def squeeze(self, axis=None):
    """Remove axes of length one from array.

    Refer to :func:`dask.array.squeeze` for full documentation.

    See Also
    --------
    dask.array.squeeze : equivalent function
    """
    from dask.array.routines import squeeze

    return squeeze(self, axis)

def rechunk(
    self, chunks="auto", threshold=None, block_size_limit=None, balance=False
):
    """Convert blocks in dask array x for new chunks.

    Refer to :func:`dask.array.rechunk` for full documentation.

    See Also
    --------
    dask.array.rechunk : equivalent function
    """
    from dask.array.rechunk import rechunk  # avoid circular import

    return rechunk(self, chunks, threshold, block_size_limit, balance)
@property
def real(self):
    # Element-wise real component, mirroring ``numpy.ndarray.real``.
    from dask.array.ufunc import real

    return real(self)

@property
def imag(self):
    # Element-wise imaginary component, mirroring ``numpy.ndarray.imag``.
    from dask.array.ufunc import imag

    return imag(self)

def conj(self):
    """Complex-conjugate all elements.

    Refer to :func:`dask.array.conj` for full documentation.

    See Also
    --------
    dask.array.conj : equivalent function
    """
    from dask.array.ufunc import conj

    return conj(self)

def clip(self, min=None, max=None):
    """Return an array whose values are limited to ``[min, max]``.
    One of max or min must be given.

    Refer to :func:`dask.array.clip` for full documentation.

    See Also
    --------
    dask.array.clip : equivalent function
    """
    from dask.array.ufunc import clip

    return clip(self, min, max)
def view(self, dtype=None, order="C"):
    """Get a view of the array as a new data type

    Parameters
    ----------
    dtype:
        The dtype by which to view the array.
        The default, None, results in the view having the same data-type
        as the original array.
    order: string
        'C' or 'F' (Fortran) ordering

    This reinterprets the bytes of the array under a new dtype. If that
    dtype does not have the same size as the original array then the shape
    will change.

    Beware that both numpy and dask.array can behave oddly when taking
    shape-changing views of arrays under Fortran ordering. Under some
    versions of NumPy this function will fail when taking shape-changing
    views of Fortran ordered arrays if the first dimension has chunks of
    size one.
    """
    if dtype is None:
        dtype = self.dtype
    else:
        dtype = np.dtype(dtype)
    # Ratio of old to new itemsize: viewing under a smaller dtype multiplies
    # the length of the reinterpreted axis, a larger dtype divides it.
    mult = self.dtype.itemsize / dtype.itemsize

    if order == "C":
        # C order: the last axis is the contiguous one being reinterpreted.
        chunks = self.chunks[:-1] + (
            tuple(ensure_int(c * mult) for c in self.chunks[-1]),
        )
    elif order == "F":
        # Fortran order: the first axis is contiguous instead.
        chunks = (
            tuple(ensure_int(c * mult) for c in self.chunks[0]),
        ) + self.chunks[1:]
    else:
        raise ValueError("Order must be one of 'C' or 'F'")

    # The positional ``dtype`` is forwarded to ``chunk.view``; the keyword
    # ``dtype=`` is consumed by map_blocks itself as the output dtype.
    return self.map_blocks(
        chunk.view, dtype, order=order, dtype=dtype, chunks=chunks
    )
def swapaxes(self, axis1, axis2):
    """Return a view of the array with ``axis1`` and ``axis2`` interchanged.

    Refer to :func:`dask.array.swapaxes` for full documentation.

    See Also
    --------
    dask.array.swapaxes : equivalent function
    """
    from dask.array.routines import swapaxes

    return swapaxes(self, axis1, axis2)

def round(self, decimals=0):
    """Return array with each element rounded to the given number of decimals.

    Refer to :func:`dask.array.round` for full documentation.

    See Also
    --------
    dask.array.round : equivalent function
    """
    from dask.array.routines import round

    return round(self, decimals=decimals)
def copy(self):
    """
    Copy array.  This is a no-op for dask.arrays, which are immutable
    """
    # A new Array wrapper is returned, but it shares the same graph, name
    # and chunks as the original — no data is duplicated.
    return Array(self.dask, self.name, self.chunks, meta=self)

def __deepcopy__(self, memo):
    # Immutability makes a shallow copy sufficient even for deepcopy;
    # record it in ``memo`` so cyclic structures resolve correctly.
    c = self.copy()
    memo[id(self)] = c
    return c
def to_delayed(self, optimize_graph=True):
    """Convert into an array of :class:`dask.delayed.Delayed` objects, one per chunk.

    Parameters
    ----------
    optimize_graph : bool, optional
        If True [default], the graph is optimized before converting into
        :class:`dask.delayed.Delayed` objects.

    See Also
    --------
    dask.array.from_delayed
    """
    keys = self.__dask_keys__()
    graph = self.__dask_graph__()
    layer = self.__dask_layers__()[0]
    if optimize_graph:
        # Optimization culls the graph down to what these keys need.
        graph = self.__dask_optimize__(graph, keys)  # TODO, don't collapse graph
        layer = "delayed-" + self.name
        graph = HighLevelGraph.from_collections(layer, graph, dependencies=())
    # Build a nested list mirroring the chunk structure (one Delayed per
    # chunk key), then wrap it as an object-dtype numpy array.
    L = ndeepmap(self.ndim, lambda k: Delayed(k, graph, layer=layer), keys)
    return np.array(L, dtype=object)
def repeat(self, repeats, axis=None):
    """Repeat elements of an array.

    Refer to :func:`dask.array.repeat` for full documentation.

    See Also
    --------
    dask.array.repeat : equivalent function
    """
    from dask.array.creation import repeat

    return repeat(self, repeats, axis=axis)

def nonzero(self):
    """Return the indices of the elements that are non-zero.

    Refer to :func:`dask.array.nonzero` for full documentation.

    See Also
    --------
    dask.array.nonzero : equivalent function
    """
    from dask.array.routines import nonzero

    return nonzero(self)

def to_zarr(self, *args, **kwargs):
    """Save array to the zarr storage format

    See https://zarr.readthedocs.io for details about the format.

    Refer to :func:`dask.array.to_zarr` for full documentation.

    See also
    --------
    dask.array.to_zarr : equivalent function
    """
    # Delegates to the module-level ``to_zarr`` defined later in this file.
    return to_zarr(self, *args, **kwargs)

def to_tiledb(self, uri, *args, **kwargs):
    """Save array to the TileDB storage manager

    See https://docs.tiledb.io for details about the format and engine.

    See function :func:`dask.array.to_tiledb` for argument documentation.

    See also
    --------
    dask.array.to_tiledb : equivalent function
    """
    from dask.array.tiledb_io import to_tiledb

    return to_tiledb(self, uri, *args, **kwargs)
def ensure_int(f):
    """Coerce ``f`` to ``int``, raising if any information would be lost.

    Parameters
    ----------
    f : number
        Value expected to be integral (possibly stored as a float).

    Returns
    -------
    int

    Raises
    ------
    ValueError
        If ``f`` has a fractional part.
    """
    value = int(f)
    if value != f:
        raise ValueError("Could not coerce %f to integer" % f)
    return value
def normalize_chunks(chunks, shape=None, limit=None, dtype=None, previous_chunks=None):
    """Normalize chunks to tuple of tuples

    This takes in a variety of input types and information and produces a full
    tuple-of-tuples result for chunks, suitable to be passed to Array or
    rechunk or any other operation that creates a Dask array.

    Parameters
    ----------
    chunks: tuple, int, dict, or string
        The chunks to be normalized.  See examples below for more details
    shape: Tuple[int]
        The shape of the array
    limit: int (optional)
        The maximum block size to target in bytes,
        if freedom is given to choose
    dtype: np.dtype
    previous_chunks: Tuple[Tuple[int]] optional
        Chunks from a previous array that we should use for inspiration when
        rechunking auto dimensions. If not provided but auto-chunking exists
        then auto-dimensions will prefer square-like chunk shapes.

    Examples
    --------
    Specify uniform chunk sizes

    >>> from dask.array.core import normalize_chunks
    >>> normalize_chunks((2, 2), shape=(5, 6))
    ((2, 2, 1), (2, 2, 2))

    Also passes through fully explicit tuple-of-tuples

    >>> normalize_chunks(((2, 2, 1), (2, 2, 2)), shape=(5, 6))
    ((2, 2, 1), (2, 2, 2))

    Cleans up lists to tuples

    >>> normalize_chunks([[2, 2], [3, 3]])
    ((2, 2), (3, 3))

    Expands integer inputs 10 -> (10, 10)

    >>> normalize_chunks(10, shape=(30, 5))
    ((10, 10, 10), (5,))

    Expands dict inputs

    >>> normalize_chunks({0: 2, 1: 3}, shape=(6, 6))
    ((2, 2, 2), (3, 3))

    The values -1 and None get mapped to full size

    >>> normalize_chunks((5, -1), shape=(10, 10))
    ((5, 5), (10,))

    Use the value "auto" to automatically determine chunk sizes along certain
    dimensions. This uses the ``limit=`` and ``dtype=`` keywords to
    determine how large to make the chunks. The term "auto" can be used
    anywhere an integer can be used. See array chunking documentation for more
    information.

    >>> normalize_chunks(("auto",), shape=(20,), limit=5, dtype='uint8')
    ((5, 5, 5, 5),)

    You can also use byte sizes (see :func:`dask.utils.parse_bytes`) in place of
    "auto" to ask for a particular size

    >>> normalize_chunks("1kiB", shape=(2000,), dtype='float32')
    ((256, 256, 256, 256, 256, 256, 256, 208),)

    Respects null dimensions

    >>> normalize_chunks((), shape=(0, 0))
    ((0,), (0,))
    """
    if dtype and not isinstance(dtype, np.dtype):
        dtype = np.dtype(dtype)
    if chunks is None:
        raise ValueError(CHUNKS_NONE_ERROR_MESSAGE)
    if isinstance(chunks, list):
        chunks = tuple(chunks)
    # A single scalar/string spec applies to every dimension.
    if isinstance(chunks, (Number, str)):
        chunks = (chunks,) * len(shape)
    # Dict maps axis index -> spec; missing axes fall through as None
    # ("full size", resolved below).
    if isinstance(chunks, dict):
        chunks = tuple(chunks.get(i, None) for i in range(len(shape)))
    if isinstance(chunks, np.ndarray):
        chunks = chunks.tolist()
    # Empty chunks spec for an all-zero shape: one zero-length chunk per axis.
    if not chunks and shape and all(s == 0 for s in shape):
        chunks = ((0,),) * len(shape)
    # 1-d array given multiple scalar chunk sizes: treat them as the explicit
    # per-block sizes of the single dimension.
    if (
        shape
        and len(shape) == 1
        and len(chunks) > 1
        and all(isinstance(c, (Number, str)) for c in chunks)
    ):
        chunks = (chunks,)
    if shape and len(chunks) != len(shape):
        raise ValueError(
            "Chunks and shape must be of the same length/dimension. "
            "Got chunks=%s, shape=%s" % (chunks, shape)
        )
    # -1 and None both mean "one chunk spanning the whole dimension".
    if -1 in chunks or None in chunks:
        chunks = tuple(s if c == -1 or c is None else c for c, s in zip(chunks, shape))
    # If specifying chunk size in bytes, use that value to set the limit.
    # Verify there is only one consistent value of limit or chunk-bytes used.
    for c in chunks:
        if isinstance(c, str) and c != "auto":
            parsed = parse_bytes(c)
            if limit is None:
                limit = parsed
            elif parsed != limit:
                raise ValueError(
                    "Only one consistent value of limit or chunk is allowed."
                    "Used %s != %s" % (parsed, limit)
                )
    # Substitute byte limits with 'auto' now that limit is set.
    chunks = tuple("auto" if isinstance(c, str) and c != "auto" else c for c in chunks)
    if any(c == "auto" for c in chunks):
        chunks = auto_chunks(chunks, shape, limit, dtype, previous_chunks)
    if shape is not None:
        chunks = tuple(c if c not in {None, -1} else s for c, s in zip(chunks, shape))
    # Expand scalar per-axis sizes into explicit per-block tuples.
    if chunks and shape is not None:
        chunks = sum(
            (
                blockdims_from_blockshape((s,), (c,))
                if not isinstance(c, (tuple, list))
                else (c,)
                for s, c in zip(shape, chunks)
            ),
            (),
        )
    for c in chunks:
        if not c:
            raise ValueError(
                "Empty tuples are not allowed in chunks. Express "
                "zero length dimensions with 0(s) in chunks"
            )
    if shape is not None:
        if len(chunks) != len(shape):
            raise ValueError(
                "Input array has %d dimensions but the supplied "
                "chunks has only %d dimensions" % (len(shape), len(chunks))
            )
        # NaN entries represent unknown chunk sizes and are exempt from the
        # sums-to-shape check.
        if not all(
            c == s or (math.isnan(c) or math.isnan(s))
            for c, s in zip(map(sum, chunks), shape)
        ):
            raise ValueError(
                "Chunks do not add up to shape. "
                "Got chunks=%s, shape=%s" % (chunks, shape)
            )
    return tuple(tuple(int(x) if not math.isnan(x) else x for x in c) for c in chunks)
def _compute_multiplier(limit: int, dtype, largest_block: int, result):
"""
Utility function for auto_chunk, to fin how much larger or smaller the ideal
chunk size is relative to what we have now.
"""
return (
limit
/ dtype.itemsize
/ largest_block
/ math.prod(r for r in result.values() if r)
)
def auto_chunks(chunks, shape, limit, dtype, previous_chunks=None):
    """Determine automatic chunks

    This takes in a chunks value that contains ``"auto"`` values in certain
    dimensions and replaces those values with concrete dimension sizes that try
    to get chunks to be of a certain size in bytes, provided by the ``limit=``
    keyword.  If multiple dimensions are marked as ``"auto"`` then they will
    all respond to meet the desired byte limit, trying to respect the aspect
    ratio of their dimensions in ``previous_chunks=``, if given.

    Parameters
    ----------
    chunks: Tuple
        A tuple of either dimensions or tuples of explicit chunk dimensions
        Some entries should be "auto"
    shape: Tuple[int]
    limit: int, str
        The maximum allowable size of a chunk in bytes
    previous_chunks: Tuple[Tuple[int]]

    See also
    --------
    normalize_chunks: for full docstring and parameters
    """
    if previous_chunks is not None:
        # Normalize scalar per-axis sizes into 1-tuples for uniform handling.
        previous_chunks = tuple(
            c if isinstance(c, tuple) else (c,) for c in previous_chunks
        )
    chunks = list(chunks)

    # Indices of the dimensions we are free to choose.
    autos = {i for i, c in enumerate(chunks) if c == "auto"}
    if not autos:
        return tuple(chunks)

    if limit is None:
        limit = config.get("array.chunk-size")
    if isinstance(limit, str):
        limit = parse_bytes(limit)

    if dtype is None:
        raise TypeError("dtype must be known for auto-chunking")

    if dtype.hasobject:
        raise NotImplementedError(
            "Can not use auto rechunking with object dtype. "
            "We are unable to estimate the size in bytes of object data"
        )

    # Unknown (NaN) chunk sizes make byte-size estimation impossible.
    for x in tuple(chunks) + tuple(shape):
        if (
            isinstance(x, Number)
            and np.isnan(x)
            or isinstance(x, tuple)
            and np.isnan(x).any()
        ):
            raise ValueError(
                "Can not perform automatic rechunking with unknown "
                "(nan) chunk sizes.%s" % unknown_chunk_message
            )

    limit = max(1, limit)

    # Element count already committed by the fixed (non-auto) dimensions.
    largest_block = math.prod(
        cs if isinstance(cs, Number) else max(cs) for cs in chunks if cs != "auto"
    )

    if previous_chunks:
        # Base ideal ratio on the median chunk size of the previous chunks
        result = {a: np.median(previous_chunks[a]) for a in autos}

        # Per-axis target to align to: the modal previous chunk size when it
        # dominates (appears in at least half the blocks), else the full axis.
        ideal_shape = []
        for i, s in enumerate(shape):
            chunk_frequencies = frequencies(previous_chunks[i])
            mode, count = max(chunk_frequencies.items(), key=lambda kv: kv[1])
            if mode > 1 and count >= len(previous_chunks[i]) / 2:
                ideal_shape.append(mode)
            else:
                ideal_shape.append(s)

        # How much larger or smaller the ideal chunk size is relative to what we have now
        multiplier = _compute_multiplier(limit, dtype, largest_block, result)

        # Fixed-point iteration: grow/shrink the auto dimensions until the
        # multiplier and the set of free dimensions stop changing.
        last_multiplier = 0
        last_autos = set()
        while (
            multiplier != last_multiplier or autos != last_autos
        ):  # while things change
            last_multiplier = multiplier  # record previous values
            last_autos = set(autos)  # record previous values

            # Expand or contract each of the dimensions appropriately
            for a in sorted(autos):
                if ideal_shape[a] == 0:
                    result[a] = 0
                    continue
                # Distribute the multiplier evenly over all free dimensions.
                proposed = result[a] * multiplier ** (1 / len(autos))
                if proposed > shape[a]:  # we've hit the shape boundary
                    autos.remove(a)
                    largest_block *= shape[a]
                    chunks[a] = shape[a]
                    del result[a]
                else:
                    result[a] = round_to(proposed, ideal_shape[a])

            # recompute how much multiplier we have left, repeat
            multiplier = _compute_multiplier(limit, dtype, largest_block, result)

        for k, v in result.items():
            chunks[k] = v
        return tuple(chunks)

    else:
        # Check if dtype.itemsize is greater than 0
        if dtype.itemsize == 0:
            raise ValueError(
                "auto-chunking with dtype.itemsize == 0 is not supported, please pass in `chunks` explicitly"
            )
        # No history to respect: aim for square-like chunks of `size` per side.
        size = (limit / dtype.itemsize / largest_block) ** (1 / len(autos))
        small = [i for i in autos if shape[i] < size]
        if small:
            # Dimensions smaller than the target get a single full-size chunk;
            # recurse so the remaining auto dimensions can use the freed budget.
            for i in small:
                chunks[i] = (shape[i],)
            return auto_chunks(chunks, shape, limit, dtype)

        for i in autos:
            chunks[i] = round_to(size, shape[i])

        return tuple(chunks)
def round_to(c, s):
    """Return a chunk dimension that is close to an even multiple or factor

    We want values for c that are nicely aligned with s.

    If c is smaller than s we use the original chunk size and accept an
    uneven chunk at the end.

    If c is larger than s then we want the largest multiple of s that is still
    smaller than c.
    """
    if c > s:
        return (c // s) * s
    # Truncate to an int, but never return a zero-sized chunk.
    return max(1, int(c))
def _get_chunk_shape(a):
s = np.asarray(a.shape, dtype=int)
return s[len(s) * (None,) + (slice(None),)]
def from_array(
    x,
    chunks="auto",
    name=None,
    lock=False,
    asarray=None,
    fancy=True,
    getitem=None,
    meta=None,
    inline_array=False,
):
    """Create dask array from something that looks like an array.

    Input must have a ``.shape``, ``.ndim``, ``.dtype`` and support numpy-style slicing.

    Parameters
    ----------
    x : array_like
    chunks : int, tuple
        How to chunk the array. Must be one of the following forms:

        - A blocksize like 1000.
        - A blockshape like (1000, 1000).
        - Explicit sizes of all blocks along all dimensions like
          ((1000, 1000, 500), (400, 400)).
        - A size in bytes, like "100 MiB" which will choose a uniform
          block-like shape
        - The word "auto" which acts like the above, but uses a configuration
          value ``array.chunk-size`` for the chunk size

        -1 or None as a blocksize indicate the size of the corresponding
        dimension.
    name : str or bool, optional
        The key name to use for the array. Defaults to a hash of ``x``.

        Hashing is useful if the same value of ``x`` is used to create multiple
        arrays, as Dask can then recognise that they're the same and
        avoid duplicate computations. However, it can also be slow, and if the
        array is not contiguous it is copied for hashing. If the array uses
        stride tricks (such as :func:`numpy.broadcast_to` or
        :func:`skimage.util.view_as_windows`) to have a larger logical
        than physical size, this copy can cause excessive memory usage.

        If you don't need the deduplication provided by hashing, use
        ``name=False`` to generate a random name instead of hashing, which
        avoids the pitfalls described above. Using ``name=True`` is
        equivalent to the default.

        By default, hashing uses python's standard sha1. This behaviour can be
        changed by installing cityhash, xxhash or murmurhash. If installed,
        a large-factor speedup can be obtained in the tokenisation step.

        .. note::

           Because this ``name`` is used as the key in task graphs, you should
           ensure that it uniquely identifies the data contained within. If
           you'd like to provide a descriptive name that is still unique, combine
           the descriptive name with :func:`dask.base.tokenize` of the
           ``array_like``. See :ref:`graphs` for more.

    lock : bool or Lock, optional
        If ``x`` doesn't support concurrent reads then provide a lock here, or
        pass in True to have dask.array create one for you.
    asarray : bool, optional
        If True then call np.asarray on chunks to convert them to numpy arrays.
        If False then chunks are passed through unchanged.
        If None (default) then we use True if the ``__array_function__`` method
        is undefined.

        .. note::

            Dask does not preserve the memory layout of the original array when
            the array is created using Fortran rather than C ordering.

    fancy : bool, optional
        If ``x`` doesn't support fancy indexing (e.g. indexing with lists or
        arrays) then set to False. Default is True.
    meta : Array-like, optional
        The metadata for the resulting dask array.  This is the kind of array
        that will result from slicing the input array.
        Defaults to the input array.
    inline_array : bool, default False
        How to include the array in the task graph. By default
        (``inline_array=False``) the array is included in a task by itself,
        and each chunk refers to that task by its key.

        .. code-block:: python

            >>> x = h5py.File("data.h5")["/x"]  # doctest: +SKIP
            >>> a = da.from_array(x, chunks=500)  # doctest: +SKIP
            >>> dict(a.dask)  # doctest: +SKIP
            {
             'array-original-<name>': <HDF5 dataset ...>,
             ('array-<name>', 0): (getitem, "array-original-<name>", ...),
             ('array-<name>', 1): (getitem, "array-original-<name>", ...)
            }

        With ``inline_array=True``, Dask will instead inline the array directly
        in the values of the task graph.

        .. code-block:: python

            >>> a = da.from_array(x, chunks=500, inline_array=True)  # doctest: +SKIP
            >>> dict(a.dask)  # doctest: +SKIP
            {
             ('array-<name>', 0): (getitem, <HDF5 dataset ...>, ...),
             ('array-<name>', 1): (getitem, <HDF5 dataset ...>, ...)
            }

        Note that there's no key in the task graph with just the array `x`
        anymore. Instead it's placed directly in the values.

        The right choice for ``inline_array`` depends on several factors,
        including the size of ``x``, how expensive it is to create, which
        scheduler you're using, and the pattern of downstream computations.
        As a heuristic, ``inline_array=True`` may be the right choice when
        the array ``x`` is cheap to serialize and deserialize (since it's
        included in the graph many times) and if you're experiencing ordering
        issues (see :ref:`order` for more).

        This has no effect when ``x`` is a NumPy array.

    Examples
    --------

    >>> x = h5py.File('...')['/data/path']  # doctest: +SKIP
    >>> a = da.from_array(x, chunks=(1000, 1000))  # doctest: +SKIP

    If your underlying datastore does not support concurrent reads then include
    the ``lock=True`` keyword argument or ``lock=mylock`` if you want multiple
    arrays to coordinate around the same lock.

    >>> a = da.from_array(x, chunks=(1000, 1000), lock=True)  # doctest: +SKIP

    If your underlying datastore has a ``.chunks`` attribute (as h5py and zarr
    datasets do) then a multiple of that chunk shape will be used if you
    do not provide a chunk shape.

    >>> a = da.from_array(x, chunks='auto')  # doctest: +SKIP
    >>> a = da.from_array(x, chunks='100 MiB')  # doctest: +SKIP
    >>> a = da.from_array(x)  # doctest: +SKIP

    If providing a name, ensure that it is unique

    >>> import dask.base
    >>> token = dask.base.tokenize(x)  # doctest: +SKIP
    >>> a = da.from_array('myarray-' + token)  # doctest: +SKIP

    NumPy ndarrays are eagerly sliced and then embedded in the graph.

    >>> import dask.array
    >>> a = dask.array.from_array(np.array([[1, 2], [3, 4]]), chunks=(1,1))
    >>> a.dask[a.name, 0, 0][0]
    array([1])

    Chunks with exactly-specified, different sizes can be created.

    >>> import numpy as np
    >>> import dask.array as da
    >>> x = np.random.random((100, 6))
    >>> a = da.from_array(x, chunks=((67, 33), (6,)))
    """
    if isinstance(x, Array):
        raise ValueError(
            "Array is already a dask array. Use 'asarray' or " "'rechunk' instead."
        )
    elif is_dask_collection(x):
        warnings.warn(
            "Passing an object to dask.array.from_array which is already a "
            "Dask collection. This can lead to unexpected behavior."
        )

    # Materialize plain Python containers and scalars as numpy arrays.
    if isinstance(x, (list, tuple, memoryview) + np.ScalarType):
        x = np.array(x)

    if asarray is None:
        asarray = not hasattr(x, "__array_function__")

    previous_chunks = getattr(x, "chunks", None)

    chunks = normalize_chunks(
        chunks, x.shape, dtype=x.dtype, previous_chunks=previous_chunks
    )

    if name in (None, True):
        # Deterministic name via hashing enables graph deduplication.
        token = tokenize(x, chunks, lock, asarray, fancy, getitem, inline_array)
        name = name or "array-" + token
    elif name is False:
        # Random name: skips the (possibly expensive) hashing step.
        name = "array-" + str(uuid.uuid1())

    if lock is True:
        lock = SerializableLock()

    is_ndarray = type(x) in (np.ndarray, np.ma.core.MaskedArray)
    is_single_block = all(len(c) == 1 for c in chunks)
    # Always use the getter for h5py etc. Not using isinstance(x, np.ndarray)
    # because np.matrix is a subclass of np.ndarray.
    if is_ndarray and not is_single_block and not lock:
        # eagerly slice numpy arrays to prevent memory blowup
        # GH5367, GH5601
        slices = slices_from_chunks(chunks)
        keys = product([name], *(range(len(bds)) for bds in chunks))
        values = [x[slc] for slc in slices]
        dsk = dict(zip(keys, values))
    elif is_ndarray and is_single_block:
        # No slicing needed
        dsk = {(name,) + (0,) * x.ndim: x}
    else:
        if getitem is None:
            if fancy:
                getitem = getter
            else:
                getitem = getter_nofancy

        dsk = graph_from_arraylike(
            x,
            chunks,
            x.shape,
            name,
            getitem=getitem,
            lock=lock,
            asarray=asarray,
            dtype=x.dtype,
            inline_array=inline_array,
        )

    # Workaround for TileDB, its indexing is 1-based,
    # and doesn't seems to support 0-length slicing
    if x.__class__.__module__.split(".")[0] == "tiledb" and hasattr(x, "_ctx_"):
        return Array(dsk, name, chunks, dtype=x.dtype)

    if meta is None:
        meta = x

    return Array(dsk, name, chunks, meta=meta, dtype=getattr(x, "dtype", None))
def from_zarr(
    url,
    component=None,
    storage_options=None,
    chunks=None,
    name=None,
    inline_array=False,
    **kwargs,
):
    """Load array from the zarr storage format
    See https://zarr.readthedocs.io for details about the format.
    Parameters
    ----------
    url: Zarr Array or str or MutableMapping
        Location of the data. A URL can include a protocol specifier like s3://
        for remote data. Can also be any MutableMapping instance, which should
        be serializable if used in multiple processes.
    component: str or None
        If the location is a zarr group rather than an array, this is the
        subcomponent that should be loaded, something like ``'foo/bar'``.
    storage_options: dict
        Any additional parameters for the storage backend (ignored for local
        paths)
    chunks: tuple of ints or tuples of ints
        Passed to :func:`dask.array.from_array`, allows setting the chunks on
        initialisation, if the chunking scheme in the on-disc dataset is not
        optimal for the calculations to follow.
    name : str, optional
        An optional keyname for the array. Defaults to hashing the input
    kwargs:
        Passed to :class:`zarr.core.Array`.
    inline_array : bool, default False
        Whether to inline the zarr Array in the values of the task graph.
        See :meth:`dask.array.from_array` for an explanation.
    See Also
    --------
    from_array
    """
    import zarr
    storage_options = storage_options or {}
    # ``url`` may be: an already-open zarr.Array, a path/URL string (opened
    # through fsspec's get_mapper), or a mapper/MutableMapping readable by zarr.
    if isinstance(url, zarr.Array):
        z = url
    elif isinstance(url, (str, os.PathLike)):
        if isinstance(url, os.PathLike):
            url = os.fspath(url)
        mapper = get_mapper(url, **storage_options)
        z = zarr.Array(mapper, read_only=True, path=component, **kwargs)
    else:
        mapper = url
        z = zarr.Array(mapper, read_only=True, path=component, **kwargs)
    # Default to the on-disk chunking when the caller does not override it.
    chunks = chunks if chunks is not None else z.chunks
    if name is None:
        name = "from-zarr-" + tokenize(z, component, storage_options, chunks, **kwargs)
    return from_array(z, chunks, name=name, inline_array=inline_array)
def to_zarr(
    arr,
    url,
    component=None,
    storage_options=None,
    overwrite=False,
    region=None,
    compute=True,
    return_stored=False,
    **kwargs,
):
    """Save array to the zarr storage format
    See https://zarr.readthedocs.io for details about the format.
    Parameters
    ----------
    arr: dask.array
        Data to store
    url: Zarr Array or str or MutableMapping
        Location of the data. A URL can include a protocol specifier like s3://
        for remote data. Can also be any MutableMapping instance, which should
        be serializable if used in multiple processes.
    component: str or None
        If the location is a zarr group rather than an array, this is the
        subcomponent that should be created/over-written.
    storage_options: dict
        Any additional parameters for the storage backend (ignored for local
        paths)
    overwrite: bool
        If given array already exists, overwrite=False will cause an error,
        where overwrite=True will replace the existing data.
    region: tuple of slices or None
        The region of data that should be written if ``url`` is a zarr.Array.
        Not to be used with other types of ``url``.
    compute: bool
        See :func:`~dask.array.store` for more details.
    return_stored: bool
        See :func:`~dask.array.store` for more details.
    **kwargs:
        Passed to the :func:`zarr.creation.create` function, e.g., compression options.
    Raises
    ------
    ValueError
        If ``arr`` has unknown chunk sizes, which is not supported by Zarr.
        If ``region`` is specified and ``url`` is not a zarr.Array
    See Also
    --------
    dask.array.store
    dask.array.Array.compute_chunk_sizes
    """
    import zarr
    # Zarr requires concrete integer chunk sizes; NaN chunks cannot be stored.
    if np.isnan(arr.shape).any():
        raise ValueError(
            "Saving a dask array with unknown chunk sizes is not "
            "currently supported by Zarr.%s" % unknown_chunk_message
        )
    if isinstance(url, zarr.Array):
        z = url
        # An in-memory (dict-backed) store cannot be shared across distributed
        # workers: each worker would write into its own copy of the dict.
        if isinstance(z.store, (dict, MutableMapping)) and config.get(
            "scheduler", ""
        ) in ("dask.distributed", "distributed"):
            raise RuntimeError(
                "Cannot store into in memory Zarr Array using "
                "the Distributed Scheduler."
            )
        if region is None:
            arr = arr.rechunk(z.chunks)
            regions = None
        else:
            from dask.array.slicing import new_blockdim, normalize_index
            # Rechunk ``arr`` so its blocks line up with the target's chunk
            # grid restricted to ``region``.
            old_chunks = normalize_chunks(z.chunks, z.shape)
            index = normalize_index(region, z.shape)
            chunks = tuple(
                tuple(new_blockdim(s, c, r))
                for s, c, r in zip(z.shape, old_chunks, index)
            )
            arr = arr.rechunk(chunks)
            regions = [region]
        return arr.store(
            z, lock=False, regions=regions, compute=compute, return_stored=return_stored
        )
    if region is not None:
        raise ValueError("Cannot use `region` keyword when url is not a `zarr.Array`.")
    if not _check_regular_chunks(arr.chunks):
        raise ValueError(
            "Attempt to save array to zarr with irregular "
            "chunking, please call `arr.rechunk(...)` first."
        )
    storage_options = storage_options or {}
    if isinstance(url, str):
        mapper = get_mapper(url, **storage_options)
    else:
        # assume the object passed is already a mapper
        mapper = url
    # Zarr chunks are uniform per axis; the first chunk size is representative
    # because regularity was verified above.
    chunks = [c[0] for c in arr.chunks]
    z = zarr.create(
        shape=arr.shape,
        chunks=chunks,
        dtype=arr.dtype,
        store=mapper,
        path=component,
        overwrite=overwrite,
        **kwargs,
    )
    return arr.store(z, lock=False, compute=compute, return_stored=return_stored)
def _check_regular_chunks(chunkset):
"""Check if the chunks are regular
"Regular" in this context means that along every axis, the chunks all
have the same size, except the last one, which may be smaller
Parameters
----------
chunkset: tuple of tuples of ints
From the ``.chunks`` attribute of an ``Array``
Returns
-------
True if chunkset passes, else False
Examples
--------
>>> import dask.array as da
>>> arr = da.zeros(10, chunks=(5, ))
>>> _check_regular_chunks(arr.chunks)
True
>>> arr = da.zeros(10, chunks=((3, 3, 3, 1), ))
>>> _check_regular_chunks(arr.chunks)
True
>>> arr = da.zeros(10, chunks=((3, 1, 3, 3), ))
>>> _check_regular_chunks(arr.chunks)
False
"""
for chunks in chunkset:
if len(chunks) == 1:
continue
if len(set(chunks[:-1])) > 1:
return False
if chunks[-1] > chunks[0]:
return False
return True
def from_delayed(value, shape, dtype=None, meta=None, name=None):
    """Create a dask array from a dask delayed value
    This routine is useful for constructing dask arrays in an ad-hoc fashion
    using dask delayed, particularly when combined with stack and concatenate.
    The dask array will consist of a single chunk.
    Examples
    --------
    >>> import dask
    >>> import dask.array as da
    >>> import numpy as np
    >>> value = dask.delayed(np.ones)(5)
    >>> array = da.from_delayed(value, (5,), dtype=float)
    >>> array
    dask.array<from-value, shape=(5,), dtype=float64, chunksize=(5,), chunktype=numpy.ndarray>
    >>> array.compute()
    array([1., 1., 1., 1., 1.])
    """
    from dask.delayed import Delayed, delayed
    # Wrap other key-bearing dask objects (e.g. Futures) as Delayed so that
    # graph dependencies resolve correctly below.
    if not isinstance(value, Delayed) and hasattr(value, "key"):
        value = delayed(value)
    name = name or "from-value-" + tokenize(value, shape, dtype, meta)
    # The whole array is a single chunk whose task simply aliases the
    # delayed value's key.
    dsk = {(name,) + (0,) * len(shape): value.key}
    chunks = tuple((d,) for d in shape)
    # TODO: value._key may not be the name of the layer in value.dask
    # This should be fixed after we build full expression graphs
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=[value])
    return Array(graph, name, chunks, dtype=dtype, meta=meta)
def from_func(func, shape, dtype=None, name=None, args=(), kwargs=None):
    """Create dask array in a single block by calling a function
    Calling the provided function with func(*args, **kwargs) should return a
    NumPy array of the indicated shape and dtype.
    Examples
    --------
    >>> a = from_func(np.arange, (3,), dtype='i8', args=(3,))
    >>> a.compute()
    array([0, 1, 2])
    This works particularly well when coupled with dask.array functions like
    concatenate and stack:
    >>> arrays = [from_func(np.array, (), dtype='i8', args=(n,)) for n in range(5)]
    >>> stack(arrays).compute()
    array([0, 1, 2, 3, 4])
    """
    if kwargs is None:
        kwargs = {}
    name = name or "from_func-" + tokenize(func, shape, dtype, args, kwargs)
    # Bind arguments up front so the graph task is a plain zero-argument call.
    if args or kwargs:
        func = partial(func, *args, **kwargs)
    # One task producing the whole array: a single chunk per dimension.
    dsk = {(name,) + (0,) * len(shape): (func,)}
    chunks = tuple((i,) for i in shape)
    return Array(dsk, name, chunks, dtype)
def common_blockdim(blockdims):
    """Find the common block dimensions from the list of block dimensions
    Currently only implements the simplest possible heuristic: the common
    block dimension is the only one that does not fully span a dimension.
    This is a conservative choice that allows us to avoid potentially very
    expensive rechunking.
    Assumes that each element of the input block dimensions has all the same
    sum (i.e., that they correspond to dimensions of the same size).
    Examples
    --------
    >>> common_blockdim([(3,), (2, 1)])
    (2, 1)
    >>> common_blockdim([(1, 2), (2, 1)])
    (1, 1, 1)
    >>> common_blockdim([(2, 2), (3, 1)]) # doctest: +SKIP
    Traceback (most recent call last):
        ...
    ValueError: Chunks do not align
    """
    if not any(blockdims):
        return ()
    # Chunkings that split this axis into more than one block.
    non_trivial_dims = {d for d in blockdims if len(d) > 1}
    if len(non_trivial_dims) == 1:
        return first(non_trivial_dims)
    if len(non_trivial_dims) == 0:
        # Everything is a single block; keep the largest single-chunk form.
        return max(blockdims, key=first)
    if np.isnan(sum(map(sum, blockdims))):
        raise ValueError(
            "Arrays' chunk sizes (%s) are unknown.\n\n"
            "A possible solution:\n"
            "  x.compute_chunk_sizes()" % blockdims
        )
    if len(set(map(sum, non_trivial_dims))) > 1:
        raise ValueError("Chunks do not add up to same value", blockdims)
    # We have multiple non-trivial chunks on this axis
    # e.g. (5, 2) and (4, 3)
    # We create a single chunk tuple with the same total length
    # that evenly divides both, e.g. (4, 1, 2)
    # To accomplish this we walk down all chunk tuples together, finding the
    # smallest element, adding it to the output, and subtracting it from all
    # other elements and remove the element itself. We stop once we have
    # burned through all of the chunk tuples.
    # For efficiency's sake we reverse the lists so that we can pop off the end
    rchunks = [list(ntd)[::-1] for ntd in non_trivial_dims]
    total = sum(first(non_trivial_dims))
    i = 0
    out = []
    while i < total:
        m = min(c[-1] for c in rchunks)
        out.append(m)
        for c in rchunks:
            c[-1] -= m
            if c[-1] == 0:
                c.pop()
        i += m
    return tuple(out)
def unify_chunks(*args, **kwargs):
    """
    Unify chunks across a sequence of arrays
    This utility function is used within other common operations like
    :func:`dask.array.core.map_blocks` and :func:`dask.array.core.blockwise`.
    It is not commonly used by end-users directly.
    Parameters
    ----------
    *args: sequence of Array, index pairs
        Sequence like (x, 'ij', y, 'jk', z, 'i')
    Examples
    --------
    >>> import dask.array as da
    >>> x = da.ones(10, chunks=((5, 2, 3),))
    >>> y = da.ones(10, chunks=((2, 3, 5),))
    >>> chunkss, arrays = unify_chunks(x, 'i', y, 'i')
    >>> chunkss
    {'i': (2, 3, 2, 3)}
    >>> x = da.ones((100, 10), chunks=(20, 5))
    >>> y = da.ones((10, 100), chunks=(4, 50))
    >>> chunkss, arrays = unify_chunks(x, 'ij', y, 'jk', 'constant', None)
    >>> chunkss  # doctest: +SKIP
    {'k': (50, 50), 'i': (20, 20, 20, 20, 20), 'j': (4, 1, 3, 2)}
    >>> unify_chunks(0, None)
    ({}, [0])
    Returns
    -------
    chunkss : dict
        Map like {index: chunks}.
    arrays : list
        List of rechunked arrays.
    See Also
    --------
    common_blockdim
    """
    if not args:
        return {}, []
    # Pair each array with its index string; args with ``None`` index pass
    # through untouched (e.g. scalars/constants).
    arginds = [
        (asanyarray(a) if ind is not None else a, ind) for a, ind in partition(2, args)
    ]  # [x, ij, y, jk]
    warn = kwargs.get("warn", True)
    arrays, inds = zip(*arginds)
    if all(ind is None for ind in inds):
        return {}, list(arrays)
    # Fast path: identical index strings and identical chunks -> nothing to do.
    if all(ind == inds[0] for ind in inds) and all(
        a.chunks == arrays[0].chunks for a in arrays
    ):
        return dict(zip(inds[0], arrays[0].chunks)), arrays
    nameinds = []
    blockdim_dict = dict()
    max_parts = 0
    for a, ind in arginds:
        if ind is not None:
            nameinds.append((a.name, ind))
            blockdim_dict[a.name] = a.chunks
            max_parts = max(max_parts, a.npartitions)
        else:
            nameinds.append((a, ind))
    # Resolve one common chunking per index label across all participants.
    chunkss = broadcast_dimensions(nameinds, blockdim_dict, consolidate=common_blockdim)
    nparts = math.prod(map(len, chunkss.values()))
    # Warn when unification would blow up the task count by 10x or more.
    if warn and nparts and nparts >= max_parts * 10:
        warnings.warn(
            "Increasing number of chunks by factor of %d" % (nparts / max_parts),
            PerformanceWarning,
            stacklevel=3,
        )
    arrays = []
    for a, i in arginds:
        if i is None:
            arrays.append(a)
        else:
            # Keep broadcast (length-1) axes as-is; leave NaN-chunked axes
            # unresolved (None) rather than forcing a rechunk.
            chunks = tuple(
                chunkss[j]
                if a.shape[n] > 1
                else a.shape[n]
                if not np.isnan(sum(chunkss[j]))
                else None
                for n, j in enumerate(i)
            )
            if chunks != a.chunks and all(a.chunks):
                arrays.append(a.rechunk(chunks))
            else:
                arrays.append(a)
    return chunkss, arrays
def unpack_singleton(x):
    """Drill through nested lists/tuples, returning the innermost first item.

    Non-sequence values (including empty sequences hit along the way) are
    returned unchanged.

    >>> unpack_singleton([[[[1]]]])
    1
    >>> unpack_singleton(np.array(np.datetime64('2000-01-01')))
    array('2000-01-01', dtype='datetime64[D]')
    """
    result = x
    while True:
        # Stop as soon as we reach something that is not a list/tuple.
        if not isinstance(result, (list, tuple)):
            return result
        try:
            result = result[0]
        except (IndexError, TypeError, KeyError):
            # Empty (or otherwise unindexable) container: return it as-is.
            return result
def block(arrays, allow_unknown_chunksizes=False):
    """
    Assemble an nd-array from nested lists of blocks.
    Blocks in the innermost lists are concatenated along the last
    dimension (-1), then these are concatenated along the second-last
    dimension (-2), and so on until the outermost list is reached
    Blocks can be of any dimension, but will not be broadcasted using the normal
    rules. Instead, leading axes of size 1 are inserted, to make ``block.ndim``
    the same for all blocks. This is primarily useful for working with scalars,
    and means that code like ``block([v, 1])`` is valid, where
    ``v.ndim == 1``.
    When the nested list is two levels deep, this allows block matrices to be
    constructed from their components.
    Parameters
    ----------
    arrays : nested list of array_like or scalars (but not tuples)
        If passed a single ndarray or scalar (a nested list of depth 0), this
        is returned unmodified (and not copied).
        Elements shapes must match along the appropriate axes (without
        broadcasting), but leading 1s will be prepended to the shape as
        necessary to make the dimensions match.
    allow_unknown_chunksizes: bool
        Allow unknown chunksizes, such as come from converting from dask
        dataframes. Dask.array is unable to verify that chunks line up. If
        data comes from differently aligned sources then this can cause
        unexpected results.
    Returns
    -------
    block_array : ndarray
        The array assembled from the given blocks.
        The dimensionality of the output is equal to the greatest of:
        * the dimensionality of all the inputs
        * the depth to which the input list is nested
    Raises
    ------
    ValueError
        * If list depths are mismatched - for instance, ``[[a, b], c]`` is
          illegal, and should be spelt ``[[a, b], [c]]``
        * If lists are empty - for instance, ``[[a, b], []]``
    See Also
    --------
    concatenate : Join a sequence of arrays together.
    stack : Stack arrays in sequence along a new dimension.
    hstack : Stack arrays in sequence horizontally (column wise).
    vstack : Stack arrays in sequence vertically (row wise).
    dstack : Stack arrays in sequence depth wise (along third dimension).
    vsplit : Split array into a list of multiple sub-arrays vertically.
    Notes
    -----
    When called with only scalars, ``block`` is equivalent to an ndarray
    call. So ``block([[1, 2], [3, 4]])`` is equivalent to
    ``array([[1, 2], [3, 4]])``.
    This function does not enforce that the blocks lie on a fixed grid.
    ``block([[a, b], [c, d]])`` is not restricted to arrays of the form::
        AAAbb
        AAAbb
        cccDD
    But is also allowed to produce, for some ``a, b, c, d``::
        AAAbb
        AAAbb
        cDDDD
    Since concatenation happens along the last axis first, `block` is _not_
    capable of producing the following directly::
        AAAbb
        cccbb
        cccDD
    Matlab's "square bracket stacking", ``[A, B, ...; p, q, ...]``, is
    equivalent to ``block([[A, B, ...], [p, q, ...]])``.
    """
    # This was copied almost verbatim from numpy.core.shape_base.block
    # See numpy license at https://github.com/numpy/numpy/blob/master/LICENSE.txt
    # or NUMPY_LICENSE.txt within this directory
    # Prepend singleton axes until ``x`` has at least ``ndim`` dimensions.
    def atleast_nd(x, ndim):
        x = asanyarray(x)
        diff = max(ndim - x.ndim, 0)
        if diff == 0:
            return x
        else:
            return x[(None,) * diff + (Ellipsis,)]
    # Human-readable path into the nested ``arrays`` list, for error messages.
    def format_index(index):
        return "arrays" + "".join(f"[{i}]" for i in index)
    rec = _Recurser(recurse_if=lambda x: type(x) is list)
    # ensure that the lists are all matched in depth
    list_ndim = None
    any_empty = False
    for index, value, entering in rec.walk(arrays):
        if type(value) is tuple:
            # not strictly necessary, but saves us from:
            #  - more than one way to do things - no point treating tuples like
            #    lists
            #  - horribly confusing behaviour that results when tuples are
            #    treated like ndarray
            raise TypeError(
                "{} is a tuple. "
                "Only lists can be used to arrange blocks, and np.block does "
                "not allow implicit conversion from tuple to ndarray.".format(
                    format_index(index)
                )
            )
        if not entering:
            curr_depth = len(index)
        elif len(value) == 0:
            curr_depth = len(index) + 1
            any_empty = True
        else:
            continue
        if list_ndim is not None and list_ndim != curr_depth:
            raise ValueError(
                "List depths are mismatched. First element was at depth {}, "
                "but there is an element at depth {} ({})".format(
                    list_ndim, curr_depth, format_index(index)
                )
            )
        list_ndim = curr_depth
    # do this here so we catch depth mismatches first
    if any_empty:
        raise ValueError("Lists cannot be empty")
    # convert all the arrays to ndarrays
    arrays = rec.map_reduce(arrays, f_map=asanyarray, f_reduce=list)
    # determine the maximum dimension of the elements
    elem_ndim = rec.map_reduce(arrays, f_map=lambda xi: xi.ndim, f_reduce=max)
    ndim = max(list_ndim, elem_ndim)
    # first axis to concatenate along
    first_axis = ndim - list_ndim
    # Make all the elements the same dimension
    arrays = rec.map_reduce(
        arrays, f_map=lambda xi: atleast_nd(xi, ndim), f_reduce=list
    )
    # concatenate innermost lists on the right, outermost on the left
    return rec.map_reduce(
        arrays,
        f_reduce=lambda xs, axis: concatenate(
            list(xs), axis=axis, allow_unknown_chunksizes=allow_unknown_chunksizes
        ),
        f_kwargs=lambda axis: dict(axis=(axis + 1)),
        axis=first_axis,
    )
def concatenate(seq, axis=0, allow_unknown_chunksizes=False):
    """
    Concatenate arrays along an existing axis
    Given a sequence of dask Arrays form a new dask Array by stacking them
    along an existing dimension (axis=0 by default)
    Parameters
    ----------
    seq: list of dask.arrays
    axis: int
        Dimension along which to align all of the arrays. If axis is None,
        arrays are flattened before use.
    allow_unknown_chunksizes: bool
        Allow unknown chunksizes, such as come from converting from dask
        dataframes. Dask.array is unable to verify that chunks line up. If
        data comes from differently aligned sources then this can cause
        unexpected results.
    Examples
    --------
    Create slices
    >>> import dask.array as da
    >>> import numpy as np
    >>> data = [da.from_array(np.ones((4, 4)), chunks=(2, 2))
    ...          for i in range(3)]
    >>> x = da.concatenate(data, axis=0)
    >>> x.shape
    (12, 4)
    >>> da.concatenate(data, axis=1).shape
    (4, 12)
    Result is a new dask Array
    See Also
    --------
    stack
    """
    from dask.array import wrap
    seq = [asarray(a, allow_unknown_chunksizes=allow_unknown_chunksizes) for a in seq]
    if not seq:
        raise ValueError("Need array(s) to concatenate")
    if axis is None:
        seq = [a.flatten() for a in seq]
        axis = 0
    seq_metas = [meta_from_array(s) for s in seq]
    # Dispatch the concatenation on the highest-priority chunk type among the
    # inputs (plain ndarray vs. masked/sparse subclasses).
    _concatenate = concatenate_lookup.dispatch(
        type(max(seq_metas, key=lambda x: getattr(x, "__array_priority__", 0)))
    )
    meta = _concatenate(seq_metas, axis=axis)
    # Promote types to match meta
    seq = [a.astype(meta.dtype) for a in seq]
    # Find output array shape
    ndim = len(seq[0].shape)
    shape = tuple(
        sum(a.shape[i] for a in seq) if i == axis else seq[0].shape[i]
        for i in range(ndim)
    )
    # Drop empty arrays
    seq2 = [a for a in seq if a.size]
    if not seq2:
        seq2 = seq
    if axis < 0:
        axis = ndim + axis
    if axis >= ndim:
        # FIX: message previously read "less than than" (duplicated word).
        msg = (
            "Axis must be less than number of dimensions"
            "\nData has %d dimensions, but got axis=%d"
        )
        raise ValueError(msg % (ndim, axis))
    n = len(seq2)
    if n == 0:
        try:
            return wrap.empty_like(meta, shape=shape, chunks=shape, dtype=meta.dtype)
        except TypeError:
            return wrap.empty(shape, chunks=shape, dtype=meta.dtype)
    elif n == 1:
        return seq2[0]
    if not allow_unknown_chunksizes and not all(
        i == axis or all(x.shape[i] == seq2[0].shape[i] for x in seq2)
        for i in range(ndim)
    ):
        if any(map(np.isnan, seq2[0].shape)):
            raise ValueError(
                "Tried to concatenate arrays with unknown"
                " shape %s.\n\nTwo solutions:\n"
                " 1. Force concatenation pass"
                " allow_unknown_chunksizes=True.\n"
                " 2. Compute shapes with "
                "[x.compute_chunk_sizes() for x in seq]" % str(seq2[0].shape)
            )
        # FIX: the shapes were previously passed as a second ValueError
        # argument next to a bare "%s" format string, so the message was
        # never interpolated; format it explicitly.
        raise ValueError("Shapes do not align: %s" % [x.shape for x in seq2])
    # Give the concatenation axis a unique negative label per input so that
    # unify_chunks aligns all non-concatenation axes but leaves the
    # concatenation axis of each array independent.
    inds = [list(range(ndim)) for i in range(n)]
    for i, ind in enumerate(inds):
        ind[axis] = -(i + 1)
    uc_args = list(concat(zip(seq2, inds)))
    _, seq2 = unify_chunks(*uc_args, warn=False)
    bds = [a.chunks for a in seq2]
    # Output chunks: unchanged off-axis; concatenated block sizes on-axis.
    chunks = (
        seq2[0].chunks[:axis]
        + (sum((bd[axis] for bd in bds), ()),)
        + seq2[0].chunks[axis + 1 :]
    )
    # Cumulative block counts along ``axis``: used to map an output block
    # index back to its source array (and local block index) via bisection.
    cum_dims = [0] + list(accumulate(add, [len(a.chunks[axis]) for a in seq2]))
    names = [a.name for a in seq2]
    name = "concatenate-" + tokenize(names, axis)
    keys = list(product([name], *[range(len(bd)) for bd in chunks]))
    values = [
        (names[bisect(cum_dims, key[axis + 1]) - 1],)
        + key[1 : axis + 1]
        + (key[axis + 1] - cum_dims[bisect(cum_dims, key[axis + 1]) - 1],)
        + key[axis + 2 :]
        for key in keys
    ]
    dsk = dict(zip(keys, values))
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=seq2)
    return Array(graph, name, chunks, meta=meta)
def load_store_chunk(
    x: Any,
    out: Any,
    index: slice,
    lock: Any,
    return_stored: bool,
    load_stored: bool,
):
    """
    A function inserted in a Dask graph for storing a chunk.
    Parameters
    ----------
    x: array-like
        An array (potentially a NumPy one)
    out: array-like
        Where to store results.
    index: slice-like
        Where to store result from ``x`` in ``out``.
    lock: Lock-like or False
        Lock to use before writing to ``out``.
    return_stored: bool
        Whether to return ``out``.
    load_stored: bool
        Whether to return the array stored in ``out``.
        Ignored if ``return_stored`` is not ``True``.
    Returns
    -------
    If return_stored=True and load_stored=False
        out
    If return_stored=True and load_stored=True
        out[index]
    If return_stored=False and compute=False
        None
    Examples
    --------
    >>> a = np.ones((5, 6))
    >>> b = np.empty(a.shape)
    >>> load_store_chunk(a, b, (slice(None), slice(None)), False, False, False)
    """
    if lock:
        lock.acquire()
    try:
        # ``x is None`` means pure load (see load_chunk): skip the write.
        if x is not None:
            if is_arraylike(x):
                out[index] = x
            else:
                # Coerce non-array task results (e.g. lists) before writing.
                out[index] = np.asanyarray(x)
        if return_stored and load_stored:
            return out[index]
        elif return_stored and not load_stored:
            return out
        else:
            return None
    finally:
        # Release the lock even if the write raised.
        if lock:
            lock.release()
def store_chunk(
    x: ArrayLike, out: ArrayLike, index: slice, lock: Any, return_stored: bool
):
    """Store ``x`` into ``out[index]``: thin wrapper around
    ``load_store_chunk`` with ``load_stored=False``."""
    return load_store_chunk(x, out, index, lock, return_stored, False)
# TypeVar so load_chunk preserves the concrete array-like type of ``out``.
A = TypeVar("A", bound=ArrayLike)
def load_chunk(out: A, index: slice, lock: Any) -> A:
    """Read ``out[index]`` back: ``load_store_chunk`` with no write
    (``x=None``) and ``return_stored=load_stored=True``."""
    return load_store_chunk(None, out, index, lock, True, True)
def insert_to_ooc(
    keys: list,
    chunks: tuple[tuple[int, ...], ...],
    out: ArrayLike,
    name: str,
    *,
    lock: Lock | bool = True,
    region: tuple[slice, ...] | slice | None = None,
    return_stored: bool = False,
    load_stored: bool = False,
) -> dict:
    """
    Creates a Dask graph for storing chunks from ``arr`` in ``out``.
    Parameters
    ----------
    keys: list
        Dask keys of the input array
    chunks: tuple
        Dask chunks of the input array
    out: array-like
        Where to store results to
    name: str
        First element of dask keys
    lock: Lock-like or bool, optional
        Whether to lock or with what (default is ``True``,
        which means a :class:`threading.Lock` instance).
    region: slice-like, optional
        Where in ``out`` to store ``arr``'s results
        (default is ``None``, meaning all of ``out``).
    return_stored: bool, optional
        Whether to return ``out``
        (default is ``False``, meaning ``None`` is returned).
    load_stored: bool, optional
        Whether to handling loading from ``out`` at the same time.
        Ignored if ``return_stored`` is not ``True``.
        (default is ``False``, meaning defer to ``return_stored``).
    Returns
    -------
    dask graph of store operation
    Examples
    --------
    >>> import dask.array as da
    >>> d = da.ones((5, 6), chunks=(2, 3))
    >>> a = np.empty(d.shape)
    >>> insert_to_ooc(d.__dask_keys__(), d.chunks, a, "store-123")  # doctest: +SKIP
    """
    if lock is True:
        lock = Lock()
    slices = slices_from_chunks(chunks)
    if region:
        # Shift every chunk's slice into the target region of ``out``.
        slices = [fuse_slice(region, slc) for slc in slices]
    if return_stored and load_stored:
        func = load_store_chunk
        args = (load_stored,)
    else:
        func = store_chunk  # type: ignore
        args = ()  # type: ignore
    # One task per input chunk: store (and optionally re-load) it into ``out``.
    dsk = {
        (name,) + t[1:]: (func, t, out, slc, lock, return_stored) + args
        for t, slc in zip(core.flatten(keys), slices)
    }
    return dsk
def retrieve_from_ooc(
    keys: Collection[Hashable], dsk_pre: Mapping, dsk_post: Mapping
) -> dict[tuple, Any]:
    """
    Creates a Dask graph for loading stored ``keys`` from ``dsk``.
    Parameters
    ----------
    keys: Collection
        A sequence containing Dask graph keys to load
    dsk_pre: Mapping
        A Dask graph corresponding to a Dask Array before computation
    dsk_post: Mapping
        A Dask graph corresponding to a Dask Array after computation
    Examples
    --------
    >>> import dask.array as da
    >>> d = da.ones((5, 6), chunks=(2, 3))
    >>> a = np.empty(d.shape)
    >>> g = insert_to_ooc(d.__dask_keys__(), d.chunks, a, "store-123")
    >>> retrieve_from_ooc(g.keys(), g, {k: k for k in g.keys()})  # doctest: +SKIP
    """
    load_dsk = {
        # A store task in dsk_pre looks like (func, key, out, index, lock, ...)
        # (see insert_to_ooc); ``[3:-1]`` extracts (index, lock) for the load,
        # while dsk_post[k] supplies the stored target to read back from.
        ("load-" + k[0],) + k[1:]: (load_chunk, dsk_post[k]) + dsk_pre[k][3:-1]  # type: ignore
        for k in keys
    }
    return load_dsk
def asarray(
    a, allow_unknown_chunksizes=False, dtype=None, order=None, *, like=None, **kwargs
):
    """Convert the input to a dask array.
    Parameters
    ----------
    a : array-like
        Input data, in any form that can be converted to a dask array. This
        includes lists, lists of tuples, tuples, tuples of tuples, tuples of
        lists and ndarrays.
    allow_unknown_chunksizes: bool
        Allow unknown chunksizes, such as come from converting from dask
        dataframes. Dask.array is unable to verify that chunks line up. If
        data comes from differently aligned sources then this can cause
        unexpected results.
    dtype : data-type, optional
        By default, the data-type is inferred from the input data.
    order : {'C', 'F', 'A', 'K'}, optional
        Memory layout. 'A' and 'K' depend on the order of input array a.
        'C' row-major (C-style), 'F' column-major (Fortran-style) memory
        representation. 'A' (any) means 'F' if a is Fortran contiguous, 'C'
        otherwise. 'K' (keep) preserve input order. Defaults to 'C'.
    like: array-like
        Reference object to allow the creation of Dask arrays with chunks
        that are not NumPy arrays. If an array-like passed in as ``like``
        supports the ``__array_function__`` protocol, the chunk type of the
        resulting array will be defined by it. In this case, it ensures the
        creation of a Dask array compatible with that passed in via this
        argument. If ``like`` is a Dask array, the chunk type of the
        resulting array will be defined by the chunk type of ``like``.
        Requires NumPy 1.20.0 or higher.
    Returns
    -------
    out : dask array
        Dask array interpretation of a.
    Examples
    --------
    >>> import dask.array as da
    >>> import numpy as np
    >>> x = np.arange(3)
    >>> da.asarray(x)
    dask.array<array, shape=(3,), dtype=int64, chunksize=(3,), chunktype=numpy.ndarray>
    >>> y = [[1, 2, 3], [4, 5, 6]]
    >>> da.asarray(y)
    dask.array<array, shape=(2, 3), dtype=int64, chunksize=(2, 3), chunktype=numpy.ndarray>
    """
    if like is None:
        if isinstance(a, Array):
            # Already a dask array: pass through unchanged.
            return a
        elif hasattr(a, "to_dask_array"):
            return a.to_dask_array()
        elif type(a).__module__.split(".")[0] == "xarray" and hasattr(a, "data"):
            # Unwrap xarray objects by converting their backing data.
            return asarray(a.data)
        elif isinstance(a, (list, tuple)) and any(isinstance(i, Array) for i in a):
            # A sequence containing dask arrays: stack them into one array.
            return stack(a, allow_unknown_chunksizes=allow_unknown_chunksizes)
        elif not isinstance(getattr(a, "shape", None), Iterable):
            # Scalars / shapeless objects: materialize with NumPy first.
            a = np.asarray(a, dtype=dtype, order=order)
    else:
        if not _numpy_120:
            raise RuntimeError("The use of ``like`` required NumPy >= 1.20")
        like_meta = meta_from_array(like)
        if isinstance(a, Array):
            return a.map_blocks(np.asarray, like=like_meta, dtype=dtype, order=order)
        else:
            a = np.asarray(a, like=like_meta, dtype=dtype, order=order)
    return from_array(a, getitem=getter_inline, **kwargs)
def asanyarray(a, dtype=None, order=None, *, like=None, inline_array=False):
    """Convert the input to a dask array.
    Subclasses of ``np.ndarray`` will be passed through as chunks unchanged.
    Parameters
    ----------
    a : array-like
        Input data, in any form that can be converted to a dask array. This
        includes lists, lists of tuples, tuples, tuples of tuples, tuples of
        lists and ndarrays.
    dtype : data-type, optional
        By default, the data-type is inferred from the input data.
    order : {'C', 'F', 'A', 'K'}, optional
        Memory layout. 'A' and 'K' depend on the order of input array a.
        'C' row-major (C-style), 'F' column-major (Fortran-style) memory
        representation. 'A' (any) means 'F' if a is Fortran contiguous, 'C'
        otherwise. 'K' (keep) preserve input order. Defaults to 'C'.
    like: array-like
        Reference object to allow the creation of Dask arrays with chunks
        that are not NumPy arrays. If an array-like passed in as ``like``
        supports the ``__array_function__`` protocol, the chunk type of the
        resulting array will be defined by it. In this case, it ensures the
        creation of a Dask array compatible with that passed in via this
        argument. If ``like`` is a Dask array, the chunk type of the
        resulting array will be defined by the chunk type of ``like``.
        Requires NumPy 1.20.0 or higher.
    inline_array:
        Whether to inline the array in the resulting dask graph. For more information,
        see the documentation for ``dask.array.from_array()``.
    Returns
    -------
    out : dask array
        Dask array interpretation of a.
    Examples
    --------
    >>> import dask.array as da
    >>> import numpy as np
    >>> x = np.arange(3)
    >>> da.asanyarray(x)
    dask.array<array, shape=(3,), dtype=int64, chunksize=(3,), chunktype=numpy.ndarray>
    >>> y = [[1, 2, 3], [4, 5, 6]]
    >>> da.asanyarray(y)
    dask.array<array, shape=(2, 3), dtype=int64, chunksize=(2, 3), chunktype=numpy.ndarray>
    """
    if like is None:
        if isinstance(a, Array):
            # Already a dask array: pass through unchanged.
            return a
        elif hasattr(a, "to_dask_array"):
            return a.to_dask_array()
        elif type(a).__module__.split(".")[0] == "xarray" and hasattr(a, "data"):
            # Unwrap xarray objects by converting their backing data.
            return asanyarray(a.data)
        elif isinstance(a, (list, tuple)) and any(isinstance(i, Array) for i in a):
            # A sequence containing dask arrays: stack them into one array.
            return stack(a)
        elif not isinstance(getattr(a, "shape", None), Iterable):
            # Scalars / shapeless objects: materialize with NumPy first,
            # preserving ndarray subclasses (asanyarray, not asarray).
            a = np.asanyarray(a, dtype=dtype, order=order)
    else:
        if not _numpy_120:
            raise RuntimeError("The use of ``like`` required NumPy >= 1.20")
        like_meta = meta_from_array(like)
        if isinstance(a, Array):
            return a.map_blocks(np.asanyarray, like=like_meta, dtype=dtype, order=order)
        else:
            a = np.asanyarray(a, like=like_meta, dtype=dtype, order=order)
    return from_array(
        a,
        chunks=a.shape,
        getitem=getter_inline,
        asarray=False,
        inline_array=inline_array,
    )
def is_scalar_for_elemwise(arg):
    """Return True if ``arg`` should be treated as a scalar by ``elemwise``.

    >>> is_scalar_for_elemwise(42)
    True
    >>> is_scalar_for_elemwise('foo')
    True
    >>> is_scalar_for_elemwise(True)
    True
    >>> is_scalar_for_elemwise(np.array(42))
    True
    >>> is_scalar_for_elemwise([1, 2, 3])
    True
    >>> is_scalar_for_elemwise(np.array([1, 2, 3]))
    False
    >>> is_scalar_for_elemwise(from_array(np.array(0), chunks=()))
    False
    >>> is_scalar_for_elemwise(np.dtype('i4'))
    True
    """
    if np.isscalar(arg) or isinstance(arg, np.dtype):
        return True
    shape = getattr(arg, "shape", None)
    # Objects without an iterable ``shape`` are treated as scalars.
    if not isinstance(shape, Iterable):
        return True
    # A shape whose entries are themselves dask collections marks a dask
    # Series/DataFrame, which elemwise also treats as a scalar.
    if any(is_dask_collection(dim) for dim in shape):
        return True
    # Zero-dimensional NumPy arrays count as scalars too.
    return isinstance(arg, np.ndarray) and arg.ndim == 0
def broadcast_shapes(*shapes):
    """
    Determines output shape from broadcasting arrays.
    Parameters
    ----------
    shapes : tuples
        The shapes of the arguments.
    Returns
    -------
    output_shape : tuple
    Raises
    ------
    ValueError
        If the input shapes cannot be successfully broadcast together.
    """
    # A single shape broadcasts to itself.
    if len(shapes) == 1:
        return shapes[0]
    result = []
    # Walk dimensions right-to-left; shorter shapes are padded with -1,
    # which is always broadcast-compatible.
    for dims in zip_longest(*map(reversed, shapes), fillvalue=-1):
        if np.isnan(dims).any():
            # Unknown extents propagate as NaN.
            extent = np.nan
        elif 0 in dims:
            # A zero-length dimension wins over any other extent.
            extent = 0
        else:
            extent = np.max(dims)
        for d in dims:
            # Each size must be the pad value, 0, 1, or the chosen extent
            # (NaN sizes are exempt from the compatibility check).
            if d not in (-1, 0, 1, extent) and not np.isnan(d):
                raise ValueError(
                    "operands could not be broadcast together with "
                    "shapes {}".format(" ".join(map(str, shapes)))
                )
        result.append(extent)
    result.reverse()
    return tuple(result)
def elemwise(op, *args, out=None, where=True, dtype=None, name=None, **kwargs):
    """Apply an elementwise ufunc-like function blockwise across arguments.
    Like numpy ufuncs, broadcasting rules are respected.
    Parameters
    ----------
    op : callable
        The function to apply. Should be numpy ufunc-like in the parameters
        that it accepts.
    *args : Any
        Arguments to pass to `op`. Non-dask array-like objects are first
        converted to dask arrays, then all arrays are broadcast together before
        applying the function blockwise across all arguments. Any scalar
        arguments are passed as-is following normal numpy ufunc behavior.
    out : dask array, optional
        If out is a dask.array then this overwrites the contents of that array
        with the result.
    where : array_like, optional
        An optional boolean mask marking locations where the ufunc should be
        applied. Can be a scalar, dask array, or any other array-like object.
        Mirrors the ``where`` argument to numpy ufuncs, see e.g. ``numpy.add``
        for more information.
    dtype : dtype, optional
        If provided, overrides the output array dtype.
    name : str, optional
        A unique key name to use when building the backing dask graph. If not
        provided, one will be automatically generated based on the input
        arguments.
    Examples
    --------
    >>> elemwise(add, x, y)  # doctest: +SKIP
    >>> elemwise(sin, x)  # doctest: +SKIP
    >>> elemwise(sin, x, out=dask_array)  # doctest: +SKIP
    See Also
    --------
    blockwise
    """
    # Any leftover kwargs are a user error -- all supported keywords are
    # consumed explicitly in the signature above.
    if kwargs:
        raise TypeError(
            f"{op.__name__} does not take the following keyword arguments "
            f"{sorted(kwargs)}"
        )
    out = _elemwise_normalize_out(out)
    where = _elemwise_normalize_where(where)
    # Lists/tuples are treated as literal NumPy arrays, mirroring ufuncs.
    args = [np.asarray(a) if isinstance(a, (list, tuple)) else a for a in args]
    shapes = []
    for arg in args:
        shape = getattr(arg, "shape", ())
        if any(is_dask_collection(x) for x in shape):
            # Want to exclude Delayed shapes and dd.Scalar
            shape = ()
        shapes.append(shape)
    # ``where`` and ``out`` participate in broadcasting as well.
    if isinstance(where, Array):
        shapes.append(where.shape)
    if isinstance(out, Array):
        shapes.append(out.shape)
    shapes = [s if isinstance(s, Iterable) else () for s in shapes]
    out_ndim = len(
        broadcast_shapes(*shapes)
    )  # Raises ValueError if dimensions mismatch
    # Output indices for blockwise, highest axis first ("ij..."-style).
    expr_inds = tuple(range(out_ndim))[::-1]
    if dtype is not None:
        need_enforce_dtype = True
    else:
        # We follow NumPy's rules for dtype promotion, which special cases
        # scalars and 0d ndarrays (which it considers equivalent) by using
        # their values to compute the result dtype:
        # https://github.com/numpy/numpy/issues/6240
        # We don't inspect the values of 0d dask arrays, because these could
        # hold potentially very expensive calculations. Instead, we treat
        # them just like other arrays, and if necessary cast the result of op
        # to match.
        vals = [
            np.empty((1,) * max(1, a.ndim), dtype=a.dtype)
            if not is_scalar_for_elemwise(a)
            else a
            for a in args
        ]
        try:
            dtype = apply_infer_dtype(op, vals, {}, "elemwise", suggest_dtype=False)
        except Exception:
            return NotImplemented
        need_enforce_dtype = any(
            not is_scalar_for_elemwise(a) and a.ndim == 0 for a in args
        )
    # Deterministic name so identical calls produce identical graph keys.
    if not name:
        name = f"{funcname(op)}-{tokenize(op, dtype, *args, where)}"
    blockwise_kwargs = dict(dtype=dtype, name=name, token=funcname(op).strip("_"))
    # Thread the where-mask and out-buffer through as trailing positional
    # arguments by wrapping ``op``.
    if where is not True:
        blockwise_kwargs["elemwise_where_function"] = op
        op = _elemwise_handle_where
        args.extend([where, out])
    if need_enforce_dtype:
        blockwise_kwargs["enforce_dtype"] = dtype
        blockwise_kwargs["enforce_dtype_function"] = op
        op = _enforce_dtype
    result = blockwise(
        op,
        expr_inds,
        *concat(
            (a, tuple(range(a.ndim)[::-1]) if not is_scalar_for_elemwise(a) else None)
            for a in args
        ),
        **blockwise_kwargs,
    )
    return handle_out(out, result)
def _elemwise_normalize_where(where):
if where is True:
return True
elif where is False or where is None:
return False
return asarray(where)
def _elemwise_handle_where(*args, **kwargs):
function = kwargs.pop("elemwise_where_function")
*args, where, out = args
if hasattr(out, "copy"):
out = out.copy()
return function(*args, where=where, out=out, **kwargs)
def _elemwise_normalize_out(out):
if isinstance(out, tuple):
if len(out) == 1:
out = out[0]
elif len(out) > 1:
raise NotImplementedError("The out parameter is not fully supported")
else:
out = None
if not (out is None or isinstance(out, Array)):
raise NotImplementedError(
f"The out parameter is not fully supported."
f" Received type {type(out).__name__}, expected Dask Array"
)
return out
def handle_out(out, result):
    """Resolve the ``out=`` parameter of an elementwise operation.

    When ``out`` is a dask array its graph, chunks, meta and name are replaced
    by ``result``'s (aliasing it in place) and ``out`` is returned; otherwise
    ``result`` is returned unchanged.
    """
    out = _elemwise_normalize_out(out)
    if not isinstance(out, Array):
        return result
    if out.shape != result.shape:
        raise ValueError(
            "Mismatched shapes between result and out parameter. "
            "out=%s, result=%s" % (str(out.shape), str(result.shape))
        )
    # Rebind the internals of ``out`` so it now aliases ``result``.
    out._chunks = result.chunks
    out.dask = result.dask
    out._meta = result._meta
    out._name = result.name
    return out
def _enforce_dtype(*args, **kwargs):
"""Calls a function and converts its result to the given dtype.
The parameters have deliberately been given unwieldy names to avoid
clashes with keyword arguments consumed by blockwise
A dtype of `object` is treated as a special case and not enforced,
because it is used as a dummy value in some places when the result will
not be a block in an Array.
Parameters
----------
enforce_dtype : dtype
Result dtype
enforce_dtype_function : callable
The wrapped function, which will be passed the remaining arguments
"""
dtype = kwargs.pop("enforce_dtype")
function = kwargs.pop("enforce_dtype_function")
result = function(*args, **kwargs)
if hasattr(result, "dtype") and dtype != result.dtype and dtype != object:
if not np.can_cast(result, dtype, casting="same_kind"):
raise ValueError(
"Inferred dtype from function %r was %r "
"but got %r, which can't be cast using "
"casting='same_kind'"
% (funcname(function), str(dtype), str(result.dtype))
)
if np.isscalar(result):
# scalar astype method doesn't take the keyword arguments, so
# have to convert via 0-dimensional array and back.
result = result.astype(dtype)
else:
try:
result = result.astype(dtype, copy=False)
except TypeError:
# Missing copy kwarg
result = result.astype(dtype)
return result
def broadcast_to(x, shape, chunks=None, meta=None):
    """Broadcast an array to a new shape.
    Parameters
    ----------
    x : array_like
        The array to broadcast.
    shape : tuple
        The shape of the desired array.
    chunks : tuple, optional
        If provided, then the result will use these chunks instead of the same
        chunks as the source array. Setting chunks explicitly as part of
        broadcast_to is more efficient than rechunking afterwards. Chunks are
        only allowed to differ from the original shape along dimensions that
        are new on the result or have size 1 the input array.
    meta : empty ndarray
        empty ndarray created with same NumPy backend, ndim and dtype as the
        Dask Array being created (overrides dtype)
    Returns
    -------
    broadcast : dask array
    See Also
    --------
    :func:`numpy.broadcast_to`
    """
    x = asarray(x)
    shape = tuple(shape)
    if meta is None:
        meta = meta_from_array(x)
    # Fast path: nothing to broadcast and no rechunk requested.
    if x.shape == shape and (chunks is None or chunks == x.chunks):
        return x
    ndim_new = len(shape) - x.ndim
    # Existing dimensions may only broadcast from size 1.
    if ndim_new < 0 or any(
        new != old for new, old in zip(shape[ndim_new:], x.shape) if old != 1
    ):
        raise ValueError(f"cannot broadcast shape {x.shape} to shape {shape}")
    if chunks is None:
        # New leading dims get a single chunk; broadcast (size-1) dims get
        # one chunk of the full new extent.
        chunks = tuple((s,) for s in shape[:ndim_new]) + tuple(
            bd if old > 1 else (new,)
            for bd, old, new in zip(x.chunks, x.shape, shape[ndim_new:])
        )
    else:
        chunks = normalize_chunks(
            chunks, shape, dtype=x.dtype, previous_chunks=x.chunks
        )
        for old_bd, new_bd in zip(x.chunks, chunks[ndim_new:]):
            if old_bd != new_bd and old_bd != (1,):
                raise ValueError(
                    "cannot broadcast chunks %s to chunks %s: "
                    "new chunks must either be along a new "
                    "dimension or a dimension of size 1" % (x.chunks, chunks)
                )
    name = "broadcast_to-" + tokenize(x, shape, chunks)
    dsk = {}
    # Every output chunk reads from the matching input chunk, collapsing
    # broadcast dimensions to input chunk index 0.
    enumerated_chunks = product(*(enumerate(bds) for bds in chunks))
    for new_index, chunk_shape in (zip(*ec) for ec in enumerated_chunks):
        old_index = tuple(
            0 if bd == (1,) else i for bd, i in zip(x.chunks, new_index[ndim_new:])
        )
        old_key = (x.name,) + old_index
        new_key = (name,) + new_index
        dsk[new_key] = (np.broadcast_to, old_key, quote(chunk_shape))
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])
    return Array(graph, name, chunks, dtype=x.dtype, meta=meta)
@derived_from(np)
def broadcast_arrays(*args, subok=False):
    # Broadcast every input dask array against the others, returning a list
    # of arrays all sharing the common broadcast shape and chunking.
    subok = bool(subok)
    # ``subok`` keeps ndarray subclasses, mirroring numpy.broadcast_arrays.
    to_array = asanyarray if subok else asarray
    args = tuple(to_array(e) for e in args)
    # Unify uneven chunking
    inds = [list(reversed(range(x.ndim))) for x in args]
    uc_args = concat(zip(args, inds))
    _, args = unify_chunks(*uc_args, warn=False)
    shape = broadcast_shapes(*(e.shape for e in args))
    chunks = broadcast_chunks(*(e.chunks for e in args))
    result = [broadcast_to(e, shape=shape, chunks=chunks) for e in args]
    return result
def offset_func(func, offset, *args):
    """Return a wrapper around ``func`` that shifts every argument by ``offset``.

    >>> double = lambda x: x * 2
    >>> f = offset_func(double, (10,))
    >>> f(1)
    22
    >>> f(300)
    620
    """

    def _offset(*inner_args):
        shifted = [a + o for a, o in zip(inner_args, offset)]
        return func(*shifted)

    # Best-effort renaming; some callables have read-only __name__.
    with contextlib.suppress(Exception):
        _offset.__name__ = "offset_" + func.__name__
    return _offset
def chunks_from_arrays(arrays):
    """Chunks tuple from nested list of arrays

    >>> x = np.array([1, 2])
    >>> chunks_from_arrays([x, x])
    ((2, 2),)
    >>> x = np.array([[1, 2]])
    >>> chunks_from_arrays([[x], [x]])
    ((1, 1), (2,))
    >>> x = np.array([[1, 2]])
    >>> chunks_from_arrays([[x, x]])
    ((1,), (2, 2))
    >>> chunks_from_arrays([1, 1])
    ((1, 1),)
    """
    if not arrays:
        return ()

    def _leading(node):
        # Descend to the first leaf of the nested list/tuple structure.
        while isinstance(node, (list, tuple)):
            node = node[0]
        return node

    def _dim_size(x, axis):
        # Size of ``x`` along ``axis``; bare scalars count as length 1.
        try:
            shp = x.shape if x.shape else (1,)
        except AttributeError:
            shp = (1,)
        return shp[axis]

    chunks = []
    axis = 0
    level = arrays
    while isinstance(level, (list, tuple)):
        chunks.append(tuple(_dim_size(_leading(a), axis) for a in level))
        level = level[0]
        axis += 1
    return tuple(chunks)
def deepfirst(seq):
    """First element in a nested list

    >>> deepfirst([[[1, 2], [3, 4]], [5, 6], [7, 8]])
    1
    """
    # Iterative descent instead of recursion.
    while isinstance(seq, (list, tuple)):
        seq = seq[0]
    return seq
def shapelist(a):
    """Return the shape of a nested list as a tuple of per-level lengths.

    Only ``list`` nesting is counted (tuples and arrays are leaves).
    """
    dims = []
    node = a
    while type(node) is list:
        dims.append(len(node))
        node = node[0]
    return tuple(dims)
def transposelist(arrays, axes, extradims=0):
    """Permute axes of nested list
    >>> transposelist([[1,1,1],[1,1,1]], [2,1])
    [[[1, 1], [1, 1], [1, 1]]]
    >>> transposelist([[1,1,1],[1,1,1]], [2,1], extradims=1)
    [[[[1], [1]], [[1], [1]], [[1], [1]]]]
    """
    if len(axes) != ndimlist(arrays):
        raise ValueError("Length of axes should equal depth of nested arrays")
    if extradims < 0:
        raise ValueError("`newdims` should be positive")
    if len(axes) > len(set(axes)):
        raise ValueError("`axes` should be unique")
    ndim = max(axes) + 1
    shape = shapelist(arrays)
    # Target shape: permuted input dims, with length-1 axes inserted for any
    # position not named in ``axes`` and for the requested extra dims.
    newshape = [
        shape[axes.index(i)] if i in axes else 1 for i in range(ndim + extradims)
    ]
    # Flatten to leaves, then rebuild the nesting in the permuted shape.
    result = list(core.flatten(arrays))
    return reshapelist(newshape, result)
def stack(seq, axis=0, allow_unknown_chunksizes=False):
    """
    Stack arrays along a new axis
    Given a sequence of dask arrays, form a new dask array by stacking them
    along a new dimension (axis=0 by default)
    Parameters
    ----------
    seq: list of dask.arrays
    axis: int
        Dimension along which to align all of the arrays
    allow_unknown_chunksizes: bool
        Allow unknown chunksizes, such as come from converting from dask
        dataframes. Dask.array is unable to verify that chunks line up. If
        data comes from differently aligned sources then this can cause
        unexpected results.
    Examples
    --------
    Create slices
    >>> import dask.array as da
    >>> import numpy as np
    >>> data = [da.from_array(np.ones((4, 4)), chunks=(2, 2))
    ...         for i in range(3)]
    >>> x = da.stack(data, axis=0)
    >>> x.shape
    (3, 4, 4)
    >>> da.stack(data, axis=1).shape
    (4, 3, 4)
    >>> da.stack(data, axis=-1).shape
    (4, 4, 3)
    Result is a new dask Array
    See Also
    --------
    concatenate
    """
    from dask.array import wrap
    seq = [asarray(a, allow_unknown_chunksizes=allow_unknown_chunksizes) for a in seq]
    if not seq:
        raise ValueError("Need array(s) to stack")
    if not allow_unknown_chunksizes and not all(x.shape == seq[0].shape for x in seq):
        idx = first(i for i in enumerate(seq) if i[1].shape != seq[0].shape)
        raise ValueError(
            "Stacked arrays must have the same shape. The first array had shape "
            f"{seq[0].shape}, while array {idx[0] + 1} has shape {idx[1].shape}."
        )
    # ``meta`` determines the output backend/dtype; inputs are cast to match.
    meta = np.stack([meta_from_array(a) for a in seq], axis=axis)
    seq = [x.astype(meta.dtype) for x in seq]
    ndim = meta.ndim - 1
    if axis < 0:
        axis = ndim + axis + 1
    shape = tuple(
        len(seq)
        if i == axis
        else (seq[0].shape[i] if i < axis else seq[0].shape[i - 1])
        for i in range(meta.ndim)
    )
    # Empty arrays contribute nothing; drop them unless everything is empty.
    seq2 = [a for a in seq if a.size]
    if not seq2:
        seq2 = seq
    n = len(seq2)
    if n == 0:
        try:
            return wrap.empty_like(meta, shape=shape, chunks=shape, dtype=meta.dtype)
        except TypeError:
            return wrap.empty(shape, chunks=shape, dtype=meta.dtype)
    # Align chunking of all inputs before stacking.
    ind = list(range(ndim))
    uc_args = list(concat((x, ind) for x in seq2))
    _, seq2 = unify_chunks(*uc_args)
    assert len({a.chunks for a in seq2}) == 1  # same chunks
    chunks = seq2[0].chunks[:axis] + ((1,) * n,) + seq2[0].chunks[axis:]
    names = [a.name for a in seq2]
    name = "stack-" + tokenize(names, axis)
    keys = list(product([name], *[range(len(bd)) for bd in chunks]))
    # Each output chunk selects one input array (by its index along ``axis``)
    # and inserts a new length-1 axis there via ``None`` indexing.
    inputs = [
        (names[key[axis + 1]],) + key[1 : axis + 1] + key[axis + 2 :] for key in keys
    ]
    values = [
        (
            getitem,
            inp,
            (slice(None, None, None),) * axis
            + (None,)
            + (slice(None, None, None),) * (ndim - axis),
        )
        for inp in inputs
    ]
    layer = dict(zip(keys, values))
    graph = HighLevelGraph.from_collections(name, layer, dependencies=seq2)
    return Array(graph, name, chunks, meta=meta)
def concatenate3(arrays):
    """Recursive np.concatenate
    Input should be a nested list of numpy arrays arranged in the order they
    should appear in the array itself.  Each array should have the same number
    of dimensions as the desired output and the nesting of the lists.
    >>> x = np.array([[1, 2]])
    >>> concatenate3([[x, x, x], [x, x, x]])
    array([[1, 2, 1, 2, 1, 2],
           [1, 2, 1, 2, 1, 2]])
    >>> concatenate3([[x, x], [x, x], [x, x]])
    array([[1, 2, 1, 2],
           [1, 2, 1, 2],
           [1, 2, 1, 2]])
    """
    # We need this as __array_function__ may not exist on older NumPy versions.
    # And to reduce verbosity.
    NDARRAY_ARRAY_FUNCTION = getattr(np.ndarray, "__array_function__", None)
    arrays = concrete(arrays)
    if not arrays:
        return np.empty(0)
    # The element with the highest __array_priority__ decides dispatching
    # (e.g. masked arrays or other ndarray subclasses).
    advanced = max(
        core.flatten(arrays, container=(list, tuple)),
        key=lambda x: getattr(x, "__array_priority__", 0),
    )
    # Non-NumPy array-likes (custom __array_function__) go through the
    # generic pairwise concatenation path instead of the fast fill-in below.
    if not all(
        NDARRAY_ARRAY_FUNCTION
        is getattr(type(arr), "__array_function__", NDARRAY_ARRAY_FUNCTION)
        for arr in core.flatten(arrays, container=(list, tuple))
    ):
        try:
            x = unpack_singleton(arrays)
            return _concatenate2(arrays, axes=tuple(range(x.ndim)))
        except TypeError:
            pass
    if concatenate_lookup.dispatch(type(advanced)) is not np.concatenate:
        x = unpack_singleton(arrays)
        return _concatenate2(arrays, axes=list(range(x.ndim)))
    ndim = ndimlist(arrays)
    if not ndim:
        return arrays
    chunks = chunks_from_arrays(arrays)
    shape = tuple(map(sum, chunks))
    def dtype(x):
        try:
            return x.dtype
        except AttributeError:
            return type(x)
    # Fast path: allocate the full output once and copy each block into its
    # slice, rather than repeated np.concatenate calls.
    result = np.empty(shape=shape, dtype=dtype(deepfirst(arrays)))
    for (idx, arr) in zip(
        slices_from_chunks(chunks), core.flatten(arrays, container=(list, tuple))
    ):
        if hasattr(arr, "ndim"):
            while arr.ndim < ndim:
                arr = arr[None, ...]
        result[idx] = arr
    return result
def concatenate_axes(arrays, axes):
    """Recursively call np.concatenate along axes"""
    if len(axes) != ndimlist(arrays):
        raise ValueError("Length of axes should equal depth of nested arrays")
    # Pad with length-1 dims when the leaves have more dims than ``axes``.
    extradims = max(0, deepfirst(arrays).ndim - (max(axes) + 1))
    return concatenate3(transposelist(arrays, axes, extradims=extradims))
def to_hdf5(filename, *args, chunks=True, **kwargs):
    """Store arrays in HDF5 file
    This saves several dask arrays into several datapaths in an HDF5 file.
    It creates the necessary datasets and handles clean file opening/closing.
    Parameters
    ----------
    chunks: tuple or ``True``
        Chunk shape, or ``True`` to pass the chunks from the dask array.
        Defaults to ``True``.
    Examples
    --------
    >>> da.to_hdf5('myfile.hdf5', '/x', x)  # doctest: +SKIP
    or
    >>> da.to_hdf5('myfile.hdf5', {'/x': x, '/y': y})  # doctest: +SKIP
    Optionally provide arguments as though to ``h5py.File.create_dataset``
    >>> da.to_hdf5('myfile.hdf5', '/x', x, compression='lzf', shuffle=True)  # doctest: +SKIP
    >>> da.to_hdf5('myfile.hdf5', '/x', x, chunks=(10,20,30))  # doctest: +SKIP
    This can also be used as a method on a single Array
    >>> x.to_hdf5('myfile.hdf5', '/x')  # doctest: +SKIP
    See Also
    --------
    da.store
    h5py.File.create_dataset
    """
    # Accept either a single {datapath: array} dict or a (datapath, array) pair.
    if len(args) == 1 and isinstance(args[0], dict):
        data = args[0]
    elif len(args) == 2 and isinstance(args[0], str) and isinstance(args[1], Array):
        data = {args[0]: args[1]}
    else:
        raise ValueError("Please provide {'/data/path': array} dictionary")
    # Imported lazily so h5py is only required when this function is used.
    import h5py
    with h5py.File(filename, mode="a") as f:
        dsets = [
            f.require_dataset(
                dp,
                shape=x.shape,
                dtype=x.dtype,
                # chunks=True: use the first chunk of each dimension as the
                # HDF5 chunk shape.
                chunks=tuple(c[0] for c in x.chunks) if chunks is True else chunks,
                **kwargs,
            )
            for dp, x in data.items()
        ]
        store(list(data.values()), dsets)
def interleave_none(a, b):
    """Fill each ``None`` slot of ``a`` with the next element of ``b``.

    >>> interleave_none([0, None, 2, None], [1, 3])
    (0, 1, 2, 3)
    """
    out = []
    i = j = 0
    total = len(a) + len(b)
    while i + j < total:
        if a[i] is None:
            out.append(b[j])
            j += 1
        else:
            out.append(a[i])
        i += 1
    return tuple(out)
def keyname(name, i, okey):
    """Build a task key from ``name``, index ``i`` and the non-None parts of ``okey``.

    >>> keyname('x', 3, [None, None, 0, 2])
    ('x', 3, 0, 2)
    """
    parts = [name, i]
    parts.extend(k for k in okey if k is not None)
    return tuple(parts)
def _vindex(x, *indexes):
    """Point wise indexing with broadcasting.
    >>> x = np.arange(56).reshape((7, 8))
    >>> x
    array([[ 0,  1,  2,  3,  4,  5,  6,  7],
           [ 8,  9, 10, 11, 12, 13, 14, 15],
           [16, 17, 18, 19, 20, 21, 22, 23],
           [24, 25, 26, 27, 28, 29, 30, 31],
           [32, 33, 34, 35, 36, 37, 38, 39],
           [40, 41, 42, 43, 44, 45, 46, 47],
           [48, 49, 50, 51, 52, 53, 54, 55]])
    >>> d = from_array(x, chunks=(3, 4))
    >>> result = _vindex(d, [0, 1, 6, 0], [0, 1, 0, 7])
    >>> result.compute()
    array([ 0,  9, 48,  7])
    """
    indexes = replace_ellipsis(x.ndim, indexes)
    # Split the index into a plain (slice/integer) part, applied first with
    # regular __getitem__, and the fancy (array) part handled below.
    nonfancy_indexes = []
    reduced_indexes = []
    for ind in indexes:
        if isinstance(ind, Number):
            # Integers drop the axis entirely, so they get no entry in
            # ``reduced_indexes``.
            nonfancy_indexes.append(ind)
        elif isinstance(ind, slice):
            nonfancy_indexes.append(ind)
            reduced_indexes.append(slice(None))
        else:
            nonfancy_indexes.append(slice(None))
            reduced_indexes.append(ind)
    nonfancy_indexes = tuple(nonfancy_indexes)
    reduced_indexes = tuple(reduced_indexes)
    x = x[nonfancy_indexes]
    # Validate and normalize each fancy index against the (post-slice) shape.
    array_indexes = {}
    for i, (ind, size) in enumerate(zip(reduced_indexes, x.shape)):
        if not isinstance(ind, slice):
            ind = np.array(ind, copy=True)
            if ind.dtype.kind == "b":
                raise IndexError("vindex does not support indexing with boolean arrays")
            if ((ind >= size) | (ind < -size)).any():
                raise IndexError(
                    "vindex key has entries out of bounds for "
                    "indexing along axis %s of size %s: %r" % (i, size, ind)
                )
            # Map negative indices to their positive equivalents.
            ind %= size
            array_indexes[i] = ind
    if array_indexes:
        x = _vindex_array(x, array_indexes)
    return x
def _vindex_array(x, dict_indexes):
    """Point wise indexing with only NumPy Arrays."""
    try:
        broadcast_indexes = np.broadcast_arrays(*dict_indexes.values())
    except ValueError as e:
        # note: error message exactly matches numpy
        shapes_str = " ".join(str(a.shape) for a in dict_indexes.values())
        raise IndexError(
            "shape mismatch: indexing arrays could not be "
            "broadcast together with shapes " + shapes_str
        ) from e
    broadcast_shape = broadcast_indexes[0].shape
    lookup = dict(zip(dict_indexes, broadcast_indexes))
    # One flat index list per axis of x, or None for axes with no fancy index.
    flat_indexes = [
        lookup[i].ravel().tolist() if i in lookup else None for i in range(x.ndim)
    ]
    flat_indexes.extend([None] * (x.ndim - len(flat_indexes)))
    flat_indexes = [
        list(index) if index is not None else index for index in flat_indexes
    ]
    # Cumulative chunk boundaries per axis, used to locate the chunk each
    # requested point falls into.
    bounds = [list(accumulate(add, (0,) + c)) for c in x.chunks]
    bounds2 = [b for i, b in zip(flat_indexes, bounds) if i is not None]
    axis = _get_axis(flat_indexes)
    token = tokenize(x, flat_indexes)
    out_name = "vindex-merge-" + token
    # Each point becomes (output position, chunk index, offset within chunk).
    points = list()
    for i, idx in enumerate(zip(*[i for i in flat_indexes if i is not None])):
        block_idx = [bisect(b, ind) - 1 for b, ind in zip(bounds2, idx)]
        inblock_idx = [
            ind - bounds2[k][j] for k, (ind, j) in enumerate(zip(idx, block_idx))
        ]
        points.append((i, tuple(block_idx), tuple(inblock_idx)))
    # Output chunking: one leading axis holding all points, untouched axes
    # keep their original chunks.
    chunks = [c for i, c in zip(flat_indexes, x.chunks) if i is None]
    chunks.insert(0, (len(points),) if points else (0,))
    chunks = tuple(chunks)
    if points:
        # Group requested points by the source chunk they live in, slice each
        # chunk once, then merge slices back into output order.
        per_block = groupby(1, points)
        per_block = {k: v for k, v in per_block.items() if v}
        other_blocks = list(
            product(
                *[
                    list(range(len(c))) if i is None else [None]
                    for i, c in zip(flat_indexes, x.chunks)
                ]
            )
        )
        full_slices = [slice(None, None) if i is None else None for i in flat_indexes]
        name = "vindex-slice-" + token
        vindex_merge_name = "vindex-merge-" + token
        dsk = {}
        for okey in other_blocks:
            for i, key in enumerate(per_block):
                dsk[keyname(name, i, okey)] = (
                    _vindex_transpose,
                    (
                        _vindex_slice,
                        (x.name,) + interleave_none(okey, key),
                        interleave_none(
                            full_slices, list(zip(*pluck(2, per_block[key])))
                        ),
                    ),
                    axis,
                )
            dsk[keyname(vindex_merge_name, 0, okey)] = (
                _vindex_merge,
                [list(pluck(0, per_block[key])) for key in per_block],
                [keyname(name, i, okey) for i in range(len(per_block))],
            )
        result_1d = Array(
            HighLevelGraph.from_collections(out_name, dsk, dependencies=[x]),
            out_name,
            chunks,
            x.dtype,
            meta=x._meta,
        )
        return result_1d.reshape(broadcast_shape + result_1d.shape[1:])
    # output has a zero dimension, just create a new zero-shape array with the
    # same dtype
    from dask.array.wrap import empty
    result_1d = empty(
        tuple(map(sum, chunks)), chunks=chunks, dtype=x.dtype, name=out_name
    )
    return result_1d.reshape(broadcast_shape + result_1d.shape[1:])
def _get_axis(indexes):
"""Get axis along which point-wise slicing results lie
This is mostly a hack because I can't figure out NumPy's rule on this and
can't be bothered to go reading.
>>> _get_axis([[1, 2], None, [1, 2], None])
0
>>> _get_axis([None, [1, 2], [1, 2], None])
1
>>> _get_axis([None, None, [1, 2], [1, 2]])
2
"""
ndim = len(indexes)
indexes = [slice(None, None) if i is None else [0] for i in indexes]
x = np.empty((2,) * ndim)
x2 = x[tuple(indexes)]
return x2.shape.index(1)
def _vindex_slice(block, points):
"""Pull out point-wise slices from block"""
points = [p if isinstance(p, slice) else list(p) for p in points]
return block[tuple(points)]
def _vindex_transpose(block, axis):
"""Rotate block so that points are on the first dimension"""
axes = [axis] + list(range(axis)) + list(range(axis + 1, block.ndim))
return block.transpose(axes)
def _vindex_merge(locations, values):
"""
>>> locations = [0], [2, 1]
>>> values = [np.array([[1, 2, 3]]),
... np.array([[10, 20, 30], [40, 50, 60]])]
>>> _vindex_merge(locations, values)
array([[ 1, 2, 3],
[40, 50, 60],
[10, 20, 30]])
"""
locations = list(map(list, locations))
values = list(values)
n = sum(map(len, locations))
shape = list(values[0].shape)
shape[0] = n
shape = tuple(shape)
dtype = values[0].dtype
x = np.empty_like(values[0], dtype=dtype, shape=shape)
ind = [slice(None, None) for i in range(x.ndim)]
for loc, val in zip(locations, values):
ind[0] = loc
x[tuple(ind)] = val
return x
def to_npy_stack(dirname, x, axis=0):
    """Write dask array to a stack of .npy files
    This partitions the dask.array along one axis and stores each block along
    that axis as a single .npy file in the specified directory
    Examples
    --------
    >>> x = da.ones((5, 10, 10), chunks=(2, 4, 4))  # doctest: +SKIP
    >>> da.to_npy_stack('data/', x, axis=0)  # doctest: +SKIP
    The ``.npy`` files store numpy arrays for ``x[0:2], x[2:4], and x[4:5]``
    respectively, as is specified by the chunk size along the zeroth axis::
        $ tree data/
        data/
        |-- 0.npy
        |-- 1.npy
        |-- 2.npy
        |-- info
    The ``info`` file stores the dtype, chunks, and axis information of the array.
    You can load these stacks with the :func:`dask.array.from_npy_stack` function.
    >>> y = da.from_npy_stack('data/')  # doctest: +SKIP
    See Also
    --------
    from_npy_stack
    """
    # Collapse every axis except ``axis`` to a single chunk so each .npy file
    # holds one full slab.
    chunks = tuple((c if i == axis else (sum(c),)) for i, c in enumerate(x.chunks))
    xx = x.rechunk(chunks)
    if not os.path.exists(dirname):
        os.mkdir(dirname)
    # ``info`` records the metadata needed by from_npy_stack to rebuild.
    meta = {"chunks": chunks, "dtype": x.dtype, "axis": axis}
    with open(os.path.join(dirname, "info"), "wb") as f:
        pickle.dump(meta, f)
    # uuid-based name: the graph writes files as a side effect and must not be
    # deduplicated against previous calls.
    name = "to-npy-stack-" + str(uuid.uuid1())
    dsk = {
        (name, i): (np.save, os.path.join(dirname, "%d.npy" % i), key)
        for i, key in enumerate(core.flatten(xx.__dask_keys__()))
    }
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=[xx])
    # Eagerly execute the save tasks.
    compute_as_if_collection(Array, graph, list(dsk))
def from_npy_stack(dirname, mmap_mode="r"):
    """Load dask array from stack of npy files
    Parameters
    ----------
    dirname: string
        Directory of .npy files
    mmap_mode: (None or 'r')
        Read data in memory map mode
    See Also
    --------
    to_npy_stack
    """
    # ``info`` was written by to_npy_stack and holds dtype/chunks/axis.
    with open(os.path.join(dirname, "info"), "rb") as f:
        info = pickle.load(f)
    dtype = info["dtype"]
    chunks = info["chunks"]
    axis = info["axis"]
    name = "from-npy-stack-%s" % dirname
    keys = list(product([name], *[range(len(c)) for c in chunks]))
    # One np.load task per file; all axes other than ``axis`` are single
    # chunks, so files and chunks correspond one-to-one.
    values = [
        (np.load, os.path.join(dirname, "%d.npy" % i), mmap_mode)
        for i in range(len(chunks[axis]))
    ]
    dsk = dict(zip(keys, values))
    return Array(dsk, name, chunks, dtype)
def new_da_object(dsk, name, chunks, meta=None, dtype=None):
    """Generic constructor for dask.array or dask.dataframe objects.
    Decides the appropriate output class based on the type of `meta` provided.
    """
    if is_dataframe_like(meta) or is_series_like(meta) or is_index_like(meta):
        from dask.dataframe.core import new_dd_object
        # Dataframes are partitioned only along the first axis; all other
        # axes must therefore be single chunks.
        assert all(len(c) == 1 for c in chunks[1:])
        # Division boundaries are unknown; one entry per partition edge.
        divisions = [None] * (len(chunks[0]) + 1)
        return new_dd_object(dsk, name, meta, divisions)
    else:
        return Array(dsk, name=name, chunks=chunks, meta=meta, dtype=dtype)
class BlockView:
    """An array-like interface to the blocks of an array.
    ``BlockView`` provides an array-like interface
    to the blocks of a dask array.  Numpy-style indexing of a
    ``BlockView`` returns a selection of blocks as a new dask array.
    You can index ``BlockView`` like a numpy array of shape
    equal to the number of blocks in each dimension, (available as
    array.blocks.size).  The dimensionality of the output array matches
    the dimension of this array, even if integer indices are passed.
    Slicing with ``np.newaxis`` or multiple lists is not supported.
    Examples
    --------
    >>> import dask.array as da
    >>> from dask.array.core import BlockView
    >>> x = da.arange(8, chunks=2)
    >>> bv = BlockView(x)
    >>> bv.shape # aliases x.numblocks
    (4,)
    >>> bv.size
    4
    >>> bv[0].compute()
    array([0, 1])
    >>> bv[:3].compute()
    array([0, 1, 2, 3, 4, 5])
    >>> bv[::2].compute()
    array([0, 1, 4, 5])
    >>> bv[[-1, 0]].compute()
    array([6, 7, 0, 1])
    >>> bv.ravel()  # doctest: +NORMALIZE_WHITESPACE
    [dask.array<blocks, shape=(2,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>,
     dask.array<blocks, shape=(2,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>,
     dask.array<blocks, shape=(2,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>,
     dask.array<blocks, shape=(2,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>]
    Returns
    -------
    An instance of ``da.array.Blockview``
    """
    def __init__(self, array: Array):
        # Keep a reference to the backing dask array; no copying happens.
        self._array = array
    def __getitem__(self, index: Any) -> Array:
        """Select blocks by numpy-style index and return them as a dask array."""
        from dask.array.slicing import normalize_index
        if not isinstance(index, tuple):
            index = (index,)
        if sum(isinstance(ind, (np.ndarray, list)) for ind in index) > 1:
            raise ValueError("Can only slice with a single list")
        if any(ind is None for ind in index):
            raise ValueError("Slicing with np.newaxis or None is not supported")
        index = normalize_index(index, self._array.numblocks)
        # Integers become length-1 slices so output dimensionality is kept.
        index = tuple(
            slice(k, k + 1) if isinstance(k, Number) else k  # type: ignore
            for k in index
        )
        name = "blocks-" + tokenize(self._array, index)
        # _key_array is an object ndarray of task keys; fancy-index it to pick
        # the selected blocks.
        new_keys = self._array._key_array[index]
        chunks = tuple(
            tuple(np.array(c)[i].tolist()) for c, i in zip(self._array.chunks, index)
        )
        keys = product(*(range(len(c)) for c in chunks))
        # Each output task simply aliases the corresponding selected block.
        layer = {(name,) + key: tuple(new_keys[key].tolist()) for key in keys}
        graph = HighLevelGraph.from_collections(name, layer, dependencies=[self._array])
        return Array(graph, name, chunks, meta=self._array)
    def __eq__(self, other: Any) -> bool:
        # Two views are equal only when they wrap the very same array object.
        if isinstance(other, BlockView):
            return self._array is other._array
        else:
            return NotImplemented
    @property
    def size(self) -> int:
        """
        The total number of blocks in the array.
        """
        return math.prod(self.shape)
    @property
    def shape(self) -> tuple[int, ...]:
        """
        The number of blocks per axis. Alias of ``dask.array.numblocks``.
        """
        return self._array.numblocks
    def ravel(self) -> list[Array]:
        """
        Return a flattened list of all the blocks in the array in C order.
        """
        return [self[idx] for idx in np.ndindex(self.shape)]
from dask.array.blockwise import blockwise
from dask.array.utils import compute_meta, meta_from_array
| bsd-3-clause | 5cad56ae8e5ac4859e78364827e91c18 | 31.73409 | 115 | 0.578244 | 3.863578 | false | false | false | false |
dask/dask | dask/diagnostics/profile_visualize.py | 2 | 15882 | import random
import warnings
from bisect import bisect_left
from itertools import cycle
from operator import add, itemgetter
from tlz import accumulate, groupby, pluck, unique
from dask.core import istask
from dask.utils import apply, funcname, import_required
def BOKEH_VERSION():
    """Return the installed bokeh version, parsed for easy comparison."""
    # Imported lazily so bokeh stays an optional dependency.
    import bokeh
    from packaging.version import parse as parse_version

    version_string = bokeh.__version__
    return parse_version(version_string)
# Shared error message for import_required() calls when bokeh is absent.
_BOKEH_MISSING_MSG = "Diagnostics plots require `bokeh` to be installed"
def unquote(expr):
    """Undo task-quoting of literal containers inside a dask graph.

    Tasks of the form ``(tuple, [...])``, ``(list, [...])``, ``(set, [...])``
    or ``(dict, [[k, v], ...])`` are rebuilt into the actual container,
    recursing into their elements.  Anything else is returned unchanged.
    """
    if not istask(expr):
        return expr
    head = expr[0]
    if head in (tuple, list, set):
        return head(map(unquote, expr[1]))
    if head == dict and isinstance(expr[1], list) and isinstance(expr[1][0], list):
        return dict(map(unquote, expr[1]))
    return expr
def pprint_task(task, keys, label_size=60):
    """Return a nicely formatted string for a task.
    Parameters
    ----------
    task:
        Value within dask graph to render as text
    keys: iterable
        List of keys within dask graph
    label_size: int (optional)
        Maximum size of output label, defaults to 60
    Examples
    --------
    >>> from operator import add, mul
    >>> dsk = {'a': 1,
    ...        'b': 2,
    ...        'c': (add, 'a', 'b'),
    ...        'd': (add, (mul, 'a', 'b'), 'c'),
    ...        'e': (sum, ['a', 'b', 5]),
    ...        'f': (add,),
    ...        'g': []}
    >>> pprint_task(dsk['c'], dsk)
    'add(_, _)'
    >>> pprint_task(dsk['d'], dsk)
    'add(mul(_, _), _)'
    >>> pprint_task(dsk['e'], dsk)
    'sum([_, _, *])'
    >>> pprint_task(dsk['f'], dsk)
    'add()'
    >>> pprint_task(dsk['g'], dsk)
    '[]'
    """
    if istask(task):
        func = task[0]
        # apply-style tasks carry (apply, func, args, kwargs).
        if func is apply:
            head = funcname(task[1])
            tail = ")"
            args = unquote(task[2]) if len(task) > 2 else ()
            kwargs = unquote(task[3]) if len(task) > 3 else {}
        else:
            # Composed callables (e.g. toolz Compose) render as nested calls.
            if hasattr(func, "funcs"):
                head = "(".join(funcname(f) for f in func.funcs)
                tail = ")" * len(func.funcs)
            else:
                head = funcname(task[0])
                tail = ")"
            args = task[1:]
            kwargs = {}
        if args or kwargs:
            # Split the remaining label budget evenly across the arguments.
            label_size2 = int(
                (label_size - len(head) - len(tail)) // (len(args) + len(kwargs))
            )
            pprint = lambda t: pprint_task(t, keys, label_size2)
            if args:
                # Elide arguments entirely when the budget gets too small.
                if label_size2 > 5:
                    args = ", ".join(pprint(t) for t in args)
                else:
                    args = "..."
            else:
                args = ""
            if kwargs:
                if label_size2 > 5:
                    kwargs = ", " + ", ".join(
                        f"{k}={pprint(v)}" for k, v in sorted(kwargs.items())
                    )
                else:
                    kwargs = ", ..."
            else:
                kwargs = ""
        return f"{head}({args}{kwargs}{tail}"
    elif isinstance(task, list):
        if not task:
            return "[]"
        elif len(task) > 3:
            # Show only the first three elements of long lists.
            result = pprint_task(task[:3], keys, label_size)
            return result[:-1] + ", ...]"
        else:
            label_size2 = int((label_size - 2 - 2 * len(task)) // len(task))
            args = ", ".join(pprint_task(t, keys, label_size2) for t in task)
            return f"[{args}]"
    else:
        # Leaves: '_' for references to other graph keys, '*' for literals.
        try:
            if task in keys:
                return "_"
            else:
                return "*"
        except TypeError:
            # Unhashable values cannot be keys, so they must be literals.
            return "*"
def get_colors(palette, funcs):
    """Get a dict mapping funcs to colors from palette.
    Parameters
    ----------
    palette : string
        Name of the bokeh palette to use, must be a member of
        bokeh.palettes.all_palettes.
    funcs : iterable
        Iterable of function names
    """
    palettes = import_required("bokeh.palettes", _BOKEH_MISSING_MSG)
    unique_funcs = sorted(unique(funcs))
    n_funcs = len(unique_funcs)
    palette_lookup = palettes.all_palettes[palette]
    keys = list(sorted(palette_lookup.keys()))
    # Pick the smallest palette size that still covers all functions
    # (or the largest available if none is big enough).
    index = keys[min(bisect_left(keys, n_funcs), len(keys) - 1)]
    palette = palette_lookup[index]
    # Some bokeh palettes repeat colors, we want just the unique set
    palette = list(unique(palette))
    if len(palette) > n_funcs:
        # Consistently shuffle palette - prevents just using low-range
        random.Random(42).shuffle(palette)
    # ``cycle`` guards against more funcs than colors.
    color_lookup = dict(zip(unique_funcs, cycle(palette)))
    return [color_lookup[n] for n in funcs]
def visualize(
    profilers, filename="profile.html", show=True, save=None, mode=None, **kwargs
):
    """Visualize the results of profiling in a bokeh plot.
    If multiple profilers are passed in, the plots are stacked vertically.
    Parameters
    ----------
    profilers : profiler or list
        Profiler or list of profilers.
    filename : string, optional
        Name of the plot output file.
    show : boolean, optional
        If True (default), the plot is opened in a browser.
    save : boolean, optional
        If True (default when not in notebook), the plot is saved to disk.
    mode : str, optional
        Mode passed to bokeh.output_file()
    **kwargs
        Other keyword arguments, passed to bokeh.figure. These will override
        all defaults set by visualize.
    Returns
    -------
    The completed bokeh plot object.
    """
    bp = import_required("bokeh.plotting", _BOKEH_MISSING_MSG)
    from bokeh.io import state
    # ``file_path`` is the deprecated spelling of ``filename``
    if "file_path" in kwargs:
        warnings.warn(
            "The file_path keyword argument is deprecated "
            "and will be removed in a future release. "
            "Please use filename instead.",
            category=FutureWarning,
            stacklevel=2,
        )
        filename = kwargs.pop("file_path")
    # default: save to disk unless we're rendering inline in a notebook
    if save is None:
        save = not state.curstate().notebook
    if not isinstance(profilers, list):
        profilers = [profilers]
    # each profiler knows how to render itself; kwargs go to bokeh.figure
    figs = [prof._plot(**kwargs) for prof in profilers]
    # Stack the plots
    if len(figs) == 1:
        p = figs[0]
    else:
        # share the x (time) axis across all stacked plots, and trim
        # titles/margins so they read as one figure
        top = figs[0]
        for f in figs[1:]:
            f.x_range = top.x_range
            f.title = None
            f.min_border_top = 20
            # Bokeh 3 renamed plot_height -> height
            if BOKEH_VERSION().major < 3:
                f.plot_height -= 30
            else:
                f.height -= 30
        # only the bottom plot keeps its x-axis label
        for f in figs[:-1]:
            f.xaxis.axis_label = None
            f.min_border_bottom = 20
            if BOKEH_VERSION().major < 3:
                f.plot_height -= 30
            else:
                f.height -= 30
        for f in figs:
            f.min_border_left = 75
            f.min_border_right = 75
        p = bp.gridplot([[f] for f in figs])
    if show:
        bp.show(p)
    if save:
        bp.output_file(filename, mode=mode)
        bp.save(p)
    return p
def plot_tasks(results, dsk, palette="Viridis", label_size=60, **kwargs):
    """Visualize the results of profiling in a bokeh plot.
    Parameters
    ----------
    results : sequence
        Output of Profiler.results
    dsk : dict
        The dask graph being profiled.
    palette : string, optional
        Name of the bokeh palette to use, must be a member of
        bokeh.palettes.all_palettes.
    label_size: int (optional)
        Maximum size of output labels in plot, defaults to 60
    **kwargs
        Other keyword arguments, passed to bokeh.figure. These will override
        all defaults set by visualize.
    Returns
    -------
    The completed bokeh plot object.
    """
    bp = import_required("bokeh.plotting", _BOKEH_MISSING_MSG)
    from bokeh.models import HoverTool
    defaults = dict(
        title="Profile Results",
        tools="hover,save,reset,xwheel_zoom,xpan",
        toolbar_location="above",
        width=800,
        height=300,
    )
    # Support plot_width and plot_height for backwards compatibility
    if "plot_width" in kwargs:
        kwargs["width"] = kwargs.pop("plot_width")
    if "plot_height" in kwargs:
        kwargs["height"] = kwargs.pop("plot_height")
    defaults.update(**kwargs)
    if results:
        # results rows look like (key, task, start, end, worker_id)
        keys, tasks, starts, ends, ids = zip(*results)
        # group result rows per worker id and compute per-task durations
        id_group = groupby(itemgetter(4), results)
        timings = {
            k: [i.end_time - i.start_time for i in v] for (k, v) in id_group.items()
        }
        # map worker id -> plot row, ordered by duration list (descending)
        id_lk = {
            t[0]: n
            for (n, t) in enumerate(
                sorted(timings.items(), key=itemgetter(1), reverse=True)
            )
        }
        left = min(starts)
        right = max(ends)
        p = bp.figure(
            y_range=[str(i) for i in range(len(id_lk))],
            x_range=[0, right - left],
            **defaults,
        )
        # one rect per task: x/width give the time span (relative to the
        # earliest start), y gives the worker row
        data = {}
        data["width"] = width = [e - s for (s, e) in zip(starts, ends)]
        data["x"] = [w / 2 + s - left for (w, s) in zip(width, starts)]
        data["y"] = [id_lk[i] + 1 for i in ids]
        data["function"] = funcs = [pprint_task(i, dsk, label_size) for i in tasks]
        data["color"] = get_colors(palette, funcs)
        data["key"] = [str(i) for i in keys]
        source = bp.ColumnDataSource(data=data)
        p.rect(
            source=source,
            x="x",
            y="y",
            height=1,
            width="width",
            color="color",
            line_color="gray",
        )
    else:
        # no results: render an empty figure with a placeholder range
        p = bp.figure(y_range=[str(i) for i in range(8)], x_range=[0, 10], **defaults)
    p.grid.grid_line_color = None
    p.axis.axis_line_color = None
    p.axis.major_tick_line_color = None
    p.yaxis.axis_label = "Worker ID"
    p.xaxis.axis_label = "Time (s)"
    # hover shows the task key and pretty-printed call for each rect
    hover = p.select(HoverTool)
    hover.tooltips = """
    <div>
        <span style="font-size: 14px; font-weight: bold;">Key:</span>
        <span style="font-size: 10px; font-family: Monaco, monospace;">@key</span>
    </div>
    <div>
        <span style="font-size: 14px; font-weight: bold;">Task:</span>
        <span style="font-size: 10px; font-family: Monaco, monospace;">@function</span>
    </div>
    """
    hover.point_policy = "follow_mouse"
    return p
def plot_resources(results, palette="Viridis", **kwargs):
    """Plot resource usage in a bokeh plot.
    Parameters
    ----------
    results : sequence
        Output of ResourceProfiler.results
    palette : string, optional
        Name of the bokeh palette to use, must be a member of
        bokeh.palettes.all_palettes.
    **kwargs
        Other keyword arguments, passed to bokeh.figure. These will override
        all defaults set by plot_resources.
    Returns
    -------
    The completed bokeh plot object.
    """
    bp = import_required("bokeh.plotting", _BOKEH_MISSING_MSG)
    from bokeh import palettes
    from bokeh.models import LinearAxis, Range1d
    defaults = dict(
        title="Profile Results",
        tools="save,reset,xwheel_zoom,xpan",
        toolbar_location="above",
        width=800,
        height=300,
    )
    # Support plot_width and plot_height for backwards compatibility
    if "plot_width" in kwargs:
        kwargs["width"] = kwargs.pop("plot_width")
        if BOKEH_VERSION().major >= 3:
            warnings.warn("Use width instead of plot_width with Bokeh >= 3")
    if "plot_height" in kwargs:
        kwargs["height"] = kwargs.pop("plot_height")
        if BOKEH_VERSION().major >= 3:
            warnings.warn("Use height instead of plot_height with Bokeh >= 3")
    # Drop `label_size` to match `plot_cache` and `plot_tasks` kwargs
    if "label_size" in kwargs:
        kwargs.pop("label_size")
    defaults.update(**kwargs)
    if results:
        # results rows look like (timestamp, mem, cpu)
        t, mem, cpu = zip(*results)
        left, right = min(t), max(t)
        # plot time relative to the first sample
        t = [i - left for i in t]
        p = bp.figure(
            y_range=fix_bounds(0, max(cpu), 100),
            x_range=fix_bounds(0, right - left, 1),
            **defaults,
        )
    else:
        # no samples: empty figure with placeholder ranges
        t = mem = cpu = []
        p = bp.figure(y_range=(0, 100), x_range=(0, 1), **defaults)
    colors = palettes.all_palettes[palette][6]
    # CPU trace on the primary (left) y-axis
    p.line(
        t,
        cpu,
        color=colors[0],
        line_width=4,
        legend_label="% CPU",
    )
    p.yaxis.axis_label = "% CPU"
    # memory trace on a secondary (right) y-axis with its own range
    p.extra_y_ranges = {
        "memory": Range1d(
            *fix_bounds(min(mem) if mem else 0, max(mem) if mem else 100, 100)
        )
    }
    p.line(
        t,
        mem,
        color=colors[2],
        y_range_name="memory",
        line_width=4,
        legend_label="Memory",
    )
    p.add_layout(LinearAxis(y_range_name="memory", axis_label="Memory (MB)"), "right")
    p.xaxis.axis_label = "Time (s)"
    return p
def fix_bounds(start, end, min_span):
    """Return ``(start, end)`` with ``end`` pushed out (if needed) so the
    span covers at least ``min_span``."""
    floor = start + min_span
    if end < floor:
        end = floor
    return start, end
def plot_cache(
    results, dsk, start_time, metric_name, palette="Viridis", label_size=60, **kwargs
):
    """Visualize the results of profiling in a bokeh plot.
    Parameters
    ----------
    results : sequence
        Output of CacheProfiler.results
    dsk : dict
        The dask graph being profiled.
    start_time : float
        Start time of the profile.
    metric_name : string
        Metric used to measure cache size
    palette : string, optional
        Name of the bokeh palette to use, must be a member of
        bokeh.palettes.all_palettes.
    label_size: int (optional)
        Maximum size of output labels in plot, defaults to 60
    **kwargs
        Other keyword arguments, passed to bokeh.figure. These will override
        all defaults set by visualize.
    Returns
    -------
    The completed bokeh plot object.
    """
    bp = import_required("bokeh.plotting", _BOKEH_MISSING_MSG)
    from bokeh.models import HoverTool
    defaults = dict(
        title="Profile Results",
        tools="hover,save,reset,wheel_zoom,xpan",
        toolbar_location="above",
        width=800,
        height=300,
    )
    # Support plot_width and plot_height for backwards compatibility
    if "plot_width" in kwargs:
        kwargs["width"] = kwargs.pop("plot_width")
        if BOKEH_VERSION().major >= 3:
            warnings.warn("Use width instead of plot_width with Bokeh >= 3")
    if "plot_height" in kwargs:
        kwargs["height"] = kwargs.pop("plot_height")
        if BOKEH_VERSION().major >= 3:
            warnings.warn("Use height instead of plot_height with Bokeh >= 3")
    defaults.update(**kwargs)
    if results:
        # fields 3 and 4 of each result row are the cache/free timestamps
        starts, ends = list(zip(*results))[3:]
        # all distinct timestamps at which the cache contents changed
        tics = sorted(unique(starts + ends))
        # group results by the pretty-printed task they belong to
        groups = groupby(lambda d: pprint_task(d[1], dsk, label_size), results)
        data = {}
        for k, vals in groups.items():
            # per-tic deltas: +metric when cached, -metric when freed,
            # then a running total gives the cache size over time
            cnts = dict.fromkeys(tics, 0)
            for v in vals:
                cnts[v.cache_time] += v.metric
                cnts[v.free_time] -= v.metric
            data[k] = [0] + list(accumulate(add, pluck(1, sorted(cnts.items()))))
        # shift timestamps to be relative to the profile start
        tics = [0] + [i - start_time for i in tics]
        p = bp.figure(x_range=[0, max(tics)], **defaults)
        # one line per task group, colored consistently with plot_tasks
        for (key, val), color in zip(data.items(), get_colors(palette, data.keys())):
            p.line(
                "x",
                "y",
                line_color=color,
                line_width=3,
                source=bp.ColumnDataSource(
                    {"x": tics, "y": val, "label": [key for i in val]}
                ),
            )
    else:
        # no results: empty figure with placeholder ranges
        p = bp.figure(y_range=[0, 10], x_range=[0, 10], **defaults)
    p.yaxis.axis_label = f"Cache Size ({metric_name})"
    p.xaxis.axis_label = "Time (s)"
    # hover shows which task's cache line is under the cursor
    hover = p.select(HoverTool)
    hover.tooltips = """
    <div>
        <span style="font-size: 14px; font-weight: bold;">Task:</span>
        <span style="font-size: 10px; font-family: Monaco, monospace;">@label</span>
    </div>
    """
    return p
| bsd-3-clause | eadeadcd77296c97d930ce09911b628a | 29.838835 | 87 | 0.545649 | 3.699511 | false | false | false | false |
rapidpro/casepro | casepro/cases/migrations/0007_outgoing.py | 2 | 1351 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """
    Adds the ``Outgoing`` model: a record of an outgoing broadcast tied to an
    org, optionally a case, and the user who created it.

    NOTE: this is an applied migration - the operations below must not be
    edited.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("orgs", "0008_org_timezone"),
        ("cases", "0006_auto_20150508_0912"),
    ]
    operations = [
        migrations.CreateModel(
            name="Outgoing",
            fields=[
                ("id", models.AutoField(verbose_name="ID", serialize=False, auto_created=True, primary_key=True)),
                ("broadcast_id", models.IntegerField()),
                ("created_on", models.DateTimeField(db_index=True)),
                (
                    "case",
                    models.ForeignKey(related_name="outgoing", to="cases.Case", null=True, on_delete=models.PROTECT),
                ),
                (
                    "created_by",
                    models.ForeignKey(related_name="outgoing", to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT),
                ),
                (
                    "org",
                    models.ForeignKey(
                        related_name="outgoing", verbose_name="Organization", to="orgs.Org", on_delete=models.PROTECT
                    ),
                ),
            ],
        )
    ]
| bsd-3-clause | 0aac434667d0d4f673b4bb6cd805b583 | 33.641026 | 118 | 0.510733 | 4.67474 | false | false | false | false |
rapidpro/casepro | casepro/msgs/views.py | 1 | 25232 | import logging
from collections import defaultdict
import iso639
from dash.orgs.views import OrgObjPermsMixin, OrgPermsMixin
from el_pagination.paginators import LazyPaginator
from smartmin.csv_imports.models import ImportTask
from smartmin.mixins import NonAtomicMixin
from smartmin.views import (
SmartCreateView,
SmartCRUDL,
SmartCSVImportView,
SmartDeleteView,
SmartListView,
SmartReadView,
SmartTemplateView,
SmartUpdateView,
)
from temba_client.utils import parse_iso8601
from django import forms
from django.core.validators import FileExtensionValidator
from django.http import HttpResponse, HttpResponseBadRequest, JsonResponse
from django.urls import reverse
from django.utils.timesince import timesince
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _
from casepro.rules.mixins import RuleFormMixin
from casepro.statistics.models import DailyCount
from casepro.utils import JSONEncoder, month_range, str_to_bool
from casepro.utils.export import BaseDownloadView
from .forms import FaqForm, LabelForm
from .models import FAQ, Label, Message, MessageExport, MessageFolder, Outgoing, OutgoingFolder, ReplyExport
from .tasks import message_export, reply_export
RESPONSE_DELAY_WARN_SECONDS = 24 * 60 * 60 # show response delays > 1 day as warning
logger = logging.getLogger(__name__)
# Override the ImportTask start method so we can use our self-defined task
def override_start(self, org): # pragma: no cover
    """
    Replacement for ``ImportTask.start`` that queues our own celery task
    (``faq_csv_import``) instead of smartmin's default importer.
    """
    from .tasks import faq_csv_import
    self.log("Queued import at %s" % now())
    self.save(update_fields=("import_log",))
    # trigger task
    result = faq_csv_import.delay(org.id, self.id)
    # remember the celery task id so the import can be tracked
    self.task_id = result.task_id
    self.save(update_fields=("task_id",))
# monkey-patch smartmin's ImportTask to use the override above
ImportTask.start = override_start
class LabelCRUDL(SmartCRUDL):
    """CRUD views for message labels, plus watch/unwatch endpoints."""

    actions = ("create", "update", "read", "delete", "list", "watch", "unwatch")
    model = Label

    class Create(RuleFormMixin, OrgPermsMixin, SmartCreateView):
        form_class = LabelForm

        def get_form_kwargs(self):
            # form needs the org to scope choices, and to know this is a create
            kwargs = super(LabelCRUDL.Create, self).get_form_kwargs()
            kwargs["org"] = self.request.org
            kwargs["is_create"] = True
            return kwargs

        def derive_initial(self):
            # label created manually in casepro aren't synced by default
            initial = super(LabelCRUDL.Create, self).derive_initial()
            initial["is_synced"] = False
            return initial

        def save(self, obj):
            # bypass default model save - Label.create handles rule creation too
            data = self.form.cleaned_data
            org = self.request.org
            name = data["name"]
            description = data["description"]
            tests = self.construct_tests()
            is_synced = data["is_synced"]
            self.object = Label.create(org, name, description, tests, is_synced)

        def get_success_url(self):
            return reverse("msgs.label_read", args=[self.object.pk])

    class Update(RuleFormMixin, OrgObjPermsMixin, SmartUpdateView):
        form_class = LabelForm
        success_url = "id@msgs.label_read"

        def get_form_kwargs(self):
            kwargs = super(LabelCRUDL.Update, self).get_form_kwargs()
            kwargs["org"] = self.request.org
            kwargs["is_create"] = False
            return kwargs

        def post_save(self, obj):
            # re-derive the label's rule tests from the submitted form fields
            obj = super(LabelCRUDL.Update, self).post_save(obj)
            tests = self.construct_tests()
            obj.update_tests(tests)
            return obj

    class Read(OrgObjPermsMixin, SmartReadView):
        def get_queryset(self):
            # restrict to labels visible to this user in this org
            return Label.get_all(self.request.org, self.request.user)

        def get_context_data(self, **kwargs):
            context = super(LabelCRUDL.Read, self).get_context_data(**kwargs)
            # augment usual label JSON
            label_json = self.object.as_json()
            label_json["watching"] = self.object.is_watched_by(self.request.user)
            # angular app requires context data in JSON format
            context["context_data_json"] = {"label": label_json}
            context["rule_tests"] = self.object.rule.get_tests_description() if self.object.rule else ""
            return context

    class Delete(OrgObjPermsMixin, SmartDeleteView):
        cancel_url = "@msgs.label_list"

        def post(self, request, *args, **kwargs):
            # soft-delete via release() rather than a hard DB delete
            label = self.get_object()
            label.release()
            return HttpResponse(status=204)

    class List(OrgPermsMixin, SmartListView):
        """JSON list of labels, optionally with per-month message counts."""

        def get(self, request, *args, **kwargs):
            with_activity = str_to_bool(self.request.GET.get("with_activity", ""))
            labels = list(Label.get_all(self.request.org, self.request.user).order_by("name"))
            Label.bulk_cache_initialize(labels)
            if with_activity:
                # get message statistics
                this_month = DailyCount.get_by_label(labels, DailyCount.TYPE_INCOMING, *month_range(0)).scope_totals()
                last_month = DailyCount.get_by_label(labels, DailyCount.TYPE_INCOMING, *month_range(-1)).scope_totals()

            def as_json(label):
                obj = label.as_json()
                if with_activity:
                    obj["activity"] = {"this_month": this_month.get(label, 0), "last_month": last_month.get(label, 0)}
                return obj

            return JsonResponse({"results": [as_json(l) for l in labels]})

    class Watch(OrgObjPermsMixin, SmartReadView):
        """
        Endpoint for watching a label
        """

        permission = "msgs.label_read"

        def post(self, request, *args, **kwargs):
            self.get_object().watch(request.user)
            return HttpResponse(status=204)

    class Unwatch(OrgObjPermsMixin, SmartReadView):
        """
        Endpoint for unwatching a label
        """

        permission = "msgs.label_read"

        def post(self, request, *args, **kwargs):
            self.get_object().unwatch(request.user)
            return HttpResponse(status=204)
class MessageSearchMixin(object):
    def derive_search(self):
        """
        Collects and prepares message search parameters into JSON serializable dict
        """
        params = self.request.GET

        folder = MessageFolder[params["folder"]]
        # the flagged folder can optionally include archived messages too
        if folder == MessageFolder.flagged and str_to_bool(params.get("archived", "")):
            folder = MessageFolder.flagged_with_archived

        return {
            "folder": folder,
            "label": params.get("label", None),
            "text": params.get("text", None),
            "contact": params.get("contact", None),
            "after": parse_iso8601(params.get("after", None)),
            "before": parse_iso8601(params.get("before", None)),
        }
class MessageCRUDL(SmartCRUDL):
    """JSON/AJAX endpoints for searching and acting on incoming messages."""

    actions = ("search", "lock", "action", "label", "bulk_reply", "forward", "history")
    model = Message

    class Search(OrgPermsMixin, MessageSearchMixin, SmartTemplateView):
        """
        JSON endpoint for fetching incoming messages
        """

        page_size = 50

        def get_messages(self, search, last_refresh=None):
            """Returns the message queryset for this search, optionally limited
            to messages modified after ``last_refresh``."""
            org = self.request.org
            user = self.request.user
            queryset = Message.search(org, user, search, modified_after=last_refresh, all=False)
            return queryset.prefetch_related("contact", "labels", "case__assignee", "case__user_assignee")

        def get_context_data(self, **kwargs):
            context = super(MessageCRUDL.Search, self).get_context_data(**kwargs)
            page = int(self.request.GET.get("page", 1))
            last_refresh = self.request.GET.get("last_refresh")
            search = self.derive_search()
            # this is a refresh of new and modified messages
            if last_refresh:
                messages = self.get_messages(search, last_refresh)
                # don't use paging for these messages
                context["object_list"] = list(messages)
                context["has_more"] = False
            else:
                messages = self.get_messages(search)
                paginator = LazyPaginator(messages, per_page=self.page_size)
                context["object_list"] = paginator.page(page)
                context["has_more"] = paginator.num_pages > page
            return context

        def render_to_response(self, context, **response_kwargs):
            results = []
            for m in context["object_list"]:
                msg = m.as_json()
                # include this user's lock state for each message
                msg["lock"] = m.get_lock(self.request.user)
                results.append(msg)
            return JsonResponse({"results": results, "has_more": context["has_more"]}, encoder=JSONEncoder)

    class Lock(OrgPermsMixin, SmartTemplateView):
        """
        AJAX endpoint for updating messages with a date and user id.
        Takes a list of message ids.
        """

        @classmethod
        def derive_url_pattern(cls, path, action):
            return r"^message/lock/(?P<action>\w+)/$"

        def post(self, request, *args, **kwargs):
            org = request.org
            user = request.user
            action = kwargs["action"]
            message_ids = request.json["messages"]
            messages = org.incoming_messages.filter(org=org, backend_id__in=message_ids)
            lock_messages = []
            if action == "lock":
                # first pass: collect messages already locked (presumably by
                # another user - the semantics live in Message.get_lock)
                for message in messages:
                    if message.get_lock(request.user):
                        lock_messages.append(message.backend_id)
                # only lock for this user if nothing was already locked
                if not lock_messages:
                    for message in messages:
                        message.user_lock(user)
            elif action == "unlock":
                for message in messages:
                    message.user_unlock()
            else: # pragma: no cover
                return HttpResponseBadRequest("Invalid action: %s", action)
            # return the ids of messages that could not be locked
            return JsonResponse({"messages": lock_messages}, encoder=JSONEncoder)

    class Action(OrgPermsMixin, SmartTemplateView):
        """
        AJAX endpoint for bulk message actions. Takes a list of message ids.
        """

        @classmethod
        def derive_url_pattern(cls, path, action):
            return r"^message/action/(?P<action>\w+)/$"

        def post(self, request, *args, **kwargs):
            org = request.org
            user = request.user
            action = kwargs["action"]
            message_ids = request.json["messages"]
            messages = org.incoming_messages.filter(org=org, backend_id__in=message_ids)
            # label/unlabel actions carry the target label's id
            label_id = request.json.get("label")
            label = Label.get_all(org, user).get(pk=label_id) if label_id else None
            if action == "flag":
                Message.bulk_flag(org, user, messages)
            elif action == "unflag":
                Message.bulk_unflag(org, user, messages)
            elif action == "label":
                Message.bulk_label(org, user, messages, label)
            elif action == "unlabel":
                Message.bulk_unlabel(org, user, messages, label)
            elif action == "archive":
                Message.bulk_archive(org, user, messages)
            elif action == "restore":
                Message.bulk_restore(org, user, messages)
            else: # pragma: no cover
                return HttpResponseBadRequest("Invalid action: %s", action)
            return HttpResponse(status=204)

    class Label(OrgPermsMixin, SmartTemplateView):
        """
        AJAX endpoint for labelling a message.
        """

        @classmethod
        def derive_url_pattern(cls, path, action):
            return r"^message/label/(?P<id>\d+)/$"

        def post(self, request, *args, **kwargs):
            org = request.org
            user = request.user
            # FIX: was Label.get_all(self.org, user) - these views don't define
            # self.org; use the local org (= request.org) like the other views
            user_labels = Label.get_all(org, user)
            message_id = int(kwargs["id"])
            message = org.incoming_messages.filter(org=org, backend_id=message_id).first()
            label_ids = request.json["labels"]
            specified_labels = list(user_labels.filter(pk__in=label_ids))
            # user can't remove labels that they can't see
            unseen_labels = [l for l in message.labels.all() if l not in user_labels]
            message.update_labels(user, specified_labels + unseen_labels)
            return HttpResponse(status=204)

    class BulkReply(OrgPermsMixin, SmartTemplateView):
        """
        JSON endpoint for bulk messages replies
        """

        @classmethod
        def derive_url_pattern(cls, path, action):
            return r"^message/bulk_reply/$"

        def post(self, request, *args, **kwargs):
            text = request.json["text"]
            message_ids = request.json["messages"]
            messages = Message.objects.filter(org=request.org, backend_id__in=message_ids).select_related("contact")
            # organize messages by contact
            messages_by_contact = defaultdict(list)
            for msg in messages:
                messages_by_contact[msg.contact].append(msg)
            # the actual message that will be replied to is the oldest selected message for each contact
            reply_tos = []
            for contact, contact_messages in messages_by_contact.items():
                contact_messages = sorted(contact_messages, key=lambda m: m.created_on, reverse=True)
                reply_tos.append(contact_messages[0])
            outgoing = Outgoing.create_bulk_replies(request.org, request.user, text, reply_tos)
            return JsonResponse({"messages": len(outgoing)})

    class Forward(OrgPermsMixin, SmartTemplateView):
        """
        JSON endpoint for forwarding a message to a URN
        """

        @classmethod
        def derive_url_pattern(cls, path, action):
            return r"^message/forward/(?P<id>\d+)/$"

        def post(self, request, *args, **kwargs):
            text = request.json["text"]
            message = Message.objects.get(org=request.org, backend_id=int(kwargs["id"]))
            urns = request.json["urns"]
            outgoing = Outgoing.create_forwards(request.org, request.user, text, urns, message)
            return JsonResponse({"messages": len(outgoing)})

    class History(OrgPermsMixin, SmartTemplateView):
        """
        JSON endpoint for fetching message history. Takes a message backend id
        """

        @classmethod
        def derive_url_pattern(cls, path, action):
            return r"^message/history/(?P<id>\d+)/$"

        def get(self, request, *args, **kwargs):
            message = Message.objects.get(org=request.org, backend_id=int(kwargs["id"]))
            actions = [a.as_json() for a in message.get_history()]
            return JsonResponse({"actions": actions}, encoder=JSONEncoder)
class MessageExportCRUDL(SmartCRUDL):
    """Create (queue) and download message exports."""

    model = MessageExport
    actions = ("create", "read")

    class Create(NonAtomicMixin, OrgPermsMixin, MessageSearchMixin, SmartCreateView):
        def post(self, request, *args, **kwargs):
            # create the export record then hand off to celery
            search = self.derive_search()
            export = MessageExport.create(self.request.org, self.request.user, search)
            message_export.delay(export.pk)
            return JsonResponse({"export_id": export.pk})

    class Read(BaseDownloadView):
        title = _("Download Messages")
        filename = "message_export.xls"
class ReplySearchMixin(object):
    def derive_search(self):
        """
        Collects and prepares reply search parameters into JSON serializable dict
        """
        get_param = self.request.GET.get
        return {
            "partner": get_param("partner"),
            "after": parse_iso8601(get_param("after")),
            "before": parse_iso8601(get_param("before")),
        }
class OutgoingCRUDL(SmartCRUDL):
    """JSON endpoints for searching outgoing messages and user replies."""

    actions = ("search", "search_replies")
    model = Outgoing

    class Search(OrgPermsMixin, SmartTemplateView):
        """
        JSON endpoint for fetching outgoing messages
        """

        def derive_search(self):
            # collect search params from the querystring
            folder = OutgoingFolder[self.request.GET["folder"]]
            text = self.request.GET.get("text", None)
            contact = self.request.GET.get("contact", None)
            return {"folder": folder, "text": text, "contact": contact}

        def get_context_data(self, **kwargs):
            context = super(OutgoingCRUDL.Search, self).get_context_data(**kwargs)
            org = self.request.org
            user = self.request.user
            page = int(self.request.GET.get("page", 1))
            search = self.derive_search()
            messages = Outgoing.search(org, user, search)
            paginator = LazyPaginator(messages, per_page=50)
            context["object_list"] = paginator.page(page)
            context["has_more"] = paginator.num_pages > page
            return context

        def render_to_response(self, context, **response_kwargs):
            return JsonResponse(
                {"results": [m.as_json() for m in context["object_list"]], "has_more": context["has_more"]},
                encoder=JSONEncoder,
            )

    class SearchReplies(OrgPermsMixin, ReplySearchMixin, SmartTemplateView):
        """
        JSON endpoint to fetch replies made by users
        """

        def get(self, request, *args, **kwargs):
            org = self.request.org
            user = self.request.user
            page = int(self.request.GET.get("page", 1))
            search = self.derive_search()
            # only messages that are actual replies to something
            items = Outgoing.search_replies(org, user, search).exclude(reply_to=None)
            paginator = LazyPaginator(items, 50)
            outgoing = paginator.page(page)
            has_more = paginator.num_pages > page

            def as_json(msg):
                # augment each reply with its original message and how long
                # the reply took (flagged as a warning when over a day)
                delay = (msg.created_on - msg.reply_to.created_on).total_seconds()
                obj = msg.as_json()
                obj.update(
                    {
                        "reply_to": {
                            "text": msg.reply_to.text,
                            "flagged": msg.reply_to.is_flagged,
                            "labels": [l.as_json(full=False) for l in msg.reply_to.labels.all()],
                        },
                        "response": {
                            "delay": timesince(msg.reply_to.created_on, now=msg.created_on),
                            "warning": delay > RESPONSE_DELAY_WARN_SECONDS,
                        },
                    }
                )
                return obj

            return JsonResponse({"results": [as_json(o) for o in outgoing], "has_more": has_more}, encoder=JSONEncoder)
class ReplyExportCRUDL(SmartCRUDL):
    """Create (queue) and download reply exports."""

    model = ReplyExport
    actions = ("create", "read")

    class Create(NonAtomicMixin, OrgPermsMixin, ReplySearchMixin, SmartCreateView):
        def post(self, request, *args, **kwargs):
            # create the export record then hand off to celery
            search = self.derive_search()
            export = self.model.create(self.request.org, self.request.user, search)
            reply_export.delay(export.pk)
            return JsonResponse({"export_id": export.pk})

    class Read(BaseDownloadView):
        title = _("Download Replies")
        filename = "reply_export.xls"
class FaqSearchMixin(object):
    def derive_search(self):
        """
        Collects and prepares FAQ search parameters into JSON serializable dict
        """
        params = self.request.GET
        return {
            "label": params.get("label", None),
            "text": params.get("text", None),
            "language": params.get("language", None),
        }
class FaqCRUDL(SmartCRUDL):
    """CRUD views for FAQs plus search, CSV import and language listing."""

    model = FAQ
    actions = ("list", "create", "read", "update", "delete", "search", "import", "languages")

    class List(OrgPermsMixin, SmartListView):
        fields = ("question", "answer", "language", "parent")
        default_order = ("-parent", "question")

        def derive_queryset(self, **kwargs):
            return FAQ.get_all(self.request.org)

    class Create(OrgPermsMixin, SmartCreateView):
        form_class = FaqForm

        def get_form_kwargs(self):
            kwargs = super(FaqCRUDL.Create, self).get_form_kwargs()
            # Get the data for post requests that didn't come through a form
            if self.request.method == "POST" and not self.request.POST and hasattr(self.request, "json"):
                kwargs["data"] = self.request.json
            kwargs["org"] = self.request.org
            return kwargs

        def save(self, obj):
            # bypass default model save - FAQ.create handles label wiring too
            data = self.form.cleaned_data
            org = self.request.org
            question = data["question"]
            answer = data["answer"]
            language = data["language"]
            parent = data["parent"]
            labels = data["labels"]
            faq = FAQ.create(org, question, answer, language, parent, labels)
            self.object = faq

    class Read(OrgPermsMixin, SmartReadView):
        fields = ["question", "answer", "language", "parent"]

        def derive_queryset(self, **kwargs):
            return FAQ.get_all(self.request.org)

        def get_context_data(self, **kwargs):
            context = super(FaqCRUDL.Read, self).get_context_data(**kwargs)
            edit_button_url = reverse("msgs.faq_update", args=[self.object.pk])
            # angular app requires context data in JSON format
            context["context_data_json"] = {"faq": self.object.as_json()}
            context["edit_button_url"] = edit_button_url
            context["can_delete"] = True
            # comma-separated label names for display
            labels = []
            for label in self.object.labels.all():
                labels.append(label.name)
            context["labels"] = ", ".join(labels)
            return context

    class Update(OrgPermsMixin, SmartUpdateView):
        form_class = FaqForm

        def get_form_kwargs(self):
            kwargs = super(FaqCRUDL.Update, self).get_form_kwargs()
            # Get the data for post requests that didn't come through a form
            if self.request.method == "POST" and not self.request.POST and hasattr(self.request, "json"):
                kwargs["data"] = self.request.json
            kwargs["org"] = self.request.org
            return kwargs

        def derive_initial(self):
            initial = super(FaqCRUDL.Update, self).derive_initial()
            initial["labels"] = self.object.labels.all()
            return initial

        def derive_fields(self):
            # only top-level FAQs (no parent) carry their own labels
            fields = ["question", "answer", "language", "parent"]
            if not self.object.parent:
                fields.append("labels")
            return tuple(fields)

    class Delete(OrgPermsMixin, SmartDeleteView):
        cancel_url = "@msgs.faq_list"

        def post(self, request, *args, **kwargs):
            # soft-delete via release() rather than a hard DB delete
            faq = self.get_object()
            faq.release()
            return HttpResponse(status=204)

    class Search(OrgPermsMixin, FaqSearchMixin, SmartTemplateView):
        """
        JSON endpoint for searching FAQs
        """

        def get_context_data(self, **kwargs):
            context = super(FaqCRUDL.Search, self).get_context_data(**kwargs)
            org = self.request.org
            user = self.request.user
            search = self.derive_search()
            faqs = FAQ.search(org, user, search)
            context["object_list"] = faqs
            return context

        def render_to_response(self, context, **response_kwargs):
            return JsonResponse({"results": [m.as_json() for m in context["object_list"]]}, encoder=JSONEncoder)

    class Import(OrgPermsMixin, SmartCSVImportView):
        """Upload a CSV of FAQs; processed asynchronously (see override_start)."""

        class Form(forms.ModelForm):
            csv_file = forms.FileField(label=_("Import file"), validators=[FileExtensionValidator(["csv"])])

            class Meta:
                model = ImportTask
                fields = ("csv_file",)

        model = ImportTask
        success_message = "File uploaded successfully. If your FAQs don't appear here soon, something went wrong."
        success_url = "@msgs.faq_list"

        def get_form_class(self):
            return FaqCRUDL.Import.Form

        def post_save(self, task):
            # NOTE(review): relies on self.org being set on the view - other
            # views in this file read self.request.org instead; confirm the
            # mixin actually provides this attribute
            task.start(self.org)
            return task

    class Languages(OrgPermsMixin, SmartTemplateView):
        """
        JSON endpoint for getting a list of currently all available languages
        """

        def get_context_data(self, **kwargs):
            context = super(FaqCRUDL.Languages, self).get_context_data(**kwargs)
            org = self.request.org
            # languages currently used by this org's FAQs
            langs = FAQ.get_all_languages(org)
            lang_list = []
            for lang in langs:
                lang_list.append(FAQ.get_language_from_code(lang["language"]))
            context["language_list"] = lang_list
            # full ISO-639 catalogue for the language picker
            iso_list = [{"name": l.name, "code": l.part3} for l in iso639.languages]
            context["iso_list"] = iso_list
            return context

        def render_to_response(self, context, **response_kwargs):
            return JsonResponse(
                {"results": context["language_list"], "iso_list": context["iso_list"]}, encoder=JSONEncoder
            )
| bsd-3-clause | 67011f3564cca7682998b47963849f04 | 34.994294 | 119 | 0.589093 | 4.130976 | false | false | false | false |
autoreject/autoreject | examples/plot_autoreject_workflow.py | 1 | 9018 | """
.. _plot_autoreject_workflow:
===================================================
Preprocessing workflow with ``autoreject`` and ICA
===================================================
This example demonstrates how to visualize data when preprocessing
with :mod:`autoreject` and discusses decisions about when and which
other preprocessing steps to use in combination.
**tldr**: We recommend that you first highpass filter the data,
then run autoreject (local) and supply the bad epochs detected by it
to the ICA algorithm for a robust fit, and finally run
autoreject (local) again.
"""
# Author: Alex Rockhill <aprockhill@mailbox.org>
# Mainak Jas <mjas@mgh.harvard.edu>
# Apoorva Karekal <apoorvak@uoregon.edu>
#
# License: BSD-3-Clause
# sphinx_gallery_thumbnail_number = 9
# %%
# .. contents:: Table of Contents
# :local:
#
# First, we download resting-state EEG data from a Parkinson's patient
# from OpenNeuro. We will do this using ``openneuro-py`` which can be
# installed with the command ``pip install openneuro-py``.
import os.path as op
import matplotlib.pyplot as plt
import openneuro
import mne
import autoreject
dataset = 'ds002778'  # The id code on OpenNeuro for this example dataset
subject_id = 'pd14'

target_dir = op.join(op.dirname(autoreject.__file__), '..', 'examples')
openneuro.download(dataset=dataset, target_dir=target_dir,
                   include=[f'sub-{subject_id}/ses-off'])

# %%
# We will now load in the raw data from the bdf file downloaded from OpenNeuro
# and, since this is resting-state data without any events, make regularly
# spaced events with which to epoch the raw data. In the averaged plot,
# we can see that there may be some eyeblink
# artifact contamination but, overall, the data is typical of
# resting-state EEG.

raw_fname = op.join(target_dir, f'sub-{subject_id}',
                    'ses-off', 'eeg', 'sub-pd14_ses-off_task-rest_eeg.bdf')
raw = mne.io.read_raw_bdf(raw_fname, preload=True)
# keep only channels present in the standard biosemi32 montage
dig_montage = mne.channels.make_standard_montage('biosemi32')
raw.drop_channels([ch for ch in raw.ch_names
                   if ch not in dig_montage.ch_names])
raw.set_montage(dig_montage)  # use the standard montage
epochs = mne.make_fixed_length_epochs(raw, duration=3, preload=True)

# plot the data
epochs.average().detrend().plot_joint()

# %%
# Autoreject without any other preprocessing
# ------------------------------------------
# Now, we'll naively apply autoreject as our first preprocessing step.
#
# As we can see in the plot of the rejected epochs, there are many eyeblinks
# that caused the epoch to be dropped. This resulted in a lot of the data
# being lost.
#
# The data looks fairly clean already and we don't want to interpolate
# more than a few sensors since we only have 32 to start, so the
# number of channels to interpolate was set to check some low numbers

ar = autoreject.AutoReject(n_interpolate=[1, 2, 3, 4], random_state=11,
                           n_jobs=1, verbose=True)
ar.fit(epochs[:20])  # fit on a few epochs to save time
epochs_ar, reject_log = ar.transform(epochs, return_log=True)

# %%
# visualize the dropped epochs
epochs[reject_log.bad_epochs].plot(scalings=dict(eeg=100e-6))

# %%
# and the reject log
reject_log.plot('horizontal')

# %%
# Autoreject with high-pass filter
# --------------------------------
# The data may be very valuable and the time for the experiment
# limited and so we may want to take steps to reduce the number of
# epochs dropped by first using other steps to preprocess the data.
# We will use a high-pass filter first to remove slow drift that could
# cause epochs to be dropped.
#
# When making this decision to filter the data, we do want to be careful
# because filtering can spread sharp, high-frequency transients and
# distort the phase of the signal. Most evoked response potential
# analyses use filtering since the interest is in the time series, but
# if you are doing a frequency based analysis, filtering before the
# Fourier transform could potentially be avoided by detrending instead.

raw.filter(l_freq=1, h_freq=None)
epochs = mne.make_fixed_length_epochs(raw, duration=3, preload=True)
ar = autoreject.AutoReject(n_interpolate=[1, 2, 3, 4], random_state=11,
                           n_jobs=1, verbose=True)
ar.fit(epochs[:20])  # fit on a few epochs to save time
epochs_ar, reject_log = ar.transform(epochs, return_log=True)

# %%
# visualize the dropped epochs
epochs[reject_log.bad_epochs].plot(scalings=dict(eeg=100e-6))

# %%
# and the reject log. As we can see in the plot, high-pass filtering reduced
# the number of epochs marked as bad by autoreject substantially.
reject_log.plot('horizontal')

# %%
# ICA
# ---
# Finally, we can apply independent components analysis (ICA) to remove
# eyeblinks from the data. If our analysis were to be very dependent on
# sensors at the front of the head, we could skip ICA and use the previous
# result. However, ICA can increase the amount of usable data by applying
# a spatial filter that downscales the data in sensors most affected by
# eyeblink artifacts.
#
# Note that ICA works best if bad segments of the data are removed
# Hence, we will remove the bad segments from the
# previous run of autoreject for the benefit of the ICA algorithm.

# compute ICA (fit only on the epochs autoreject kept)
ica = mne.preprocessing.ICA(random_state=99)
ica.fit(epochs[~reject_log.bad_epochs])

# %%
# We can see in the plots below that ICA effectively removed eyeblink
# artifact.
#
# plot source components to see which is made up of blinks
# NOTE(review): the component indices below are specific to this fit and
# random_state -- re-inspect the components if either changes.
exclude = [0,  # blinks
           2  # saccades
           ]
ica.plot_components(exclude)
ica.exclude = exclude

# %%
# plot with and without eyeblink component
ica.plot_overlay(epochs.average(), exclude=ica.exclude)
ica.apply(epochs, exclude=ica.exclude)  # modifies ``epochs`` in place

# %%
# Autoreject with highpass filter and ICA
# ---------------------------------------
# We can see in this section that preprocessing, especially ICA, can be made
# to do a lot of the heavy lifting. There isn't a huge difference when viewing
# the averaged data because the ICA effectively limited the number
# of epochs that had to be dropped. However, there are still artifacts such as
# non-stereotypical blinks that weren't able to be removed by ICA, channel
# "pops" (sharp transients with exponential RC decay), muscle artifact such as
# jaw clenches and gross movement artifact that could still impact analyses.
#
# These are the basic steps for a workflow with decisions that must be
# made based on what the data is being used for. Following this may help
# you optimize your use of ``autoreject`` in preprocessing.

# compute channel-level rejections
ar = autoreject.AutoReject(n_interpolate=[1, 2, 3, 4], random_state=11,
                           n_jobs=1, verbose=True)
ar.fit(epochs[:20])  # fit on the first 20 epochs to save time
epochs_ar, reject_log = ar.transform(epochs, return_log=True)
epochs[reject_log.bad_epochs].plot(scalings=dict(eeg=100e-6))

# %%
# We will do a few more visualizations to see that removing the bad epochs
# found by ``autoreject`` is still important even with preprocessing first.
# This is especially important if your analyses include trial-level statistics
# such as looking for bursting activity. We'll visualize why autoreject
# excluded these epochs and the effect that including these bad epochs would
# have on the data.
#
# First, we will visualize the reject log
reject_log.plot('horizontal')

# %%
# Next, we will visualize the cleaned average data and compare it against
# the bad segments.
evoked_bad = epochs[reject_log.bad_epochs].average()
plt.figure()
plt.plot(evoked_bad.times, evoked_bad.data.T * 1e6, 'r', zorder=-1)
epochs_ar.average().plot(axes=plt.gca())

# %%
# As a last optional step, we can do inspect the reject_log and make manual
# corrections to the reject_log. For instance, if data is limited, we may
# not want to drop epochs but retain the list of bad epochs for quality
# assurance metrics.
reject_log = ar.get_reject_log(epochs)
bad_epochs = reject_log.bad_epochs.copy()  # keep the original mask for QA bookkeeping
reject_log.bad_epochs[:] = False  # no bad epochs

# %%
# The modified reject log can be applied to the data as follows.
epochs_ar = ar.transform(epochs, reject_log=reject_log)
print(f'Number of epochs originally: {len(epochs)}, '
      f'after autoreject: {len(epochs_ar)}')

# %%
# Finally, don't forget that we are working with resting state data
# here. Here we used long epochs of 3 seconds so that frequency-domain
# analysis was possible with the epochs. However, this could also lead
# to longer segments of the data being rejected. If you want more
# fine-grained control over the artifacts, you can
# construct shorter epochs and use the autoreject log to mark
# annotations in MNE that can be used to reject the data during doing
# time-frequency analysis. We want to emphasize that there
# is no subsitute for visual inspection. Irrespective of the rejection
# method used, we highly recommend users to inspect their preprocessed
| bsd-3-clause | 0d47db086fd2d4c72fae4367eace67d1 | 38.726872 | 78 | 0.719783 | 3.522656 | false | false | false | false |
scrapinghub/dateparser | dateparser/utils/__init__.py | 1 | 6766 | import calendar
import logging
import types
import unicodedata
from datetime import datetime
import regex as re
from tzlocal import get_localzone
from pytz import UTC, timezone, UnknownTimeZoneError
from collections import OrderedDict
from dateparser.timezone_parser import _tz_offsets, StaticTzInfo
def strip_braces(date_string):
    """Return *date_string* with every ``{}``, ``()``, ``<>`` and ``[]``
    character removed."""
    return date_string.translate(str.maketrans('', '', '{}()<>[]'))
def normalize_unicode(string, form='NFKD'):
    """Decompose *string* using the given Unicode normalization *form*
    (NFKD by default) and drop all combining marks (category 'Mn')."""
    decomposed = unicodedata.normalize(form, string)
    kept = [char for char in decomposed
            if unicodedata.category(char) != 'Mn']
    return ''.join(kept)
def combine_dicts(primary_dict, supplementary_dict):
    """Merge two mappings into an OrderedDict.

    Keys of *primary_dict* come first: list values are concatenated
    (primary first), dict values are merged recursively, and any other
    value is overridden by the supplementary one.  Keys found only in
    *supplementary_dict* are appended afterwards in their own order.
    """
    merged = OrderedDict()
    for key, primary_value in primary_dict.items():
        if key not in supplementary_dict:
            merged[key] = primary_value
            continue
        supplementary_value = supplementary_dict[key]
        if isinstance(primary_value, list):
            merged[key] = primary_value + supplementary_value
        elif isinstance(primary_value, dict):
            merged[key] = combine_dicts(primary_value, supplementary_value)
        else:
            merged[key] = supplementary_value
    for key, supplementary_value in supplementary_dict.items():
        if key not in primary_dict:
            merged[key] = supplementary_value
    return merged
def find_date_separator(format):
    """Return the separator character that follows day/month directives in
    *format* (e.g. ``'/'`` for ``'%d/%m/%Y'``), or None when absent."""
    match = re.search(r'(?:(?:%[dbBmaA])(\W))+', format)
    return match.group(1) if match else None
def _get_missing_parts(fmt):
"""
Return a list containing missing parts (day, month, year)
from a date format checking its directives
"""
directive_mapping = {
'day': ['%d', '%-d', '%j', '%-j'],
'month': ['%b', '%B', '%m', '%-m'],
'year': ['%y', '%-y', '%Y']
}
missing = [
field for field in ('day', 'month', 'year')
if not any(directive in fmt for directive in directive_mapping[field])
]
return missing
def get_timezone_from_tz_string(tz_string):
    """Resolve *tz_string* to a tzinfo object.

    Tries the tz database first; on failure falls back to dateparser's
    static abbreviation/offset table, and re-raises the original
    ``UnknownTimeZoneError`` when nothing matches.
    """
    try:
        return timezone(tz_string)
    except UnknownTimeZoneError:
        padded = ' %s' % tz_string
        for name, info in _tz_offsets:
            if info['regex'].search(padded):
                return StaticTzInfo(name, info['offset'])
        raise
def localize_timezone(date_time, tz_string):
    """Attach the timezone named by *tz_string* to a naive *date_time*.

    Already-aware datetimes are returned unchanged.  ``localize`` is used
    when the tzinfo provides it (pytz-style), plain ``replace`` otherwise.
    """
    if date_time.tzinfo:
        return date_time
    tz = get_timezone_from_tz_string(tz_string)
    if hasattr(tz, 'localize'):
        return tz.localize(date_time)
    return date_time.replace(tzinfo=tz)
def apply_tzdatabase_timezone(date_time, pytz_string):
    """Convert *date_time* into the tz-database zone named *pytz_string*
    (no-op when it already carries exactly that tzinfo)."""
    target = timezone(pytz_string)
    if date_time.tzinfo != target:
        return date_time.astimezone(target)
    return date_time
def apply_dateparser_timezone(utc_datetime, offset_or_timezone_abb):
    """Convert *utc_datetime* using dateparser's static abbreviation/offset
    table; return None when the abbreviation is unknown."""
    padded = ' %s' % offset_or_timezone_abb
    for name, info in _tz_offsets:
        if info['regex'].search(padded):
            return utc_datetime.astimezone(StaticTzInfo(name, info['offset']))
    return None
def apply_timezone(date_time, tz_string):
    """Convert *date_time* into the zone named *tz_string*.

    Naive inputs are first assumed to be UTC.  The static abbreviation
    table is tried before the tz database.
    """
    if not date_time.tzinfo:
        if hasattr(UTC, 'localize'):
            date_time = UTC.localize(date_time)
        else:
            date_time = date_time.replace(tzinfo=UTC)

    converted = apply_dateparser_timezone(date_time, tz_string)
    if not converted:
        converted = apply_tzdatabase_timezone(date_time, tz_string)
    return converted
def apply_timezone_from_settings(date_obj, settings):
    """Localize and/or convert *date_obj* according to *settings*.

    - ``settings.TIMEZONE`` containing ``'local'`` localizes to the system
      timezone; any other value is resolved via ``localize_timezone``.
    - ``settings.TO_TIMEZONE`` then converts the result if set.
    - unless ``settings.RETURN_AS_TIMEZONE_AWARE`` is True, tzinfo is
      stripped from the returned datetime.

    Returns *date_obj* unchanged when *settings* is None.
    """
    if settings is None:
        return date_obj

    if 'local' in settings.TIMEZONE.lower():
        # Resolve the system timezone lazily: the (potentially costly)
        # get_localzone() lookup is only needed on this branch.
        tz = get_localzone()
        if hasattr(tz, 'localize'):
            date_obj = tz.localize(date_obj)
        else:
            date_obj = date_obj.replace(tzinfo=tz)
    else:
        date_obj = localize_timezone(date_obj, settings.TIMEZONE)

    if settings.TO_TIMEZONE:
        date_obj = apply_timezone(date_obj, settings.TO_TIMEZONE)

    if settings.RETURN_AS_TIMEZONE_AWARE is not True:
        date_obj = date_obj.replace(tzinfo=None)

    return date_obj
def get_last_day_of_month(year, month):
    """Return the number of the last day of *month* in *year* (28-31)."""
    _, last_day = calendar.monthrange(year, month)
    return last_day
def get_previous_leap_year(year):
    """Return the closest leap year strictly before *year*."""
    return _get_leap_year(year, future=False)
def get_next_leap_year(year):
    """Return the closest leap year strictly after *year*."""
    return _get_leap_year(year, future=True)
def _get_leap_year(year, future):
"""
Iterate through previous or next years until it gets a valid leap year
This is performed to avoid missing or including centurial leap years
"""
step = 1 if future else -1
leap_year = year + step
while not calendar.isleap(leap_year):
leap_year += step
return leap_year
def set_correct_day_from_settings(date_obj, settings, current_day=None):
    """ Set correct day attending the `PREFER_DAY_OF_MONTH` setting."""
    last_day = get_last_day_of_month(date_obj.year, date_obj.month)
    options = {
        'first': 1,
        'last': last_day,
        'current': current_day or datetime.now().day,
    }
    try:
        return date_obj.replace(day=options[settings.PREFER_DAY_OF_MONTH])
    except ValueError:
        # The preferred day does not exist in this month (e.g. the 31st
        # in February) -- clamp to the month's last day.
        return date_obj.replace(day=last_day)
def registry(cls):
    """Class decorator that memoizes instances of *cls* by a key.

    The decorated class must define a classmethod ``get_key``; ``__new__``
    is wrapped so that constructing with arguments that map to an
    already-seen key returns the cached instance.  Each cached instance
    also receives a ``registry_key`` attribute.
    """
    def choose(creator):
        def constructor(cls, *args, **kwargs):
            # Derive the cache key from the construction arguments.
            key = cls.get_key(*args, **kwargs)

            if not hasattr(cls, "__registry_dict"):
                setattr(cls, "__registry_dict", {})
            registry_dict = getattr(cls, "__registry_dict")

            if key not in registry_dict:
                # NOTE(review): kwargs participate in the key but are not
                # forwarded to the wrapped constructor -- confirm intended.
                registry_dict[key] = creator(cls, *args)
                setattr(registry_dict[key], 'registry_key', key)
            return registry_dict[key]
        # staticmethod so the wrapper replaces __new__ cleanly.
        return staticmethod(constructor)

    if not (hasattr(cls, "get_key")
            and isinstance(cls.get_key, types.MethodType)
            and cls.get_key.__self__ is cls):
        raise NotImplementedError("Registry classes require to implement class method get_key")

    setattr(cls, '__new__', choose(cls.__new__))
    return cls
def get_logger():
    """Return the shared ``dateparser`` logger, ensuring logging is set up."""
    setup_logging()
    return logging.getLogger('dateparser')
def setup_logging():
if len(logging.root.handlers):
return
config = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'console': {
'format': "%(asctime)s %(levelname)s: [%(name)s] %(message)s",
},
},
'handlers': {
'console': {
'level': logging.DEBUG,
'class': "logging.StreamHandler",
'formatter': "console",
'stream': "ext://sys.stdout",
},
},
'root': {
'level': logging.DEBUG,
'handlers': ["console"],
},
}
logging.config.dictConfig(config)
| bsd-3-clause | fb414e05b03ff00731d89d260858e646 | 27.428571 | 97 | 0.603311 | 3.715541 | false | false | false | false |
scrapinghub/dateparser | dateparser/__init__.py | 1 | 2631 | __version__ = '1.1.3'
from .date import DateDataParser
from .conf import apply_settings
# Module-level parser shared by calls that need no per-call configuration.
_default_parser = DateDataParser()


@apply_settings
def parse(date_string, date_formats=None, languages=None, locales=None,
          region=None, settings=None, detect_languages_function=None):
    """Parse date and time from given date string.

    :param date_string:
        A string representing date and/or time in a recognizably valid format.
    :type date_string: str

    :param date_formats:
        A list of format strings using directives as given
        `here <https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior>`_.
        The parser applies formats one by one, taking into account the
        detected languages/locales.
    :type date_formats: list

    :param languages:
        A list of language codes, e.g. ['en', 'es', 'zh-Hant'].  If locales
        are not given, languages and region are used to construct locales
        for translation.
    :type languages: list

    :param locales:
        A list of locale codes, e.g. ['fr-PF', 'qu-EC', 'af-NA'].  The
        parser uses only these locales to translate date string.
    :type locales: list

    :param region:
        A region code, e.g. 'IN', '001', 'NE'.  If locales are not given,
        languages and region are used to construct locales for translation.
    :type region: str

    :param settings:
        Configure customized behavior using settings defined in
        :mod:`dateparser.conf.Settings`.
    :type settings: dict

    :param detect_languages_function:
        A function for language detection that takes as input a string (the
        `date_string`) and a `confidence_threshold`, and returns a list of
        detected language codes.  Only used when neither ``languages`` nor
        ``locales`` are provided.
    :type detect_languages_function: function

    :return: a :class:`datetime <datetime.datetime>` representing the parsed
        date if successful, else None
    :rtype: :class:`datetime <datetime.datetime>`.

    :raises:
        ``ValueError``: Unknown Language, ``TypeError``: Languages argument
        must be a list, ``SettingValidationError``: A provided setting is
        not valid.
    """
    # Reuse the shared parser unless any per-call customization forces
    # building a dedicated one.
    parser = _default_parser
    if languages or locales or region or detect_languages_function or not settings._default:
        parser = DateDataParser(
            languages=languages, locales=locales, region=region,
            settings=settings,
            detect_languages_function=detect_languages_function)

    data = parser.get_date_data(date_string, date_formats)
    return data['date_obj'] if data else None
bokeh/bokeh | examples/advanced/extensions/putting_together.py | 1 | 2758 | from bokeh.core.properties import Instance, Required, String
from bokeh.io import show
from bokeh.layouts import column
from bokeh.models import Slider, UIElement
from bokeh.util.compiler import TypeScript
CODE = """
import {UIElement, UIElementView} from "models/ui/ui_element"
import {Slider} from "models/widgets/slider"
import {div} from "core/dom"
import * as p from "core/properties"
export class CustomView extends UIElementView {
model: Custom
private content_el: HTMLElement
override connect_signals(): void {
super.connect_signals()
this.connect(this.model.slider.change, () => this._update_text())
}
override render(): void {
// BokehJS views create <div> elements by default. These are accessible
// as ``this.el``. Many Bokeh views ignore the default <div> and
// instead do things like draw to the HTML canvas. In this case though,
// the program changes the contents of the <div> based on the current
// slider value.
super.render()
this.content_el = div({style: {
textAlign: "center",
fontSize: "1.2em",
padding: "2px",
color: "#b88d8e",
backgroundColor: "#2a3153",
}})
this.shadow_el.appendChild(this.content_el)
this._update_text()
}
private _update_text(): void {
this.content_el.textContent = `${this.model.text}: ${this.model.slider.value}`
}
}
export namespace Custom {
export type Attrs = p.AttrsOf<Props>
export type Props = UIElement.Props & {
text: p.Property<string>
slider: p.Property<Slider>
}
}
export interface Custom extends Custom.Attrs {}
export class Custom extends UIElement {
properties: Custom.Props
__view_type__: CustomView
constructor(attrs?: Partial<Custom.Attrs>) {
super(attrs)
}
static {
// If there is an associated view, this is typically boilerplate.
this.prototype.default_view = CustomView
// The this.define() block adds corresponding "properties" to the JS
// model. These should normally line up 1-1 with the Python model
// class. Most property types have counterparts. For example,
// bokeh.core.properties.String will correspond to ``String`` in the
// JS implementation. Where JS lacks a given type, you can use
// ``p.Any`` as a "wildcard" property type.
this.define<Custom.Props>(({String, Ref}) => ({
text: [ String, "Custom text" ],
slider: [ Ref(Slider) ],
}))
}
}
"""
class Custom(UIElement):
    """Python side of the custom extension model.

    Property names and types here must mirror the ``Custom`` model declared
    in the accompanying TypeScript implementation.
    """
    __implementation__ = TypeScript(CODE)

    # Text label shown in front of the slider's current value.
    text = String(default="Custom text")

    # The slider whose value this element displays; must be supplied.
    slider = Required(Instance(Slider))
slider = Slider(start=0, end=10, step=0.1, value=0, title="value")
custom = Custom(text="Special Slider Display", slider=slider)
layout = column(slider, custom)
show(layout)
| bsd-3-clause | 326709927df1625e1cf76d91feb74847 | 26.858586 | 82 | 0.680928 | 3.657825 | false | false | false | false |
bokeh/bokeh | src/bokeh/models/expressions.py | 1 | 9495 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Represent array expressions to be computed on the client (browser) side
by BokehJS.
Expression models are useful as ``DataSpec`` values when it is desired that
the array values be computed in the browser:
.. code-block:: python
p.circle(x={'expr': some_expression}, ...)
or using the ``expr`` convenience function:
.. code-block:: python
from bokeh.core.properties import expr
p.circle(x=expr(some_expression), ...)
In this case, the values of the ``x`` coordinates will be computed in the
browser by the JavaScript implementation of ``some_expression`` using a
``ColumnDataSource`` as input.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from ..core.enums import Direction
from ..core.has_props import abstract
from ..core.properties import (
AngleSpec,
AnyRef,
Bool,
Dict,
Enum,
Float,
Instance,
Nullable,
NumberSpec,
Required,
Seq,
String,
field,
)
from ..model import Model
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'CumSum',
'CustomJSExpr',
'Expression',
'PolarTransform',
'Stack',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
@abstract
class Expression(Model):
    ''' Base class for ``Expression`` models that represent a computation
    to be carried out on the client-side.

    JavaScript implementations should implement the following methods:

    .. code-block

        v_compute(source: ColumnarDataSource): Arrayable {
            # compute and return array of values
        }

    .. note::
        If you wish for results to be cached per source and updated only if
        the source changes, implement ``_v_compute: (source)`` instead.

    '''

    # No Python-side computation happens here; subclasses only declare
    # properties that configure the BokehJS implementation.

    # explicit __init__ to support Init signatures
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
class CustomJSExpr(Expression):
    ''' Evaluate a JavaScript function/generator.

    .. warning::
        The explicit purpose of this Bokeh Model is to embed *raw JavaScript
        code* for a browser to execute. If any part of the code is derived
        from untrusted user inputs, then you must take appropriate care to
        sanitize the user input prior to passing to Bokeh.

    '''

    # explicit __init__ to support Init signatures
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    args = Dict(String, AnyRef, help="""
    A mapping of names to Python objects. In particular those can be bokeh's models.
    These objects are made available to the callback's code snippet as the values of
    named parameters to the callback. There is no need to manually include the data
    source of the associated glyph renderer, as it is available within the scope of
    the code via `this` keyword (e.g. `this.data` will give access to raw data).
    """)

    code = String(default="", help="""
    A snippet of JavaScript code to execute in the browser. The code is made into
    the body of a generator function, and all of of the named objects in ``args``
    are available as parameters that the code can use. One can either return an
    array-like object (array, typed array, nd-array), an iterable (which will
    be converted to an array) or a scalar value (which will be converted into
    an array of an appropriate length), or alternatively yield values that will
    be collected into an array.
    """)
class CumSum(Expression):
    ''' An expression for generating arrays by cumulatively summing a single
    column from a ``ColumnDataSource``.

    '''

    # explicit __init__ to support Init signatures
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    # NOTE: this class attribute intentionally shadows the imported
    # ``field`` spec helper within this class body.
    field = Required(String, help="""
    The name of a ``ColumnDataSource`` column to cumulatively sum for new values.
    """)

    include_zero = Bool(default=False, help="""
    Whether to include zero at the start of the result. Note that the length
    of the result is always the same as the input column. Therefore if this
    property is True, then the last value of the column will not be included
    in the sum.

    .. code-block:: python

        source = ColumnDataSource(data=dict(foo=[1, 2, 3, 4]))

        CumSum(field='foo')
        # -> [1, 3, 6, 10]

        CumSum(field='foo', include_zero=True)
        # -> [0, 1, 3, 6]

    """)
class Stack(Expression):
    ''' An expression for generating arrays by summing different columns from
    a ``ColumnDataSource``.

    This expression is useful for implementing stacked bar charts at a low
    level.

    '''

    # explicit __init__ to support Init signatures
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    fields = Seq(String, default=[], help="""
    A sequence of fields from a ``ColumnDataSource`` to sum (elementwise). For
    example:

    .. code-block:: python

        Stack(fields=['sales', 'marketing'])

    Will compute an array of values (in the browser) by adding the elements
    of the ``'sales'`` and ``'marketing'`` columns of a data source.
    """)
@abstract
class ScalarExpression(Model):
    """ Base class for for scalar expressions.

    Unlike ``Expression``, implementations reduce a data source column to a
    single scalar value on the client side.
    """

    # explicit __init__ to support Init signatures
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
class Minimum(ScalarExpression):
    """ Computes minimum value of a data source's column. """

    # explicit __init__ to support Init signatures
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    # Name of the column to reduce.
    field = Required(String)

    # Starting value for the reduction; +inf so any data value is smaller.
    initial = Nullable(Float, default=float("+inf"))
class Maximum(ScalarExpression):
    """ Computes maximum value of a data source's column. """

    # explicit __init__ to support Init signatures
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    # Name of the column to reduce.
    field = Required(String)

    # Starting value for the reduction; -inf so any data value is larger.
    initial = Nullable(Float, default=float("-inf"))
@abstract
class CoordinateTransform(Expression):
    """ Base class for coordinate transforms. """

    # explicit __init__ to support Init signatures
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    @property
    def x(self):
        # Convenience accessor for the x component of this transform's output.
        return XComponent(transform=self)

    @property
    def y(self):
        # Convenience accessor for the y component of this transform's output.
        return YComponent(transform=self)
class PolarTransform(CoordinateTransform):
    """ Transform from polar to cartesian coordinates.

    The cartesian components are available via the inherited ``.x`` and
    ``.y`` accessors.
    """

    # explicit __init__ to support Init signatures
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    radius = NumberSpec(default=field("radius"), help="""
    The radial coordinate (i.e. the distance from the origin).

    Negative radius is allowed, which is equivalent to using positive radius
    and changing ``direction`` to the opposite value.
    """)

    angle = AngleSpec(default=field("angle"), help="""
    The angular coordinate (i.e. the angle from the reference axis).
    """)

    direction = Enum(Direction, default=Direction.anticlock, help="""
    Whether ``angle`` measures clockwise or anti-clockwise from the reference axis.
    """)
@abstract
class XYComponent(Expression):
    """ Base class for bi-variate expressions. """

    # explicit __init__ to support Init signatures
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    # The coordinate transform whose output this component selects from.
    transform = Instance(CoordinateTransform)
class XComponent(XYComponent):
    """ X-component of a coordinate system transform to cartesian coordinates. """

    # explicit __init__ to support Init signatures
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
class YComponent(XYComponent):
    """ Y-component of a coordinate system transform to cartesian coordinates. """

    # explicit __init__ to support Init signatures
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause | 2bfa8384fb5ae3fa5ce2bbb0117aa545 | 30.233553 | 84 | 0.566087 | 4.754632 | false | false | false | false |
bokeh/bokeh | src/bokeh/core/property/either.py | 1 | 4204 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
""" Provide the Either property.
The Either property is used to construct properties that an accept any of
multiple possible types.
"""
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from typing import Any, Type, TypeVar
# Bokeh imports
from ...util.strings import nice_join
from ._sphinx import property_link, register_type_link, type_link
from .bases import (
Init,
ParameterizedProperty,
Property,
TypeOrInst,
)
from .singletons import Intrinsic
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'Either',
)
T = TypeVar("T")
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Either(ParameterizedProperty[Any]):
    """ Accept values according to a sequence of other property types.

        Example:

        .. code-block:: python

            >>> class EitherModel(HasProps):
            ...     prop = Either(Bool, Int, Auto)
            ...

            >>> m = EitherModel()

            >>> m.prop = True

            >>> m.prop = 10

            >>> m.prop = "auto"

            >>> m.prop = 10.3      # ValueError !!

            >>> m.prop = "foo"     # ValueError !!

    """

    def __init__(self, type_param0: TypeOrInst[Property[Any]], *type_params: TypeOrInst[Property[Any]],
            default: Init[T] = Intrinsic, help: str | None = None) -> None:
        super().__init__(type_param0, *type_params, default=default, help=help)
        # Aggregate the accepted alternatives of every sub-type.
        for tp in self.type_params:
            self.alternatives.extend(tp.alternatives)

    def transform(self, value: Any) -> Any:
        # Return the first sub-type's successful transform of ``value``.
        for param in self.type_params:
            try:
                return param.transform(value)
            except ValueError:
                pass

        raise ValueError(f"Could not transform {value!r}")

    def validate(self, value: Any, detail: bool = True) -> None:
        # A value is valid if at least one sub-type accepts it.
        super().validate(value, detail)

        if any(param.is_valid(value) for param in self.type_params):
            return

        msg = "" if not detail else f"expected an element of either {nice_join([ str(param) for param in self.type_params ])}, got {value!r}"
        raise ValueError(msg)

    def wrap(self, value):
        # Give every sub-type a chance to wrap the value (e.g. containers).
        for tp in self.type_params:
            value = tp.wrap(value)
        return value

    def replace(self, old: Type[Property[Any]], new: Property[Any]) -> Property[Any]:
        if self.__class__ == old:
            return new
        else:
            # NOTE(review): the rebuilt Either does not carry over this
            # property's ``default``/``help`` metadata -- confirm intended.
            params = [ type_param.replace(old, new) for type_param in self.type_params ]
            return Either(*params)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
@register_type_link(Either)
def _sphinx_type_link(obj: Either[Any]):
    """Render an ``Either`` property as a Sphinx link with its sub-types."""
    links = (type_link(param) for param in obj.type_params)
    return f"{property_link(obj)}({', '.join(links)})"
| bsd-3-clause | 31cb43af092f05c9f1607c1b45e41df2 | 31.338462 | 141 | 0.423406 | 5.17734 | false | false | false | false |
bokeh/bokeh | src/bokeh/models/textures.py | 1 | 2827 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
'''
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from ..core.enums import TextureRepetition
from ..core.has_props import abstract
from ..core.properties import Enum, Required, String
from ..model import Model
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# Public API of this module.
__all__ = (
    'CanvasTexture',
    'ImageURLTexture',
    'Texture',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
@abstract
class Texture(Model):
    ''' Base class for ``Texture`` models that represent fill patterns.
    '''

    # explicit __init__ to support Init signatures
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    # Repetition mode applied when the texture is used as a fill pattern
    # (values come from the TextureRepetition enum).
    repetition = Enum(TextureRepetition, default="repeat", help="""
    """)
class CanvasTexture(Texture):
    ''' A texture defined by a snippet of JavaScript code executed in the
    browser (see ``code``).
    '''

    # explicit __init__ to support Init signatures
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    code = Required(String, help="""
    A snippet of JavaScript code to execute in the browser.
    """)
class ImageURLTexture(Texture):
    ''' A texture based on a drawable resource (image, video, etc.) loaded
    from a URL (see ``url``).
    '''

    # explicit __init__ to support Init signatures
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    url = Required(String, help="""
    A URL to a drawable resource like image, video, etc.
    """)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause | 54addef839f84941890521cd0c64bf87 | 28.757895 | 78 | 0.348426 | 6.410431 | false | false | false | false |
bokeh/bokeh | tests/support/plugins/file_server.py | 1 | 5929 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Define a simple web server for testing purpose.
Used for serves the testing html pages that are needed by the webdriver unit
tests.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import os
import threading
from http.server import BaseHTTPRequestHandler, HTTPServer
from pathlib import Path
from typing import Any
from urllib.request import URLopener
# External imports
import pytest
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# Default bind address for the test web server.
DEFAULT_HOST = "127.0.0.1"
# Preferred port; SimpleWebServer probes upward from here if it is taken.
DEFAULT_PORT = 8000
HTML_ROOT = Path(__file__).parent.parent.parent # ../..
# Surfaced in the import-time error message below to help debug misconfiguration.
WEBDRIVER = os.environ.get('WEBDRIVER', "<undefined>")

__all__ = (
    'file_server',
    'HtmlOnlyHandler',
    'SimpleWebServer',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class HtmlOnlyHandler(BaseHTTPRequestHandler):
    """Request handler that serves files under HTML_ROOT as ``text/html``."""

    def do_GET(self) -> None:
        """Handle a GET request by streaming the target file back."""
        # Drop any query string; depending on Python version the leading
        # "/" may be present or not, so strip a single one when it is.
        rel = self.path.split("?")[0]
        if rel.startswith("/"):  # TODO: .removeprefix("/")
            rel = rel[1:]
        try:
            with open(HTML_ROOT / rel, mode="rb") as fh: # lgtm [py/path-injection]
                self.send_response(200)
                self.send_header("Content-type", "text/html")
                self.end_headers()
                self.wfile.write(fh.read())
        except OSError:
            self.send_error(404, f"File Not Found: {rel}")

    def log_message(self, format: str, *args: Any) -> None:
        """Override default to avoid trashing stderr"""
class SimpleWebServer:
    """A very basic web server.

    The server loop runs on a background thread (see ``start``) and serves
    requests with ``HtmlOnlyHandler``.
    """

    def __init__(self, host: str = DEFAULT_HOST, port: int = DEFAULT_PORT) -> None:
        # Bind to the first free port at or above ``port``.
        self.stop_serving = False
        while True:
            try:
                self.server = HTTPServer((host, port), HtmlOnlyHandler)
                self.host = host
                self.port = port
                break
            except OSError:
                log.debug(f"port {port} is in use, trying to next one")
                port += 1
        self.thread = threading.Thread(target=self._run_web_server)

    def _run_web_server(self) -> None:
        """Runs the server loop."""
        log.debug("web server started")
        while not self.stop_serving:
            self.server.handle_request()
        self.server.server_close()

    def start(self) -> None:
        """Starts the server."""
        self.thread.start()

    def stop(self) -> None:
        """Stops the server."""
        self.stop_serving = True
        try:
            # This is to force stop the server loop:
            # handle_request() blocks until a request arrives, so poke the
            # server once to let the loop observe ``stop_serving``.
            # NOTE(review): URLopener is deprecated in the stdlib — works,
            # but urllib.request.urlopen would be the modern call.
            URLopener().open(f"http://{self.host}:{self.port}")
        except OSError:
            pass
        log.info("Shutting down the webserver")
        self.thread.join()

    def where_is(self, path: Path) -> str:
        """Return the URL at which *path* (a file under HTML_ROOT) is served."""
        path = str(path.relative_to(HTML_ROOT)).replace('\\', '/') # Windows-proof
        return f"http://{self.host}:{self.port}/{path}"
@pytest.fixture(scope='session')
def file_server(request: pytest.FixtureRequest) -> SimpleWebServer:
    """Session-wide web server fixture serving the repository's test pages."""
    web_server = SimpleWebServer()
    web_server.start()
    # make sure the background server thread is shut down at session end
    request.addfinalizer(web_server.stop)
    return web_server
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
_html_root_error_message = f"Can't find 'common_web' directory, try setting WEBDRIVER environment variable WEBDRIVER: {WEBDRIVER} HTML_ROOT: {HTML_ROOT}"
if not os.path.isdir(HTML_ROOT):
    log.error(_html_root_error_message)
    # ``assert`` is silently stripped under ``python -O``; raise instead so a
    # misconfigured HTML_ROOT always fails loudly at import time.
    raise RuntimeError(_html_root_error_message)
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Taken from
# https://github.com/SeleniumHQ/selenium/blob/52e9d6407248bce5de2b6a73103a50bb0e670c1f/py/test/selenium/webdriver/common/webserver.py
# with small modifications
| bsd-3-clause | 5aaa997d3eefcb7e2ef20daa80b56ed2 | 34.502994 | 153 | 0.511385 | 4.716786 | false | false | false | false |
bokeh/bokeh | src/bokeh/models/annotations/html/labels.py | 1 | 10444 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
'''
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from ....core.enums import (
AngleUnits,
CoordinateUnits,
FontStyle,
TextAlign,
VerticalAlign,
)
from ....core.properties import (
Alpha,
Angle,
AngleSpec,
Color,
CoordinateLike,
Enum,
Float,
Include,
Nullable,
NullStringSpec,
NumberSpec,
Override,
Required,
String,
field,
)
from ....core.property_mixins import (
FillProps,
LineProps,
ScalarFillProps,
ScalarLineProps,
ScalarTextProps,
TextProps,
)
from ..annotation import DataAnnotation
from .html_annotation import HTMLAnnotation
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# Public API of this module.
__all__ = (
    "HTMLLabel",
    "HTMLLabelSet",
    "HTMLTitle",
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class HTMLLabel(HTMLAnnotation):
    ''' Render a single HTML label as an annotation.

    ``Label`` will render a single text label at given ``x`` and ``y``
    coordinates, which can be in either screen (pixel) space, or data (axis
    range) space.

    The label can also be configured with a screen space offset from ``x`` and
    ``y``, by using the ``x_offset`` and ``y_offset`` properties.

    Additionally, the label can be rotated with the ``angle`` property.

    There are also standard text, fill, and line properties to control the
    appearance of the text, its background, as well as the rectangular bounding
    box border.

    See :ref:`ug_basic_annotations_labels` for information on plotting labels.
    '''

    # explicit __init__ to support Init signatures
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    x = Required(CoordinateLike, help="""
    The x-coordinate in screen coordinates to locate the text anchors.
    """)

    x_units = Enum(CoordinateUnits, default='data', help="""
    The unit type for the x attribute. Interpreted as |data units| by
    default.
    """)

    y = Required(CoordinateLike, help="""
    The y-coordinate in screen coordinates to locate the text anchors.
    """)

    y_units = Enum(CoordinateUnits, default='data', help="""
    The unit type for the y attribute. Interpreted as |data units| by
    default.
    """)

    text = String(default="", help="""
    The text value to render.
    """)

    angle = Angle(default=0, help="""
    The angle to rotate the text, as measured from the horizontal.
    """)

    # NOTE: unlike most user-facing angle settings, the default unit is radians
    angle_units = Enum(AngleUnits, default='rad', help="""
    Acceptable values for units are ``"rad"`` and ``"deg"``
    """)

    x_offset = Float(default=0, help="""
    Offset value to apply to the x-coordinate.

    This is useful, for instance, if it is desired to "float" text a fixed
    distance in |screen units| from a given data position.
    """)

    y_offset = Float(default=0, help="""
    Offset value to apply to the y-coordinate.

    This is useful, for instance, if it is desired to "float" text a fixed
    distance in |screen units| from a given data position.
    """)

    # scalar (non-vectorized) visual property groups, injected via Include
    text_props = Include(ScalarTextProps, help="""
    The {prop} values for the text.
    """)

    background_props = Include(ScalarFillProps, prefix="background", help="""
    The {prop} values for the text bounding box.
    """)

    background_fill_color = Override(default=None)

    border_props = Include(ScalarLineProps, prefix="border", help="""
    The {prop} values for the text bounding box.
    """)

    border_line_color = Override(default=None)
class HTMLLabelSet(HTMLAnnotation, DataAnnotation):
    ''' Render multiple text labels as annotations.

    ``LabelSet`` will render multiple text labels at given ``x`` and ``y``
    coordinates, which can be in either screen (pixel) space, or data (axis
    range) space. In this case (as opposed to the single ``Label`` model),
    ``x`` and ``y`` can also be the name of a column from a
    :class:`~bokeh.models.sources.ColumnDataSource`, in which case the labels
    will be "vectorized" using coordinate values from the specified columns.

    The label can also be configured with a screen space offset from ``x`` and
    ``y``, by using the ``x_offset`` and ``y_offset`` properties. These offsets
    may be vectorized by giving the name of a data source column.

    Additionally, the label can be rotated with the ``angle`` property (which
    may also be a column name.)

    There are also standard text, fill, and line properties to control the
    appearance of the text, its background, as well as the rectangular bounding
    box border.

    The data source is provided by setting the ``source`` property.
    '''

    # explicit __init__ to support Init signatures
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    x = NumberSpec(default=field("x"), help="""
    The x-coordinates to locate the text anchors.
    """)

    x_units = Enum(CoordinateUnits, default='data', help="""
    The unit type for the ``xs`` attribute. Interpreted as |data units| by
    default.
    """)

    y = NumberSpec(default=field("y"), help="""
    The y-coordinates to locate the text anchors.
    """)

    y_units = Enum(CoordinateUnits, default='data', help="""
    The unit type for the ``ys`` attribute. Interpreted as |data units| by
    default.
    """)

    text = NullStringSpec(default=field("text"), help="""
    The text values to render.
    """)

    angle = AngleSpec(default=0, help="""
    The angles to rotate the text, as measured from the horizontal.
    """)

    x_offset = NumberSpec(default=0, help="""
    Offset values to apply to the x-coordinates.

    This is useful, for instance, if it is desired to "float" text a fixed
    distance in |screen units| from a given data position.
    """)

    y_offset = NumberSpec(default=0, help="""
    Offset values to apply to the y-coordinates.

    This is useful, for instance, if it is desired to "float" text a fixed
    distance in |screen units| from a given data position.
    """)

    # vectorizable visual property groups (may reference data source columns)
    text_props = Include(TextProps, help="""
    The {prop} values for the text.
    """)

    background_props = Include(FillProps, prefix="background", help="""
    The {prop} values for the text bounding box.
    """)

    background_fill_color = Override(default=None)

    border_props = Include(LineProps, prefix="border", help="""
    The {prop} values for the text bounding box.
    """)

    border_line_color = Override(default=None)
class HTMLTitle(HTMLAnnotation):
    ''' Render a single title box as an annotation.

    See :ref:`ug_basic_annotations_titles` for information on plotting titles.
    '''

    # explicit __init__ to support Init signatures
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    text = String(default="", help="""
    The text value to render.
    """)

    vertical_align = Enum(VerticalAlign, default='bottom', help="""
    Alignment of the text in its enclosing space, *across* the direction of the text.
    """)

    align = Enum(TextAlign, default='left', help="""
    Alignment of the text in its enclosing space, *along* the direction of the text.
    """)

    text_line_height = Float(default=1.0, help="""
    How much additional space should be allocated for the title. The value is provided
    as a number, but should be treated as a percentage of font size. The default is
    100%, which means no additional space will be used.
    """)

    offset = Float(default=0, help="""
    Offset the text by a number of pixels (can be positive or negative). Shifts the text in
    different directions based on the location of the title:

    * above: shifts title right
    * right: shifts title down
    * below: shifts title right
    * left: shifts title up
    """)

    standoff = Float(default=10, help="""
    """)

    text_font = String(default="helvetica", help="""
    Name of a font to use for rendering text, e.g., ``'times'``,
    ``'helvetica'``.
    """)

    # CSS font size string, e.g. "13px" (this property has no help text)
    text_font_size = String(default="13px")

    text_font_style = Enum(FontStyle, default="bold", help="""
    A style to use for rendering text.

    Acceptable values are:

    - ``'normal'`` normal text
    - ``'italic'`` *italic text*
    - ``'bold'`` **bold text**
    """)

    text_color = Color(default="#444444", help="""
    A color to use to fill text with.
    """)

    # NOTE(review): help text appears copy-pasted from ``text_color`` — this
    # property is the outline color, not the fill color.
    text_outline_color = Nullable(Color, default=None, help="""
    A color to use to fill text with.
    """)

    text_alpha = Alpha(help="""
    An alpha value to use to fill text with.
    """)

    background_props = Include(ScalarFillProps, prefix="background", help="""
    The {prop} values for the text bounding box.
    """)

    background_fill_color = Override(default=None)

    border_props = Include(ScalarLineProps, prefix="border", help="""
    The {prop} values for the text bounding box.
    """)

    border_line_color = Override(default=None)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause | 11492fda8138c30cb3bee2f651861c53 | 29.899408 | 91 | 0.569514 | 4.54878 | false | false | false | false |
bokeh/bokeh | src/bokeh/plotting/_plot.py | 1 | 6594 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from collections.abc import Sequence
from typing import (
TYPE_CHECKING,
Any,
Literal,
Type,
)
# External imports
import numpy as np
# Bokeh imports
from ..core.properties import Datetime
from ..core.property.singletons import Intrinsic
from ..models import (
Axis,
CategoricalAxis,
CategoricalScale,
ContinuousTicker,
DataRange1d,
DatetimeAxis,
FactorRange,
Grid,
LinearAxis,
LinearScale,
LogAxis,
LogScale,
MercatorAxis,
Range,
Range1d,
Scale,
)
if TYPE_CHECKING:
import pandas as pd
from pandas.core.groupby import GroupBy
from typing_extensions import TypeAlias
from ..models.plots import Plot
from ..models.text import BaseText
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# Public API of this module.
__all__ = (
    'get_range',
    'get_scale',
    'process_axis_and_grid',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
def get_range(range_input: Range | tuple[float, float] | Sequence[str] | pd.Series[Any] | GroupBy | None) -> Range:
    """Coerce flexible user range input into a concrete ``Range`` model.

    Accepts ``None`` (auto-ranging), an existing ``Range``, a pandas
    Series/GroupBy, a sequence of factors, or a ``(start, end)`` pair.
    """
    import pandas as pd
    from pandas.core.groupby import GroupBy

    if range_input is None:
        return DataRange1d()
    if isinstance(range_input, GroupBy):
        return FactorRange(factors=sorted(list(range_input.groups.keys())))
    if isinstance(range_input, Range):
        return range_input
    if isinstance(range_input, pd.Series):
        range_input = range_input.values
    if isinstance(range_input, (Sequence, np.ndarray)):
        # all-string input designates a categorical (factor) range
        if all(isinstance(item, str) for item in range_input):
            return FactorRange(factors=list(range_input))
        if len(range_input) == 2:
            try:
                start, end = (Intrinsic if bound is None else bound for bound in range_input)
                return Range1d(start=start, end=end)
            except ValueError: # @mattpap suggests ValidationError instead
                pass
    raise ValueError(f"Unrecognized range input: '{range_input}'")
# Recognized axis type names (see _get_axis_class below).
AxisType: TypeAlias = Literal["linear", "log", "datetime", "mercator", "auto"]
# Sides of the central frame an axis may be attached to.
AxisLocation: TypeAlias = Literal["above", "below", "left", "right"]
# Plot dimension index: 0 for x, 1 for y.
Dim: TypeAlias = Literal[0, 1]
def get_scale(range_input: Range, axis_type: AxisType | None) -> Scale:
    """Map a range and an axis type name to an appropriate ``Scale``."""
    if isinstance(range_input, FactorRange):
        return CategoricalScale()
    if isinstance(range_input, (DataRange1d, Range1d)):
        if axis_type == "log":
            return LogScale()
        if axis_type in ["linear", "datetime", "mercator", "auto", None]:
            return LinearScale()
    raise ValueError(f"Unable to determine proper scale for: '{range_input}'")
def process_axis_and_grid(plot: Plot, axis_type: AxisType | None, axis_location: AxisLocation | None,
        minor_ticks: int | Literal["auto"] | None, axis_label: str | BaseText | None, rng: Range, dim: Dim) -> None:
    """Create an axis and matching grid for one dimension of *plot*.

    Does nothing when *axis_type* resolves to no axis class.
    """
    axis_cls, axis_kwargs = _get_axis_class(axis_type, rng, dim)
    if not axis_cls:
        return
    axis = axis_cls(**axis_kwargs)
    # only continuous tickers carry a minor tick count to configure
    if isinstance(axis.ticker, ContinuousTicker):
        axis.ticker.num_minor_ticks = _get_num_minor_ticks(axis_cls, minor_ticks)
    if axis_label:
        axis.axis_label = axis_label
    plot.add_layout(Grid(dimension=dim, axis=axis), "center")
    if axis_location is not None:
        getattr(plot, axis_location).append(axis)
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _get_axis_class(axis_type: AxisType | None, range_input: Range, dim: Dim) -> tuple[Type[Axis] | None, Any]:
    """Map an ``axis_type`` name to an ``Axis`` subclass and constructor kwargs.

    Returns ``(None, {})`` when no axis should be created. For ``"auto"`` the
    class is inferred from the range: categorical for a ``FactorRange``,
    datetime for ``Range1d`` bounds that validate as datetimes, and linear
    otherwise. Raises ``ValueError`` for unrecognized names.
    """
    if axis_type is None:
        return None, {}
    elif axis_type == "linear":
        return LinearAxis, {}
    elif axis_type == "log":
        return LogAxis, {}
    elif axis_type == "datetime":
        return DatetimeAxis, {}
    elif axis_type == "mercator":
        return MercatorAxis, dict(dimension='lon' if dim == 0 else 'lat')
    elif axis_type == "auto":
        if isinstance(range_input, FactorRange):
            return CategoricalAxis, {}
        elif isinstance(range_input, Range1d):
            try:
                value = range_input.start
                # Datetime accepts ints/floats as timestamps, but we don't want
                # to assume that implies a datetime axis
                if Datetime.is_timestamp(value):
                    return LinearAxis, {}
                Datetime.validate(Datetime(), value)
                return DatetimeAxis, {}
            except ValueError:
                pass
        return LinearAxis, {}
    else:
        # !r already quotes the value; the previous "'{axis_type!r}'" form
        # produced doubled quotes like: Unrecognized axis_type: ''foo''
        raise ValueError(f"Unrecognized axis_type: {axis_type!r}")
def _get_num_minor_ticks(axis_class: Type[Axis], num_minor_ticks: int | Literal["auto"] | None) -> int:
if isinstance(num_minor_ticks, int):
if num_minor_ticks <= 1:
raise ValueError("num_minor_ticks must be > 1")
return num_minor_ticks
if num_minor_ticks is None:
return 0
if num_minor_ticks == 'auto':
if axis_class is LogAxis:
return 10
return 5
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause | bccfcac91fd603f7e3e3703b37d89bd3 | 34.262032 | 121 | 0.50364 | 4.582349 | false | false | false | false |
bokeh/bokeh | examples/topics/pie/donut.py | 1 | 1735 | ''' A donut chart populated with browser market share percentages. This example
demonstrates the low-level |bokeh.models| API.
.. bokeh-example-metadata::
:apis: bokeh.models.AnnularWedge, bokeh.models.Legend
:refs: :ref:`ug_topics_pie`
:keywords: pandas, donut, wedge
'''
from math import pi
from bokeh.io import show
from bokeh.models import (AnnularWedge, ColumnDataSource,
Legend, LegendItem, Plot, Range1d)
from bokeh.sampledata.browsers import browsers_nov_2013 as df
# a fixed, square data space centered on the origin of the donut
x_range = Range1d(start=-2, end=2)
y_range = Range1d(start=-2, end=2)

plot = Plot(x_range=x_range, y_range=y_range)
plot.title.text = "Web browser market share (November 2013)"
plot.toolbar_location = None

colors = {
    "Chrome": "seagreen",
    "Firefox": "tomato",
    "Safari": "orchid",
    "Opera": "firebrick",
    "IE": "skyblue",
    "Other": "lightgray",
}

# total share per browser; browsers under 1% are folded into "Other"
aggregated = df.groupby("Browser").sum(numeric_only=True)
selected = aggregated[aggregated.Share >= 1].copy()
selected.loc["Other"] = aggregated[aggregated.Share < 1].sum()

browsers = selected.index.tolist()
# cumulative share converted to wedge end angles (radians)
end_angles = selected.Share.map(lambda share: 2*pi*(share/100)).cumsum().tolist()

browsers_source = ColumnDataSource(dict(
    start=[0] + end_angles[:-1],
    end=end_angles,
    colors=[colors[browser] for browser in browsers],
))

wedge = AnnularWedge(x=0, y=0, inner_radius=0.9, outer_radius=1.8,
                     start_angle="start", end_angle="end",
                     line_color="white", line_width=3, fill_color="colors")
renderer = plot.add_glyph(browsers_source, wedge)

legend = Legend(location="center")
for index, browser in enumerate(colors):
    legend.items.append(LegendItem(label=browser, renderers=[renderer], index=index))
plot.add_layout(legend, "center")

show(plot)
bokeh/bokeh | src/bokeh/models/ui/dialogs.py | 1 | 2548 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
"""
"""
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from ...core.properties import (
Bool,
Either,
Instance,
List,
Nullable,
Required,
String,
)
from ..dom import DOMNode
from .ui_element import UIElement
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# Public API of this module.
__all__ = (
    "Dialog",
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
# Placeholder alias until a dedicated dialog button model exists.
Button = UIElement # TODO

class Dialog(UIElement):
    """ A dialog box UI element. """

    # explicit __init__ to support Init signatures
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    # Optional title: plain text or a DOM node.
    title = Nullable(Either(String, Instance(DOMNode)), default=None, help="""
    """)

    # The dialog's main content: text, a DOM node, or any UI element.
    content = Required(Either(String, Instance(DOMNode), Instance(UIElement)), help="""
    """)

    buttons = List(Instance(Button), default=[], help="""
    """)

    # Whether the dialog is modal.
    modal = Bool(default=False, help="""
    """)

    closable = Bool(default=True, help="""
    Whether to show close (x) button in the title bar.
    """)

    draggable = Bool(default=True, help="""
    """)
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause | 4499c548e3436081400f05e00ba0f933 | 28.287356 | 87 | 0.311617 | 6.601036 | false | false | false | false |
bokeh/bokeh | src/bokeh/util/deprecation.py | 1 | 3286 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import warnings # lgtm [py/import-and-import-from]
from typing import TYPE_CHECKING, Tuple, overload
# Bokeh imports
from .warnings import BokehDeprecationWarning
if TYPE_CHECKING:
from typing_extensions import TypeAlias
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# Public API of this module.
__all__ = (
    'deprecated',
    'warn',
)

# A (major, minor, patch) version triple, e.g. (3, 0, 0).
Version: TypeAlias = Tuple[int, int, int]
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def warn(message: str, stacklevel: int = 2) -> None:
    """Emit *message* as a BokehDeprecationWarning, attributed to the caller's caller."""
    warnings.warn(message, category=BokehDeprecationWarning, stacklevel=stacklevel)
@overload
def deprecated(since_or_msg: Version, old: str, new: str, extra: str | None = None) -> None:
    ...
@overload
def deprecated(since_or_msg: str) -> None:
    ...

def deprecated(since_or_msg: Version | str,
        old: str | None = None, new: str | None = None, extra: str | None = None) -> None:
    """ Issue a nicely formatted deprecation warning.

    Either call with a full ``(version, old, new[, extra])`` description, or
    with a single pre-formatted message string.
    """
    # message-only form: no other arguments may be given
    if not isinstance(since_or_msg, tuple):
        if not (old is None and new is None and extra is None):
            raise ValueError("deprecated(message) signature doesn't allow extra arguments")
        warn(since_or_msg)
        return

    if old is None or new is None:
        raise ValueError("deprecated entity and a replacement are required")

    well_formed = len(since_or_msg) == 3 and all(isinstance(part, int) and part >= 0 for part in since_or_msg)
    if not well_formed:
        raise ValueError(f"invalid version tuple: {since_or_msg!r}")

    major, minor, patch = since_or_msg
    since = f"{major}.{minor}.{patch}"
    message = f"{old} was deprecated in Bokeh {since} and will be removed, use {new} instead."
    if extra is not None:
        message = f"{message} {extra.strip()}"
    warn(message)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause | e49a440d8ada0d06b52e7abc4de2d813 | 35.511111 | 98 | 0.406573 | 5.588435 | false | false | false | false |
bokeh/bokeh | src/bokeh/sampledata/autompg.py | 1 | 3038 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' A version of the Auto MPG data set.
License: `CC0`_
Sourced from https://archive.ics.uci.edu/ml/datasets/auto+mpg
This module contains two pandas Dataframes: ``autompg`` and ``autompg_clean``.
The "clean" version has cleaned up the ``"mfr"`` and ``"origin"`` fields.
.. rubric:: ``autompg``
:bokeh-dataframe:`bokeh.sampledata.autompg.autompg`
.. rubric:: ``autompg_clean``
:bokeh-dataframe:`bokeh.sampledata.autompg.autompg_clean`
.. bokeh-sampledata-xref:: autompg
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from typing import TYPE_CHECKING
# Bokeh imports
from ..util.sampledata import package_csv
if TYPE_CHECKING:
import pandas as pd
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'autompg',
'autompg_clean',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _clean_data(df: pd.DataFrame) -> pd.DataFrame:
'''
'''
df = df.copy()
df['mfr'] = [x.split()[0] for x in df.name]
df.loc[df.mfr == 'chevy', 'mfr'] = 'chevrolet'
df.loc[df.mfr == 'chevroelt', 'mfr'] = 'chevrolet'
df.loc[df.mfr == 'maxda', 'mfr'] = 'mazda'
df.loc[df.mfr == 'mercedes-benz', 'mfr'] = 'mercedes'
df.loc[df.mfr == 'toyouta', 'mfr'] = 'toyota'
df.loc[df.mfr == 'vokswagen', 'mfr'] = 'volkswagen'
df.loc[df.mfr == 'vw', 'mfr'] = 'volkswagen'
ORIGINS = ['North America', 'Europe', 'Asia']
df.origin = [ORIGINS[x-1] for x in df.origin]
return df
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# Load the raw Auto MPG CSV packaged with Bokeh, then derive the cleaned
# variant (canonical manufacturer names, textual origin labels).
autompg = package_csv('autompg', 'auto-mpg.csv')
autompg_clean = _clean_data(autompg)
| bsd-3-clause | 3641d586703fe5f204fb232b1bbd39e4 | 30.978947 | 78 | 0.378868 | 4.435036 | false | false | false | false |
bokeh/bokeh | src/bokeh/core/property/alias.py | 1 | 3462 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
""" Provide the ``Alias`` class, for aliasing other properties.
.. note::
This class should normally be imported from ``bokeh.core.properties``
instead of directly from this module.
"""
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from typing import ClassVar, TypeVar
# Bokeh imports
from .bases import Property
from .descriptors import AliasPropertyDescriptor, PropertyDescriptor
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
"Alias",
)
T = TypeVar("T")
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Alias(Property[T]): # lgtm [py/missing-call-to-init]
    """
    Alias another property of a model.
    Example:
        Consider the following class definitions:
        .. code-block:: python
            from bokeh.model import Model
            from bokeh.properties import Alias, Int
            class Parent(Model):
                width = Int()
            class Child(Parent):
                plot_width = Alias("width")
    """
    name: str
    _help: str | None
    # Alias is somewhat a quasi-property: it is never read-only and never
    # serialized itself; access is delegated to the aliased property.
    readonly: ClassVar[bool] = False
    serialized: ClassVar[bool] = False
    _default = None
    def __init__(self, aliased_name: str, *, help: str | None = None) -> None:
        # aliased_name is the name of the existing property this alias
        # delegates to (e.g. "width" in the class example above)
        self.aliased_name = aliased_name
        self._help = help
    def make_descriptors(self, base_name: str) -> list[PropertyDescriptor[T]]:
        """ Return a list of ``AliasPropertyDescriptor`` instances to
        install on a class, in order to delegate attribute access to this
        property.
        Args:
            base_name (str) : the attribute name this alias is installed
                under on the owning class
        Returns:
            list[AliasPropertyDescriptor]
        The descriptors returned are collected by the ``MetaHasProps``
        metaclass and added to ``HasProps`` subclasses during class creation.
        """
        return [ AliasPropertyDescriptor(base_name, self.aliased_name, self) ]
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause | add012b09c13c9b58302e15a2828c4c0 | 31.35514 | 78 | 0.410456 | 6.193202 | false | false | false | false |
bokeh/bokeh | tests/integration/widgets/test_slider.py | 1 | 9860 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from time import sleep
# Bokeh imports
from bokeh.layouts import column
from bokeh.models import (
Circle,
ColumnDataSource,
CustomJS,
Plot,
Range1d,
Slider,
)
from tests.support.plugins.project import BokehModelPage, BokehServerPage
from tests.support.util.selenium import (
RECORD,
Keys,
drag_slider,
find_element_for,
find_elements_for,
get_slider_bar_color,
get_slider_title_text,
get_slider_title_value,
select_element_and_press_key,
)
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
# make the shared project fixtures (bokeh_model_page, bokeh_server_page)
# available to this module
pytest_plugins = (
    "tests.support.plugins.project",
)
@pytest.mark.selenium
class Test_Slider:
    """ Browser-driven integration tests for the ``Slider`` widget:
    rendering, title display/updates, keyboard and drag interaction, and
    server-side ``on_change`` callbacks. """
    def test_display(self, bokeh_model_page: BokehModelPage) -> None:
        slider = Slider(start=0, end=10, value=1, width=300)
        page = bokeh_model_page(slider)
        # input group holds the title element and the slider element
        children = find_elements_for(page.driver, slider, "div.bk-input-group > div")
        assert len(children) == 2
        assert page.has_no_console_errors()
    def test_displays_title(self, bokeh_model_page: BokehModelPage) -> None:
        slider = Slider(start=0, end=10, value=1, title="bar", width=300)
        page = bokeh_model_page(slider)
        children = find_elements_for(page.driver, slider, "div.bk-input-group > div")
        assert len(children) == 2
        assert get_slider_title_text(page.driver, slider) == "bar: 1"
        assert float(get_slider_title_value(page.driver, slider)) == 1
        assert page.has_no_console_errors()
    def test_title_updates(self, bokeh_model_page: BokehModelPage) -> None:
        slider = Slider(start=0, end=10, value=1, title="bar", width=300)
        page = bokeh_model_page(slider)
        assert float(get_slider_title_value(page.driver, slider)) == 1
        drag_slider(page.driver, slider, 50)
        value = get_slider_title_value(page.driver, slider)
        assert float(value) > 1
        assert float(value) == int(value) # integral step size
        drag_slider(page.driver, slider, 50)
        assert float(get_slider_title_value(page.driver, slider)) > 2
        # dragging far left should clamp the value at the start of the range
        drag_slider(page.driver, slider, -135)
        assert float(get_slider_title_value(page.driver, slider)) == 0
        assert page.has_no_console_errors()
    def test_keypress_event(self, bokeh_model_page: BokehModelPage) -> None:
        slider = Slider(start=0, end=10, value=1, title="bar", width=300)
        page = bokeh_model_page(slider)
        handle = find_element_for(page.driver, slider, ".noUi-handle")
        select_element_and_press_key(page.driver, handle, Keys.ARROW_RIGHT, press_number=1)
        assert float(get_slider_title_value(page.driver, slider)) == 2
        select_element_and_press_key(page.driver, handle, Keys.ARROW_LEFT, press_number=3) # hit lower value and continue
        assert float(get_slider_title_value(page.driver, slider)) == 0
        select_element_and_press_key(page.driver, handle, Keys.ARROW_RIGHT, press_number=11) # hit higher value and continue
        assert float(get_slider_title_value(page.driver, slider)) == 10
        assert page.has_no_console_errors()
    def test_displays_bar_color(self, bokeh_model_page: BokehModelPage) -> None:
        slider = Slider(start=0, end=10, value=1, title="bar", width=300, bar_color="red")
        page = bokeh_model_page(slider)
        children = find_elements_for(page.driver, slider, "div.bk-input-group > div")
        assert len(children) == 2
        assert get_slider_bar_color(page.driver, slider) == "rgba(255, 0, 0, 1)"
        assert page.has_no_console_errors()
    def test_js_on_change_executes(self, bokeh_model_page: BokehModelPage) -> None:
        slider = Slider(start=0, end=10, value=1, title="bar", width=300)
        # RECORD captures the new value so it can be read back via page.results
        slider.js_on_change('value', CustomJS(code=RECORD("value", "cb_obj.value")))
        page = bokeh_model_page(slider)
        drag_slider(page.driver, slider, 150)
        results = page.results
        assert float(results['value']) > 1
        assert page.has_no_console_errors()
    def test_server_on_change_round_trip(self, bokeh_server_page: BokehServerPage) -> None:
        slider = Slider(start=0, end=10, value=1, title="bar", width=300)
        def modify_doc(doc):
            source = ColumnDataSource(dict(x=[1, 2], y=[1, 1], val=["a", "b"]))
            plot = Plot(height=400, width=400, x_range=Range1d(0, 1), y_range=Range1d(0, 1), min_border=0)
            plot.add_glyph(source, Circle(x='x', y='y', size=20))
            plot.tags.append(CustomJS(name="custom-action", args=dict(s=source), code=RECORD("data", "s.data")))
            # server-side callback stores (old, new) so the test can verify
            # the round trip through the session
            def cb(attr, old, new):
                source.data['val'] = [old, new]
            slider.on_change('value', cb)
            doc.add_root(column(slider, plot))
        page = bokeh_server_page(modify_doc)
        drag_slider(page.driver, slider, 50)
        page.eval_custom_action()
        results = page.results
        old, new = results['data']['val']
        assert float(old) == 1
        assert float(new) > 1
        drag_slider(page.driver, slider, 50)
        page.eval_custom_action()
        results = page.results
        old, new = results['data']['val']
        assert float(new) > 2
        drag_slider(page.driver, slider, -135)
        page.eval_custom_action()
        results = page.results
        old, new = results['data']['val']
        assert float(new) == 0
        # XXX (bev) skip keypress part of test until it can be fixed
        # handle = find_element_for(page.driver, slider, ".noUi-handle")
        # select_element_and_press_key(page.driver, handle, Keys.ARROW_RIGHT)
        # page.eval_custom_action()
        # results = page.results
        # old, new = results['data']['val']
        # assert float(new) == 1
        # XXX (bev) disabled until https://github.com/bokeh/bokeh/issues/7970 is resolved
        # assert page.has_no_console_errors()
    def test_server_callback_value_vs_value_throttled(self, bokeh_server_page: BokehServerPage) -> None:
        # 'value' should fire on every move; 'value_throttled' only on release
        junk = dict(v=0, vt=0)
        slider = Slider(start=0, end=10, value=1, title="bar", width=300)
        def modify_doc(doc):
            plot = Plot(height=400, width=400, x_range=Range1d(0, 1), y_range=Range1d(0, 1), min_border=0)
            def cbv(attr, old, new): junk['v'] += 1
            def cbvt(attr, old, new): junk['vt'] += 1
            slider.on_change('value', cbv)
            slider.on_change('value_throttled', cbvt)
            doc.add_root(column(slider, plot))
        page = bokeh_server_page(modify_doc)
        drag_slider(page.driver, slider, 30, release=False)
        sleep(1) # noUiSlider does a transition that takes some time
        drag_slider(page.driver, slider, 30, release=False)
        sleep(1) # noUiSlider does a transition that takes some time
        drag_slider(page.driver, slider, 30, release=False)
        sleep(1) # noUiSlider does a transition that takes some time
        drag_slider(page.driver, slider, 30, release=True)
        sleep(1) # noUiSlider does a transition that takes some time
        assert junk['v'] == 4
        assert junk['vt'] == 1
        # XXX (bev) disabled until https://github.com/bokeh/bokeh/issues/7970 is resolved
        # assert page.has_no_console_errors()
    def test_server_bar_color_updates(self, bokeh_server_page: BokehServerPage) -> None:
        slider = Slider(start=0, end=10, value=1, title="bar", width=300)
        def modify_doc(doc):
            plot = Plot(height=400, width=400, x_range=Range1d(0, 1), y_range=Range1d(0, 1), min_border=0)
            def cb(attr, old, new):
                slider.bar_color = "rgba(255, 255, 0, 1)"
            slider.on_change('value', cb)
            doc.add_root(column(slider, plot))
        page = bokeh_server_page(modify_doc)
        drag_slider(page.driver, slider, 150)
        sleep(1) # noUiSlider does a transition that takes some time
        assert get_slider_bar_color(page.driver, slider) == "rgba(255, 255, 0, 1)"
        # XXX (bev) disabled until https://github.com/bokeh/bokeh/issues/7970 is resolved
        # assert page.has_no_console_errors()
    def test_server_title_updates(self, bokeh_server_page: BokehServerPage) -> None:
        slider = Slider(start=0, end=10, value=1, title="bar", width=300)
        def modify_doc(doc):
            plot = Plot(height=400, width=400, x_range=Range1d(0, 1), y_range=Range1d(0, 1), min_border=0)
            def cb(attr, old, new):
                slider.title = "baz"
            slider.on_change('value', cb)
            doc.add_root(column(slider, plot))
        page = bokeh_server_page(modify_doc)
        # NOTE(review): the expected "6" below is tied to the 150px drag
        # distance on a 300px-wide slider — confirm if geometry changes
        drag_slider(page.driver, slider, 150)
        sleep(1) # noUiSlider does a transition that takes some time
        assert get_slider_title_text(page.driver, slider) == "baz: 6"
        # XXX (bev) disabled until https://github.com/bokeh/bokeh/issues/7970 is resolved
        # assert page.has_no_console_errors()
| bsd-3-clause | 9ad5d21185b089ec9b992b26c9063d8d | 35.928839 | 124 | 0.585801 | 3.58937 | false | true | false | false |
bokeh/bokeh | examples/models/toolbars.py | 1 | 1774 | import numpy as np
from bokeh.layouts import column, row
from bokeh.plotting import figure, show
# Generate N random points with random radii; color is derived from the
# point's position (red/green channels track x/y, blue fixed at 150).
N = 1000
x = np.random.random(size=N) * 100
y = np.random.random(size=N) * 100
radii = np.random.random(size=N) * 1.5
colors = ["#%02x%02x%02x" % (int(r), int(g), 150) for r, g in zip(50+2*x, 30+2*y)]
TOOLS="hover,crosshair,pan,reset,box_select"
def mkplot(toolbar, xaxis, yaxis):
    """ Build one 300x300 scatter figure with the given toolbar location
    and x/y axis placements, using the module-level random data. """
    fig = figure(
        width=300,
        height=300,
        tools=TOOLS,
        toolbar_location=toolbar,
        x_axis_location=xaxis,
        y_axis_location=yaxis,
    )
    fig.scatter(x, y, radius=radii, fill_color=colors, fill_alpha=0.6, line_color=None)
    return fig
# One plot per (toolbar, xaxis, yaxis) placement combination. Variable
# names encode the placements: e.g. p_lbr = toolbar Left, xaxis Below,
# yaxis Right; a/b = above/below.
p_lbl = mkplot(toolbar="left", xaxis="below", yaxis="left")
p_lbr = mkplot(toolbar="left", xaxis="below", yaxis="right")
p_rbl = mkplot(toolbar="right", xaxis="below", yaxis="left")
p_rbr = mkplot(toolbar="right", xaxis="below", yaxis="right")
p_lal = mkplot(toolbar="left", xaxis="above", yaxis="left")
p_lar = mkplot(toolbar="left", xaxis="above", yaxis="right")
p_ral = mkplot(toolbar="right", xaxis="above", yaxis="left")
p_rar = mkplot(toolbar="right", xaxis="above", yaxis="right")
p_abl = mkplot(toolbar="above", xaxis="below", yaxis="left")
p_aal = mkplot(toolbar="above", xaxis="above", yaxis="left")
p_bbl = mkplot(toolbar="below", xaxis="below", yaxis="left")
p_bal = mkplot(toolbar="below", xaxis="above", yaxis="left")
p_abr = mkplot(toolbar="above", xaxis="below", yaxis="right")
p_aar = mkplot(toolbar="above", xaxis="above", yaxis="right")
p_bbr = mkplot(toolbar="below", xaxis="below", yaxis="right")
p_bar = mkplot(toolbar="below", xaxis="above", yaxis="right")
# arrange all 16 variants in a 4x4 grid for side-by-side comparison
layout = column(
    row(p_lbl, p_lbr, p_lal, p_lar),
    row(p_rbl, p_rbr, p_ral, p_rar),
    row(p_abl, p_aal, p_abr, p_aar),
    row(p_bbl, p_bal, p_bbr, p_bar),
)
show(layout)
| bsd-3-clause | 9197a318c4cbd8ee471a53fb40ab7e57 | 37.565217 | 122 | 0.6646 | 2.527066 | false | false | false | false |
bokeh/bokeh | tests/support/defaults.py | 1 | 4753 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
""" Collect default values of all models' properties. """
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import os
import warnings
from pathlib import Path
from typing import Any
# External imports
import json5
# Bokeh imports
from bokeh.core.has_props import HasProps
from bokeh.core.property.descriptors import PropertyDescriptor
from bokeh.core.property.singletons import Undefined
from bokeh.core.serialization import (
AnyRep,
ObjectRep,
Serializer,
SymbolRep,
)
from bokeh.model import Model
from bokeh.util.warnings import BokehDeprecationWarning
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
"collect_defaults",
"output_defaults",
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class DefaultsSerializer(Serializer):
    """ Serializer that emits a full object rep (with all readonly or
    serialized property values) for Bokeh models, and a symbolic "unset"
    marker for ``Undefined`` values. """
    def _encode(self, obj: Any) -> AnyRep:
        # unset values are represented symbolically rather than as data
        if obj is Undefined:
            return SymbolRep(type="symbol", name="unset")
        if not isinstance(obj, Model):
            # anything else gets the stock encoding
            return super()._encode(obj)
        def query(prop: PropertyDescriptor[Any]) -> bool:
            return prop.readonly or prop.serialized
        props = obj.query_properties_with_values(query, include_defaults=False, include_undefined=True)
        encoded = {name: self.encode(value) for name, value in props.items()}
        return ObjectRep(
            type="object",
            name=obj.__qualified_model__,
            attributes=encoded,
        )
def collect_defaults() -> dict[str, Any]:
    """ Build a mapping of qualified model name to the encoded default
    values of that model's own (non-inherited) properties and overrides,
    for every built-in Bokeh model. Inherited properties are recorded
    indirectly via an ``__extends__`` entry naming the base model(s). """
    serializer = DefaultsSerializer()
    defaults: dict[str, Any] = {}
    # In order to look up from the model catalog that Model maintains, it
    # has to be created first. These imports ensure that all built-in Bokeh
    # models are represented in the catalog.
    import bokeh.models
    import bokeh.plotting # noqa: F401
    for name, model in Model.model_class_reverse_map.items():
        # instantiating the model yields its default property values;
        # deprecation warnings from deprecated models are not interesting here
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=BokehDeprecationWarning)
            obj = model()
        # filter only own properties and overrides
        def query(prop: PropertyDescriptor[Any]) -> bool:
            return (prop.readonly or prop.serialized) and \
                (prop.name in obj.__class__.__properties__ or prop.name in obj.__class__.__overridden_defaults__)
        properties = obj.query_properties_with_values(query, include_defaults=True, include_undefined=True)
        attributes = {key: serializer.encode(val) for key, val in properties.items()}
        defaults[name] = attributes
        # record the HasProps base class(es) so consumers can resolve
        # inherited defaults; a single base is stored unwrapped
        bases = [base.__qualified_model__ for base in model.__bases__ if issubclass(base, HasProps) and base != HasProps]
        if bases != []:
            defaults[name] = dict(
                __extends__=bases[0] if len(bases) == 1 else bases,
                **defaults[name],
            )
    return defaults
def output_defaults(dest: Path, defaults: dict[str, Any]) -> None:
    """ Serialize *defaults* as JSON5 and write it to *dest* (with a
    trailing newline), creating any missing parent directories. """
    dest.parent.mkdir(parents=True, exist_ok=True)
    serialized = json5.dumps(defaults, sort_keys=False, indent=2)
    with open(dest, "w", encoding="utf-8") as fh:
        fh.write(serialized)
        fh.write("\n")
    print(f"Wrote {dest} with {len(defaults)} models")
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause | b3835c0da27cab91046854a974f09b82 | 35.844961 | 121 | 0.478224 | 5.376697 | false | false | false | false |
bokeh/bokeh | src/bokeh/models/callbacks.py | 1 | 5629 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Client-side interactivity.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from typing import Any as any
# Bokeh imports
from ..core.has_props import HasProps, abstract
from ..core.properties import (
Any,
AnyRef,
Bool,
Dict,
Instance,
Required,
String,
)
from ..core.property.bases import Init
from ..core.property.singletons import Intrinsic
from ..core.validation import error
from ..core.validation.errors import INVALID_PROPERTY_VALUE, NOT_A_PROPERTY_OF
from ..model import Model
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'Callback',
'OpenURL',
'CustomJS',
'SetValue',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
@abstract
class Callback(Model):
    ''' Base class for interactive callback.
    '''
    # Concrete subclasses in this module (OpenURL, CustomJS, SetValue)
    # implement specific client-side actions.
    # explicit __init__ to support Init signatures
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
class OpenURL(Callback):
    ''' Open a URL in a new or current tab or window.
    '''
    # explicit __init__ to support Init signatures
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
    # url may be a template string, filled in from the data source (see help)
    url = String("http://", help="""
    The URL to direct the web browser to. This can be a template string,
    which will be formatted with data from the data source.
    """)
    same_tab = Bool(False, help="""
    Open URL in a new (`False`, default) or current (`True`) tab or window.
    For `same_tab=False`, whether tab or window will be opened is browser
    dependent.
    """)
class CustomJS(Callback):
    ''' Execute a JavaScript function.
    .. warning::
        The explicit purpose of this Bokeh Model is to embed *raw JavaScript
        code* for a browser to execute. If any part of the code is derived
        from untrusted user inputs, then you must take appropriate care to
        sanitize the user input prior to passing to Bokeh.
    '''
    # explicit __init__ to support Init signatures
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
    # named Python/Bokeh objects exposed to the JS snippet as parameters
    args = Dict(String, AnyRef, help="""
    A mapping of names to Python objects. In particular those can be bokeh's models.
    These objects are made available to the callback's code snippet as the values of
    named parameters to the callback.
    """)
    # the raw JS function body; also receives cb_obj and (tool-specific) cb_data
    code = String(default="", help="""
    A snippet of JavaScript code to execute in the browser. The
    code is made into the body of a function, and all of of the named objects in
    ``args`` are available as parameters that the code can use. Additionally,
    a ``cb_obj`` parameter contains the object that triggered the callback
    and an optional ``cb_data`` parameter that contains any tool-specific data
    (i.e. mouse coordinates and hovered glyph indices for the ``HoverTool``).
    """)
class SetValue(Callback):
    """ Allows to update a property of an object. """
    # explicit __init__ to support Init signatures
    def __init__(self, obj: Init[HasProps] = Intrinsic, attr: Init[str] = Intrinsic, value: Init[any] = Intrinsic, **kwargs) -> None:
        super().__init__(obj=obj, attr=attr, value=value, **kwargs)
    obj: HasProps = Required(Instance(HasProps), help="""
    Object to set the value on.
    """)
    attr: str = Required(String, help="""
    The property to modify.
    """)
    value = Required(Any, help="""
    The value to set.
    """)
    # validation: ``attr`` must name an actual property of ``obj``
    @error(NOT_A_PROPERTY_OF)
    def _check_if_an_attribute_is_a_property_of_a_model(self):
        if self.obj.lookup(self.attr, raises=False):
            return None
        else:
            return f"{self.attr} is not a property of {self.obj}"
    # validation: ``value`` must be acceptable to the target property's type
    @error(INVALID_PROPERTY_VALUE)
    def _check_if_provided_a_valid_value(self):
        descriptor = self.obj.lookup(self.attr)
        if descriptor.property.is_valid(self.value):
            return None
        else:
            return f"{self.value!r} is not a valid value for {self.obj}.{self.attr}"
# TODO: class Show(Callback): target = Required(Either(Instance(DOMNode), Instance(UIElement)))
# TODO: class Hide(Callback): ...
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause | ad55389059595709a3ee3ea062f2081b | 32.706587 | 133 | 0.504708 | 4.882047 | false | false | false | false |
bokeh/bokeh | examples/server/app/clustering/main.py | 1 | 6278 | ''' A `k-nearest neighbors`_ (KNN) chart using datasets from scikit-learn. This
example demonstrates solving both classification and regression problems.
.. note::
This example needs the scikit-learn package to run.
.. _k-nearest neighbors: https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm
'''
import numpy as np
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
from bokeh.io import curdoc
from bokeh.layouts import column, row
from bokeh.models import ColumnDataSource, Select, Slider
from bokeh.palettes import Spectral6
from bokeh.plotting import figure
np.random.seed(0)
# define some helper functions
def clustering(X, algorithm, n_clusters):
    """ Fit the named scikit-learn clustering algorithm to *X*.

    Args:
        X : (n_samples, n_features) array of points to cluster
        algorithm (str) : name of a supported scikit-learn clustering
            algorithm (e.g. ``'MiniBatchKMeans'``, ``'DBSCAN'``, ``'Ward'``)
        n_clusters (int) : number of clusters, for algorithms that accept it

    Returns:
        (X, y_pred) : the normalized data and integer cluster labels

    Raises:
        ValueError : if *algorithm* is not a recognized name
    """
    # normalize dataset for easier parameter selection
    X = StandardScaler().fit_transform(X)

    # estimate bandwidth for mean shift
    bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)

    # connectivity matrix for structured Ward
    connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)

    # make connectivity symmetric
    connectivity = 0.5 * (connectivity + connectivity.T)

    if algorithm == 'MiniBatchKMeans':
        model = cluster.MiniBatchKMeans(n_clusters=n_clusters)
    elif algorithm == 'Birch':
        model = cluster.Birch(n_clusters=n_clusters)
    elif algorithm == 'DBSCAN':
        model = cluster.DBSCAN(eps=.2)
    elif algorithm == 'AffinityPropagation':
        model = cluster.AffinityPropagation(damping=.9,
                                            preference=-200)
    elif algorithm == 'MeanShift':
        model = cluster.MeanShift(bandwidth=bandwidth,
                                  bin_seeding=True)
    elif algorithm == 'SpectralClustering':
        model = cluster.SpectralClustering(n_clusters=n_clusters,
                                           eigen_solver='arpack',
                                           affinity="nearest_neighbors")
    elif algorithm == 'Ward':
        model = cluster.AgglomerativeClustering(n_clusters=n_clusters,
                                                linkage='ward',
                                                connectivity=connectivity)
    elif algorithm == 'AgglomerativeClustering':
        model = cluster.AgglomerativeClustering(linkage="average",
                                                affinity="cityblock",
                                                n_clusters=n_clusters,
                                                connectivity=connectivity)
    else:
        # previously an unrecognized name fell through to an
        # UnboundLocalError on ``model``; fail fast with a clear message
        raise ValueError(f"unknown clustering algorithm: {algorithm!r}")

    model.fit(X)

    # some estimators expose labels_ after fit; others require predict()
    if hasattr(model, 'labels_'):
        y_pred = model.labels_.astype(int)
    else:
        y_pred = model.predict(X)

    return X, y_pred
def get_dataset(dataset, n_samples):
    """ Generate a sample 2d dataset by name.

    Args:
        dataset (str) : one of ``'Noisy Circles'``, ``'Noisy Moons'``,
            ``'Blobs'`` or ``'No Structure'``
        n_samples (int) : number of points to generate

    Returns:
        (X, y) : data array and labels (``y`` is None for 'No Structure')

    Raises:
        ValueError : if *dataset* is not a recognized name
    """
    if dataset == 'Noisy Circles':
        return datasets.make_circles(n_samples=n_samples,
                                    factor=0.5,
                                    noise=0.05)
    elif dataset == 'Noisy Moons':
        return datasets.make_moons(n_samples=n_samples,
                                   noise=0.05)
    elif dataset == 'Blobs':
        return datasets.make_blobs(n_samples=n_samples,
                                   random_state=8)
    elif dataset == "No Structure":
        return np.random.rand(n_samples, 2), None
    # previously an unrecognized name returned None implicitly, which
    # crashed at the caller's tuple unpacking; raise a clear error instead
    raise ValueError(f"unknown dataset: {dataset!r}")
# set up initial data
n_samples = 1500
n_clusters = 2
algorithm = 'MiniBatchKMeans'
dataset = 'Noisy Circles'
X, y = get_dataset(dataset, n_samples)
X, y_pred = clustering(X, algorithm, n_clusters)
# tile the 6-color palette so any label index can be colored
spectral = np.hstack([Spectral6] * 20)
colors = [spectral[i] for i in y]
# set up plot (styling in theme.yaml)
plot = figure(toolbar_location=None, title=algorithm)
source = ColumnDataSource(data=dict(x=X[:, 0], y=X[:, 1], colors=colors))
plot.circle('x', 'y', fill_color='colors', line_color=None, source=source)
# set up widgets
clustering_algorithms= [
    'MiniBatchKMeans',
    'AffinityPropagation',
    'MeanShift',
    'SpectralClustering',
    'Ward',
    'AgglomerativeClustering',
    'DBSCAN',
    'Birch'
]
datasets_names = [
    'Noisy Circles',
    'Noisy Moons',
    'Blobs',
    'No Structure'
]
algorithm_select = Select(value='MiniBatchKMeans',
                          title='Select algorithm:',
                          width=200,
                          options=clustering_algorithms)
dataset_select = Select(value='Noisy Circles',
                        title='Select dataset:',
                        width=200,
                        options=datasets_names)
samples_slider = Slider(title="Number of samples",
                        value=1500.0,
                        start=1000.0,
                        end=3000.0,
                        step=100,
                        width=400)
clusters_slider = Slider(title="Number of clusters",
                         value=2.0,
                         start=2.0,
                         end=10.0,
                         step=1,
                         width=400)
# set up callbacks
def update_algorithm_or_clusters(attrname, old, new):
    """ Re-cluster the current data with the selected algorithm and
    cluster count, then recolor the plot and update its title. """
    global X
    algorithm = algorithm_select.value
    n_clusters = int(clusters_slider.value)
    X, y_pred = clustering(X, algorithm, n_clusters)
    colors = [spectral[i] for i in y_pred]
    source.data = dict(colors=colors, x=X[:, 0], y=X[:, 1])
    plot.title.text = algorithm
def update_samples_or_dataset(attrname, old, new):
    """ Regenerate the dataset from the selected source and sample count,
    re-run the current clustering, and update the plot. """
    global X, y
    dataset = dataset_select.value
    algorithm = algorithm_select.value
    n_clusters = int(clusters_slider.value)
    n_samples = int(samples_slider.value)
    X, y = get_dataset(dataset, n_samples)
    X, y_pred = clustering(X, algorithm, n_clusters)
    colors = [spectral[i] for i in y_pred]
    source.data = dict(colors=colors, x=X[:, 0], y=X[:, 1])
# wire widgets to callbacks; sliders use 'value_throttled' so re-clustering
# only runs when the user releases the handle
algorithm_select.on_change('value', update_algorithm_or_clusters)
clusters_slider.on_change('value_throttled', update_algorithm_or_clusters)
dataset_select.on_change('value', update_samples_or_dataset)
samples_slider.on_change('value_throttled', update_samples_or_dataset)
# set up layout
selects = row(dataset_select, algorithm_select, width=420)
inputs = column(selects, samples_slider, clusters_slider)
# add to document
curdoc().add_root(row(inputs, plot))
curdoc().title = "Clustering"
| bsd-3-clause | 56224b85611a9af757f9771061ff0d60 | 30.707071 | 84 | 0.601147 | 4.024359 | false | false | false | false |
bokeh/bokeh | tests/unit/bokeh/util/test_hex.py | 1 | 3875 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# External imports
import numpy as np
# Module under test
import bokeh.util.hex as buh # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
# Fixed seed so the exact bin counts asserted in Test_hexbin below are
# reproducible across runs.
np.random.seed(0)
n = 500
# Two independent standard-normal clouds centered at (2, 2).
x = 2 + np.random.standard_normal(n)
y = 2 + np.random.standard_normal(n)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Test_axial_to_cartesian:
    """Tests for ``buh.axial_to_cartesian`` with unit-sized hexagons."""

    def test_default_aspect_pointytop(self) -> None:
        """The seven axial cells around the origin map to the expected centers."""
        qs = np.array([0, 0, 0, 1, -1, 1, -1])
        rs = np.array([0, 1, -1, 0, 1, -1, 0])

        xs, ys = buh.axial_to_cartesian(qs, rs, 1, "pointytop")

        s3 = np.sqrt(3)
        assert list(xs) == [0, s3/2, -s3/2, s3, -s3/2, s3/2, -s3]
        assert list(ys) == [-0.0, -1.5, 1.5, -0.0, -1.5, 1.5, -0.0]

    def test_default_aspect_flattop(self) -> None:
        """Flat-top orientation swaps the roles of the two cartesian axes."""
        qs = np.array([0, 0, 0, 1, -1, 1, -1])
        rs = np.array([0, 1, -1, 0, 1, -1, 0])

        xs, ys = buh.axial_to_cartesian(qs, rs, 1, "flattop")

        s3 = np.sqrt(3)
        assert list(xs) == [0.0, 0.0, 0.0, 1.5, -1.5, 1.5, -1.5]
        assert list(ys) == [0, -s3, s3, -s3/2, -s3/2, s3/2, s3/2]
class Test_cartesian_to_axial:
    """Tests for ``buh.cartesian_to_axial`` with unit-sized hexagons."""

    def test_default_aspect_pointytop(self) -> None:
        xs = np.array([0, -2, 2, -1.5, -1.5, 1.5, 1.5])
        ys = np.array([0, 0, 0, 1.5, -1.5, 1.5, -1.5])

        q, r = buh.cartesian_to_axial(xs, ys, 1, "pointytop")

        expected = [(0, 0), (-1, 0), (1, 0), (0, -1), (-1, 1), (1, -1), (0, 1)]
        assert list(zip(q, r)) == expected

    def test_default_aspect_flattop(self) -> None:
        xs = np.array([0, 0, 0, 1.5, -1.5, 1.5, -1.5])
        ys = np.array([0, -2, 2, -1.5, -1.5, 1.5, 1.5])

        q, r = buh.cartesian_to_axial(xs, ys, 1, "flattop")

        expected = [(0, 0), (0, 1), (0, -1), (1, 0), (-1, 1), (1, -1), (-1, 0)]
        assert list(zip(q, r)) == expected
class Test_hexbin:
    # hexbin requires pandas

    def test_gaussian_pointytop(self) -> None:
        """Binning the module-level gaussian cloud yields known counts."""
        result = buh.hexbin(x, y, 2)

        assert list(result.q) == [0, 0, 1, 1, 1, 2, 2]
        assert list(result.r) == [-1, 0, -2, -1, 0, -2, -1]
        assert list(result.counts) == [9, 54, 1, 313, 98, 3, 22]

        # "pointytop" is the default orientation
        assert result.equals(buh.hexbin(x, y, 2, "pointytop"))

    def test_gaussian_flattop(self) -> None:
        result = buh.hexbin(x, y, 2, "flattop")

        assert list(result.q) == [0, 0, 1, 1, 1, 2]
        assert list(result.r) == [-1, 0, -2, -1, 0, -2]
        assert list(result.counts) == [95, 57, 14, 324, 8, 2]
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause | f2a79eb8e1a364c91846be3ea4f544b5 | 33.90991 | 78 | 0.349677 | 3.378378 | false | true | false | false |
bokeh/bokeh | examples/reference/models/checkbox_button_server.py | 1 | 1529 | ## Bokeh server for checkbox button group
from bokeh.io import curdoc
from bokeh.layouts import row
from bokeh.models import CategoricalColorMapper, CheckboxButtonGroup, ColumnDataSource
from bokeh.palettes import RdBu3
from bokeh.plotting import figure
# Three red points and three blue points.
x=[3,4,6,12,10,1]
y=[7,1,3,4,1,6]
z=['red','red','red','blue','blue','blue']
source = ColumnDataSource(data=dict(x=x, y=y,z=z))
# Map the categorical 'z' column onto actual fill colors.
color_mapper = CategoricalColorMapper(factors=['red','blue'], palette=[RdBu3[2], RdBu3[0]])
plot_figure = figure(title='Checkbox Button Group',height=450, width=600,
              tools="save,reset", toolbar_location="below")
plot_figure.scatter('x', 'y', source=source, size=10,color={'field': 'z', 'transform': color_mapper})
# Two independent toggles: index 0 -> x-axis label, index 1 -> y-axis label.
checkbox_button = CheckboxButtonGroup(labels=['Show x-axis label','Show y-axis label'])
def checkbox_button_click(attr, old, new):
    """Toggle the plot's axis labels from the checkbox button group state.

    Args:
        attr: name of the changed property (always 'active' here)
        old: previous list of active checkbox indices
        new: current list of active checkbox indices

    Checkbox index 0 controls the x-axis label, index 1 the y-axis label.
    """
    active_checkbox = checkbox_button.active  # indices of the checked boxes
    # ``in`` on an empty list is simply False, so the original
    # ``len(...) != 0 and`` guards were redundant.
    plot_figure.xaxis.axis_label = 'X-Axis' if 0 in active_checkbox else None
    plot_figure.yaxis.axis_label = 'Y-Axis' if 1 in active_checkbox else None
# Re-run the callback whenever the set of active checkboxes changes.
checkbox_button.on_change('active',checkbox_button_click)
layout=row(checkbox_button, plot_figure)
curdoc().add_root(layout)
curdoc().title = "Checkbox Button Bokeh Server"
| bsd-3-clause | 009c8b52058a0f3f7ec58790f2515aa1 | 32.977778 | 101 | 0.711576 | 3.288172 | false | false | false | false |
bokeh/bokeh | examples/models/sliders.py | 1 | 2741 | '''Demonstrates the use of many Bokeh sliders with examples such as a numerical slider, a disabled slider, a date picker,
and a color picker.
.. bokeh-example-metadata::
:apis: bokeh.models.Column, bokeh.models.CustomJS, bokeh.models.DateRangeSlider, bokeh.models.DateSlider, bokeh.models.Div, bokeh.models.RangeSlider, bokeh.models.Row, bokeh.models.Slider, bokeh.document.document, bokeh.embed.file_html, bokeh.util.browser.view
:refs: :ref:`ug_interaction_widgets`
:keywords: slider
''' # noqa: E501
from datetime import date
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.models import (Column, CustomJS, DateRangeSlider,
DateSlider, Div, RangeSlider, Row, Slider)
from bokeh.resources import INLINE
from bokeh.util.browser import view
# One example of each slider variant: plain numerical, disabled,
# numerical range, date, date range, value-only, and title-less.
slider = Slider(title="Numerical", value=50, start=0, end=96, step=5)
disabled_slider = Slider(title="Disabled", value=50, start=0, end=96, step=5, disabled=True)
range_slider = RangeSlider(title="Numerical range", value=[30, 70], start=0, end=100, step=0.5)
date_slider = DateSlider(title="Date", value=date(2014, 1, 1), start=date(2010, 1, 1), end=date(2020, 1, 1), step=1)
date_range_slider = DateRangeSlider(title="Date range", value=(date(2014, 1, 1), date(2018, 12, 31)), start=date(2010, 1, 1), end=date(2020, 1, 1), step=1)
only_value_slider = Slider(value=50, start=0, end=96, step=5)
no_title_slider = Slider(title=None, value=50, start=0, end=96, step=5)
def color_picker():
    """Build a vertical RGB slider trio plus a swatch ``Div`` whose
    background color tracks the sliders via a client-side callback."""
    def _channel(label, bar):
        # One vertical 0-255 slider, initialised at mid-scale.
        return Slider(title=label, show_value=False, value=127, start=0, end=255,
                      step=1, orientation="vertical", bar_color=bar)
    red = _channel("R", "red")
    green = _channel("G", "green")
    blue = _channel("B", "blue")
    swatch = Div(width=100, height=100, background="rgb(127, 127, 127)")
    update = CustomJS(args=dict(red=red, green=green, blue=blue, div=swatch), code="""
        const r = red.value
        const g = green.value
        const b = blue.value
        div.background = `rgb(${r}, ${g}, ${b})`
    """)
    for channel in (red, green, blue):
        channel.js_on_change('value', update)
    return Row(children=[red, green, blue, swatch])
# Column of all slider variants next to the RGB color picker.
sliders = Row(children=[
    Column(children=[
        slider,
        disabled_slider,
        range_slider,
        date_slider,
        date_range_slider,
        only_value_slider,
        no_title_slider,
    ]),
    color_picker(),
])
doc = Document()
doc.add_root(sliders)
if __name__ == "__main__":
    doc.validate()
    filename = "sliders.html"
    with open(filename, "w") as f:
        f.write(file_html(doc, INLINE, "sliders"))
    # f-string instead of dated %-formatting; the output is identical.
    print(f"Wrote {filename}")
    # Open the generated file in a browser.
    view(filename)
| bsd-3-clause | e42e9bc8824d04ee160309feb1b193f5 | 34.141026 | 264 | 0.657059 | 3.150575 | false | false | false | false |
bokeh/bokeh | tests/unit/bokeh/models/test_callbacks__models.py | 1 | 2139 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# External imports
from pytest import raises
# Bokeh imports
from bokeh.models import Slider
# Module under test
from bokeh.models import CustomJS # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def test_js_callback() -> None:
    """CustomJS stores ``code`` and ``args`` verbatim and rejects extra kwargs."""
    slider = Slider()

    callback = CustomJS(code="foo();", args=dict(x=slider))
    assert 'foo()' in callback.code
    assert callback.args['x'] is slider

    callback = CustomJS(code="foo();", args=dict(x=3))
    assert 'foo()' in callback.code
    assert callback.args['x'] == 3

    with raises(AttributeError):
        # arbitrary kwargs are not a shorthand for ``args``
        CustomJS(code="foo();", x=slider)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause | 11f5c6726e1e485145e6e2d0f8f44da8 | 34.65 | 78 | 0.275362 | 7.350515 | false | true | false | false |
bokeh/bokeh | examples/topics/graph/interaction_nodeslinkededges.py | 1 | 1283 | import networkx as nx
from bokeh.models import (BoxSelectTool, Circle, HoverTool, MultiLine,
NodesAndLinkedEdges, Plot, Range1d, TapTool)
from bokeh.palettes import Spectral4
from bokeh.plotting import from_networkx, show
# Zachary's karate club network as the demo graph.
G = nx.karate_club_graph()
plot = Plot(width=400, height=400,
            x_range=Range1d(-1.1,1.1), y_range=Range1d(-1.1,1.1))
plot.title.text = "Graph Interaction Demonstration"
# Hover (no tooltips) drives inspection; tap/box-select drive selection.
plot.add_tools(HoverTool(tooltips=None), TapTool(), BoxSelectTool())
graph_renderer = from_networkx(G, nx.circular_layout, scale=1, center=(0,0))
# Node glyphs for the normal, selected, and hovered states.
graph_renderer.node_renderer.glyph = Circle(size=15, fill_color=Spectral4[0])
graph_renderer.node_renderer.selection_glyph = Circle(size=15, fill_color=Spectral4[2])
graph_renderer.node_renderer.hover_glyph = Circle(size=15, fill_color=Spectral4[1])
# Edge glyphs for the normal, selected, and hovered states.
graph_renderer.edge_renderer.glyph = MultiLine(line_color="#CCCCCC", line_alpha=0.8, line_width=5)
graph_renderer.edge_renderer.selection_glyph = MultiLine(line_color=Spectral4[2], line_width=5)
graph_renderer.edge_renderer.hover_glyph = MultiLine(line_color=Spectral4[1], line_width=5)
# Selecting/hovering a node also highlights its incident edges.
graph_renderer.selection_policy = NodesAndLinkedEdges()
graph_renderer.inspection_policy = NodesAndLinkedEdges()
plot.renderers.append(graph_renderer)
show(plot)
| bsd-3-clause | c2bde51f2e7358786e8d0ab1f9a54223 | 40.387097 | 98 | 0.755261 | 2.942661 | false | false | true | false |
bokeh/bokeh | src/bokeh/models/filters.py | 1 | 8487 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from ..core.has_props import abstract
from ..core.properties import (
AnyRef,
Bool,
Instance,
Int,
NonEmpty,
Nullable,
Required,
RestrictedDict,
Seq,
String,
)
from ..model import Model
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
"AllIndices",
"BooleanFilter",
"CustomJSFilter",
"DifferenceFilter",
"Filter",
"GroupFilter",
"IndexFilter",
"IntersectionFilter",
"InversionFilter",
"SymmetricDifferenceFilter",
"UnionFilter",
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
@abstract
class Filter(Model):
    ''' A Filter model represents a filtering operation that returns a row-wise subset of
    data when applied to a ``ColumnDataSource``.

    Filters compose with the standard Python set-style operators: ``~``
    (inversion), ``&`` (intersection), ``|`` (union), ``-`` (difference)
    and ``^`` (symmetric difference), each returning a new composite filter.
    '''

    # explicit __init__ to support Init signatures
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    def __invert__(self) -> Filter:
        # ~f -> rows not selected by f
        return InversionFilter(operand=self)

    def __and__(self, rhs: Filter) -> Filter:
        # f & g -> rows selected by both filters
        return IntersectionFilter(operands=[self, rhs])

    def __or__(self, rhs: Filter) -> Filter:
        # f | g -> rows selected by either filter
        return UnionFilter(operands=[self, rhs])

    def __sub__(self, rhs: Filter) -> Filter:
        # f - g -> rows selected by f but not by g
        return DifferenceFilter(operands=[self, rhs])

    def __xor__(self, rhs: Filter) -> Filter:
        # f ^ g -> rows selected by exactly one of the two filters
        return SymmetricDifferenceFilter(operands=[self, rhs])
class AllIndices(Filter):
    """ Trivial filter that includes all indices in a dataset. """

    # explicit __init__ to support Init signatures
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
class InversionFilter(Filter):
    """ Inverts indices resulting from another filter. """

    # explicit __init__ to support Init signatures
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    # Produced by the ``~`` operator; see ``Filter.__invert__``.
    operand = Required(Instance(Filter), help="""
    Indices produced by this filter will be inverted.
    """)
class IntersectionFilter(Filter):
    """ Computes intersection of indices resulting from other filters. """

    # explicit __init__ to support Init signatures
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    # Produced by the ``&`` operator; see ``Filter.__and__``.
    operands = Required(NonEmpty(Seq(Instance(Filter))), help="""
    Indices produced by a collection of these filters will be intersected.
    """)
class UnionFilter(Filter):
    """ Computes union of indices resulting from other filters. """

    # explicit __init__ to support Init signatures
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    # Produced by the ``|`` operator; see ``Filter.__or__``.
    operands = Required(NonEmpty(Seq(Instance(Filter))), help="""
    Indices produced by a collection of these filters will be unioned.
    """)
class DifferenceFilter(Filter):
    """ Computes the difference of indices resulting from other filters.

    (The previous docstring said "union" — a copy-paste error; the ``help``
    text below and ``Filter.__sub__`` show this filter subtracts indices.)
    """

    # explicit __init__ to support Init signatures
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    # Produced by the ``-`` operator; see ``Filter.__sub__``.
    operands = Required(NonEmpty(Seq(Instance(Filter))), help="""
    Indices produced by a collection of these filters will be subtracted.
    """)
class SymmetricDifferenceFilter(Filter):
    """ Computes symmetric difference of indices resulting from other filters. """

    # explicit __init__ to support Init signatures
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    # Produced by the ``^`` operator; see ``Filter.__xor__``.
    operands = Required(NonEmpty(Seq(Instance(Filter))), help="""
    Indices produced by a collection of these filters will be xored.
    """)
class IndexFilter(Filter):
    ''' An ``IndexFilter`` filters data by returning the subset of data at a given set of indices.
    '''

    indices = Nullable(Seq(Int), help="""
    A list of integer indices representing the subset of data to select.
    """)

    def __init__(self, *args, **kwargs) -> None:
        ''' Accept the indices either positionally (``IndexFilter([0, 1])``)
        or as the ``indices`` keyword argument.

        Raises:
            TypeError: if more than one positional argument is given, or if
                the indices are passed both positionally and by keyword.
        '''
        if args:
            # Previously, surplus or conflicting positional arguments were
            # silently dropped; fail loudly instead.
            if len(args) > 1:
                raise TypeError(f"expected at most one positional argument, got {len(args)}")
            if "indices" in kwargs:
                raise TypeError("'indices' was given both positionally and as a keyword argument")
            kwargs["indices"] = args[0]
        super().__init__(**kwargs)
class BooleanFilter(Filter):
    ''' A ``BooleanFilter`` filters data by returning the subset of data corresponding to indices
    where the values of the booleans array is True.
    '''

    booleans = Nullable(Seq(Bool), help="""
    A list of booleans indicating which rows of data to select.
    """)

    def __init__(self, *args, **kwargs) -> None:
        ''' Accept the booleans either positionally
        (``BooleanFilter([True, False])``) or as the ``booleans`` keyword
        argument.

        Raises:
            TypeError: if more than one positional argument is given, or if
                the booleans are passed both positionally and by keyword.
        '''
        if args:
            # Previously, surplus or conflicting positional arguments were
            # silently dropped; fail loudly instead.
            if len(args) > 1:
                raise TypeError(f"expected at most one positional argument, got {len(args)}")
            if "booleans" in kwargs:
                raise TypeError("'booleans' was given both positionally and as a keyword argument")
            kwargs["booleans"] = args[0]
        super().__init__(**kwargs)
class GroupFilter(Filter):
    ''' A ``GroupFilter`` represents the rows of a ``ColumnDataSource`` where the values of the categorical
    column column_name match the group variable.
    '''

    column_name = Required(String, help="""
    The name of the column to perform the group filtering operation on.
    """)

    group = Required(String, help="""
    The value of the column indicating the rows of data to keep.
    """)

    def __init__(self, *args, **kwargs) -> None:
        ''' Accept ``column_name`` and ``group`` either as two positional
        arguments (``GroupFilter("species", "setosa")``) or as keywords.

        Raises:
            TypeError: if a number of positional arguments other than 0 or 2
                is given, or if a value is passed both positionally and by
                keyword.
        '''
        if args:
            # Previously, a wrong number of positional arguments (or a
            # positional/keyword conflict) was silently ignored; fail loudly.
            if len(args) != 2:
                raise TypeError(f"expected either zero or two positional arguments, got {len(args)}")
            for name, value in zip(("column_name", "group"), args):
                if name in kwargs:
                    raise TypeError(f"{name!r} was given both positionally and as a keyword argument")
                kwargs[name] = value
        super().__init__(**kwargs)
class CustomJSFilter(Filter):
    ''' Filter data sources with a custom defined JavaScript function.

    .. warning::
        The explicit purpose of this Bokeh Model is to embed *raw JavaScript
        code* for a browser to execute. If any part of the code is derived
        from untrusted user inputs, then you must take appropriate care to
        sanitize the user input prior to passing to Bokeh.

    '''

    # explicit __init__ to support Init signatures
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    # "source" is disallowed as an arg name because the data source is
    # injected automatically as the ``source`` variable (see ``code`` below).
    args = RestrictedDict(String, AnyRef, disallow=("source",), help="""
    A mapping of names to Python objects. In particular those can be bokeh's models.
    These objects are made available to the callback's code snippet as the values of
    named parameters to the callback.
    """)

    code = String(default="", help="""
    A snippet of JavaScript code to filter data contained in a columnar data source.
    The code is made into the body of a function, and all of the named objects in
    ``args`` are available as parameters that the code can use. The variable
    ``source`` will contain the data source that is associated with the ``CDSView`` this
    filter is added to.

    The code should either return the indices of the subset or an array of booleans
    to use to subset data source rows.

    Example:

    .. code-block

        code = '''
        const indices = []
        for (let i = 0; i <= source.data['some_column'].length; i++) {
            if (source.data['some_column'][i] == 'some_value') {
                indices.push(i)
            }
        }
        return indices
        '''
    """)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause | b2ffed5e75fbe53b808d8540ce8987bc | 32.812749 | 107 | 0.537881 | 4.855263 | false | false | false | false |
bokeh/bokeh | examples/advanced/extensions/tool.py | 1 | 2169 | from bokeh.core.properties import Instance
from bokeh.models import ColumnDataSource, Tool
from bokeh.plotting import figure, show
from bokeh.util.compiler import TypeScript
CODE = """
import {GestureTool, GestureToolView} from "models/tools/gestures/gesture_tool"
import {ColumnDataSource} from "models/sources/column_data_source"
import {PanEvent} from "core/ui_events"
import * as p from "core/properties"
export class DrawToolView extends GestureToolView {
model: DrawTool
// this is executed when the pan/drag event starts
_pan_start(_e: PanEvent): void {
this.model.source.data = {x: [], y: []}
}
// this is executed on subsequent mouse/touch moves
_pan(e: PanEvent): void {
const {frame} = this.plot_view
const {sx, sy} = e
if (!frame.bbox.contains(sx, sy))
return
const x = frame.x_scale.invert(sx)
const y = frame.y_scale.invert(sy)
const {source} = this.model
source.get_array("x").push(x)
source.get_array("y").push(y)
source.change.emit()
}
// this is executed then the pan/drag ends
_pan_end(_e: PanEvent): void {}
}
export namespace DrawTool {
export type Attrs = p.AttrsOf<Props>
export type Props = GestureTool.Props & {
source: p.Property<ColumnDataSource>
}
}
export interface DrawTool extends DrawTool.Attrs {}
export class DrawTool extends GestureTool {
properties: DrawTool.Props
__view_type__: DrawToolView
constructor(attrs?: Partial<DrawTool.Attrs>) {
super(attrs)
}
tool_name = "Draw Tool"
tool_icon = "bk-tool-icon-lasso-select"
event_type = "pan" as "pan"
default_order = 12
static {
this.prototype.default_view = DrawToolView
this.define<DrawTool.Props>(({Ref}) => ({
source: [ Ref(ColumnDataSource) ],
}))
}
}
"""
class DrawTool(Tool):
    # Compile the TypeScript above as this model's client-side implementation.
    __implementation__ = TypeScript(CODE)
    # The data source the client-side tool pushes drawn points into.
    source = Instance(ColumnDataSource)
# Shared, initially empty source; the DrawTool fills it as the user drags.
source = ColumnDataSource(data=dict(x=[], y=[]))
plot = figure(x_range=(0,10), y_range=(0,10), title="Click and drag to draw",
              background_fill_color="#efefef", tools="")
plot.add_tools(DrawTool(source=source))
# The line updates live as the tool pushes points into the shared source.
plot.line('x', 'y', line_width=3, source=source)
show(plot)
| bsd-3-clause | da6776ba87189dfaf5c0437717b225c2 | 23.931034 | 79 | 0.680037 | 3.266566 | false | false | false | false |
bokeh/bokeh | src/bokeh/sampledata/daylight.py | 1 | 2648 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide 2013 Warsaw daylight hours.
License: free to use and redistribute (see `this FAQ`_ for details).
Sourced from http://www.sunrisesunset.com
This module contains one pandas Dataframe: ``daylight_warsaw_2013``.
.. rubric:: ``daylight_warsaw_2013``
:bokeh-dataframe:`bokeh.sampledata.daylight.daylight_warsaw_2013`
.. bokeh-sampledata-xref:: daylight
.. _this FAQ: https://www.sunrisesunset.com/faqs.asp#other_usage
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from pandas import DataFrame
# Bokeh imports
from ..util.sampledata import package_csv
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'daylight_warsaw_2013',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _read_data() -> DataFrame:
    ''' Load the packaged Warsaw 2013 sunrise/sunset CSV.

    Returns:
        DataFrame: columns ``Date`` (as ``datetime.date``) and ``Sunrise`` /
        ``Sunset`` (as ``datetime.time``)

    '''
    df = package_csv('daylight', 'daylight_warsaw_2013.csv', parse_dates=["Date", "Sunrise", "Sunset"])
    # Vectorized .dt accessors replace the original per-row
    # ``.map(lambda x: x.date()/x.time())`` calls with identical results.
    df["Date"] = df["Date"].dt.date
    df["Sunrise"] = df["Sunrise"].dt.time
    df["Sunset"] = df["Sunset"].dt.time
    return df
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
daylight_warsaw_2013 = _read_data()
| bsd-3-clause | b1f27b7faec06b662595c9b07c75eaea | 31.691358 | 103 | 0.36216 | 5.471074 | false | false | false | false |
bokeh/bokeh | examples/server/api/flask_gunicorn_embed.py | 1 | 2551 | import asyncio
from threading import Thread
from flask import Flask, render_template
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from bokeh.application import Application
from bokeh.application.handlers import FunctionHandler
from bokeh.embed import server_document
from bokeh.layouts import column
from bokeh.models import ColumnDataSource, Slider
from bokeh.plotting import figure
from bokeh.sampledata.sea_surface_temperature import sea_surface_temperature
from bokeh.server.server import BaseServer
from bokeh.server.tornado import BokehTornado
from bokeh.server.util import bind_sockets
from bokeh.themes import Theme
# This module is meant to be imported by gunicorn workers, not executed
# directly; bail out with usage instructions if run as a script.
if __name__ == '__main__':
    print('This script is intended to be run with gunicorn. e.g.')
    print()
    print('    gunicorn -w 4 flask_gunicorn_embed:app')
    print()
    print('will start the app on four processes')
    import sys
    sys.exit()
app = Flask(__name__)
def bkapp(doc):
    """Bokeh application: sea-surface temperature line plot with a
    day-based smoothing slider."""
    sst = sea_surface_temperature.copy()
    source = ColumnDataSource(data=sst)

    fig = figure(x_axis_type='datetime', y_range=(0, 25), y_axis_label='Temperature (Celsius)',
                 title="Sea Surface Temperature at 43.18, -70.43")
    fig.line('time', 'temperature', source=source)

    def on_smoothing_change(attr, old, new):
        # A value of 0 means "no smoothing"; otherwise take a rolling
        # mean over a window of ``new`` days.
        smoothed = sst if new == 0 else sst.rolling(f"{new}D").mean()
        source.data = ColumnDataSource.from_df(smoothed)

    slider = Slider(start=0, end=30, value=0, step=1, title="Smoothing by N Days")
    slider.on_change('value', on_smoothing_change)

    doc.add_root(column(slider, fig))
    doc.theme = Theme(filename="theme.yaml")
# can't use shortcuts here, since we are passing to low level BokehTornado
bkapp = Application(FunctionHandler(bkapp))
# This is so that if this app is run using something like "gunicorn -w 4" then
# each process will listen on its own port
sockets, port = bind_sockets("localhost", 0)
@app.route('/', methods=['GET'])
def bkapp_page():
    """Serve the page embedding the Bokeh app running on this worker's port."""
    # f-string instead of dated %-formatting; the URL is identical.
    script = server_document(f'http://localhost:{port}/bkapp')
    return render_template("embed.html", script=script, template="Flask")
def bk_worker():
    """Run a Bokeh server for this worker process on a background thread."""
    # This thread needs its own asyncio event loop for tornado.
    asyncio.set_event_loop(asyncio.new_event_loop())
    bokeh_tornado = BokehTornado({'/bkapp': bkapp}, extra_websocket_origins=["localhost:8000"])
    bokeh_http = HTTPServer(bokeh_tornado)
    # Reuse the sockets bound at module import, so each process keeps its own port.
    bokeh_http.add_sockets(sockets)
    server = BaseServer(IOLoop.current(), bokeh_tornado, bokeh_http)
    server.start()
    # Blocks for the lifetime of the thread.
    server.io_loop.start()
# Run the Bokeh server in a daemon thread so it exits with the main process.
# Passing daemon=True to the constructor is the idiomatic form of the
# previous ``t.daemon = True`` assignment.
t = Thread(target=bk_worker, daemon=True)
t.start()
| bsd-3-clause | 7af6bb896f949cddaff89545c70e77e4 | 31.291139 | 96 | 0.704822 | 3.598025 | false | false | false | false |
bokeh/bokeh | tests/conftest.py | 1 | 1759 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
from __future__ import annotations
# Test-support plugin modules loaded for the whole suite.
pytest_plugins = (
    "tests.support.plugins.ipython",
    "tests.support.plugins.managed_server_loop",
    "tests.support.plugins.networkx",
)
# Standard library imports
from inspect import iscoroutinefunction
# External imports
import _pytest
import pytest
def pytest_collection_modifyitems(items: list[_pytest.nodes.Item]) -> None:
    """Tag every coroutine test with the ``asyncio`` marker so that
    pytest-asyncio executes it."""
    asyncio_marker = pytest.mark.asyncio
    for collected in items:
        if not iscoroutinefunction(collected.obj):
            continue
        collected.add_marker(asyncio_marker)
# Unfortunately these seem to all need to be centrally defined at the top level
def pytest_addoption(parser: _pytest.config.argparsing.Parser) -> None:
    """Register all custom command-line options used across the test suite."""
    # plugins/selenium
    parser.addoption(
        "--driver", choices=('chrome', 'firefox', 'safari'), default='chrome', help='webdriver implementation')
    # plugins/bokeh_server
    parser.addoption(
        "--bokeh-port", dest="bokeh_port", type=int, default=5006, help="port on which Bokeh server resides"
    )
    # plugins/jupyter_notebook
    parser.addoption(
        "--notebook-port", type=int, default=6007, help="port on which Jupyter Notebook server resides"
    )
    # examples runner log destination
    parser.addoption(
        "--examples-log-file", dest="log_file", metavar="path", action="store", default='examples.log', help="where to write the complete log"
    )
    parser.addoption(
        "--no-js", action="store_true", default=False,
        help="only run python code and skip js")
| bsd-3-clause | 60f976418b420bf5a52880ac8f70e1af | 34.18 | 142 | 0.630472 | 4.081206 | false | true | false | false |
bokeh/bokeh | src/bokeh/client/connection.py | 1 | 16222 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Implements a very low level facility for communicating with a Bokeh
Server.
Users will always want to use :class:`~bokeh.client.session.ClientSession`
instead for standard usage.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from typing import TYPE_CHECKING, Any, Callable
# External imports
from tornado.httpclient import HTTPClientError, HTTPRequest
from tornado.ioloop import IOLoop
from tornado.websocket import WebSocketError, websocket_connect
# Bokeh imports
from ..core.types import ID
from ..protocol import Protocol
from ..protocol.exceptions import MessageError, ProtocolError, ValidationError
from ..protocol.receiver import Receiver
from ..util.strings import format_url_query_arguments
from ..util.tornado import fixup_windows_event_loop_policy
from .states import (
CONNECTED_AFTER_ACK,
CONNECTED_BEFORE_ACK,
DISCONNECTED,
NOT_YET_CONNECTED,
WAITING_FOR_REPLY,
ErrorReason,
State,
)
from .websocket import WebSocketClientConnectionWrapper
if TYPE_CHECKING:
from ..document import Document
from ..document.events import DocumentChangedEvent
from ..protocol.message import Message
from ..protocol.messages.server_info_reply import ServerInfo
from .session import ClientSession
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'ClientConnection',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class ClientConnection:
''' A low-level class used to connect to a Bokeh server.
.. autoclasstoc::
'''
_state: State
_loop: IOLoop
_until_predicate: Callable[[], bool] | None
    def __init__(self, session: ClientSession, websocket_url: str, io_loop: IOLoop | None = None,
            arguments: dict[str, str] | None = None, max_message_size: int = 20*1024*1024) -> None:
        ''' Opens a websocket connection to the server.

        Args:
            session: the ``ClientSession`` this connection belongs to
            websocket_url: URL of the server's websocket endpoint
            io_loop: Tornado loop to run on; a private loop is created when None
            arguments: optional query arguments for the connection request
            max_message_size: websocket message size limit, in bytes

        '''
        self._url = websocket_url
        self._session = session
        self._arguments = arguments
        self._max_message_size = max_message_size
        self._protocol = Protocol()
        self._receiver = Receiver(self._protocol)
        self._socket = None
        # The connection state machine starts out unconnected.
        self._state = NOT_YET_CONNECTED()
        # We can't use IOLoop.current because then we break
        # when running inside a notebook since ipython also uses it
        self._loop = io_loop if io_loop is not None else IOLoop()
        self._until_predicate = None
        self._server_info = None
# Properties --------------------------------------------------------------
    @property
    def connected(self) -> bool:
        ''' Whether we've connected the Websocket and have exchanged initial
        handshake messages.

        '''
        # Only CONNECTED_AFTER_ACK means the server's ACK has been received.
        return isinstance(self._state, CONNECTED_AFTER_ACK)
    # Read-only accessors over values fixed at construction time.

    @property
    def io_loop(self) -> IOLoop:
        ''' The Tornado ``IOLoop`` this connection is using. '''
        return self._loop

    @property
    def url(self) -> str:
        ''' The URL of the websocket this Connection is to. '''
        return self._url
    @property
    def error_reason(self) -> ErrorReason | None:
        ''' The reason of the connection loss encoded as a ``DISCONNECTED.ErrorReason`` enum value

        Returns None while the connection is not in a DISCONNECTED state.

        '''
        if not isinstance(self._state, DISCONNECTED):
            return None
        return self._state.error_reason
@property
def error_code(self) -> int | None:
''' If there was an error that caused a disconnect, this property holds
the error code. None otherwise.
'''
if not isinstance(self._state, DISCONNECTED):
return 0
return self._state.error_code
@property
def error_detail(self) -> str:
''' If there was an error that caused a disconnect, this property holds
the error detail. Empty string otherwise.
'''
if not isinstance(self._state, DISCONNECTED):
return ""
return self._state.error_detail
# Internal methods --------------------------------------------------------
def connect(self) -> None:
def connected_or_closed() -> bool:
# we should be looking at the same state here as the 'connected' property above, so connected
# means both connected and that we did our initial message exchange
return isinstance(self._state, (CONNECTED_AFTER_ACK, DISCONNECTED))
self._loop_until(connected_or_closed)
def close(self, why: str = "closed") -> None:
''' Close the Websocket connection.
'''
if self._socket is not None:
self._socket.close(1000, why)
def force_roundtrip(self) -> None:
''' Force a round-trip request/reply to the server, sometimes needed to
avoid race conditions. Mostly useful for testing.
Outside of test suites, this method hurts performance and should not be
needed.
Returns:
None
'''
self._send_request_server_info()
def loop_until_closed(self) -> None:
''' Execute a blocking loop that runs and executes event callbacks
until the connection is closed (e.g. by hitting Ctrl-C).
While this method can be used to run Bokeh application code "outside"
the Bokeh server, this practice is HIGHLY DISCOURAGED for any real
use case.
'''
if isinstance(self._state, NOT_YET_CONNECTED):
# we don't use self._transition_to_disconnected here
# because _transition is a coroutine
self._tell_session_about_disconnect()
self._state = DISCONNECTED()
else:
def closed() -> bool:
return isinstance(self._state, DISCONNECTED)
self._loop_until(closed)
def pull_doc(self, document: Document) -> None:
''' Pull a document from the server, overwriting the passed-in document
Args:
document : (Document)
The document to overwrite with server content.
Returns:
None
'''
msg = self._protocol.create('PULL-DOC-REQ')
reply = self._send_message_wait_for_reply(msg)
if reply is None:
raise RuntimeError("Connection to server was lost")
elif reply.header['msgtype'] == 'ERROR':
raise RuntimeError("Failed to pull document: " + reply.content['text'])
else:
reply.push_to_document(document)
def push_doc(self, document: Document) -> Message[Any]:
''' Push a document to the server, overwriting any existing server-side doc.
Args:
document : (Document)
A Document to push to the server
Returns:
The server reply
'''
msg = self._protocol.create('PUSH-DOC', document)
reply = self._send_message_wait_for_reply(msg)
if reply is None:
raise RuntimeError("Connection to server was lost")
elif reply.header['msgtype'] == 'ERROR':
raise RuntimeError("Failed to push document: " + reply.content['text'])
else:
return reply
def request_server_info(self) -> ServerInfo:
''' Ask for information about the server.
Returns:
A dictionary of server attributes.
'''
if self._server_info is None:
self._server_info = self._send_request_server_info()
return self._server_info
async def send_message(self, message: Message[Any]) -> None:
if self._socket is None:
log.info("We're disconnected, so not sending message %r", message)
else:
try:
sent = await message.send(self._socket)
log.debug("Sent %r [%d bytes]", message, sent)
except WebSocketError as e:
# A thing that happens is that we detect the
# socket closing by getting a None from
# read_message, but the network socket can be down
# with many messages still in the read buffer, so
# we'll process all those incoming messages and
# get write errors trying to send change
# notifications during that processing.
# this is just debug level because it's completely normal
# for it to happen when the socket shuts down.
log.debug("Error sending message to server: %r", e)
# error is almost certainly because
# socket is already closed, but be sure,
# because once we fail to send a message
# we can't recover
self.close(why="received error while sending")
# don't re-throw the error - there's nothing to
# do about it.
return None
# Private methods ---------------------------------------------------------
async def _connect_async(self) -> None:
formatted_url = format_url_query_arguments(self._url, self._arguments)
request = HTTPRequest(formatted_url)
try:
socket = await websocket_connect(request, subprotocols=["bokeh", self._session.token], max_message_size=self._max_message_size)
self._socket = WebSocketClientConnectionWrapper(socket)
except HTTPClientError as e:
await self._transition_to_disconnected(DISCONNECTED(ErrorReason.HTTP_ERROR, e.code, e.message))
return
except Exception as e:
log.info("Failed to connect to server: %r", e)
if self._socket is None:
await self._transition_to_disconnected(DISCONNECTED(ErrorReason.NETWORK_ERROR, None, "Socket invalid."))
else:
await self._transition(CONNECTED_BEFORE_ACK())
async def _handle_messages(self) -> None:
message = await self._pop_message()
if message is None:
await self._transition_to_disconnected(DISCONNECTED(ErrorReason.HTTP_ERROR, 500, "Internal server error."))
else:
if message.msgtype == 'PATCH-DOC':
log.debug("Got PATCH-DOC, applying to session")
self._session._handle_patch(message)
else:
log.debug("Ignoring %r", message)
# we don't know about whatever message we got, ignore it.
await self._next()
def _loop_until(self, predicate: Callable[[], bool]) -> None:
self._until_predicate = predicate
try:
# this runs self._next ONE time, but
# self._next re-runs itself until
# the predicate says to quit.
self._loop.add_callback(self._next)
self._loop.start()
except KeyboardInterrupt:
self.close("user interruption")
async def _next(self) -> None:
if self._until_predicate is not None and self._until_predicate():
log.debug("Stopping client loop in state %s due to True from %s",
self._state.__class__.__name__, self._until_predicate.__name__)
self._until_predicate = None
self._loop.stop()
return None
else:
log.debug("Running state " + self._state.__class__.__name__)
await self._state.run(self)
async def _pop_message(self) -> Message[Any] | None:
while True:
if self._socket is None:
return None
# log.debug("Waiting for fragment...")
fragment = None
try:
fragment = await self._socket.read_message()
except Exception as e:
# this happens on close, so debug level since it's "normal"
log.debug("Error reading from socket %r", e)
# log.debug("... got fragment %r", fragment)
if fragment is None:
# XXX Tornado doesn't give us the code and reason
log.info("Connection closed by server")
return None
try:
message = await self._receiver.consume(fragment)
if message is not None:
log.debug("Received message %r" % message)
return message
except (MessageError, ProtocolError, ValidationError) as e:
log.error("%r", e, exc_info=True)
self.close(why="error parsing message from server")
def _send_message_wait_for_reply(self, message: Message[Any]) -> Message[Any] | None:
waiter = WAITING_FOR_REPLY(message.header['msgid'])
self._state = waiter
send_result: list[None] = []
async def handle_message(message: Message[Any], send_result: list[None]) -> None:
result = await self.send_message(message)
send_result.append(result)
self._loop.add_callback(handle_message, message, send_result)
def have_send_result_or_disconnected() -> bool:
return len(send_result) > 0 or self._state != waiter
self._loop_until(have_send_result_or_disconnected)
def have_reply_or_disconnected() -> bool:
return self._state != waiter or waiter.reply is not None
self._loop_until(have_reply_or_disconnected)
return waiter.reply
def _send_patch_document(self, session_id: ID, event: DocumentChangedEvent) -> None:
msg = self._protocol.create('PATCH-DOC', [event])
self._loop.add_callback(self.send_message, msg)
def _send_request_server_info(self) -> ServerInfo:
msg = self._protocol.create('SERVER-INFO-REQ')
reply = self._send_message_wait_for_reply(msg)
if reply is None:
raise RuntimeError("Did not get a reply to server info request before disconnect")
return reply.content
def _tell_session_about_disconnect(self) -> None:
if self._session:
self._session._notify_disconnected()
async def _transition(self, new_state: State) -> None:
log.debug(f"transitioning to state {new_state.__class__.__name__}")
self._state = new_state
await self._next()
async def _transition_to_disconnected(self, dis_state: DISCONNECTED) -> None:
self._tell_session_about_disconnect()
await self._transition(dis_state)
async def _wait_for_ack(self) -> None:
message = await self._pop_message()
if message and message.msgtype == 'ACK':
log.debug(f"Received {message!r}")
await self._transition(CONNECTED_AFTER_ACK())
elif message is None:
await self._transition_to_disconnected(DISCONNECTED(ErrorReason.HTTP_ERROR, 500, "Internal server error."))
else:
raise ProtocolError(f"Received {message!r} instead of ACK")
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
fixup_windows_event_loop_policy()
| bsd-3-clause | ef171acb786da0cad0b451c917a24d9d | 37.169412 | 139 | 0.555912 | 4.73635 | false | false | false | false |
bokeh/bokeh | examples/server/app/gapminder/main.py | 1 | 2885 | ''' A gapminder chart using a population, life expectancy, and fertility dataset.
This example shows the data visualization capability of Bokeh to recreate charts.
'''
import pandas as pd
from bokeh.io import curdoc
from bokeh.layouts import layout
from bokeh.models import (Button, CategoricalColorMapper, ColumnDataSource,
HoverTool, Label, SingleIntervalTicker, Slider)
from bokeh.palettes import Spectral6
from bokeh.plotting import figure
from .data import process_data
fertility_df, life_expectancy_df, population_df_size, regions_df, years, regions_list = process_data()

# Combine the three indicator tables into one frame whose columns form an
# (indicator, year) MultiIndex, indexed by country.
df = pd.concat({'fertility': fertility_df,
                'life': life_expectancy_df,
                'population': population_df_size},
               axis=1)
data = {}

regions_df.rename({'Group':'region'}, axis='columns', inplace=True)

# Pre-compute one ColumnDataSource-compatible dict per year: take that year's
# slice of every indicator, drop the year column level, and join each
# country's region for coloring.
for year in years:
    df_year = df.iloc[:,df.columns.get_level_values(1)==year]
    df_year.columns = df_year.columns.droplevel(1)
    data[year] = df_year.join(regions_df.region).reset_index().to_dict('series')

source = ColumnDataSource(data=data[years[0]])

plot = figure(x_range=(1, 9), y_range=(20, 100), title='Gapminder Data', height=300)
plot.xaxis.ticker = SingleIntervalTicker(interval=1)
plot.xaxis.axis_label = "Children per woman (total fertility)"
plot.yaxis.ticker = SingleIntervalTicker(interval=20)
plot.yaxis.axis_label = "Life expectancy at birth (years)"

# Large faint year label in the plot background, updated during animation.
label = Label(x=1.1, y=22, text=str(years[0]), text_font_size='80px', text_color='#eeeeee')
plot.add_layout(label)

color_mapper = CategoricalColorMapper(palette=Spectral6, factors=regions_list)
plot.circle(
    x='fertility',
    y='life',
    size='population',
    source=source,
    fill_color={'field': 'region', 'transform': color_mapper},
    fill_alpha=0.8,
    line_color='#7c7e71',
    line_width=0.5,
    line_alpha=0.5,
    legend_group='region',
)
plot.add_tools(HoverTool(tooltips="@Country", show_arrow=False, point_policy='follow_mouse'))
def animate_update():
    """Advance the year slider by one step, wrapping back to the first year
    after the last one."""
    next_year = slider.value + 1
    slider.value = years[0] if next_year > years[-1] else next_year
def slider_update(attrname, old, new):
    """Slider callback: show the selected year's label and data."""
    selected = slider.value
    label.text = str(selected)
    source.data = data[selected]
slider = Slider(start=years[0], end=years[-1], value=years[0], step=1, title="Year")
slider.on_change('value', slider_update)

# Handle to the periodic callback, so the Play/Pause button can cancel it.
callback_id = None

def animate():
    """Toggle the Play/Pause button and (un)schedule the animation callback."""
    global callback_id
    if button.label == '► Play':
        button.label = '❚❚ Pause'
        # advance the slider by one year every 200 ms
        callback_id = curdoc().add_periodic_callback(animate_update, 200)
    else:
        button.label = '► Play'
        curdoc().remove_periodic_callback(callback_id)
button = Button(label='► Play', width=60)
button.on_event('button_click', animate)

# Plot on top, slider and play/pause controls below.
layout = layout([
    [plot],
    [slider, button],
], sizing_mode='scale_width')

curdoc().add_root(layout)
curdoc().title = "Gapminder"
| bsd-3-clause | 829aea602bfbb70c74fa94adf9574e68 | 29.913978 | 102 | 0.680348 | 3.197998 | false | false | false | false |
bokeh/bokeh | src/bokeh/sphinxext/bokeh_sitemap.py | 1 | 3792 | # -----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
""" Generate a ``sitemap.xml`` to aid with search indexing.
``sitemap.xml`` is an XML "urlset" document listing all the pages in the
docs site. Each URL appears in its own ``<url>`` entry. It is machine
readable and used by search engines to know what pages are available for
indexing. All that is required to generate the sitemap is to list this
module ``bokeh.sphinxext.bokeh_sitemap`` in the list of extensions in the
Sphinx configuration file ``conf.py``.
"""
# -----------------------------------------------------------------------------
# Boilerplate
# -----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
# Standard library imports
from html import escape
from os.path import join
# External imports
from sphinx.errors import SphinxError
from sphinx.util import status_iterator
# Bokeh imports
from . import PARALLEL_SAFE
# -----------------------------------------------------------------------------
# Globals and constants
# -----------------------------------------------------------------------------
__all__ = (
"build_finished",
"html_page_context",
"setup",
)
# -----------------------------------------------------------------------------
# General API
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Dev API
# -----------------------------------------------------------------------------
def html_page_context(app, pagename, templatename, context, doctree):
    """Accumulate the full URL of every HTML page as Sphinx renders it.

    The collected links are written out by the ``build-finished`` handler.
    """
    base, version = context["SITEMAP_BASE_URL"], context["version"]
    app.sitemap_links.add(f"{base}{version}/{pagename}.html")
def build_finished(app, exception):
    """Write the collected page links out as ``sitemap.xml``.

    Called on Sphinx's ``build-finished`` event, after all pages have been
    rendered and ``html_page_context`` has filled ``app.sitemap_links``.
    """
    filename = join(app.outdir, "sitemap.xml")

    links_iter = status_iterator(sorted(app.sitemap_links), "adding links to sitemap... ", "brown", len(app.sitemap_links), app.verbosity)

    try:
        with open(filename, "w") as f:
            f.write(_header)
            for link in links_iter:
                # NOTE(review): downgrading https:// to http:// looks deliberate
                # but is undocumented -- confirm before removing.
                f.write(_item % escape(link.strip().replace("https://", "http://")))  # TODO (bev) get rid of old style string subsitution
            f.write(_footer)
    except OSError as e:
        # fix: the message previously referred to "sitemap.txt" although the
        # file written is sitemap.xml; also chain the original cause
        raise SphinxError(f"cannot write sitemap.xml, reason: {e}") from e
def setup(app):
    """ Required Sphinx extension setup function. """
    app.sitemap_links = set()
    for event, handler in (("html-page-context", html_page_context),
                           ("build-finished", build_finished)):
        app.connect(event, handler)
    return PARALLEL_SAFE
# -----------------------------------------------------------------------------
# Private API
# -----------------------------------------------------------------------------
_header = """\
<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
"""
_item = """\
<url>
<loc>%s</loc>
</url>
"""
_footer = """\
</urlset>
"""
# -----------------------------------------------------------------------------
# Code
# -----------------------------------------------------------------------------
| bsd-3-clause | b56691ce5087da11dd8688f08a687c7b | 31.410256 | 138 | 0.444884 | 5.002639 | false | false | false | false |
bokeh/bokeh | src/bokeh/protocol/message.py | 1 | 11833 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide a base class for all Bokeh Server Protocol message types.
Boker messages are comprised of a sequence of JSON fragments. Specified as
Python JSON-like data, messages have the general form:
.. code-block:: python
[
# these are required
b'{header}', # serialized header dict
b'{metadata}', # serialized metadata dict
b'{content}, # serialized content dict
# these are optional, and come in pairs; header contains num_buffers
b'{buf_header}', # serialized buffer header dict
b'array' # raw buffer payload data
...
]
The ``header`` fragment will have the form:
.. code-block:: python
header = {
# these are required
'msgid' : <str> # a unique id for the message
'msgtype' : <str> # a message type, e.g. 'ACK', 'PATCH-DOC', etc
# these are optional
'num_buffers' : <int> # the number of additional buffers, if any
}
The ``metadata`` fragment may contain any arbitrary information. It is not
processed by Bokeh for any purpose, but may be useful for external
monitoring or instrumentation tools.
The ``content`` fragment is defined by the specific message type.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import json
from typing import (
TYPE_CHECKING,
Any,
ClassVar,
Dict,
Generic,
Tuple,
TypedDict,
TypeVar,
)
# Bokeh imports
import bokeh.util.serialization as bkserial
# Bokeh imports
from ..core.json_encoder import serialize_json
from ..core.serialization import Buffer, Serialized
from ..core.types import ID
from .exceptions import MessageError, ProtocolError
if TYPE_CHECKING:
from typing_extensions import TypeAlias
from ..client.websocket import WebSocketClientConnectionWrapper
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'Message',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class _Header(TypedDict):
    # Fields required in every message header.
    msgid: ID     # unique id for this message
    msgtype: str  # message type name, e.g. 'ACK', 'PATCH-DOC'
class Header(_Header, total=False):
    # Optional header fields: reqid ties a reply to the request's msgid;
    # num_buffers is the count of binary buffers that follow the message.
    reqid: ID
    num_buffers: int
class BufferHeader(TypedDict):
    # Header fragment sent immediately before each raw binary buffer payload.
    id: ID
# Type of the deserialized ``content`` fragment; specific to each message type.
Content = TypeVar("Content")
# Arbitrary, unprocessed metadata fragment.
Metadata: TypeAlias = Dict[str, Any]
# A (buffer header, raw payload) pair.
BufferRef: TypeAlias = Tuple[BufferHeader, bytes]
class Empty(TypedDict):
    # Content type for messages that carry no content fields.
    pass
class Message(Generic[Content]):
    ''' The Message base class encapsulates creating, assembling, and
    validating the integrity of Bokeh Server messages. Additionally, it
    provides hooks for subclasses to define specific message types.

    '''

    msgtype: ClassVar[str]

    # The three JSON fragments are cached alongside their parsed values;
    # each ``*_json`` cache is invalidated (set to None) whenever the
    # corresponding value is reassigned.
    _header: Header
    _header_json: str | None
    _content: Content
    _content_json: str | None
    _metadata: Metadata
    _metadata_json: str | None
    _buffers: list[Buffer]

    def __init__(self, header: Header, metadata: Metadata, content: Content) -> None:
        ''' Initialize a new message from header, metadata, and content
        dictionaries.

        To assemble a message from existing JSON fragments, use the
        ``assemble`` method.

        To create new messages with automatically generated headers,
        use subclass ``create`` methods.

        Args:
            header (JSON-like) :
            metadata (JSON-like) :
            content (JSON-like) :

        '''
        self.header = header
        self.metadata = metadata
        self.content = content
        self._buffers = []

    def __repr__(self) -> str:
        return f"Message {self.msgtype!r} content: {self.content!r}"

    @classmethod
    def assemble(cls, header_json: str, metadata_json: str, content_json: str) -> Message[Content]:
        ''' Creates a new message, assembled from JSON fragments.

        Args:
            header_json (``JSON``) :
            metadata_json (``JSON``) :
            content_json (``JSON``) :

        Returns:
            Message subclass

        Raises:
            MessageError

        '''
        try:
            header = json.loads(header_json)
        except ValueError:
            raise MessageError("header could not be decoded")

        try:
            metadata = json.loads(metadata_json)
        except ValueError:
            raise MessageError("metadata could not be decoded")

        try:
            content = json.loads(content_json)
        except ValueError:
            raise MessageError("content could not be decoded")

        msg = cls(header, metadata, content)

        # keep the original JSON so it need not be re-serialized on send
        msg._header_json = header_json
        msg._metadata_json = metadata_json
        msg._content_json = content_json

        return msg

    def add_buffer(self, buffer: Buffer) -> None:
        ''' Associate a buffer header and payload with this message.

        Args:
            buf_header (``JSON``) : a buffer header
            buf_payload (``JSON`` or bytes) : a buffer payload

        Returns:
            None

        Raises:
            MessageError

        '''
        if 'num_buffers' in self._header:
            self._header['num_buffers'] += 1
        else:
            self._header['num_buffers'] = 1

        # header changed, so its cached JSON is stale
        self._header_json = None
        self._buffers.append(buffer)

    def assemble_buffer(self, buf_header: BufferHeader, buf_payload: bytes) -> None:
        ''' Add a buffer header and payload that we read from the socket.

        This differs from add_buffer() because we're validating vs.
        the header's num_buffers, instead of filling in the header.

        Args:
            buf_header (``JSON``) : a buffer header
            buf_payload (``JSON`` or bytes) : a buffer payload

        Returns:
            None

        Raises:
            ProtocolError

        '''
        num_buffers = self.header.get("num_buffers", 0)
        if num_buffers <= len(self._buffers):
            raise ProtocolError(f"too many buffers received expecting {num_buffers}")
        self._buffers.append(Buffer(buf_header["id"], buf_payload))

    async def write_buffers(self, conn: WebSocketClientConnectionWrapper, locked: bool = True) -> int:
        ''' Write any buffer headers and payloads to the given connection.

        Args:
            conn (object) :
                May be any object with a ``write_message`` method. Typically,
                a Tornado ``WSHandler`` or ``WebSocketClientConnection``

            locked (bool) :

        Returns:
            int : number of bytes sent

        '''
        if conn is None:
            raise ValueError("Cannot write_buffers to connection None")
        sent = 0
        for buffer in self._buffers:
            # each buffer is sent as a JSON header frame then a binary frame
            header = json.dumps(buffer.ref)
            payload = buffer.to_bytes()
            await conn.write_message(header, locked=locked)
            await conn.write_message(payload, binary=True, locked=locked)
            sent += len(header) + len(payload)
        return sent

    @classmethod
    def create_header(cls, request_id: ID | None = None) -> Header:
        ''' Return a message header fragment dict.

        Args:
            request_id (str or None) :
                Message ID of the message this message replies to

        Returns:
            dict : a message header

        '''
        header = Header(
            msgid   = bkserial.make_id(),
            msgtype = cls.msgtype,
        )
        if request_id is not None:
            header['reqid'] = request_id
        return header

    async def send(self, conn: WebSocketClientConnectionWrapper) -> int:
        ''' Send the message on the given connection.

        Args:
            conn (WebSocketHandler) : a WebSocketHandler to send messages

        Returns:
            int : number of bytes sent

        '''
        if conn is None:
            raise ValueError("Cannot send to connection None")

        # hold the write lock for the whole multi-frame message so frames
        # from concurrent senders cannot interleave
        with await conn.write_lock.acquire():
            sent = 0

            await conn.write_message(self.header_json, locked=False)
            sent += len(self.header_json)

            # uncomment this to make it a lot easier to reproduce lock-related bugs
            #await asyncio.sleep(0.1)
            await conn.write_message(self.metadata_json, locked=False)
            sent += len(self.metadata_json)

            # uncomment this to make it a lot easier to reproduce lock-related bugs
            #await asyncio.sleep(0.1)
            await conn.write_message(self.content_json, locked=False)
            sent += len(self.content_json)

            sent += await self.write_buffers(conn, locked=False)

            return sent

    @property
    def complete(self) -> bool:
        ''' Returns whether all required parts of a message are present.

        Returns:
            bool : True if the message is complete, False otherwise

        '''
        return self.header is not None and \
            self.metadata is not None and \
            self.content is not None and \
            self.header.get('num_buffers', 0) == len(self._buffers)

    @property
    def payload(self) -> Serialized[Content]:
        # content plus buffers, as consumed by the serializer
        return Serialized(self.content, self.buffers)

    # header fragment properties

    @property
    def header(self) -> Header:
        return self._header

    @header.setter
    def header(self, value: Header) -> None:
        self._header = value
        self._header_json = None  # invalidate cached JSON

    @property
    def header_json(self) -> str:
        # lazily (re)serialized; cleared whenever ``header`` is reassigned
        if not self._header_json:
            self._header_json = json.dumps(self.header)
        return self._header_json

    # content fragment properties

    @property
    def content(self) -> Content:
        return self._content

    @content.setter
    def content(self, value: Content) -> None:
        self._content = value
        self._content_json = None  # invalidate cached JSON

    @property
    def content_json(self) -> str:
        # content serializes via Bokeh's serializer so buffers are included
        if not self._content_json:
            self._content_json = serialize_json(self.payload)
        return self._content_json

    # metadata fragment properties

    @property
    def metadata(self) -> Metadata:
        return self._metadata

    @metadata.setter
    def metadata(self, value: Metadata) -> None:
        self._metadata = value
        self._metadata_json = None  # invalidate cached JSON

    @property
    def metadata_json(self) -> str:
        if not self._metadata_json:
            self._metadata_json = json.dumps(self.metadata)
        return self._metadata_json

    # buffer properties

    @property
    def buffers(self) -> list[Buffer]:
        # defensive copy so callers cannot mutate internal state
        return list(self._buffers)
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause | 2a8fd597cee12b8cf8f8bddf7ff1bdc0 | 27.651332 | 102 | 0.538156 | 4.819959 | false | false | false | false |
bokeh/bokeh | examples/server/app/image_blur.py | 1 | 1415 | ''' An image blurring example. This sample shows the capability
of Bokeh to transform images to have certain effects.
.. note::
This example needs the scipy package to run.
'''
import numpy as np
import scipy.misc
try:
    from numba import njit
except ImportError:
    import warnings
    warnings.warn("numba is not installed. This example will be painfully slow.")

    # PEP 8 (E731): define the no-op fallback decorator with ``def`` rather
    # than assigning a lambda to a name.
    def njit(f):
        """Identity decorator used when numba is not available."""
        return f
from bokeh.io import curdoc
from bokeh.layouts import column
from bokeh.models import ColumnDataSource, Slider
from bokeh.palettes import gray
from bokeh.plotting import figure
# Load the sample image as int32, flipped vertically so its origin matches
# Bokeh's bottom-left image convention.
image = scipy.misc.ascent().astype(np.int32)[::-1, :]
w, h = image.shape

source = ColumnDataSource(data=dict(image=[image]))

p = figure(x_range=(0, w), y_range=(0, h))
p.image('image', x=0, y=0, dw=w, dh=h, palette=gray(256), source=source)
@njit
def blur(outimage, image, amt):
    """Box-blur ``image`` into ``outimage`` with an ``amt`` x ``amt`` window.

    Args:
        outimage: 2D array that receives the blurred pixels. Border pixels
            (within ``amt`` of any edge) are left untouched.
        image: 2D source array; must have the same shape as ``outimage``.
        amt: window size in pixels; callers in this example always pass an
            odd value (``2*n + 1``).

    Returns:
        None; ``outimage`` is modified in place.
    """
    # fix: derive the bounds from the input itself instead of relying on the
    # module-level globals ``w`` and ``h`` matching ``image``'s shape
    w, h = image.shape
    for i in range(amt, w-amt):
        for j in range(amt, h-amt):
            px = 0.
            # NOTE(review): for odd ``amt`` this window spans -amt//2 .. amt//2 - 1,
            # i.e. it is off-center by one pixel -- confirm whether intended
            for iw in range(-amt//2, amt//2):
                for jh in range(-amt//2, amt//2):
                    px += image[i+iw, j+jh]
            outimage[i, j] = px/(amt*amt)
def update(attr, old, new):
    """Slider callback: blur a copy of the original image and push it to the plot."""
    out = image.copy()
    # 2*new + 1 keeps the blur window size odd for every slider value
    blur(out, image, 2*new + 1)
    source.data.update(image=[out])

slider = Slider(title="Blur Factor", start=0, end=10, value=0)
slider.on_change('value', update)

curdoc().add_root(column(p, slider))
curdoc().title = "Image Blur"
bokeh/bokeh | examples/topics/timeseries/missing_dates.py | 1 | 1389 | ''' A `Candlestick chart`_ based on stock price data. This example demonstrates
possible technique for handling missing dates.
.. bokeh-example-metadata::
:sampledata: stocks
:apis: bokeh.plotting.figure.segment, bokeh.plotting.figure.vbar
:keywords: candlestick
.. _Candlestick chart: https://en.wikipedia.org/wiki/Candlestick_chart
'''
import pandas as pd
from bokeh.plotting import figure, show
from bokeh.sampledata.stocks import MSFT
df = pd.DataFrame(MSFT)[60:120]
df["date"] = pd.to_datetime(df["date"])

# masks for up (close > open) and down (open > close) days
inc = df.close > df.open
dec = df.open > df.close

TOOLS = "pan,wheel_zoom,box_zoom,reset,save"

p = figure(tools=TOOLS, width=1000, height=400,
           title="MSFT Candlestick without missing dates",
           background_fill_color="#efefef")
p.xaxis.major_label_orientation = 0.8 # radians
p.x_range.range_padding = 0.05

# Plotting against the integer row index (rather than actual dates) removes
# the gaps for weekends/holidays; the axis labels are then overridden to
# show the corresponding date strings.
# map dataframe indices to date strings and use as label overrides
p.xaxis.major_label_overrides = {
    i: date.strftime('%b %d') for i, date in zip(df.index, df["date"])
}

# one tick per week (5 weekdays)
p.xaxis.ticker = list(range(df.index[0], df.index[-1], 5))

# high-low wicks, then down (red) and up (white) candle bodies
p.segment(df.index, df.high, df.index, df.low, color="black")
p.vbar(df.index[dec], 0.6, df.open[dec], df.close[dec], color="#eb3c40")
p.vbar(df.index[inc], 0.6, df.open[inc], df.close[inc], fill_color="white",
       line_color="#49a3a3", line_width=2)

show(p)
| bsd-3-clause | 697e42f8afbc36afcf8b2bb0c1f259a3 | 29.866667 | 79 | 0.696184 | 2.899791 | false | false | false | false |
bokeh/bokeh | src/bokeh/sampledata/browsers.py | 1 | 3112 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Browser market share by version from November 2013.
License: `CC BY-SA 3.0`_
Sourced from http://gs.statcounter.com/#browser_version-ww-monthly-201311-201311-bar
Icon images sourced from https://github.com/alrra/browser-logos
This module contains one pandas Dataframe: ``browsers_nov_2013``.
.. rubric:: ``browsers_nov_2013``
:bokeh-dataframe:`bokeh.sampledata.browsers.browsers_nov_2013`
The module also contains a dictionary ``icons`` with base64-encoded PNGs of the
logos for Chrome, Firefox, Safari, Opera, and IE.
.. bokeh-sampledata-xref:: browsers
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from os.path import join
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from pandas import DataFrame
# Bokeh imports
from ..util.sampledata import package_csv, package_path
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'browsers_nov_2013',
'icons',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _read_data() -> tuple[DataFrame, dict[str, bytes]]:
    ''' Load the browser market-share table and the browser logo images.

    Returns:
        (DataFrame, dict) : the share table with ``Browser`` and
        ``VersionNumber`` columns added, and a mapping of browser name
        to raw PNG bytes for its logo.
    '''
    data = package_csv('browsers', 'browsers_nov_2013.csv', names=["Version", "Share"], skiprows=1)
    # "Chrome 31" -> ["Chrome", "31"]; entries with no version keep one part
    parts = data.Version.map(lambda v: v.rsplit(" ", 1))
    data["Browser"] = parts.map(lambda p: p[0])
    data["VersionNumber"] = parts.map(lambda p: "0" if len(p) != 2 else p[1])
    logos: dict[str, bytes] = {}
    for name in ("Chrome", "Firefox", "Safari", "Opera", "IE"):
        icon_file = package_path(join("icons", name.lower() + "_32x32.png"))
        with open(icon_file, "rb") as f:
            logos[name] = f.read()
    return data, logos
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# Loaded once at import time; sampledata modules expose module-level values.
browsers_nov_2013, icons = _read_data()
| bsd-3-clause | 7ddcc735d7113614510351dc5622042a | 33.577778 | 97 | 0.404242 | 5.051948 | false | false | false | false |
bokeh/bokeh | src/bokeh/sphinxext/bokeh_directive.py | 1 | 2725 | # -----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
""" Provide a base class and useful functions for Bokeh Sphinx directives.
"""
# -----------------------------------------------------------------------------
# Boilerplate
# -----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
# Standard library imports
import re
# External imports
from docutils import nodes
from docutils.statemachine import ViewList
from sphinx.util.docutils import SphinxDirective
from sphinx.util.nodes import nested_parse_with_titles
# -----------------------------------------------------------------------------
# Globals and constants
# -----------------------------------------------------------------------------
# taken from Sphinx autodoc
py_sig_re = re.compile(
r"""^ ([\w.]*\.)? # class name(s)
(\w+) \s* # thing name
(?: \((.*)\) # optional: arguments
(?:\s* -> \s* (.*))? # return annotation
)? $ # and nothing more
""",
re.VERBOSE,
)
__all__ = (
"BokehDirective",
"py_sig_re",
)
# -----------------------------------------------------------------------------
# General API
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Dev API
# -----------------------------------------------------------------------------
class BokehDirective(SphinxDirective):
    ''' Base class for Bokeh Sphinx directives.

    Provides a helper to parse arbitrary reST text into docutils nodes in the
    context of the current document.
    '''

    def parse(self, rst_text, annotation):
        ''' Parse ``rst_text`` and return the resulting docutils child nodes.

        ``annotation`` is the source annotation attached to each parsed line
        (used by Sphinx for error reporting).
        '''
        lines = ViewList()
        for entry in rst_text.split("\n"):
            lines.append(entry, annotation)
        container = nodes.paragraph()
        container.document = self.state.document
        nested_parse_with_titles(self.state, lines, container)
        return container.children
# -----------------------------------------------------------------------------
# Private API
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Code
# -----------------------------------------------------------------------------
| bsd-3-clause | 49d743dc7b11dc97f0b8f4a797dccf34 | 33.0625 | 79 | 0.328073 | 6.293303 | false | false | false | false |
bokeh/bokeh | tests/unit/bokeh/test___init__.py | 1 | 4614 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import re
import warnings
# Bokeh imports
from bokeh.util.warnings import BokehDeprecationWarning, BokehUserWarning
from tests.support.util.api import verify_all
from tests.support.util.types import Capture
# Module under test
import bokeh as b # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
# The expected public API of the top-level ``bokeh`` package.
ALL = (
    '__version__',
    'license',
    'sampledata',
)
# Exact text that ``bokeh.license()`` must print (compared verbatim below).
_LICENSE = """\
Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
Neither the name of Anaconda nor the names of any contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
"""
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
# Generated test asserting that bokeh's actual __all__ matches ALL exactly.
Test___all__ = verify_all(b, ALL)
def test___version___type() -> None:
    ''' The package version must be exposed as a string. '''
    version = b.__version__
    assert isinstance(version, str)
def test___version___defined() -> None:
    ''' The package version must look like "X.Y.Z" with an optional
    ".dev"/".rc" suffix, ignoring any local ".dirty" marker.
    '''
    VERSION_PAT = re.compile(r"^(\d+\.\d+\.\d+)((?:\.dev|\.rc).*)?")
    version = b.__version__
    # NOTE: the previous code used version.strip(".dirty"), but str.strip
    # removes any of the characters {'.', 'd', 'i', 'r', 't', 'y'} from BOTH
    # ends rather than the literal suffix, which can corrupt valid versions.
    # Remove the exact ".dirty" suffix instead.
    if version.endswith(".dirty"):
        version = version[: -len(".dirty")]
    assert VERSION_PAT.match(version)
def test_license(capsys: Capture) -> None:
    ''' ``bokeh.license()`` must print exactly the canonical license text. '''
    b.license()
    captured = capsys.readouterr()
    assert captured.out == _LICENSE
class TestWarnings:
    ''' Bokeh's custom warning categories must format tersely; other warnings
    keep the default format.
    '''

    @pytest.mark.parametrize('cat', (BokehDeprecationWarning, BokehUserWarning))
    def test_bokeh_custom(self, cat) -> None:
        formatted = warnings.formatwarning("message", cat, "line", "lineno")
        assert formatted == f"{cat.__name__}: message\n"

    def test_general_default(self) -> None:
        formatted = warnings.formatwarning("message", RuntimeWarning, "line", "lineno")
        assert formatted == "line:lineno: RuntimeWarning: message\n"

    # TODO (bev) issue with this one test and 3.9 support PR
    @pytest.mark.skip
    def test_filters(self) -> None:
        expected = (
            ('always', None, BokehUserWarning, None, 0),
            ('always', None, BokehDeprecationWarning, None, 0),
        )
        for entry in expected:
            assert entry in warnings.filters
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause | 1c54b6ade97e737f07ff15d200cfdc6d | 38.435897 | 85 | 0.531643 | 5.267123 | false | true | false | false |
bokeh/bokeh | src/bokeh/core/enums.py | 1 | 16407 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Common enumerations to be used together with |Enum| property.
This module provides many pre-defined enumerations, as well as functions
for creating new enumerations.
New enumerations can be created using the |enumeration| function:
.. code-block:: python
#: Specify a nautically named side, port or starboard
MyEnum = enumeration("port", "starboard")
Typically, enumerations are used to define |Enum| properties:
.. code-block:: python
from bokeh.model import Model
from bokeh.core.properties import Enum
class MyModel(Model):
location = Enum(MyEnum, help="""
Whether the thing should be a port or starboard.
""")
Enumerations have a defined order and support iteration:
.. code-block:: python
>>> for loc in MyEnum:
... print(loc)
...
port
starboard
as well as containment tests:
.. code-block:: python
>>> "port" in MyEnum
True
Enumerations can be easily documented in Sphinx documentation with the
:ref:`bokeh.sphinxext.bokeh_enum` Sphinx extension.
----
.. autofunction:: bokeh.core.enums.enumeration
----
.. |Enum| replace:: :class:`~bokeh.core.properties.Enum`
.. |enumeration| replace:: :func:`~bokeh.core.enums.enumeration`
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from typing import (
Any,
Iterator,
Literal,
get_args,
)
# Bokeh imports
from .. import colors, palettes
from ..util.strings import nice_join
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'Align',
'Anchor',
'AngleUnits',
'AutosizeMode',
'ButtonType',
'CalendarPosition',
'ContextWhich',
'CoordinateUnits',
'DashPattern',
'DateFormat',
'DatetimeUnits',
'Dimension',
'Dimensions',
'Direction',
'Enumeration',
'enumeration',
'FlowMode',
'FontStyle',
'HAlign',
'HatchPattern',
'HatchPatternAbbreviation',
'HoldPolicy',
'HorizontalLocation',
'ImageOrigin',
'JitterRandomDistribution',
'LatLon',
'LegendClickPolicy',
'LegendLocation',
'LineCap',
'LineDash',
'LineJoin',
'Location',
'MapType',
'MarkerType',
'NamedColor',
'NumeralLanguage',
'Orientation',
'OutputBackend',
'PaddingUnits',
'Palette',
'Place',
'RenderLevel',
'ResetPolicy',
'RoundingFunction',
'ScrollbarPolicy',
'SelectionMode',
'SizingMode',
'SizingPolicy',
'SortDirection',
'SpatialUnits',
'StartEnd',
'StepMode',
'TextAlign',
'TextBaseline',
'TextureRepetition',
'TickLabelOrientation',
'ToolIcon',
'TooltipAttachment',
'TooltipFieldFormatter',
'TrackPolicy',
'VAlign',
'VerticalAlign',
'VerticalLocation',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class Enumeration:
    ''' Represent an enumerated collection of values.

    .. note::
        Instances of ``Enumeration`` typically should not be constructed
        directly. Instead, use the |enumeration| function.
    '''
    __slots__ = ()

    # these are populated on each generated subclass by the |enumeration|
    # factory function below
    _values: list[str]          # allowed values, in declaration order
    _default: str               # first declared value, used as property default
    _case_sensitive: bool       # whether containment tests respect case
    _quote: bool                # whether __str__ quotes the values

    def __iter__(self) -> Iterator[str]:
        return iter(self._values)

    def __contains__(self, value: str) -> bool:
        if self._case_sensitive:
            return value in self._values
        # FIX: previously only the probe value was lowercased, so enumerations
        # declared with non-lowercase values could never match in the
        # case-insensitive mode. Fold both sides before comparing.
        folded = value.lower()
        return any(folded == v.lower() for v in self._values)

    def __str__(self) -> str:
        fn = repr if self._quote else str
        return f"Enumeration({', '.join(fn(x) for x in self._values)})"

    def __len__(self) -> int:
        return len(self._values)

    __repr__ = __str__
def enumeration(*values: Any, case_sensitive: bool = True, quote: bool = False) -> Enumeration:
    ''' Create an |Enumeration| object from a sequence of values.

    Call ``enumeration`` with a sequence of (unique) strings to create an
    Enumeration object:

    .. code-block:: python

        #: Specify the horizontal alignment for rendering text
        TextAlign = enumeration("left", "right", "center")

    Args:
        values (str) : string enumeration values, passed as positional arguments

            The order of arguments is the order of the enumeration, and the
            first element will be considered the default value when used
            to create |Enum| properties.

    Keyword Args:
        case_sensitive (bool, optional) :
            Whether validation should consider case or not (default: True)

        quote (bool, optional):
            Whether values should be quoted in the string representations
            (default: False)

    Raises:
        ValueError if values empty, if any value is not a string or not unique

    Returns:
        Enumeration
    '''
    # a single typing.Literal[...] may be passed in place of explicit strings
    if len(values) == 1 and hasattr(values[0], "__args__"):
        values = get_args(values[0])

    all_nonempty_strings = bool(values) and all(isinstance(v, str) and v for v in values)
    if not all_nonempty_strings:
        raise ValueError(f"expected a non-empty sequence of strings, got {nice_join(values)}")
    if len(set(values)) != len(values):
        raise ValueError(f"enumeration items must be unique, got {nice_join(values)}")

    # each value is exposed as a same-named attribute on the instance
    attrs: dict[str, Any] = {}
    for v in values:
        attrs[v] = v
    attrs["_values"] = list(values)
    attrs["_default"] = values[0]
    attrs["_case_sensitive"] = case_sensitive
    attrs["_quote"] = quote

    return type("Enumeration", (Enumeration,), attrs)()
#: Alignment (vertical or horizontal) of a child item
Align = enumeration("start", "center", "end")
# FIX: the HAlign/VAlign value tuples were swapped. Horizontal alignment
# selects among left/center/right; vertical alignment among top/center/bottom.
#: Horizontal alignment of a child item
HAlign = enumeration("left", "center", "right")
#: Vertical alignment of a child item
VAlign = enumeration("top", "center", "bottom")
#: Specify an anchor position on a box/frame
Anchor = enumeration(
    "top_left", "top_center", "top_right",
    "center_left", "center_center", "center_right",
    "bottom_left", "bottom_center", "bottom_right",
    "top", "left", "center", "right", "bottom",
)
#: Specify the units for an angle value
AngleUnits = enumeration("deg", "rad", "grad", "turn")
#: Specify autosize mode for DataTable
AutosizeMode = enumeration("fit_columns", "fit_viewport", "force_fit", "none")
#: Specify a style for button widgets
ButtonType = enumeration("default", "primary", "success", "warning", "danger", "light")
#: Specify a position for the DatePicker calendar to display
CalendarPosition = enumeration("auto", "above", "below")
#: Specify which tick to add additional context to
ContextWhich = enumeration("start", "center", "end", "all")
#: Specify units for mapping coordinates
CoordinateUnits = enumeration("canvas", "screen", "data")
#: Specify a named dash pattern for stroking lines (same values as LineDash)
DashPattern = enumeration("solid", "dashed", "dotted", "dotdash", "dashdot")
#: Specify a format for printing dates
DateFormat = enumeration("ATOM", "W3C", "RFC-3339", "ISO-8601", "COOKIE", "RFC-822",
                         "RFC-850", "RFC-1036", "RFC-1123", "RFC-2822", "RSS", "TIMESTAMP")
#: Specify a date/time scale
DatetimeUnits = enumeration("microseconds", "milliseconds", "seconds", "minsec",
                            "minutes", "hourmin", "hours", "days", "months", "years")
#: Specify a vertical/horizontal dimension
Dimension = enumeration("width", "height")
#: Specify one or both of the vertical/horizontal dimensions
DimensionsType = Literal["width", "height", "both"]
Dimensions = enumeration("width", "height", "both")
#: Specify a stroke direction for circles, wedges, etc.
Direction = enumeration("clock", "anticlock")
#: Specify the flow behavior in CSS layouts.
FlowMode = enumeration("block", "inline")
#: Specify the font style for rendering text
FontStyle = enumeration("normal", "italic", "bold", "bold italic")
# (one-letter abbreviation, full pattern name) pairs; the two hatch-pattern
# enumerations below are derived from the respective columns of this table
_hatch_patterns = (
    (" ", "blank"),
    (".", "dot"),
    ("o", "ring"),
    ("-", "horizontal_line"),
    ("|", "vertical_line"),
    ("+", "cross"),
    ('"', "horizontal_dash"),
    (":", "vertical_dash"),
    ("@", "spiral"),
    ("/", "right_diagonal_line"),
    ("\\", "left_diagonal_line"),
    ("x", "diagonal_cross"),
    (",", "right_diagonal_dash"),
    ("`", "left_diagonal_dash"),
    ("v", "horizontal_wave"),
    (">", "vertical_wave"),
    ("*", "criss_cross"),
)
#: Specify one of the built-in patterns for hatching fills
HatchPattern = enumeration(*list(zip(*_hatch_patterns))[1])
#: Specify one of the built-in patterns for hatching fills with a one-letter abbreviation
#:
#: The abbreviations are mapped as follows:
#:
#: .. code-block:: none
#:
#:     " "  : blank
#:     "."  : dot
#:     "o"  : ring
#:     "-"  : horizontal_line
#:     "|"  : vertical_line
#:     "+"  : cross
#:     '"'  : horizontal_dash
#:     ":"  : vertical_dash
#:     "@"  : spiral
#:     "/"  : right_diagonal_line
#:     "\\" : left_diagonal_line
#:     "x"  : diagonal_cross
#:     ","  : right_diagonal_dash
#:     "`"  : left_diagonal_dash
#:     "v"  : horizontal_wave
#:     ">"  : vertical_wave
#:     "*"  : criss_cross
HatchPatternAbbreviation = enumeration(*list(zip(*_hatch_patterns))[0], quote=True)
#: Specify whether events should be combined or collected as-is when a Document hold is in effect
HoldPolicyType = Literal["combine", "collect"]
HoldPolicy = enumeration(HoldPolicyType)
#: Specify a horizontal location in plot layouts
HorizontalLocation = enumeration("left", "right")
#: Defines the coordinate space within an image
ImageOrigin = enumeration("bottom_left", "top_left", "bottom_right", "top_right")
#: Specify a distribution to use for the Jitter class
JitterRandomDistributionType = Literal["uniform", "normal"]
JitterRandomDistribution = enumeration(JitterRandomDistributionType)
#: Specify whether a dimension or coordinate is latitude or longitude
LatLon = enumeration("lat", "lon")
#: Specify how a legend should respond to click events
LegendClickPolicy = enumeration("none", "hide", "mute")
#: Specify a fixed location for a Bokeh legend
LegendLocation = Anchor
#: Specify how stroked lines should be terminated
LineCap = enumeration("butt", "round", "square")
#: Specify a named dash pattern for stroking lines
LineDash = enumeration("solid", "dashed", "dotted", "dotdash", "dashdot")
#: Specify how stroked lines should be joined together
LineJoin = enumeration("miter", "round", "bevel")
#: Specify a location in plot layouts
LocationType = Literal["above", "below", "left", "right"]
Location = enumeration(LocationType)
#: Specify a style for a Google map
MapType = enumeration("satellite", "roadmap", "terrain", "hybrid")
#: Specify one of the built-in marker types
MarkerType = enumeration(
    "asterisk", "circle", "circle_cross", "circle_dot", "circle_x",
    "circle_y", "cross", "dash", "diamond", "diamond_cross", "diamond_dot",
    "dot", "hex", "hex_dot", "inverted_triangle", "plus", "square",
    "square_cross", "square_dot", "square_pin", "square_x", "star", "star_dot",
    "triangle", "triangle_dot", "triangle_pin", "x", "y"
)
#: Specify one of the CSS4 named colors (https://www.w3.org/TR/css-color-4/#named-colors)
NamedColor = enumeration(*colors.named.__all__, case_sensitive=False)
#: Specify a locale for printing numeric values
NumeralLanguage = enumeration("be-nl", "chs", "cs", "da-dk", "de-ch", "de", "en",
                              "en-gb", "es-ES", "es", "et", "fi", "fr-CA", "fr-ch",
                              "fr", "hu", "it", "ja", "nl-nl", "pl", "pt-br",
                              "pt-pt", "ru", "ru-UA", "sk", "th", "tr", "uk-UA")
#: Specify a vertical/horizontal orientation for something
Orientation = enumeration("horizontal", "vertical")
#: Specify an output backend to render a plot area onto
OutputBackend = enumeration("canvas", "svg", "webgl")
#: Whether range padding should be interpreted as a percentage or an absolute quantity
PaddingUnits = enumeration("percent", "absolute")
#: Specify the name of a palette from :ref:`bokeh.palettes`
Palette = enumeration(*palettes.__palettes__)
#: Specify a place in a layout where a component may be added
PlaceType = Literal["above", "below", "left", "right", "center"]
Place = enumeration(PlaceType)
#: Specify a position in the render order for a Bokeh renderer
RenderLevel = enumeration("image", "underlay", "glyph", "guide", "annotation", "overlay")
#: What reset actions should occur on a Plot reset
ResetPolicy = enumeration("standard", "event_only")
#: Specify a policy for how numbers should be rounded
RoundingFunction = enumeration("round", "nearest", "floor", "rounddown", "ceil", "roundup")
#: Scrollbar policies
ScrollbarPolicy = enumeration("auto", "visible", "hidden")
#: Selection modes
SelectionMode = enumeration("replace", "append", "intersect", "subtract")
#: Sizing mode policies
SizingModeType = Literal["stretch_width", "stretch_height", "stretch_both", "scale_width", "scale_height", "scale_both", "fixed", "inherit"]
SizingMode = enumeration(SizingModeType)
#: Individual sizing mode policies
SizingPolicy = enumeration("fixed", "fit", "min", "max")
#: Specify sorting directions
SortDirection = enumeration("ascending", "descending")
#: Specify units for mapping values
SpatialUnits = enumeration("screen", "data")
#: Specify a start/end value
StartEnd = enumeration("start", "end")
#: Specify a mode for stepwise interpolation
StepMode = enumeration("before", "after", "center")
#: Specify the horizontal alignment for rendering text
TextAlign = enumeration("left", "right", "center")
#: Specify the baseline location for rendering text
TextBaseline = enumeration("top", "middle", "bottom", "alphabetic", "hanging", "ideographic")
#: Specify how textures used as canvas patterns should repeat
TextureRepetition = enumeration("repeat", "repeat_x", "repeat_y", "no_repeat")
#: Specify how axis tick labels are oriented with respect to the axis
TickLabelOrientation = enumeration("horizontal", "vertical", "parallel", "normal")
#: Well known tool icon names
ToolIcon = enumeration(
    "append_mode",
    "box_edit",
    "box_select",
    "box_zoom",
    "clear_selection",
    "copy",
    "crosshair",
    "freehand_draw",
    "help",
    "hover",
    "intersect_mode",
    "lasso_select",
    "line_edit",
    "pan",
    "point_draw",
    "poly_draw",
    "poly_edit",
    "polygon_select",
    "range",
    "redo",
    "replace_mode",
    "reset",
    "save",
    "subtract_mode",
    "tap_select",
    "undo",
    "wheel_pan",
    "wheel_zoom",
    "xpan",
    "ypan",
    "zoom_in",
    "zoom_out",
)
#: Specify an attachment for tooltips
TooltipAttachment = enumeration("horizontal", "vertical", "left", "right", "above", "below")
#: Specify how a format string for a tooltip field should be interpreted
TooltipFieldFormatter = enumeration("numeral", "datetime", "printf")
#: Grid track (row/column) sizing policies
TrackPolicy = enumeration("auto", "min", "max", "flex", "fixed")
#: Specify the vertical alignment for rendering text
VerticalAlign = enumeration("top", "middle", "bottom")
#: Specify a vertical location in plot layouts
VerticalLocation = enumeration("above", "below")
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause | fa726da78d6bdfa83d94371133464bd4 | 29.66729 | 140 | 0.602609 | 3.94399 | false | false | false | false |
bokeh/bokeh | examples/server/app/server_auth/auth.py | 1 | 2288 | '''
Bokeh server authentication hooks are building blocks that can be used by
experienced users to implement any authentication flow they require. This
example is a "toy" example that is only intended to demonstrate how those
building blocks fit together. It should not be used as-is for "production"
use. Users looking for pre-built auth flows that work out of the box should
consider a higher level tool, such as Panel:
https://panel.holoviz.org/user_guide/Authentication.html
'''
import tornado
from tornado.web import RequestHandler
# could define get_user_async instead
def get_user(request_handler):
    ''' Return the authenticated user for a request, or None.

    This is the hook the Bokeh server calls; here the user is simply whatever
    the "user" cookie holds.
    '''
    cookie = request_handler.get_cookie("user")
    return cookie
# could also define get_login_url function (but must give up LoginHandler)
# the Bokeh server redirects unauthenticated users to this URL
login_url = "/login"
# optional login page for login_url
class LoginHandler(RequestHandler):
    ''' Render a minimal login form and validate submitted credentials. '''

    def get(self):
        # show the form, surfacing any error message passed in the query string
        try:
            error = self.get_argument("error")
        except Exception:
            error = ""
        self.render("login.html", errormessage=error)

    def check_permission(self, username, password):
        # !!!
        # !!! This code below is a toy demonstration of the API, and not
        # !!! intended for "real" use. A real app should use these APIs
        # !!! to connect Oauth or some other established auth workflow.
        # !!!
        return username == "bokeh" and password == "bokeh"

    def post(self):
        username = self.get_argument("username", "")
        password = self.get_argument("password", "")
        if not self.check_permission(username, password):
            error_msg = "?error=" + tornado.escape.url_escape("Login incorrect")
            self.redirect(login_url + error_msg)
            return
        self.set_current_user(username)
        self.redirect("/")

    def set_current_user(self, user):
        if not user:
            self.clear_cookie("user")
        else:
            self.set_cookie("user", tornado.escape.json_encode(user))
# optional logout_url, available as curdoc().session_context.logout_url
# (the Bokeh server routes this URL to LogoutHandler)
logout_url = "/logout"
# optional logout handler for logout_url
class LogoutHandler(RequestHandler):
    ''' Clear the auth cookie and send the user back to the application root. '''
    def get(self):
        self.clear_cookie("user")
        self.redirect("/")
| bsd-3-clause | 0c1e5b45e1aa282424e05f6716a54c44 | 32.15942 | 80 | 0.658217 | 4.10772 | false | false | false | false |
bokeh/bokeh | src/bokeh/server/auth_provider.py | 1 | 9657 | ''' Provide a hook for supplying authorization mechanisms to a Bokeh server.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import importlib.util
from os.path import isfile
from types import ModuleType
from typing import (
TYPE_CHECKING,
Awaitable,
Callable,
NewType,
Type,
)
# External imports
from tornado.httputil import HTTPServerRequest
from tornado.web import RequestHandler
# Bokeh imports
from ..util.serialization import make_globally_unique_id
if TYPE_CHECKING:
from ..core.types import PathLike
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'AuthModule',
'AuthProvider',
'NullAuth'
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
User = NewType("User", object)
class AuthProvider:
    ''' Abstract base class for implementing authorization hooks.
    Subclasses must supply one of: ``get_user`` or ``get_user_async``.
    Subclasses must also supply one of ``login_url`` or ``get_login_url``.
    Optionally, if ``login_url`` provides a relative URL, then ``login_handler``
    may also be supplied.
    The properties ``logout_url`` and ``logout_handler`` are analogous to
    the corresponding login properties, and are optional.
    .. autoclasstoc::
    '''
    def __init__(self) -> None:
        self._validate()
    @property
    def endpoints(self) -> list[tuple[str, Type[RequestHandler]]]:
        ''' URL patterns for login/logout endpoints.
        '''
        endpoints: list[tuple[str, Type[RequestHandler]]] = []
        if self.login_handler:
            assert self.login_url is not None
            endpoints.append((self.login_url, self.login_handler))
        if self.logout_handler:
            assert self.logout_url is not None
            endpoints.append((self.logout_url, self.logout_handler))
        return endpoints
    @property
    def get_login_url(self) -> Callable[[HTTPServerRequest], str] | None:
        ''' A function that computes a URL to redirect unauthenticated users
        to for login.
        This property may return None, if a ``login_url`` is supplied
        instead.
        If a function is returned, it should accept a ``RequestHandler``
        and return a login URL for unauthenticated users.
        '''
        pass
    @property
    def get_user(self) -> Callable[[HTTPServerRequest], User] | None:
        ''' A function to get the current authenticated user.
        This property may return None, if a ``get_user_async`` function is
        supplied instead.
        If a function is returned, it should accept a ``RequestHandler``
        and return the current authenticated user.
        '''
        pass
    @property
    def get_user_async(self) -> Callable[[HTTPServerRequest], Awaitable[User]] | None:
        ''' An async function to get the current authenticated user.
        This property may return None, if a ``get_user`` function is supplied
        instead.
        If a function is returned, it should accept a ``RequestHandler``
        and return the current authenticated user.
        '''
        pass
    @property
    def login_handler(self) -> Type[RequestHandler] | None:
        ''' A request handler class for a login page.
        This property may return None, if ``login_url`` is supplied
        instead.
        If a class is returned, it must be a subclass of RequestHandler,
        which will be used for the endpoint specified by ``login_url``
        '''
        pass
    @property
    def login_url(self) -> str | None:
        ''' A URL to redirect unauthenticated users to for login.
        This property may return None, if a ``get_login_url`` function is
        supplied instead.
        '''
        pass
    @property
    def logout_handler(self) -> Type[RequestHandler] | None:
        ''' A request handler class for a logout page.
        This property may return None.
        If a class is returned, it must be a subclass of RequestHandler,
        which will be used for the endpoint specified by ``logout_url``
        '''
        pass
    @property
    def logout_url(self) -> str | None:
        ''' A URL to redirect authenticated users to for logout.
        This property may return None.
        '''
        pass
    def _validate(self) -> None:
        # enforce the contract documented on the class: at most one user hook,
        # exactly one login-URL mechanism, and handlers only with relative URLs
        if self.get_user and self.get_user_async:
            raise ValueError("Only one of get_user or get_user_async should be supplied")
        if (self.get_user or self.get_user_async) and not (self.login_url or self.get_login_url):
            raise ValueError("When user authentication is enabled, one of login_url or get_login_url must be supplied")
        if self.login_url and self.get_login_url:
            raise ValueError("At most one of login_url or get_login_url should be supplied")
        if self.login_handler and self.get_login_url:
            raise ValueError("LoginHandler cannot be used with a get_login_url() function")
        if self.login_handler and not issubclass(self.login_handler, RequestHandler):
            raise ValueError("LoginHandler must be a Tornado RequestHandler")
        if self.login_url and not probably_relative_url(self.login_url):
            raise ValueError("LoginHandler can only be used with a relative login_url")
        if self.logout_handler and not issubclass(self.logout_handler, RequestHandler):
            raise ValueError("LogoutHandler must be a Tornado RequestHandler")
        if self.logout_url and not probably_relative_url(self.logout_url):
            raise ValueError("LogoutHandler can only be used with a relative logout_url")
class AuthModule(AuthProvider):
    ''' An AuthProvider whose hooks are looked up on a user-supplied Python
    module.

    The properties ``get_login_url``, ``get_user``, ``get_user_async``,
    ``login_url`` and ``logout_url`` return the same-named attributes of the
    module when present, and None otherwise.

    The ``login_handler`` and ``logout_handler`` properties return the
    module's ``LoginHandler`` and ``LogoutHandler`` classes, respectively,
    or None when the module does not define them.

    .. autoclasstoc::
    '''

    def __init__(self, module_path: PathLike) -> None:
        if not isfile(module_path):
            raise ValueError(f"no file exists at module_path: {module_path!r}")
        self._module = load_auth_module(module_path)
        super().__init__()

    def _lookup(self, name):
        # single point of access: the module attribute when defined, else None
        return getattr(self._module, name, None)

    @property
    def get_user(self):
        return self._lookup('get_user')

    @property
    def get_user_async(self):
        return self._lookup('get_user_async')

    @property
    def login_url(self):
        return self._lookup('login_url')

    @property
    def get_login_url(self):
        return self._lookup('get_login_url')

    @property
    def login_handler(self):
        return self._lookup('LoginHandler')

    @property
    def logout_url(self):
        return self._lookup('logout_url')

    @property
    def logout_handler(self):
        return self._lookup('LogoutHandler')
class NullAuth(AuthProvider):
    ''' A default no-auth AuthProvider.

    All of the properties of this provider return None.

    .. autoclasstoc::

    '''

    # Every hook is a read-only property that always yields None; defining
    # them with property() keeps the class body compact and uniform.
    get_user = property(lambda self: None)
    get_user_async = property(lambda self: None)
    login_url = property(lambda self: None)
    get_login_url = property(lambda self: None)
    login_handler = property(lambda self: None)
    logout_url = property(lambda self: None)
    logout_handler = property(lambda self: None)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
def load_auth_module(module_path: PathLike) -> ModuleType:
    ''' Load a Python source file at a given path as a module.

    Arguments:
        module_path (str): path to a Python source file

    Returns
        module

    Raises:
        RuntimeError: if an import spec cannot be created for the path

    '''
    # Use a globally unique module name so that repeated loads (e.g. multiple
    # server apps each supplying an auth module) never collide in sys.modules.
    module_name = "bokeh.auth_" + make_globally_unique_id().replace('-', '')
    spec = importlib.util.spec_from_file_location(module_name, module_path)
    if spec is None or spec.loader is None:
        # spec_from_file_location returns None (or a loader-less spec) for
        # paths it cannot handle; fail with a clear message instead of the
        # AttributeError the calls below would otherwise raise.
        raise RuntimeError(f"could not create a module spec for {module_path!r}")
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
def probably_relative_url(url: str) -> bool:
    ''' Heuristically decide whether a URL is relative.

    A URL beginning with one of the common absolute prefixes (``http://``,
    ``https://``, or the protocol-relative ``//``) is considered absolute;
    anything else is assumed relative.

    Arguments:
        url (str): a URL string

    Returns
        bool

    '''
    for prefix in ("http://", "https://", "//"):
        if url.startswith(prefix):
            return False
    return True
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause | f861a8296be9223b59e724cd7163cf71 | 28.622699 | 119 | 0.567257 | 4.631655 | false | false | false | false |
bokeh/bokeh | docs/bokeh/docserver.py | 1 | 2689 | # -----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
""" Basic webserver for developing Bokeh documentation locally.
Executing this script will automatically open a browser tab to the locally
built documentation index page.
This script can be run manually:
python docserver.py
or more commonly via executing ``make serve`` in this directory. It is possible
to combine this usage with other make targets in a single invovation, e.g.
make clean html serve
will clean any previous docs output, build all the documentation from scratch,
and then run this script to serve and display the results.
For more information about building Bokeh's documentation, see the Developer's
Guide:
https://docs.bokeh.org/en/latest/docs/dev_guide.html
"""
import threading
import time
import webbrowser
from pathlib import Path
import flask
from flask import redirect, url_for
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.wsgi import WSGIContainer
from bokeh.util.tornado import fixup_windows_event_loop_policy
# Set by serve_http() once the server thread creates its event loop; the main
# thread later uses it to schedule a clean shutdown.
IOLOOP = None
HOST = "localhost"
PORT = 5009
VISIT_URL = f"http://{HOST}:{PORT}/en/latest/index.html"
# Directory containing this script (the sphinx docs source root).
SPHINX_TOP = Path(__file__).resolve().parent
# static_folder points at a non-existent path -- presumably to neutralize
# Flask's default /static route so only the explicit routes below serve files.
# TODO confirm intent.
app = flask.Flask(__name__, static_folder="/unused")
@app.route("/")
def root():
    """Redirect the site root to the docs index page.

    Note: ``url_for()`` expects a registered *endpoint name*, not a URL path.
    The previous call ``url_for("en/latest/index.html")`` referenced no
    endpoint and would raise ``werkzeug.routing.BuildError`` on every request;
    build the URL for the ``docs`` view instead.
    """
    return redirect(url_for("docs", filename="index.html"))
@app.route("/switcher.json")
def switcher():
    """Serve the docs version-switcher manifest from the sphinx source dir."""
    manifest = "switcher.json"
    return flask.send_from_directory(SPHINX_TOP, manifest)
@app.route("/en/latest/<path:filename>")
def docs(filename):
    """Serve one file out of the built HTML documentation tree."""
    html_root = SPHINX_TOP / "build" / "html"
    return flask.send_from_directory(html_root, filename)
def open_browser():
    # Open the local docs index; new=2 asks for a new browser tab.
    webbrowser.open(VISIT_URL, new=2)
def serve_http():
    """Run the Flask app inside a Tornado HTTP server (blocks until stopped)."""
    global IOLOOP
    # Publish the loop so the main thread can schedule a shutdown callback.
    IOLOOP = IOLoop().current()
    wsgi_app = WSGIContainer(app)
    HTTPServer(wsgi_app).listen(PORT)
    IOLOOP.start()
if __name__ == "__main__":
    # Tornado requires the selector event loop policy on Windows.
    fixup_windows_event_loop_policy()
    print(f"\nStarting Bokeh plot server on port {PORT}...")
    print(f"Visit {VISIT_URL} to see plots\n")
    # Run the HTTP server on a background thread so the main thread is free
    # to wait for user input below.
    server = threading.Thread(target=serve_http)
    server.start()
    # Give the server a moment to come up before pointing a browser at it.
    time.sleep(0.5)
    browser = threading.Thread(target=open_browser)
    browser.start()
    try:
        input("Press <ENTER> to exit...\n") # lgtm [py/use-of-input]
    except KeyboardInterrupt:
        pass
    # Stop the Tornado loop from its own thread, then wait for both helper
    # threads to wind down before exiting.
    IOLOOP.add_callback(IOLOOP.stop) # type: ignore
    server.join()
    browser.join()
    print("Server shut down.")
| bsd-3-clause | 6154199bd9913ef152d83b7ad9bc56dc | 25.106796 | 79 | 0.671253 | 3.766106 | false | false | false | false |
bokeh/bokeh | src/bokeh/util/version.py | 1 | 2944 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide a version for the Bokeh library.
This module uses `versioneer`_ to manage version strings. During development,
`versioneer`_ will compute a version string from the current git revision.
For packaged releases based off tags, the version string is hard coded in the
files packaged for distribution.
Attributes:
__version__:
The full version string for this installed Bokeh library
Functions:
base_version:
Return the base version string, without any "dev", "rc" or local build
information appended.
is_full_release:
Return whether the current installed version is a full release.
.. _versioneer: https://github.com/warner/python-versioneer
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from .. import __version__
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# Explicit public API of this module.
__all__ = (
    'base_version',
    'is_full_release',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def base_version() -> str:
    ''' Return the base version string for the installed Bokeh library,
    without any ".dev", ".rc", or local build suffix.

    '''
    return _base_version_helper(__version__)
def is_full_release(version: str | None = None) -> bool:
import re
version = version or __version__
VERSION_PAT = re.compile(r"^(\d+\.\d+\.\d+)$")
return bool(VERSION_PAT.match(version))
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _base_version_helper(version: str) -> str:
import re
VERSION_PAT = re.compile(r"^(\d+\.\d+\.\d+)((?:\.dev|\.rc).*)?")
match = VERSION_PAT.search(version)
assert match is not None
return match.group(1)
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause | 99638c92cdbd1582785472f98bc8ce2f | 34.047619 | 78 | 0.388587 | 5.995927 | false | false | false | false |
bokeh/bokeh | src/bokeh/sphinxext/bokeh_enum.py | 1 | 4423 | # -----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
""" Thoroughly document Bokeh enumerations
The ``bokeh-enum`` directive generates useful documentation for enumerations,
including all the allowable values. If the number of values is large, the full
list is put in a collapsible code block.
This directive takes the name of a Bokeh enum variable as the argument and the
module name as an option. An optional description may be added as content:
.. code-block:: rest
.. bokeh-enum:: baz
:module: bokeh.sphinxext.sample
Specify a baz style
Examples
--------
The directive above will generate the following output:
.. bokeh-enum:: baz
:module: bokeh.sphinxext.sample
Specify a baz style
Although ``bokeh-enum`` may be used explicitly, it is more often convenient in
conjunction with the :ref:`bokeh.sphinxext.bokeh_autodoc` extension. Together,
the same output above will be generated directly from the following code:
.. code-block:: python
#: Specify a baz style
baz = enumeration("a", "b", "c")
"""
# -----------------------------------------------------------------------------
# Boilerplate
# -----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
# Standard library imports
import importlib
import textwrap
# External imports
from docutils.parsers.rst.directives import unchanged
from sphinx.errors import SphinxError
# Bokeh imports
from . import PARALLEL_SAFE
from .bokeh_directive import BokehDirective
from .templates import ENUM_DETAIL
# -----------------------------------------------------------------------------
# Globals and constants
# -----------------------------------------------------------------------------
# Explicit public API of this module.
__all__ = (
    "BokehEnumDirective",
    "setup",
)
# -----------------------------------------------------------------------------
# General API
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Dev API
# -----------------------------------------------------------------------------
class BokehEnumDirective(BokehDirective):
    """Implementation of the ``.. bokeh-enum::`` directive."""

    has_content = True
    required_arguments = 1
    option_spec = {
        "module": unchanged,
        "noindex": lambda x: True,  # directives.flag weirdly returns None
    }

    def run(self):
        enum_name = self.arguments[0]
        module_name = self.options["module"]

        try:
            module = importlib.import_module(module_name)
        except ImportError:
            raise SphinxError(f"Could not generate reference docs for {enum_name!r}: could not import module {module_name}")

        full = repr(getattr(module, enum_name, None))

        # Long value lists are abbreviated inline and the complete repr is
        # wrapped for a collapsible block; short reprs are shown as-is.
        if len(full) > 180:
            short = f"{full[:40]} .... {full[-40:]}"
            wrapped = _wrapper.wrap(full)
        else:
            short, wrapped = full, None

        rst_text = ENUM_DETAIL.render(
            name=enum_name,
            module=self.options["module"],
            noindex=self.options.get("noindex", False),
            content=self.content,
            shortrepr=short,
            fullrepr=wrapped,
        )

        return self.parse(rst_text, "<bokeh-enum>")
def setup(app):
    """ Required Sphinx extension setup function. """
    # Register under the "py" domain so docs can use ``.. bokeh-enum::``.
    app.add_directive_to_domain("py", "bokeh-enum", BokehEnumDirective)
    return PARALLEL_SAFE
# -----------------------------------------------------------------------------
# Private API
# -----------------------------------------------------------------------------
_wrapper = textwrap.TextWrapper(subsequent_indent=" ")
# -----------------------------------------------------------------------------
# Code
# -----------------------------------------------------------------------------
| bsd-3-clause | edb9bde6e36579a66a407e74b348345d | 30.147887 | 124 | 0.474791 | 5.173099 | false | false | false | false |
bokeh/bokeh | src/bokeh/__init__.py | 1 | 3755 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Bokeh is a Python library for creating interactive visualizations for modern
web browsers.
Bokeh helps you build beautiful graphics, ranging from simple plots to complex
dashboards with streaming datasets. With Bokeh, you can create JavaScript-powered
visualizations without writing any JavaScript yourself.
Most of the functionality of Bokeh is accessed through submodules such as
|bokeh.plotting| and |bokeh.models|.
For full documentation, please visit https://docs.bokeh.org
----
The top-level ``bokeh`` module itself contains a few useful functions and
attributes:
.. attribute:: __version__
:annotation: = currently installed version of Bokeh
.. autofunction:: bokeh.license
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import importlib.metadata as importlib_metadata
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# Explicit public API of the top-level ``bokeh`` package.
__all__ = (
    '__version__',
    'license',
    'sampledata',
)

# Resolve the installed version from the package metadata.
__version__ = importlib_metadata.version("bokeh")
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def license():
    ''' Print the Bokeh license to the console.

    Returns:
        None

    '''
    from pathlib import Path
    license_path = Path(__file__).parent / 'LICENSE.txt'
    print(license_path.read_text())
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# The metadata module is no longer needed once __version__ has been set.
del importlib_metadata

# expose sample data module
from . import sampledata # isort:skip

# configure Bokeh logger
from .util import logconfig # isort:skip
del logconfig

# Configure warnings to always show nice messages, despite Python's active
# efforts to hide them from users.
import warnings # isort:skip
from .util.warnings import BokehDeprecationWarning, BokehUserWarning # isort:skip
warnings.simplefilter('always', BokehDeprecationWarning)
warnings.simplefilter('always', BokehUserWarning)

# Keep a reference to the stock formatter so non-Bokeh warnings still use it.
original_formatwarning = warnings.formatwarning
def _formatwarning(message, category, filename, lineno, line=None):
    ''' Format Bokeh's own warning categories as a bare one-line message;
    defer to Python's default formatting for everything else.

    '''
    from .util.warnings import BokehDeprecationWarning, BokehUserWarning
    if category in (BokehDeprecationWarning, BokehUserWarning):
        return f"{category.__name__}: {message}\n"
    return original_formatwarning(message, category, filename, lineno, line)
# Install the custom formatter, then scrub the helper names from the module
# namespace so they do not leak into the public ``bokeh`` API.
warnings.formatwarning = _formatwarning
del _formatwarning
del BokehDeprecationWarning, BokehUserWarning
del warnings
| bsd-3-clause | 1dd085b7e4d2fe60679652b2128d4694 | 32.828829 | 81 | 0.498269 | 5.74159 | false | false | false | false |
bokeh/bokeh | examples/reference/models/MultiPolygons.py | 1 | 1399 | from bokeh.io import curdoc, show
from bokeh.models import ColumnDataSource, Grid, LinearAxis, MultiPolygons, Plot
xs_dict = [
[ {'exterior': [1, 1, 2, 2], 'holes': [ ]} ],
[ {'exterior': [1, 1, 3], 'holes': [ [1.5, 1.5, 2] ]} ],
[ {'exterior': [2, 2, 4, 4], 'holes': [ [2.5, 2.5, 3], [3.5, 3, 3] ]},
{'exterior': [3.5, 3.5, 4], 'holes': [ ]} ]
]
ys_dict = [
[ {'exterior': [4, 3, 3, 4], 'holes': [ ]} ],
[ {'exterior': [1, 3, 1], 'holes': [ [1.5, 2, 1.5] ]} ],
[ {'exterior': [2, 4, 4, 2], 'holes': [ [3, 3.5, 3.5], [2.5, 2.5, 3] ]},
{'exterior': [1, 1.5, 1.5], 'holes': [ ]} ]
]
xs = [[[p['exterior'], *p['holes']] for p in mp] for mp in xs_dict]
ys = [[[p['exterior'], *p['holes']] for p in mp] for mp in ys_dict]
source = ColumnDataSource(dict(xs=xs, ys=ys))
plot = Plot(
title=None, width=300, height=300,
min_border=0, toolbar_location=None)
glyph = MultiPolygons(xs="xs", ys="ys", line_width=2)
plot.add_glyph(source, glyph)
xaxis = LinearAxis()
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis()
plot.add_layout(yaxis, 'left')
plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
curdoc().add_root(plot)
show(plot)
| bsd-3-clause | bd9ddd84a76a2bcb653a6df96d295cfd | 33.121951 | 80 | 0.494639 | 2.664762 | false | false | false | false |
bokeh/bokeh | src/bokeh/application/handlers/function.py | 1 | 4966 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide a Bokeh Application Handler to build up documents by running
a specified Python function.
This Handler is not used by the Bokeh server command line tool, but is often
useful if users wish to embed the Bokeh server programmatically:
.. code-block:: python
def make_doc(doc: Document):
# do work to modify the document, add plots, widgets, etc.
return doc
app = Application(FunctionHandler(make_doc))
server = Server({'/bkapp': app}, io_loop=IOLoop.current())
server.start()
For complete examples of this technique, see :bokeh-tree:`examples/server/api`
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from typing import Callable
# Bokeh imports
from ...document import Document
from ...util.callback_manager import _check_callback
from .handler import Handler, handle_exception
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
# Explicit public API of this module.
__all__ = (
    'FunctionHandler',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
# Signature expected of user functions: take a Document and modify it in place.
ModifyDoc = Callable[[Document], None]
class FunctionHandler(Handler):
    ''' A Handler that runs a plain Python function to update Bokeh Documents.

    For example, a function that adds an empty plot to a document:

    .. code-block:: python

        def add_empty_plot(doc: Document):
            p = figure(x_range=(0, 10), y_range=(0, 10))
            doc.add_root(p)
            return doc

        handler = FunctionHandler(add_empty_plot)

    An Application configured with this handler invokes the function once for
    every new session, so each session receives a freshly built document.

    .. autoclasstoc::

    '''

    _func: ModifyDoc
    _trap_exceptions: bool
    _safe_to_fork: bool

    def __init__(self, func: ModifyDoc, *, trap_exceptions: bool = False) -> None:
        '''
        Args:
            func (callable) : a function that mutates a Bokeh Document in
                place and returns it:

                .. code-block:: python

                    def func(doc: Document):
                        # modify doc
                        return doc

            trap_exceptions (bool) : when True, exceptions raised by ``func``
                are caught and logged instead of propagating to the caller

        '''
        super().__init__()
        # Validate the function's signature up front so that a misconfigured
        # handler fails immediately rather than at session-creation time.
        _check_callback(func, ('doc',))
        self._func = func
        self._trap_exceptions = trap_exceptions
        self._safe_to_fork = True

    # Properties --------------------------------------------------------------

    @property
    def safe_to_fork(self) -> bool:
        ''' Whether the Bokeh server may still fork new workers.

        Becomes ``False`` once ``modify_document`` has been called.

        '''
        return self._safe_to_fork

    # Public methods ----------------------------------------------------------

    def modify_document(self, doc: Document) -> None:
        ''' Run the configured function against ``doc``.

        After the first call, ``safe_to_fork`` reports ``False``.

        '''
        try:
            self._func(doc)
        except Exception as err:
            if not self._trap_exceptions:
                raise
            handle_exception(self, err)
        finally:
            # Once user code has run, forking could duplicate its effects.
            self._safe_to_fork = False
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause | 77644a70f9bc48e6933f4260b0bb9752 | 30.630573 | 82 | 0.444825 | 5.701493 | false | false | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.