import os
import shelve
from .common import cache_uri_build, sprite_filepath_build
CACHE_DIR = None
API_CACHE = None
SPRITE_CACHE = None
def save(data, endpoint, resource_id=None, subresource=None):
if data == dict(): # No point in saving empty data.
return None
if not isinstance(data, (dict, list)):
        raise ValueError("Could not save non-dict or non-list data")
uri = cache_uri_build(endpoint, resource_id, subresource)
try:
with shelve.open(API_CACHE) as cache:
cache[uri] = data
except OSError as error:
        if error.errno == 11:  # EAGAIN: cache is locked by another process
# print('Cache unavailable, skipping save')
pass
else:
raise error
return None
def save_sprite(data, sprite_type, sprite_id, **kwargs):
abs_path = data["path"]
    # Make intermediate directories; rpartition strips the filename and
    # extension, leaving only the directory part of the path.
dirs = abs_path.rpartition(os.path.sep)[0]
safe_make_dirs(dirs)
with open(abs_path, "wb") as img_file:
img_file.write(data["img_data"])
return None
def load(endpoint, resource_id=None, subresource=None):
uri = cache_uri_build(endpoint, resource_id, subresource)
try:
with shelve.open(API_CACHE) as cache:
return cache[uri]
except OSError as error:
        if error.errno == 11:  # EAGAIN: cache is locked by another process
# print('Cache unavailable, skipping load')
raise KeyError("Cache could not be opened.")
else:
raise
def load_sprite(sprite_type, sprite_id, **kwargs):
abs_path = get_sprite_path(sprite_type, sprite_id, **kwargs)
with open(abs_path, "rb") as img_file:
img_data = img_file.read()
return dict(img_data=img_data, path=abs_path)
def safe_make_dirs(path, mode=0o777):
"""Create a leaf directory and all intermediate ones in a safe way.
A wrapper to os.makedirs() that handles existing leaf directories while
avoiding os.path.exists() race conditions.
:param path: relative or absolute directory tree to create
:param mode: directory permissions in octal
:return: The newly-created path
"""
try:
os.makedirs(path, mode)
except OSError as error:
if error.errno != 17: # File exists
raise
return path
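# Example (illustrative): safe_make_dirs('/tmp/a/b/c') creates every missing
# level and returns the path; calling it again is a harmless no-op because
# errno 17 (EEXIST) is swallowed above.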
def get_default_cache():
"""Get the default cache location.
Adheres to the XDG Base Directory specification, as described in
https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
:return: the default cache directory absolute path
"""
xdg_cache_home = os.environ.get("XDG_CACHE_HOME") or os.path.join(
os.path.expanduser("~"), ".cache"
)
return os.path.join(xdg_cache_home, "pokebase")
def get_sprite_path(sprite_type, sprite_id, **kwargs):
rel_filepath = sprite_filepath_build(sprite_type, sprite_id, **kwargs)
abs_path = os.path.join(SPRITE_CACHE, rel_filepath)
return abs_path
def set_cache(new_path=None):
"""Simple function to change the cache location.
`new_path` can be an absolute or relative path. If the directory does not
exist yet, this function will create it. If None it will set the cache to
the default cache directory.
If you are going to change the cache directory, this function should be
called at the top of your script, before you make any calls to the API.
This is to avoid duplicate files and excess API calls.
:param new_path: relative or absolute path to the desired new cache
directory
:return: str, str
"""
global CACHE_DIR, API_CACHE, SPRITE_CACHE
if new_path is None:
new_path = get_default_cache()
CACHE_DIR = safe_make_dirs(os.path.abspath(new_path))
API_CACHE = os.path.join(CACHE_DIR, "api.cache")
SPRITE_CACHE = safe_make_dirs(os.path.join(CACHE_DIR, "sprite"))
return CACHE_DIR, API_CACHE, SPRITE_CACHE
CACHE_DIR, API_CACHE, SPRITE_CACHE = set_cache()
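# Usage sketch (illustrative, not part of the module): how the pieces above
# fit together; the endpoint and id values are placeholders.
#
#   set_cache('/tmp/pokebase-demo')            # relocate and create the cache
#   save({'name': 'bulbasaur'}, 'pokemon', 1)  # persist a payload
#   data = load('pokemon', 1)                  # raises KeyError on a miss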
|
from __future__ import unicode_literals
from functools import update_wrapper
from hashlib import md5
from django.conf.urls import url
from django.contrib.admin import ModelAdmin as DjangoModelAdmin
from django.contrib.admin.utils import flatten_fieldsets, model_format_dict
from django.contrib.auth import get_permission_codename
from django.db import models
from django.db import transaction
from django.db.models.fields import BLANK_CHOICE_DASH, FieldDoesNotExist
from django.utils import six
from django.utils.encoding import smart_bytes
from yepes.admin import actions, operations
from yepes.admin.views import (
CsvExportView,
JsonExportView,
TsvExportView,
YamlExportView,
MassUpdateView,
)
from yepes.contrib.datamigrations import serializers
class ModelAdmin(DjangoModelAdmin):
def get_action_choices(self, request, default_choices=BLANK_CHOICE_DASH):
        # Overridden to format action descriptions with ``str.format()``
        # instead of the ``%`` operator.
choices = [] + default_choices
model_verbose_names = model_format_dict(self.opts)
for func, name, description in six.itervalues(self.get_actions(request)):
choice = (name, description.format(**model_verbose_names))
choices.append(choice)
return choices
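    # Example: for a model whose verbose_name is 'article',
    # model_format_dict(self.opts) yields {'verbose_name': 'article',
    # 'verbose_name_plural': 'articles'}, so a description such as
    # 'Export selected {verbose_name_plural}' renders as
    # 'Export selected articles'.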
def get_actions(self, request):
acts = super(ModelAdmin, self).get_actions(request)
def add_action(label):
action = getattr(actions, label)
acts[label] = (action, label, action.short_description)
if self.has_change_permission(request):
add_action('mass_update')
# Move admin site actions after mass_update.
for name, func in self.admin_site.actions:
info = acts.pop(name, None)
if (info is not None
and (name != 'delete_selected'
or self.has_delete_permission(request))):
acts[name] = info
if self.has_view_permission(request):
if serializers.has_serializer('csv'):
add_action('export_csv')
if serializers.has_serializer('json'):
add_action('export_json')
if serializers.has_serializer('tsv'):
add_action('export_tsv')
if serializers.has_serializer('yaml'):
add_action('export_yaml')
return acts
def get_field_operations(self, request, field):
ops = [operations.Set]
if not field.choices and field.remote_field is None:
if field.null:
                ops.append(operations.SetNull)
if isinstance(field, models.CharField):
ops.extend([
operations.Lower,
operations.Upper,
operations.Capitalize,
operations.Title,
operations.SwapCase,
operations.Strip,
])
elif isinstance(field, (models.IntegerField,
models.FloatField,
models.DecimalField)):
ops.extend([
operations.Add,
operations.Sub,
operations.Mul,
operations.Div,
])
elif isinstance(field, (models.BooleanField,
models.NullBooleanField)):
                ops.append(operations.Swap)
return ops
def get_formfields(self, request, unique=False, **kwargs):
field_names = flatten_fieldsets(self.get_fieldsets(request))
readonly_fields = self.get_readonly_fields(request)
opts = self.opts
db_fields = []
for field_name in field_names:
if field_name in readonly_fields:
continue
try:
field = opts.get_field(field_name)
except FieldDoesNotExist:
continue
if not field.editable or (field.unique and not unique):
continue
db_fields.append(field)
form_fields = []
for dbfield in db_fields:
formfield = self.formfield_for_dbfield(
dbfield,
request=request,
**kwargs)
if formfield is None:
continue
form_fields.append((dbfield, formfield))
return form_fields
def get_inline_instances(self, request, obj=None):
sup = super(ModelAdmin, self)
inline_instances = sup.get_inline_instances(request, obj)
if not self.has_change_permission(request, obj, strict=True):
for inline in inline_instances:
if inline.fieldsets:
fields = flatten_fieldsets(inline.get_fieldsets(request, obj))
else:
fields = {
f.name
for f
in inline.model._meta.get_fields()
if not (f.is_relation and f.auto_created)
}
fields.update(inline.readonly_fields)
inline.max_num = 0
inline.readonly_fields = list(fields)
return inline_instances
def get_readonly_fields(self, request, obj=None):
if self.has_change_permission(request, obj, strict=True):
return self.readonly_fields
if self.fieldsets:
fields = flatten_fieldsets(self.get_fieldsets(request, obj))
else:
fields = {
f.name
for f
in self.model._meta.get_fields()
if not (f.is_relation and f.auto_created)
}
fields.update(self.readonly_fields)
return list(fields)
def get_urls(self):
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
return update_wrapper(wrapper, view)
info = (self.model._meta.app_label, self.model._meta.model_name)
urls = [
url(r'^export-csv/$',
wrap(CsvExportView.as_view(modeladmin=self)),
name='{0}_{1}_exportcsv'.format(*info),
),
url(r'^export-json/$',
wrap(JsonExportView.as_view(modeladmin=self)),
name='{0}_{1}_exportjson'.format(*info),
),
url(r'^export-tsv/$',
wrap(TsvExportView.as_view(modeladmin=self)),
name='{0}_{1}_exporttsv'.format(*info),
),
url(r'^export-yaml/$',
wrap(YamlExportView.as_view(modeladmin=self)),
name='{0}_{1}_exportyaml'.format(*info),
),
url(r'^mass-update/$',
wrap(MassUpdateView.as_view(modeladmin=self)),
name='{0}_{1}_massupdate'.format(*info),
),
]
return urls + super(ModelAdmin, self).get_urls()
    # NOTE: Permission verification is an inexpensive task most of the time.
    # However, permissions are cached on the request because, if they changed
    # during response processing, they could cause inconsistencies.
def _get_cache_attribute(self, view, obj):
h = md5(smart_bytes('{0}.{1}.{2}'.format(
self.opts.app_label,
self.opts.object_name.lower(),
hash(obj))))
return '_{0}_permission_{1}'.format(view, h.hexdigest())
def has_add_permission(self, request):
attr_name = self._get_cache_attribute('add', None)
try:
permission = getattr(request, attr_name)
except AttributeError:
permission = self._has_add_permission(request)
setattr(request, attr_name, permission)
return permission
def _has_add_permission(self, request):
app_label = self.opts.app_label
codename = get_permission_codename('add', self.opts)
return request.user.has_perm('.'.join((app_label, codename)))
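    # Example: for app label 'blog' and model Article, the check above tests
    # the 'blog.add_article' permission.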
def has_change_permission(self, request, obj=None, strict=False):
if not strict:
return self.has_view_permission(request, obj)
attr_name = self._get_cache_attribute('change', obj)
try:
permission = getattr(request, attr_name)
except AttributeError:
permission = self._has_change_permission(request, obj)
setattr(request, attr_name, permission)
return permission
def _has_change_permission(self, request, obj):
app_label = self.opts.app_label
codename = get_permission_codename('change', self.opts)
return request.user.has_perm('.'.join((app_label, codename)))
def has_delete_permission(self, request, obj=None):
attr_name = self._get_cache_attribute('delete', obj)
try:
permission = getattr(request, attr_name)
except AttributeError:
permission = self._has_delete_permission(request, obj)
setattr(request, attr_name, permission)
return permission
def _has_delete_permission(self, request, obj):
app_label = self.opts.app_label
codename = get_permission_codename('delete', self.opts)
return request.user.has_perm('.'.join((app_label, codename)))
def has_view_permission(self, request, obj=None):
attr_name = self._get_cache_attribute('view', obj)
try:
permission = getattr(request, attr_name)
except AttributeError:
permission = self._has_view_permission(request, obj)
setattr(request, attr_name, permission)
return permission
def _has_view_permission(self, request, obj):
#app_label = self.opts.app_label
#codename = get_permission_codename('view', self.opts)
#return request.user.has_perm('.'.join((app_label, codename)))
return True
def report_change(self, request, queryset, affected_rows,
log_message, user_message, user_message_plural):
kwargs = model_format_dict(self.opts)
for obj in queryset:
self.log_change(request, obj, log_message.format(record=obj, **kwargs))
if affected_rows == 1:
kwargs['record'] = queryset.get()
self.message_user(request, user_message.format(**kwargs))
else:
kwargs['record_count'] = affected_rows
self.message_user(request, user_message_plural.format(**kwargs))
def update_queryset(self, request, queryset, ops, in_bulk=False):
if not in_bulk:
affected_rows = 0
with transaction.atomic():
for obj in queryset:
for op in ops:
op.run(obj)
obj.full_clean()
obj.save()
affected_rows += 1
return affected_rows
else:
return queryset.update(**{
op.field_name: op.as_expression()
for op
in ops
})
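# Usage sketch (hypothetical; it assumes only the interface update_queryset()
# itself relies on, namely that each operation exposes `field_name`, `run(obj)`
# and `as_expression()`):
#
#   ops = [operations.Set('status', 'archived')]
#   admin.update_queryset(request, queryset, ops, in_bulk=True)
#   # in_bulk=True issues a single UPDATE via queryset.update(); in_bulk=False
#   # validates and saves each object individually inside one transaction.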
|
from core import kde
class DefaultProfile4(kde.KDE4Action):
"""Set Konsole's default profile."""
def arguments(self):
return [
('path', 'The relative path to a Konsole .profile file.')
]
def binary_dependencies(self):
return ['konsole']
    def execute(self, path, binary='konsole'):
kde.writeconfig('Desktop Entry', 'DefaultProfile',
path, "%src" % binary)
return True
class DefaultProfile5(kde.KDE5Action):
"""Set Konsole's default profile."""
def arguments(self):
return [
('path', 'The relative path to a Konsole .profile file.')
]
def binary_dependencies(self):
return ['konsole']
    def execute(self, path, binary='konsole'):
kde.writeconfig('Desktop Entry', 'DefaultProfile',
path, "%src" % binary)
return True
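# Usage sketch (illustrative): both classes write the same key, so switching
# Konsole's default profile is a single kwriteconfig call; "%src" % binary
# expands to the rc file name, e.g. 'konsolerc' for the default binary.
#
#   DefaultProfile5().execute('Shell.profile')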
|
from . import access_token
from . import errors
from . import utils
from .connection import connection
from .logger import setup_logger
from .batch import Batch
from .request import Broker
from .malware import Malware
from .malware_family import MalwareFamily
from .threat_exchange_member import ThreatExchangeMember
from .threat_descriptor import ThreatDescriptor
from .threat_indicator import ThreatIndicator
from .threat_privacy_group import ThreatPrivacyGroup
from .threat_tag import ThreatTag
from .rtu import RTUListener
__title__ = 'pytx'
__version__ = '0.5.9'
__author__ = 'Mike Goffin'
__license__ = 'BSD'
__copyright__ = 'Copyright 2017 Mike Goffin'
__all__ = [
'access_token',
'connection',
'Batch',
'Broker',
'errors',
'Malware',
'MalwareFamily',
'RTUListener',
'setup_logger',
'ThreatExchangeMember',
'ThreatDescriptor',
'ThreatIndicator',
'ThreatPrivacyGroup',
'ThreatTag',
'utils',
]
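# Usage sketch (an assumption about the standard pytx bootstrap, shown for
# illustration; the exact keyword names should be checked against the pytx
# docs):
#
#   access_token.access_token(app_id='<APP_ID>', app_secret='<APP_SECRET>')
#   results = Malware.objects(text='example.exe')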
|
"""
Consumer process management. Imports consumer code, manages RabbitMQ
connection state and collects stats about the consuming process.
"""
import collections
import logging
import math
import multiprocessing
import os
from os import path
import profile
import signal
import time
import warnings
from helper import config as helper_config
try:
import sprockets_influxdb as influxdb
except ImportError:
influxdb = None
import pika
from pika import exceptions, spec
from pika.adapters import tornado_connection
try:
import raven
from raven import breadcrumbs
from raven.contrib.tornado import AsyncSentryClient
except ImportError:
breadcrumbs, raven, AsyncSentryClient = None, None, None
from tornado import gen, ioloop, locks
from . import __version__, data, state, statsd, utils
LOGGER = logging.getLogger(__name__)
class Callbacks:
"""Slotted callback classes to fix namedtuple issue in 3.9"""
__slots__ = ['on_ready', 'on_connection_failure', 'on_closed',
'on_blocked', 'on_unblocked', 'on_confirmation',
'on_delivery', 'on_return']
def __init__(self, on_ready, on_connection_failure, on_closed, on_blocked,
on_unblocked, on_confirmation, on_delivery, on_return):
self.on_ready = on_ready
self.on_connection_failure = on_connection_failure
self.on_closed = on_closed
self.on_blocked = on_blocked
self.on_unblocked = on_unblocked
self.on_confirmation = on_confirmation
self.on_delivery = on_delivery
self.on_return = on_return
class Connection(state.State):
HB_INTERVAL = 300
STATE_CLOSED = 0x08
def __init__(self, name, config, consumer_name, should_consume,
publisher_confirmations, io_loop, callbacks):
super(Connection, self).__init__()
self.blocked = False
self.callbacks = callbacks
self.channel = None
self.config = config
self.should_consume = should_consume
self.consumer_tag = '{}-{}'.format(consumer_name, os.getpid())
self.io_loop = io_loop
self.name = name
self.publisher_confirm = publisher_confirmations
self.handle = self.connect()
# Override STOPPED with CLOSED
self.STATES[0x08] = 'CLOSED'
@property
def is_closed(self):
return self.is_stopped
def connect(self):
self.set_state(self.STATE_CONNECTING)
connection = tornado_connection.TornadoConnection(
self._connection_parameters,
stop_ioloop_on_close=False,
custom_ioloop=self.io_loop)
connection.add_on_close_callback(self.on_closed)
connection.add_on_open_callback(self.on_open)
connection.add_on_open_error_callback(self.on_open_error)
return connection
def shutdown(self):
if self.is_shutting_down:
LOGGER.debug('Connection %s is already shutting down', self.name)
return
self.set_state(self.STATE_SHUTTING_DOWN)
LOGGER.debug('Connection %s is shutting down', self.name)
if self.is_active:
LOGGER.debug('Connection %s is sending a Basic.Cancel to RabbitMQ',
self.name)
self.channel.basic_cancel(self.on_consumer_cancelled,
self.consumer_tag)
else:
self.channel.close()
def on_open(self, handle):
"""Invoked when the connection is opened
:type handle: pika.adapters.tornado_connection.TornadoConnection
"""
LOGGER.debug('Connection %s is open (%r)', self.name, handle)
self.handle = handle
try:
self.handle.channel(self.on_channel_open)
except exceptions.ConnectionClosed:
LOGGER.warning('Channel open on closed connection')
self.set_state(self.STATE_CLOSED)
self.callbacks.on_closed(self.name)
return
self.handle.add_on_connection_blocked_callback(self.on_blocked)
self.handle.add_on_connection_unblocked_callback(self.on_unblocked)
def on_open_error(self, *args, **kwargs):
LOGGER.error('Connection %s failure %r %r', self.name, args, kwargs)
self.on_failure()
def on_closed(self, _connection, status_code, status_text):
if self.is_connecting:
LOGGER.error('Connection %s failure while connecting (%s): %s',
self.name, status_code, status_text)
self.on_failure()
elif not self.is_closed:
self.set_state(self.STATE_CLOSED)
LOGGER.info('Connection %s closed (%s) %s',
self.name, status_code, status_text)
self.callbacks.on_closed(self.name)
def on_blocked(self, frame):
        LOGGER.warning('Connection %s is blocked: %r', self.name, frame)
self.blocked = True
self.callbacks.on_blocked(self.name)
def on_unblocked(self, frame):
        LOGGER.warning('Connection %s is unblocked: %r', self.name, frame)
self.blocked = False
self.callbacks.on_unblocked(self.name)
def on_channel_open(self, channel):
"""This method is invoked by pika when the channel has been opened. It
will change the state to IDLE, add the callbacks and setup the channel
to start consuming.
:param pika.channel.Channel channel: The channel object
"""
LOGGER.debug('Connection %s channel is now open', self.name)
self.set_state(self.STATE_IDLE)
self.channel = channel
self.channel.add_on_close_callback(self.on_channel_closed)
self.channel.add_on_cancel_callback(self.on_consumer_cancelled)
self.channel.add_on_return_callback(self.on_return)
if self.publisher_confirm:
self.channel.confirm_delivery(self.on_confirmation)
self.callbacks.on_ready(self.name)
def on_channel_closed(self, _channel, reply_code, reply_text):
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters. In this case, we'll close the connection
to shutdown the object.
:param pika.channel.Channel _channel: The AMQP Channel
:param int reply_code: The AMQP reply code
:param str reply_text: The AMQP reply text
"""
del self.channel
if reply_code <= 0 or reply_code == 404:
LOGGER.error('Channel Error (%r): %s',
reply_code, reply_text or 'unknown')
self.on_failure()
elif self.is_shutting_down:
LOGGER.debug('Connection %s closing', self.name)
self.handle.close()
elif self.is_running:
LOGGER.warning('Connection %s channel was closed: (%s) %s',
self.name, reply_code, reply_text)
try:
self.handle.channel(self.on_channel_open)
except exceptions.ConnectionClosed as error:
LOGGER.warning('Error raised while creating new channel: %s',
error)
self.on_failure()
else:
self.set_state(self.STATE_CONNECTING)
def on_failure(self):
LOGGER.info('Connection failure, terminating connection')
self.set_state(self.STATE_CLOSED)
try:
self.handle.close()
except AttributeError:
pass
del self.handle
self.callbacks.on_connection_failure(self.name)
def consume(self, queue_name, no_ack, prefetch_count):
self.set_state(self.STATE_ACTIVE)
self.channel.basic_qos(self.on_qos_set, 0, prefetch_count, False)
self.channel.basic_consume(consumer_callback=self.on_delivery,
queue=queue_name,
no_ack=no_ack,
consumer_tag=self.consumer_tag)
def on_qos_set(self, frame):
"""Invoked by pika when the QoS is set
:param pika.frame.Frame frame: The QoS Frame
"""
LOGGER.debug('Connection %s QoS was set: %r', self.name, frame)
def on_consumer_cancelled(self, frame):
"""Invoked by pika when a ``Basic.Cancel`` or ``Basic.CancelOk``
is received.
:param pika.frame.Frame frame: The QoS Frame
"""
LOGGER.info('Connection %s consumer has been cancelled', self.name)
if not self.is_shutting_down:
self.set_state(self.STATE_SHUTTING_DOWN)
self.channel.close()
def on_confirmation(self, frame):
"""Invoked by pika when RabbitMQ responds to a Basic.Publish RPC
command, passing in either a Basic.Ack or Basic.Nack frame with
the delivery tag of the message that was published. The delivery tag
is an integer counter indicating the message number that was sent
on the channel via Basic.Publish.
:param pika.frame.Method frame: Basic.Ack or Basic.Nack frame
"""
delivered = frame.method.NAME.split('.')[1].lower() == 'ack'
LOGGER.debug('Connection %s received delivery confirmation '
'(Delivered: %s)', self.name, delivered)
self.callbacks.on_confirmation(
self.name, delivered, frame.method.delivery_tag)
def on_delivery(self, channel, method, properties, body):
self.callbacks.on_delivery(
self.name, channel, method, properties, body)
def on_return(self, channel, method, properties, body):
self.callbacks.on_return(self.name, channel, method, properties, body)
@property
def _connection_parameters(self):
"""Return connection parameters for a pika connection.
:rtype: pika.ConnectionParameters
"""
return pika.ConnectionParameters(
self.config.get('host', 'localhost'),
self.config.get('port', 5672),
self.config.get('vhost', '/'),
pika.PlainCredentials(
self.config.get('user', 'guest'),
self.config.get('password', self.config.get('pass', 'guest'))),
ssl=self.config.get('ssl', False),
frame_max=self.config.get('frame_max', spec.FRAME_MAX_SIZE),
socket_timeout=self.config.get('socket_timeout', 10),
heartbeat_interval=self.config.get(
'heartbeat_interval', self.HB_INTERVAL))
class Process(multiprocessing.Process, state.State):
"""Core process class that manages the consumer object and communicates
with RabbitMQ.
"""
AMQP_APP_ID = 'rejected/%s' % __version__
# Additional State constants
STATE_PROCESSING = 0x04
# Counter constants
ACKED = 'acked'
CLOSED_ON_COMPLETE = 'closed_on_complete'
DROPPED = 'dropped'
ERROR = 'failed'
FAILURES = 'failures_until_stop'
NACKED = 'nacked'
PROCESSED = 'processed'
REQUEUED = 'requeued'
REDELIVERED = 'redelivered'
TIME_SPENT = 'processing_time'
TIME_WAITED = 'idle_time'
CONSUMER_EXCEPTION = 'consumer_exception'
MESSAGE_EXCEPTION = 'message_exception'
PROCESSING_EXCEPTION = 'processing_exception'
UNHANDLED_EXCEPTION = 'unhandled_exception'
QOS_PREFETCH_COUNT = 1
MAX_ERROR_COUNT = 5
MAX_ERROR_WINDOW = 60
MAX_SHUTDOWN_WAIT = 5
def __init__(self,
group=None,
target=None,
name=None,
args=(),
kwargs=None):
if kwargs is None:
kwargs = {}
super(Process, self).__init__(group, target, name, args, kwargs)
self.active_message = None
self.callbacks = Callbacks(self.on_connection_ready,
self.on_connection_failure,
self.on_connection_closed,
self.on_connection_blocked,
self.on_connection_unblocked,
self.on_confirmation,
self.on_delivery,
self.on_returned)
self.connections = {}
self.consumer = None
self.consumer_lock = None
self.consumer_version = None
self.counters = collections.Counter()
self.delivery_time = None
self.influxdb = None
self.ioloop = None
self.last_failure = 0
self.last_stats_time = None
self.measurement = None
self.message_connection_id = None
self.pending = collections.deque()
self.prepend_path = None
self.previous = None
self.sentry_client = None
self.state = self.STATE_INITIALIZING
self.state_start = time.time()
self.statsd = None
# Override ACTIVE with PROCESSING
self.STATES[0x04] = 'Processing'
def ack_message(self, message):
"""Acknowledge the message on the broker and log the ack
:param message: The message to acknowledge
:type message: rejected.data.Message
"""
if not self.connections[message.connection].is_running:
LOGGER.warning('Can not ack message, disconnected from RabbitMQ')
self.counters[self.CLOSED_ON_COMPLETE] += 1
return
LOGGER.debug('Acking %s', message.delivery_tag)
message.channel.basic_ack(delivery_tag=message.delivery_tag)
self.counters[self.ACKED] += 1
self.measurement.set_tag(self.ACKED, True)
def calc_velocity(self, values):
"""Return the message consuming velocity for the process.
:param dict values: The dict with velocity data
:rtype: float
"""
processed = (values['counts'].get(self.PROCESSED, 0) -
values['previous'].get(self.PROCESSED, 0))
duration = time.time() - self.last_stats_time
        # If nothing was processed or no time elapsed, return zero
if not processed or not duration:
return 0
        # Messages processed per second over the stats interval
velocity = float(processed) / float(duration)
LOGGER.debug('Message processing velocity: %.2f/s', velocity)
return velocity
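    # Worked example: 120 messages processed since a stats report taken 30
    # seconds earlier gives calc_velocity() = 120 / 30 = 4.0 messages/second.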
def create_connections(self):
"""Create and start the RabbitMQ connections, assigning the connection
object to the connections dict.
"""
self.set_state(self.STATE_CONNECTING)
for connection in self.consumer_config.get('connections', []):
name, confirm, consume = connection, False, True
if isinstance(connection, dict):
name = connection['name']
confirm = connection.get('publisher_confirmation', False)
consume = connection.get('consume', True)
if name not in self.config['Connections']:
LOGGER.critical('Connection "%s" for %s not found',
name, self.consumer_name)
continue
self.connections[name] = Connection(
name, self.config['Connections'][name], self.consumer_name,
consume, confirm, self.ioloop, self.callbacks)
@staticmethod
def get_config(cfg, number, name, connection):
"""Initialize a new consumer thread, setting defaults and config values
:param dict cfg: Consumer config section from YAML File
:param int number: The identification number for the consumer
:param str name: The name of the consumer
        :param str connection: The name of the connection
:rtype: dict
"""
return {
'connection': cfg['Connections'][connection],
'consumer_name': name,
'process_name': '%s_%i_tag_%i' % (name, os.getpid(), number)
}
def get_consumer(self, cfg):
"""Import and create a new instance of the configured message consumer.
:param dict cfg: The named consumer section of the configuration
:rtype: instance
:raises: ImportError
"""
try:
handle, version = utils.import_consumer(cfg['consumer'])
except ImportError as error:
LOGGER.exception('Error importing the consumer %s: %s',
cfg['consumer'], error)
return
if version:
LOGGER.info('Creating consumer %s v%s', cfg['consumer'], version)
self.consumer_version = version
else:
LOGGER.info('Creating consumer %s', cfg['consumer'])
settings = cfg.get('config', {})
settings['_import_module'] = '.'.join(cfg['consumer'].split('.')[0:-1])
kwargs = {
'settings': settings,
'process': self,
'drop_exchange': cfg.get('drop_exchange'),
'drop_invalid_messages': cfg.get('drop_invalid_messages'),
'message_type': cfg.get('message_type'),
'error_exchange': cfg.get('error_exchange'),
'error_max_retry': cfg.get('error_max_retry')
}
try:
return handle(**kwargs)
except Exception as error:
LOGGER.exception('Error creating the consumer "%s": %s',
cfg['consumer'], error)
@gen.coroutine
def invoke_consumer(self, message):
"""Wrap the actual processor processing bits
:param rejected.data.Message message: The message to process
"""
# Only allow for a single message to be processed at a time
with (yield self.consumer_lock.acquire()):
if self.is_idle:
self.set_state(self.STATE_PROCESSING)
self.delivery_time = start_time = time.time()
self.active_message = message
self.measurement = data.Measurement()
if message.method.redelivered:
self.counters[self.REDELIVERED] += 1
self.measurement.set_tag(self.REDELIVERED, True)
try:
result = yield self.consumer.execute(message,
self.measurement)
except Exception as error:
LOGGER.exception('Unhandled exception from consumer in '
'process. This should not happen. %s',
error)
result = data.MESSAGE_REQUEUE
LOGGER.debug('Finished processing message: %r', result)
self.on_processed(message, result, start_time)
elif self.is_waiting_to_shutdown:
LOGGER.info(
'Requeueing pending message due to pending shutdown')
self.reject(message, True)
self.shutdown_connections()
elif self.is_shutting_down:
LOGGER.info('Requeueing pending message due to shutdown')
self.reject(message, True)
self.on_ready_to_stop()
else:
LOGGER.warning('Exiting invoke_consumer without processing, '
'this should not happen. State: %s',
self.state_description)
if self.pending:
self.ioloop.add_callback(
self.invoke_consumer, self.pending.popleft())
@property
def is_processing(self):
"""Returns a bool specifying if the consumer is currently processing
:rtype: bool
"""
return self.state in [self.STATE_PROCESSING, self.STATE_STOP_REQUESTED]
def maybe_submit_measurement(self):
"""Check for configured instrumentation backends and if found, submit
the message measurement info.
"""
if self.statsd:
self.submit_statsd_measurements()
if self.influxdb:
self.submit_influxdb_measurement()
def on_connection_closed(self, name):
if self.is_running:
LOGGER.warning('Connection %s was closed, reconnecting', name)
return self.connections[name].connect()
ready = all(c.is_closed for c in self.connections.values())
if (self.is_shutting_down or self.is_waiting_to_shutdown) and ready:
self.on_ready_to_stop()
def on_connection_failure(self, *args, **kwargs):
ready = all(c.is_closed for c in self.connections.values())
LOGGER.warning('Connection failure while %s - Ready to stop: %r',
self.state_description, ready)
if (self.is_connecting or self.is_idle or self.is_shutting_down or
self.is_waiting_to_shutdown) and ready:
self.on_ready_to_stop()
def on_connection_ready(self, name):
LOGGER.debug('Connection %s indicated it is ready', name)
self.consumer.set_channel(name, self.connections[name].channel)
if all(c.is_idle for c in self.connections.values()):
for key in self.connections.keys():
if self.connections[key].should_consume:
self.connections[key].consume(
self.queue_name, self.no_ack, self.qos_prefetch)
if self.is_connecting:
self.set_state(self.STATE_IDLE)
def on_connection_blocked(self, name):
LOGGER.warning('Connection %s blocked', name)
if self.is_processing:
self.consumer.on_blocked(name)
def on_connection_unblocked(self, name):
LOGGER.info('Connection %s unblocked', name)
if self.is_processing:
            self.consumer.on_unblocked(name)
def on_confirmation(self, name, delivered, delivery_tag):
"""Invoked on delivery confirmation
:param str name: The RabbitMQ connection that confirmed the delivery
        :param bool delivered: Whether the message was successfully delivered
:param str delivery_tag: The delivery tag for the message
"""
if self.is_processing:
self.consumer.on_confirmation(name, delivered, delivery_tag)
def on_delivery(self, name, channel, method, properties, body):
"""Process a message from Rabbit
:param str name: The connection name
:param pika.channel.Channel channel: The message's delivery channel
:param pika.frames.MethodFrame method: The method frame
:param pika.spec.BasicProperties properties: The message properties
:param str body: The message body
"""
message = data.Message(name, channel, method, properties, body, False)
if self.is_processing:
self.pending.append(message)
else:
self.invoke_consumer(message)
def on_returned(self, name, channel, method, properties, body):
"""Send a message to the consumer that was returned by RabbitMQ
:param str name: The connection name
:param channel: The channel the message was returned on
        :type channel: pika.channel.Channel
:param pika.frames.MethodFrame method: The method frame
:param pika.spec.BasicProperties properties: The message properties
:param str body: The message body
"""
message = data.Message(name, channel, method, properties, body, True)
if self.is_processing:
self.pending.append(message)
else:
self.invoke_consumer(message)
def on_processed(self, message, result, start_time):
"""Invoked after a message is processed by the consumer and
implements the logic for how to deal with a message based upon
the result.
:param rejected.data.Message message: The message that was processed
:param int result: The result of the processing of the message
:param float start_time: When the message was received
"""
duration = max(start_time, time.time()) - start_time
self.counters[self.TIME_SPENT] += duration
self.measurement.add_duration(self.TIME_SPENT, duration)
if result == data.MESSAGE_DROP:
LOGGER.debug('Rejecting message due to drop return from consumer')
self.reject(message, False)
self.counters[self.DROPPED] += 1
elif result == data.MESSAGE_EXCEPTION:
LOGGER.debug('Rejecting message due to MessageException')
self.reject(message, False)
self.counters[self.MESSAGE_EXCEPTION] += 1
elif result == data.PROCESSING_EXCEPTION:
LOGGER.debug('Rejecting message due to ProcessingException')
if self.consumer.ACK_PROCESSING_EXCEPTIONS:
self.ack_message(message)
else:
self.reject(message, False)
self.counters[self.PROCESSING_EXCEPTION] += 1
elif result == data.CONSUMER_EXCEPTION:
LOGGER.debug('Re-queueing message due to ConsumerException')
self.reject(message, True)
self.on_processing_error()
self.counters[self.CONSUMER_EXCEPTION] += 1
elif result == data.UNHANDLED_EXCEPTION:
LOGGER.debug('Re-queueing message due to UnhandledException')
self.reject(message, True)
self.on_processing_error()
self.counters[self.UNHANDLED_EXCEPTION] += 1
elif result == data.MESSAGE_REQUEUE:
            LOGGER.debug('Re-queueing message due to Consumer request')
self.reject(message, True)
self.counters[self.REQUEUED] += 1
elif result == data.MESSAGE_ACK and not self.no_ack:
self.ack_message(message)
self.counters[self.PROCESSED] += 1
self.measurement.set_tag(self.PROCESSED, True)
self.maybe_submit_measurement()
self.reset_state()
def on_processing_error(self):
"""Called when message processing failure happens due to a
ConsumerException or an unhandled exception.
"""
duration = time.time() - self.last_failure
if duration > self.MAX_ERROR_WINDOW:
LOGGER.info('Resetting failure window, %i seconds since last',
duration)
self.reset_error_counter()
self.counters[self.ERROR] += 1
self.last_failure = time.time()
if self.too_many_errors:
LOGGER.critical('Error threshold exceeded (%i), shutting down',
self.counters[self.ERROR])
self.shutdown_connections()
def on_ready_to_stop(self):
"""Invoked when the consumer is ready to stop."""
LOGGER.debug('Ready to stop')
        # Set the state to shutting down if it was not already set earlier
self.set_state(self.STATE_SHUTTING_DOWN)
# Reset any signal handlers
signal.signal(signal.SIGABRT, signal.SIG_IGN)
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGPROF, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN)
# Allow the consumer to gracefully stop and then stop the IOLoop
if self.consumer:
self.stop_consumer()
# Clear IOLoop constructs
self.consumer_lock = None
# Stop the IOLoop
if self.ioloop:
LOGGER.debug('Stopping IOLoop')
self.ioloop.stop()
# Note that shutdown is complete and set the state accordingly
self.set_state(self.STATE_STOPPED)
LOGGER.info('Shutdown complete')
def on_sigprof(self, _unused_signum, _unused_frame):
"""Called when SIGPROF is sent to the process, will dump the stats, in
future versions, queue them for the master process to get data.
:param int _unused_signum: The signal number
:param frame _unused_frame: The python frame the signal was received at
"""
self.stats_queue.put(self.report_stats(), True)
self.last_stats_time = time.time()
signal.siginterrupt(signal.SIGPROF, False)
def on_startup_error(self, error):
"""Invoked when a pre-condition for starting the consumer has failed.
Log the error and then exit the process.
"""
LOGGER.critical('Could not start %s: %s', self.consumer_name, error)
self.set_state(self.STATE_STOPPED)
def reject(self, message, requeue=True):
"""Reject the message on the broker and log it.
:param message: The message to reject
        :type message: rejected.data.Message
:param bool requeue: Specify if the message should be re-queued or not
"""
if self.no_ack:
            raise RuntimeError('Cannot reject messages when ack is disabled')
if not self.connections[message.connection].is_running:
LOGGER.warning('Can not nack message, disconnected from RabbitMQ')
self.counters[self.CLOSED_ON_COMPLETE] += 1
return
LOGGER.warning('Rejecting message %s %s requeue', message.delivery_tag,
'with' if requeue else 'without')
message.channel.basic_nack(
delivery_tag=message.delivery_tag, requeue=requeue)
self.measurement.set_tag(self.NACKED, True)
self.measurement.set_tag(self.REQUEUED, requeue)
def report_stats(self):
"""Create the dict of stats data for the MCP stats queue"""
if not self.previous:
self.previous = {}
for key in self.counters:
self.previous[key] = 0
values = {
'name': self.name,
'consumer_name': self.consumer_name,
'counts': dict(self.counters),
'previous': dict(self.previous)
}
self.previous = dict(self.counters)
return values
def reset_error_counter(self):
"""Reset the error counter to 0"""
LOGGER.debug('Resetting the error counter')
self.counters[self.ERROR] = 0
def reset_state(self):
"""Reset the runtime state after processing a message to either idle
or shutting down based upon the current state.
"""
self.active_message = None
self.measurement = None
if self.is_waiting_to_shutdown:
self.set_state(self.STATE_SHUTTING_DOWN)
self.shutdown_connections()
elif self.is_processing:
self.set_state(self.STATE_IDLE)
elif self.is_idle or self.is_connecting or self.is_shutting_down:
pass
else:
            LOGGER.critical('Unexpected state: %s', self.state_description)
LOGGER.debug('State reset to %s (%s in pending)',
self.state_description, len(self.pending))
def run(self):
"""Start the consumer"""
if self.profile_file:
LOGGER.info('Profiling to %s', self.profile_file)
profile.runctx('self._run()', globals(), locals(),
self.profile_file)
else:
self._run()
LOGGER.debug('Exiting %s (%i, %i)', self.name, os.getpid(),
os.getppid())
def _run(self):
"""Run method that can be profiled"""
self.set_state(self.STATE_INITIALIZING)
self.ioloop = ioloop.IOLoop.current()
self.consumer_lock = locks.Lock()
self.sentry_client = self.setup_sentry(
self._kwargs['config'], self.consumer_name)
try:
self.setup()
except (AttributeError, ImportError) as error:
LOGGER.exception('Setup failure: %s', error)
return self.on_startup_error(
'Failed to import the Python module for {}'.format(
self.consumer_name))
if not self.is_stopped:
try:
self.ioloop.start()
except KeyboardInterrupt:
LOGGER.warning('CTRL-C while waiting for clean shutdown')
def send_exception_to_sentry(self, exc_info):
"""Send an exception to Sentry if enabled.
:param tuple exc_info: exception information as returned from
:func:`sys.exc_info`
"""
if not self.sentry_client:
LOGGER.debug('No sentry_client, aborting')
return
message = dict(self.active_message)
try:
duration = math.ceil(time.time() - self.delivery_time) * 1000
except TypeError:
duration = 0
kwargs = {
'extra': {
'consumer_name': self.consumer_name,
'env': dict(os.environ),
'message': message},
'time_spent': duration}
LOGGER.debug('Sending exception to sentry: %r', kwargs)
self.sentry_client.captureException(exc_info, **kwargs)
def setup(self):
"""Initialize the consumer, setting up needed attributes and connecting
to RabbitMQ.
"""
helper_config.LoggingConfig(self.logging_config).configure()
LOGGER.info('Initializing for %s', self.name)
if 'consumer' not in self.consumer_config:
return self.on_startup_error(
'"consumer" not specified in configuration')
self.consumer = self.get_consumer(self.consumer_config)
if not self.consumer:
return self.on_startup_error(
'Could not import "{}"'.format(
self.consumer_config.get(
'consumer', 'unconfigured consumer')))
self.setup_instrumentation()
self.reset_error_counter()
self.setup_sighandlers()
self.create_connections()
def setup_influxdb(self, config):
"""Configure the InfluxDB module for measurement submission.
:param dict config: The InfluxDB configuration stanza
"""
base_tags = {
'version': self.consumer_version
}
measurement = self.config.get('influxdb_measurement',
os.environ.get('SERVICE'))
if measurement != self.consumer_name:
base_tags['consumer'] = self.consumer_name
for key in {'ENVIRONMENT', 'SERVICE'}:
if key in os.environ:
base_tags[key.lower()] = os.environ[key]
influxdb.install(
'{}://{}:{}/write'.format(
config.get('scheme',
os.environ.get('INFLUXDB_SCHEME', 'http')),
config.get('host',
os.environ.get('INFLUXDB_HOST', 'localhost')),
config.get('port', os.environ.get('INFLUXDB_PORT', '8086'))
),
config.get('user', os.environ.get('INFLUXDB_USER')),
config.get('password', os.environ.get('INFLUXDB_PASSWORD')),
base_tags=base_tags)
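        # With the defaults above, install() receives the endpoint
        # 'http://localhost:8086/write' plus the optional credentials.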
return config.get('database', 'rejected'), measurement
def setup_instrumentation(self):
"""Configure instrumentation for submission per message measurements
to statsd and/or InfluxDB.
"""
if not self.config.get('stats') and not self.config.get('statsd'):
return
if 'stats' not in self.config:
self.config['stats'] = {}
# Backwards compatible statsd config support
if self.config.get('statsd'):
warnings.warn('Deprecated statsd configuration detected',
DeprecationWarning)
self.config['stats'].setdefault('statsd',
self.config.get('statsd'))
if self.config['stats'].get('statsd'):
if self.config['stats']['statsd'].get('enabled', True):
self.statsd = statsd.Client(self.consumer_name,
self.config['stats']['statsd'],
self.stop)
LOGGER.debug('statsd measurements configured')
# InfluxDB support
if influxdb and self.config['stats'].get('influxdb'):
if self.config['stats']['influxdb'].get('enabled', True):
self.influxdb = self.setup_influxdb(
self.config['stats']['influxdb'])
LOGGER.debug('InfluxDB measurements configured: %r', self.influxdb)
def setup_sentry(self, cfg, consumer_name):
# Setup the Sentry client if configured and installed
sentry_dsn = cfg['Consumers'][consumer_name].get('sentry_dsn',
cfg.get('sentry_dsn'))
if not raven or not sentry_dsn:
return
consumer = cfg['Consumers'][consumer_name]['consumer'].split('.')[0]
kwargs = {
'exclude_paths': [],
'include_paths':
['pika', 'rejected', 'raven', 'tornado', consumer],
'ignore_exceptions': ['rejected.consumer.ConsumerException',
'rejected.consumer.MessageException',
'rejected.consumer.ProcessingException'],
'processors': ['raven.processors.SanitizePasswordsProcessor']
}
if os.environ.get('ENVIRONMENT'):
kwargs['environment'] = os.environ['ENVIRONMENT']
if self.consumer_version:
kwargs['version'] = self.consumer_version
for logger in {'pika', 'pika.channel', 'pika.connection',
'pika.callback', 'pika.heartbeat',
'rejected.process', 'rejected.state'}:
breadcrumbs.ignore_logger(logger)
return AsyncSentryClient(sentry_dsn, **kwargs)
def setup_sighandlers(self):
"""Setup the stats and stop signal handlers."""
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN)
signal.signal(signal.SIGPROF, self.on_sigprof)
signal.signal(signal.SIGABRT, self.stop)
signal.siginterrupt(signal.SIGPROF, False)
signal.siginterrupt(signal.SIGABRT, False)
LOGGER.debug('Signal handlers setup')
def shutdown_connections(self):
"""This method closes the connections to RabbitMQ."""
if not self.is_shutting_down:
self.set_state(self.STATE_SHUTTING_DOWN)
for name in self.connections:
if self.connections[name].is_running:
self.connections[name].shutdown()
def stop(self, signum=None, _unused=None):
"""Stop the consumer from consuming by calling BasicCancel and setting
our state.
:param int signum: The signal received
:param frame _unused: The stack frame from when the signal was called
"""
LOGGER.debug('Stop called in state: %s', self.state_description)
if self.is_stopped:
LOGGER.warning('Stop requested but consumer is already stopped')
return
elif self.is_shutting_down:
LOGGER.warning('Stop requested, consumer is already shutting down')
return
elif self.is_waiting_to_shutdown:
LOGGER.warning('Stop requested but already waiting to shut down')
return
# Stop consuming and close AMQP connections
self.shutdown_connections()
# Wait until the consumer has finished processing to shutdown
if self.is_processing:
LOGGER.info('Waiting for consumer to finish processing')
self.set_state(self.STATE_STOP_REQUESTED)
if signum == signal.SIGTERM:
signal.siginterrupt(signal.SIGTERM, False)
return
def stop_consumer(self):
"""Stop the consumer object and allow it to do a clean shutdown if it
has the ability to do so.
"""
try:
LOGGER.info('Shutting down the consumer')
self.consumer.shutdown()
except AttributeError:
LOGGER.debug('Consumer does not have a shutdown method')
def submit_influxdb_measurement(self):
"""Submit a measurement for a message to InfluxDB"""
measurement = influxdb.Measurement(*self.influxdb)
measurement.set_timestamp(time.time())
for key, value in self.measurement.counters.items():
measurement.set_field(key, value)
for key, value in self.measurement.tags.items():
measurement.set_tag(key, value)
for key, value in self.measurement.values.items():
measurement.set_field(key, value)
for key, values in self.measurement.durations.items():
if len(values) == 1:
measurement.set_field(key, values[0])
elif len(values) > 1:
measurement.set_field('{}-average'.format(key),
sum(values) / len(values))
measurement.set_field('{}-max'.format(key), max(values))
measurement.set_field('{}-min'.format(key), min(values))
measurement.set_field('{}-median'.format(key),
utils.percentile(values, 50))
measurement.set_field('{}-95th'.format(key),
utils.percentile(values, 95))
influxdb.add_measurement(measurement)
LOGGER.debug('InfluxDB Measurement: %r', measurement.marshall())
def submit_statsd_measurements(self):
"""Submit a measurement for a message to statsd as individual items."""
for key, value in self.measurement.counters.items():
self.statsd.incr(key, value)
for key, values in self.measurement.durations.items():
for value in values:
self.statsd.add_timing(key, value)
for key, value in self.measurement.values.items():
self.statsd.set_gauge(key, value)
for key, value in self.measurement.tags.items():
if isinstance(value, bool):
if value:
self.statsd.incr(key)
elif isinstance(value, str):
if value:
self.statsd.incr('{}.{}'.format(key, value))
elif isinstance(value, int):
self.statsd.incr(key, value)
else:
LOGGER.warning('The %s value type of %s is unsupported',
key, type(value))
@property
def active_consumers(self):
return len([c for c in self.connections.values()
                    if c.should_consume and c.is_active])
@property
def config(self):
return self._kwargs['config']
@property
def consumer_config(self):
return self.config['Consumers'][self.consumer_name] or {}
@property
def consumer_name(self):
return self._kwargs['consumer_name']
@property
def expected_consumers(self):
return len([c for c in self.connections.values() if c.should_consume])
@property
def logging_config(self):
return self._kwargs['logging_config']
@property
def max_error_count(self):
return int(self.consumer_config.get('max_errors',
self.MAX_ERROR_COUNT))
@property
def no_ack(self):
return not self.consumer_config.get('ack', True)
@property
def profile_file(self):
"""Return the full path to write the cProfile data
:return: str
"""
if not self._kwargs['profile']:
return None
if os.path.exists(self._kwargs['profile']) and \
os.path.isdir(self._kwargs['profile']):
return '%s/%s-%s.prof' % (path.normpath(self._kwargs['profile']),
os.getpid(),
self._kwargs['consumer_name'])
return None
@property
def qos_prefetch(self):
"""Return the base, configured QoS prefetch value.
:rtype: int
"""
return self.consumer_config.get(
'qos_prefetch', self.QOS_PREFETCH_COUNT)
@property
def queue_name(self):
return self.consumer_config.get('queue', self.name)
@property
def stats_queue(self):
return self._kwargs['stats_queue']
@property
def too_many_errors(self):
"""Return a bool if too many errors have occurred.
:rtype: bool
"""
return self.counters[self.ERROR] >= self.max_error_count
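# Configuration sketch (illustrative; the key names are exactly the lookups
# made in this module -- config['Connections'], config['Consumers'],
# 'consumer', 'connections', 'queue', 'ack', 'qos_prefetch', 'max_errors' --
# while the YAML layout itself is an assumption):
#
#   Connections:
#     rabbit1:
#       host: localhost
#       port: 5672
#       vhost: /
#       user: guest
#       password: guest
#   Consumers:
#     example:
#       consumer: example.Consumer
#       connections: [rabbit1]
#       queue: example_queue
#       ack: true
#       qos_prefetch: 1
#       max_errors: 5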
|
import os
from datetime import datetime, timedelta
from django.db import models
from django.core.exceptions import PermissionDenied, ValidationError
from mezzanine.conf import settings
from hs_core.signals import pre_check_bag_flag
class ResourceIRODSMixin(models.Model):
""" This contains iRODS methods to be included as options for resources """
class Meta:
abstract = True
@property
def irods_home_path(self):
"""
Return the home path for local iRODS resources
This must be public in order to be accessed from the methods below in a mixin context.
"""
return settings.IRODS_CWD
def irods_full_path(self, path):
"""
Return fully qualified path for local paths
This leaves fully qualified paths alone, but presumes that unqualified paths
are home paths, and adds irods_home_path to these to qualify them.
"""
if path.startswith('/'):
return path
else:
return os.path.join(self.irods_home_path, path)
def update_bag(self):
"""
Update a bag if necessary.
This uses the Django signal pre_check_bag_flag to prepare collections,
and then checks the AVUs 'metadata_dirty' and 'bag_modified' to determine
whether to regenerate the metadata files and/or bag.
This is a synchronous update. The call waits until the update is finished.
"""
from hs_core.tasks import create_bag_by_irods
from hs_core.hydroshare.resource import check_resource_type
from hs_core.hydroshare.hs_bagit import create_bag_metadata_files
# send signal for pre_check_bag_flag
resource_cls = check_resource_type(self.resource_type)
pre_check_bag_flag.send(sender=resource_cls, resource=self)
metadata_dirty = self.getAVU('metadata_dirty')
bag_modified = self.getAVU('bag_modified')
if metadata_dirty: # automatically cast to Bool
create_bag_metadata_files(self)
self.setAVU('metadata_dirty', False)
# the ticket system does synchronous bag creation.
# async bag creation isn't supported.
if bag_modified: # automatically cast to Bool
create_bag_by_irods(self.short_id)
self.setAVU('bag_modified', False)
def update_metadata_files(self):
"""
Make the metadata files resourcemetadata.xml and resourcemap.xml up to date.
This checks the "metadata dirty" AVU before updating files if necessary.
"""
from hs_core.hydroshare.hs_bagit import create_bag_metadata_files
metadata_dirty = self.getAVU('metadata_dirty')
if metadata_dirty:
create_bag_metadata_files(self)
self.setAVU('metadata_dirty', False)
def create_ticket(self, user, path=None, write=False, allowed_uses=1):
"""
create an iRODS ticket for reading or modifying a resource
:param user: user to authorize
        :param path: path in iRODS to the object being requested.
        :param write: whether to create a write ticket instead of a read ticket.
        :param allowed_uses: number of possible uses of the ticket; default is one use.
:return:
:raises PermissionDenied: if user is not allowed to create the ticket.
:raises SessionException: if ticket fails to be created for some reason.
:raises SuspiciousFileOperation: if path uses .. illegally
WARNING: This creates a ticket that expires in one hour in UTC. If the
iRODS and django servers are in different time zones and not set for UTC,
this results in a useless ticket. This includes federated servers.
THERE IS NO STRAIGHTFORWARD MECHANISM IN IRODS for determining the time zone
or local time of an iRODS server.
Also, note that there is no mechanism for asynchronous bag creation in the
ticketing system. The bag is created *synchronously* if required. The
ticket is not issued until the bag exists.
"""
from hs_core.models import path_is_allowed
# raises SuspiciousFileOperation if path is not allowed
path_is_allowed(path)
# authorize user
if write:
if not user.is_authenticated() or not user.uaccess.can_change_resource(self):
raise PermissionDenied("user {} cannot change resource {}"
.format(user.username, self.short_id))
else:
if not self.raccess.public and (not user.is_authenticated() or
not user.uaccess.can_view_resource(self)):
raise PermissionDenied("user {} cannot view resource {}"
.format(user.username, self.short_id))
if path is None:
path = self.file_path # default = all data files
# can only write resource files
if write:
if not path.startswith(self.file_path):
raise PermissionDenied("{} can only write data files to {}"
.format(user.username, self.short_id))
# can read anything inside this particular resource
else:
if path == self.bag_path:
self.update_bag()
elif not path.startswith(self.root_path):
raise PermissionDenied("invalid resource file path {}".format(path))
elif path == self.resmap_path or path == self.scimeta_path:
self.update_metadata_files()
istorage = self.get_irods_storage()
read_or_write = 'write' if write else 'read'
if path.startswith(self.short_id) or path.startswith('bags/'): # local path
path = os.path.join(self.irods_home_path, path)
stdout, stderr = istorage.session.run("iticket", None, 'create', read_or_write, path)
if not stdout.startswith('ticket:'):
raise ValidationError("ticket creation failed: {}", stderr)
ticket = stdout.split('\n')[0]
ticket_id = ticket[len('ticket:'):]
istorage.session.run('iticket', None, 'mod', ticket_id,
'uses', str(allowed_uses))
# This creates a timestamp with a one-hour timeout.
# Note that this is a timeout on when the ticket is first used, and
# not on the completion of the use, which can take considerably longer.
# TODO: this will fail unless Django and iRODS are both running in UTC.
# There is no current mechanism for determining the timezone of a remote iRODS
# server from within iRODS; shell access is required.
timeout = datetime.now() + timedelta(hours=1)
formatted = timeout.strftime("%Y-%m-%d.%H:%M")
istorage.session.run('iticket', None, 'mod', ticket_id,
'expire', formatted)
# fully qualify home paths with their iRODS prefix when returning them.
return ticket_id, self.irods_full_path(path)
def list_ticket(self, ticket_id):
""" List a ticket's attributes """
istorage = self.get_irods_storage()
stdout, stderr = istorage.session.run("iticket", None, 'ls', ticket_id)
if stdout.startswith('id:'):
stuff = stdout.split('\n')
output = {}
for s in stuff:
try:
line = s.split(': ')
key = line[0]
value = line[1]
if key == 'collection name' or key == 'data collection':
output['full_path'] = value
if self.is_federated:
if __debug__:
assert(value.startswith(self.resource_federation_path))
output['long_path'] = value[len(self.resource_federation_path):]
output['home_path'] = self.resource_federation_path
else:
location = value.find(self.short_id)
if __debug__:
assert(location >= 0)
if location == 0:
output['long_path'] = value
output['home_path'] = self.irods_home_path
else:
output['long_path'] = value[location:]
# omit trailing slash
output['home_path'] = value[:(location-1)]
if __debug__:
assert(output['long_path'].startswith(self.short_id))
if key == 'string':
output['ticket_id'] = value
elif key == 'data-object name':
output['filename'] = value
elif key == 'ticket type':
output['type'] = value
elif key == 'owner name':
output['owner'] = value
elif key == 'owner zone':
output['zone'] = value
elif key == 'expire time':
output['expires'] = value
else:
output[line[0]] = line[1]
except Exception: # no ':' in line
pass
# put in actual file path including folder and filename
if 'filename' in output:
output['full_path'] = os.path.join(output['full_path'], output['filename'])
return output
elif stdout == '':
raise ValidationError("ticket {} not found".format(ticket_id))
else:
raise ValidationError("ticket {} error: {}".format(ticket_id, stderr))
def delete_ticket(self, user, ticket_id):
"""
delete an existing ticket
:param ticket_id: ticket string
:raises SessionException: if ticket does not exist.
This checks that the user at least has the privilege the ticket grants,
before deleting it. This is not quite as comprehensive as keeping a
ticket history, but provides a small amount of safety nonetheless.
It remains possible for one user to delete the ticket of another user without
that user's knowledge, provided that the users have the same privilege.
However, tickets are not broadcast, so this is unlikely to happen.
The usual mechanism -- of checking that the user owns the ticket -- is not
practical, because the ticket owner is always the proxy user.
"""
meta = self.list_ticket(ticket_id)
if self.root_path not in meta['full_path']:
raise PermissionDenied("user {} cannot delete ticket {} for a different resource"
.format(user.username, ticket_id))
# get kind of ticket
write = meta['type'] == 'write'
# authorize user
if write:
if not user.is_authenticated() or not user.uaccess.can_change_resource(self):
raise PermissionDenied("user {} cannot delete change ticket {} for {}"
.format(user.username, ticket_id, self.short_id))
else:
if not user.is_authenticated() or not user.uaccess.can_view_resource(self):
raise PermissionDenied("user {} cannot delete view ticket {} for {}"
.format(user.username, ticket_id, self.short_id))
istorage = self.get_irods_storage()
istorage.session.run('iticket', None, 'delete', ticket_id)
return meta
class ResourceFileIRODSMixin(models.Model):
""" This contains iRODS functions related to resource files """
class Meta:
abstract = True
def create_ticket(self, user, write=False):
""" This creates a ticket to read or modify this file """
return self.resource.create_ticket(user, path=self.storage_path, write=write)
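# Usage sketch (illustrative): issuing and revoking a one-use read ticket for
# a resource file; `resource_file` is assumed to be a model instance that
# mixes in ResourceFileIRODSMixin.
#
#   ticket_id, full_path = resource_file.create_ticket(request.user)
#   ...  # hand ticket_id to an iRODS client for the read
#   resource_file.resource.delete_ticket(request.user, ticket_id)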
|
"""Defines a Jupyter Notebook interface to Klampt."""
from ._version import version_info, __version__
from .widgets import *
|
import os
import simplejson as json
from tests.checks.common import AgentCheckTest, Fixtures
from checks import AgentCheck
class TestCeph(AgentCheckTest):
"""Basic Test for ceph integration."""
CHECK_NAME = 'ceph'
FIXTURE_DIR = os.path.join(os.path.dirname(__file__), 'ci')
def test_simple_metrics(self):
mocks = {
'_collect_raw': lambda x,y,z: json.loads(Fixtures.read_file('raw.json', sdk_dir=self.FIXTURE_DIR)),
}
config = {
'instances': [{'host': 'foo'}]
}
self.run_check_twice(config, mocks=mocks, force_reload=True)
expected_tags = ['ceph_fsid:e0efcf84-e8ed-4916-8ce1-9c70242d390a',
'ceph_mon_state:peon']
expected_metrics = ['ceph.num_mons', 'ceph.total_objects', 'ceph.pgstate.active_clean']
for metric in expected_metrics:
self.assertMetric(metric, count=1, tags=expected_tags)
self.assertServiceCheck('ceph.overall_status', status=AgentCheck.OK)
def test_warn_health(self):
mocks = {
'_collect_raw': lambda x,y,z: json.loads(
Fixtures.read_file('warn.json', sdk_dir=self.FIXTURE_DIR)),
}
config = {
'instances': [{'host': 'foo'}]
}
self.run_check_twice(config, mocks=mocks, force_reload=True)
expected_tags = ['ceph_fsid:e0efcf84-e8ed-4916-8ce1-9c70242d390a',
'ceph_mon_state:peon']
expected_metrics = ['ceph.num_mons', 'ceph.total_objects', 'ceph.pgstate.active_clean']
for metric in expected_metrics:
self.assertMetric(metric, count=1, tags=expected_tags)
self.assertServiceCheck('ceph.overall_status', status=AgentCheck.WARNING)
def test_luminous_warn_health(self):
mocks = {
'_collect_raw': lambda x,y,z: json.loads(
Fixtures.read_file('ceph_luminous_warn.json', sdk_dir=self.FIXTURE_DIR)),
}
config = {
'instances': [{
'host': 'foo',
'collect_service_check_for': ['OSD_NEARFULL', 'OSD_FULL'],
}]
}
self.run_check(config, mocks=mocks, force_reload=True)
self.assertServiceCheck('ceph.overall_status', status=AgentCheck.CRITICAL)
self.assertServiceCheck('ceph.osd_nearfull', status=AgentCheck.WARNING)
self.assertServiceCheck('ceph.osd_full', status=AgentCheck.CRITICAL)
def test_luminous_ok_health(self):
mocks = {
'_collect_raw': lambda x,y,z: json.loads(
Fixtures.read_file('ceph_luminous_ok.json', sdk_dir=self.FIXTURE_DIR)),
}
config = {
'instances': [{
'host': 'foo',
'collect_service_check_for': ['OSD_NEARFULL'],
}]
}
self.run_check(config, mocks=mocks, force_reload=True)
self.assertServiceCheck('ceph.overall_status', status=AgentCheck.OK)
self.assertServiceCheck('ceph.osd_nearfull', status=AgentCheck.OK)
self.assertServiceCheck('ceph.pool_app_not_enabled', count=0)
def test_luminous_osd_full_metrics(self):
mocks = {
'_collect_raw': lambda x,y,z: json.loads(
Fixtures.read_file('ceph_luminous_warn.json', sdk_dir=self.FIXTURE_DIR)),
}
config = {
'instances': [{'host': 'foo'}]
}
self.run_check(config, mocks=mocks, force_reload=True)
self.assertMetric('ceph.num_full_osds', value=1)
self.assertMetric('ceph.num_near_full_osds', value=1)
def test_tagged_metrics(self):
mocks = {
'_collect_raw': lambda x,y,z: json.loads(
Fixtures.read_file('raw.json', sdk_dir=self.FIXTURE_DIR)),
}
config = {
'instances': [{'host': 'foo'}]
}
self.run_check_twice(config, mocks=mocks, force_reload=True)
for osd in ['osd0', 'osd1', 'osd2']:
expected_tags = ['ceph_fsid:e0efcf84-e8ed-4916-8ce1-9c70242d390a',
'ceph_mon_state:peon',
'ceph_osd:%s' % osd]
for metric in ['ceph.commit_latency_ms', 'ceph.apply_latency_ms']:
self.assertMetric(metric, count=1, tags=expected_tags)
for pool in ['pool0', 'rbd']:
expected_tags = ['ceph_fsid:e0efcf84-e8ed-4916-8ce1-9c70242d390a',
'ceph_mon_state:peon',
'ceph_pool:%s' % pool]
for metric in ['ceph.read_bytes', 'ceph.write_bytes', 'ceph.pct_used', 'ceph.num_objects']:
self.assertMetric(metric, count=1, tags=expected_tags)
def test_osd_status_metrics(self):
mocks = {
'_collect_raw': lambda x,y,z: json.loads(
Fixtures.read_file('ceph_10.2.2.json', sdk_dir=self.FIXTURE_DIR)),
}
config = {
'instances': [{'host': 'foo'}]
}
self.run_check_twice(config, mocks=mocks, force_reload=True)
for osd, pct_used in [('osd1', 94), ('osd2', 95)]:
expected_tags = ['ceph_fsid:e0efcf84-e8ed-4916-8ce1-9c70242d390a','ceph_mon_state:leader',
'ceph_osd:%s' % osd]
for metric in ['ceph.osd.pct_used']:
self.assertMetric(metric, value=pct_used, count=1, tags=expected_tags)
self.assertMetric('ceph.num_full_osds', value=1, count=1,
tags=['ceph_fsid:e0efcf84-e8ed-4916-8ce1-9c70242d390a', 'ceph_mon_state:leader'])
self.assertMetric('ceph.num_near_full_osds', value=1, count=1,
tags=['ceph_fsid:e0efcf84-e8ed-4916-8ce1-9c70242d390a', 'ceph_mon_state:leader'])
for pool in ['rbd', 'scbench']:
expected_tags = ['ceph_fsid:e0efcf84-e8ed-4916-8ce1-9c70242d390a','ceph_mon_state:leader',
'ceph_pool:%s' % pool]
expected_metrics = ['ceph.read_op_per_sec', 'ceph.write_op_per_sec', 'ceph.op_per_sec']
for metric in expected_metrics:
self.assertMetric(metric, count=1, tags=expected_tags)
def test_osd_status_metrics_non_osd_health(self):
"""
        The `detail` key of `health detail` can contain info on the health of
        non-OSD units; this should not make the check fail.
"""
mocks = {
'_collect_raw': lambda x,y,z: json.loads(
Fixtures.read_file('ceph_10.2.2_mon_health.json', sdk_dir=self.FIXTURE_DIR)),
}
config = {
'instances': [{'host': 'foo'}]
}
self.run_check_twice(config, mocks=mocks, force_reload=True)
self.assertMetric('ceph.num_full_osds', value=0, count=1,
tags=['ceph_fsid:7d375c2a-902a-4990-93fd-ce21a296f444', 'ceph_mon_state:leader'])
self.assertMetric('ceph.num_near_full_osds', value=0, count=1,
tags=['ceph_fsid:7d375c2a-902a-4990-93fd-ce21a296f444', 'ceph_mon_state:leader'])
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('catalogue', '0004_auto_20150726_1315'),
]
operations = [
migrations.CreateModel(
name='JobType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('job', models.CharField(max_length=40)),
('active', models.BooleanField(default=True)),
('date_created', models.DateTimeField(auto_now_add=True)),
('date_modified', models.DateTimeField(auto_now=True)),
],
),
migrations.AddField(
model_name='catalogueitem',
name='relevant_to',
field=models.ManyToManyField(to='catalogue.JobType'),
),
]
|
from __future__ import absolute_import
import six
import hashlib
import logging
import os
from nose.plugins import Plugin
def hash_filename(filename):
'''Design goal:
* Take a filename and output a number.
* Return the same number even if the filename
is now in a different path.
To achieve that, it assumes that filename is a sub-path
of the current working directory, and then removes the
current working directory from the path.
'''
here = os.path.realpath(os.getcwd())
there = os.path.realpath(filename)
assert there.startswith(here), "{} must start with {}".format(there, here)
shorter_there = six.ensure_binary(there[len(here):])
as_int = int(hashlib.sha1(shorter_there).hexdigest(), 16)
return as_int
class NosePicker(Plugin):
name = 'nose-picker'
def __init__(self, *args, **kwargs):
self.output = True
self.enableOpt = 'with-nose-picker'
self.logger = logging.getLogger('nose.plugins.picker')
def options(self, parser, env=os.environ):
parser.add_option(
'--which-process',
type='int',
dest='which_process',
help='nose-picker: Which process number this is of the total.',
)
parser.add_option(
'--futz-with-django',
action='store_true',
dest='futz_with_django',
help='nose-picker: Whether to futz with the django configuration.',
)
parser.add_option(
'--total-processes',
type='int',
dest='total_processes',
help='nose-picker: How many total processes to run with.',
)
super(NosePicker, self).options(parser, env=env)
def configure(self, options, config):
self.enabled = getattr(options, self.enableOpt)
self.total_processes = options.total_processes
self.which_process = options.which_process
if options.futz_with_django:
import django
from django.db import connections
for connection in connections.all():
test_alias = 'test_{name}__{process}'.format(
name=connection.settings_dict['NAME'],
process=self.which_process,
)
if django.VERSION >= (1, 7):
connection.settings_dict.setdefault('TEST', {})
connection.settings_dict['TEST']['NAME'] = test_alias
else:
connection.settings_dict['TEST_NAME'] = test_alias
super(NosePicker, self).configure(options, config)
def wantFile(self, fullpath):
"""
Do we want to run this file? See _should_run.
"""
return self._should_run(fullpath)
    def _should_run(self, name):
        if self.enabled:
            hashed_value = hash_filename(name) % self.total_processes
            if hashed_value == self.which_process:
                # Returning None means "no opinion": nose's other selectors decide.
                return None
            # Returning False explicitly excludes this file from this process.
            return False
        return None
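# Invocation sketch (hypothetical paths and counts): split a suite across four
# processes by launching each shard with a distinct --which-process in 0..3:
#
#     nosetests --with-nose-picker --total-processes=4 --which-process=0 tests/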
|
"""--------------------------------------------------------------------
COPYRIGHT 2016 Stanley Innovation Inc.
Software License Agreement:
The software supplied herewith by Stanley Innovation Inc. (the "Company")
for its licensed SI Vector Platform is intended and supplied to you,
the Company's customer, for use solely and exclusively with Stanley Innovation
products. The software is owned by the Company and/or its supplier, and is
protected under applicable copyright laws. All rights are reserved. Any use in
violation of the foregoing restrictions may subject the user to criminal
sanctions under applicable laws, as well as to civil liability for the
breach of the terms and conditions of this license. The Company may
immediately terminate this Agreement upon your use of the software with
any products that are not Stanley Innovation products.
The software was written using Python programming language. Your use
of the software is therefore subject to the terms and conditions of the
OSI- approved open source license viewable at http://www.python.org/.
You are solely responsible for ensuring your compliance with the Python
open source license.
You shall indemnify, defend and hold the Company harmless from any claims,
demands, liabilities or expenses, including reasonable attorneys fees, incurred
by the Company as a result of any claim or proceeding against the Company
arising out of or based upon:
(i) The combination, operation or use of the software by you with any hardware,
products, programs or data not supplied or approved in writing by the Company,
if such claim or proceeding would have been avoided but for such combination,
operation or use.
(ii) The modification of the software by or on behalf of you
(iii) Your use of the software.
THIS SOFTWARE IS PROVIDED IN AN "AS IS" CONDITION. NO WARRANTIES,
WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, BUT NOT LIMITED
TO, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE APPLY TO THIS SOFTWARE. THE COMPANY SHALL NOT,
IN ANY CIRCUMSTANCES, BE LIABLE FOR SPECIAL, INCIDENTAL OR
CONSEQUENTIAL DAMAGES, FOR ANY REASON WHATSOEVER.
\file vector_comm.py
\brief runs the driver
\Platform: Linux/ROS Indigo
--------------------------------------------------------------------"""
from system_defines import *
from utils import *
from vector_msgs.msg import *
from std_msgs.msg import Empty
from geometry_msgs.msg import Twist
from vector_ros.cfg import vectorConfig
from dynamic_reconfigure.server import Server
from dynamic_reconfigure.client import Client
from dynamic_reconfigure.msg import Config
from io_eth import IoEthThread
from vector_data_classes import VECTOR_DATA
import multiprocessing
import rospy
import select
import threading
import re
import os
import array  # used by _handle_rsp to unpack response packets
"""
Dictionary for all VECTOR configuration command IDs
"""
command_ids = dict({"GENERAL_PURPOSE_CMD_NONE": 0,
"GENERAL_PURPOSE_CMD_SET_OPERATIONAL_MODE": 1,
"GENERAL_PURPOSE_CMD_SEND_FAULTLOG": 2,
"GENERAL_PURPOSE_CMD_RESET_ODOMETRY": 3,
"GENERAL_PURPOSE_CMD_RESET_PARAMS_TO_DEFAULT":4,
"GENERAL_PURPOSE_CMD_SET_STRAFE_CORRECTION" :6,
"GENERAL_PURPOSE_CMD_SET_YAW_CORRECTION" :7,
"GENERAL_PURPOSE_CMD_LASER_FEILD_SELECTION" :8,
"SIC_CMD_RESET_IN_DIAGNOSTIC_MODE":1001})
class VectorDriver:
def __init__(self):
"""
Variables to track communication frequency for debugging
"""
self.summer=0
self.samp = 0
self.avg_freq = 0.0
self.start_frequency_samp = False
self.need_to_terminate = False
self.flush_rcvd_data=True
"""
Initialize the publishers for VECTOR
"""
self.vector_data = VECTOR_DATA()
"""
Initialize faultlog related items
"""
self.is_init = True
self.extracting_faultlog = False
"""
Initialize the dynamic reconfigure server for VECTOR
"""
self.param_server_initialized = False
self.dyn_reconfigure_srv = Server(vectorConfig, self._dyn_reconfig_callback)
"""
Wait for the parameter server to set the configs and then set the IP address from that.
        Note that this must be the current ethernet settings of the platform. If you want to change them,
        set the ethernet settings at launch to the current ones, power up, change them, power down, set
        the ethernet settings at launch to the new ones, and relaunch.
"""
r = rospy.Rate(10)
start_time = rospy.Time.now().to_sec()
while ((rospy.Time.now().to_sec() - start_time) < 3.0) and (False == self.param_server_initialized):
r.sleep()
if (False == self.param_server_initialized):
rospy.logerr("Parameter server not found, you must pass an initial yaml in the launch! exiting...")
return
"""
Create the thread to run VECTOR communication
"""
self.tx_queue_ = multiprocessing.Queue()
self.rx_queue_ = multiprocessing.Queue()
self.comm = IoEthThread((os.environ['VECTOR_IP_ADDRESS'],int(os.environ['VECTOR_IP_PORT_NUM'])),
self.tx_queue_,
self.rx_queue_,
max_packet_size=1248)
if (False == self.comm.link_up):
rospy.logerr("Could not open socket for VECTOR...")
            self.comm.Close()
return
"""
Initialize the publishers and subscribers for the node
"""
self.pubs = [0]*2
self.pubs[0] = rospy.Publisher('/vector/feedback/faultlog', Faultlog, queue_size=10,latch=True)
self.pubs[1] = rospy.Publisher('/odometry_is_reset', Empty, queue_size=1)
self.subs = [0]*4
self.subs[0] = rospy.Subscriber("/vector/cmd_vel", Twist, self._add_motion_command_to_queue)
self.subs[1] = rospy.Subscriber("/vector/gp_command",ConfigCmd,self._add_config_command_to_queue)
self.subs[2] = rospy.Subscriber("/vector/motion_test_cmd",MotionTestCmd,self._add_motion_test_command_to_queue)
self.subs[3] = rospy.Subscriber("/reset_odometry",Empty,self._reset_odometry)
"""
Start the receive handler thread
"""
self.terminate_mutex = threading.RLock()
self.last_rsp_rcvd = rospy.Time.now().to_sec()
self._rcv_thread = threading.Thread(target = self._run)
self._rcv_thread.start()
"""
        Stop streaming continuous data before extracting the faultlog
"""
rospy.loginfo("Stopping the data stream")
if (False == self._continuous_data(False)):
rospy.logerr("Could not stop VECTOR communication stream")
self.Shutdown()
return
"""
Extract the faultlog at startup
"""
self.flush_rcvd_data=False
rospy.loginfo("Extracting the faultlog")
self.extracting_faultlog = True
if (False == self._extract_faultlog()):
rospy.logerr("Could not get retrieve VECTOR faultlog")
self.Shutdown()
return
"""
Start streaming continuous data
"""
rospy.loginfo("Starting the data stream")
if (False == self._continuous_data(True)):
rospy.logerr("Could not start VECTOR communication stream")
self.Shutdown()
return
self.start_frequency_samp = True
"""
Force the configuration to update the first time to ensure that the variables are set to
the correct values on the machine
"""
if (False == self._initial_param_force_update()):
rospy.logerr("Initial configuration parameteters my not be valid, please check them in the yaml file")
rospy.logerr("The ethernet address must be set to the present address at startup, to change it:")
rospy.logerr("start the machine; change the address using rqt_reconfigure; shutdown; update the yaml and restart")
self.Shutdown()
return
rospy.loginfo("Vector Driver is up and running")
def Shutdown(self):
with self.terminate_mutex:
self.need_to_terminate = True
rospy.loginfo("Vector Driver has called the Shutdown method, terminating")
for sub in self.subs:
sub.unregister()
for pub in self.pubs:
pub.unregister()
self.comm.Close()
self.tx_queue_.close()
self.rx_queue_.close()
def _run(self):
while not self.need_to_terminate:
"""
Run until signaled to stop
Perform the actions defined based on the flags passed out
"""
result = select.select([self.rx_queue_._reader],[],[],0.02)
if len(result[0]) > 0:
data = result[0][0].recv()
with self.terminate_mutex:
if not self.need_to_terminate:
self._handle_rsp(data)
def _add_command_to_queue(self,command):
"""
Create a byte array with the CRC from the command
"""
cmd_bytes = generate_cmd_bytes(command)
"""
Send it
"""
self.tx_queue_.put(cmd_bytes)
def _update_rcv_frq(self):
if (True == self.start_frequency_samp):
self.samp+=1
self.summer+=1.0/(rospy.Time.now().to_sec() - self.last_rsp_rcvd)
self.avg_freq = self.summer/self.samp
self.last_rsp_rcvd = rospy.Time.now().to_sec()
def _handle_rsp(self,data_bytes):
if (True == self.flush_rcvd_data):
return
if (self.extracting_faultlog):
valid_data = validate_response(data_bytes,((NUMBER_OF_FAULTLOG_WORDS+1)*4))
else:
valid_data = validate_response(data_bytes,((NUMBER_OF_VECTOR_RSP_WORDS+1)*4))
if (False == valid_data):
rospy.logerr("bad vector data packet")
return
rsp_data = array.array('I',data_bytes.tostring()).tolist()
rsp_data = rsp_data[:(len(rsp_data)-1)]
if (self.extracting_faultlog):
self.extracting_faultlog = False
faultlog_msg = Faultlog()
faultlog_msg.data = rsp_data
self.pubs[0].publish(faultlog_msg)
else:
header_stamp = self.vector_data.status.parse(rsp_data[START_STATUS_BLOCK:END_STATUS_BLOCK])
wheel_circum = self.vector_data.config_param.parse(rsp_data[START_APP_CONFIG_BLOCK:END_FRAM_CONFIG_BLOCK],header_stamp)
self.vector_data.auxiliary_power.parse(rsp_data[START_BATTERY_DATA_BLOCK:END_BATTERY_DATA_BLOCK],header_stamp)
self.vector_data.propulsion.parse(rsp_data[START_PROPULSION_DATA_BLOCK:END_PROPULSION_DATA_BLOCK],header_stamp)
self.vector_data.dynamics.parse(rsp_data[START_DYNAMICS_DATA_BLOCK:END_DYNAMICS_DATA_BLOCK],header_stamp,wheel_circum)
self.vector_data.imu.parse_data(rsp_data[START_IMU_DATA_BLOCK:END_IMU_DATA_BLOCK],header_stamp)
self._update_rcv_frq()
rospy.logdebug("feedback received from vector")
def _add_motion_command_to_queue(self,command):
"""
Add the command to the queue, platform does command limiting and mapping
"""
cmds = [MOTION_CMD_ID,[convert_float_to_u32(command.linear.x),
convert_float_to_u32(command.linear.y),
convert_float_to_u32(command.angular.z)]]
self._add_command_to_queue(cmds)
def _reset_odometry(self,data):
cmds = [GENERAL_PURPOSE_CMD_ID,[GENERAL_PURPOSE_CMD_RESET_ODOMETRY,RESET_ALL_ODOMETRY]]
self._add_command_to_queue(cmds)
tmp = Empty()
self.pubs[1].publish(tmp)
def _add_config_command_to_queue(self,command):
try:
cmds = [GENERAL_PURPOSE_CMD_ID,[command_ids[command.gp_cmd],command.gp_param]]
self._add_command_to_queue(cmds)
except:
rospy.logerr("Config param failed, it is probably not known")
return
def _add_motion_test_command_to_queue(self,command):
        test = command.test_type & ~MOTION_TEST_TYPE_MASK
if (0 != test):
rospy.logerr("Bad test command see system_defines.py for details")
cmds = [MOTION_TEST_CMD_ID,
[command.test_type,
command.duration_sec,
convert_float_to_u32(command.magnitude)]]
rospy.loginfo("MOTION_TEST IS GOING TO BE SENT!!!!!!!!!!!!!!")
self._add_command_to_queue(cmds)
def _dyn_reconfig_callback(self,config,level):
"""
The first time through we want to ignore the values because they are just defaults from the ROS
parameter server which has no knowledge of the platform being used
"""
if (True == self.is_init):
self.is_init = False
return config
"""
Create the configuration bitmap from the appropriate variables
"""
config_bitmap = (((config.motion_while_charging^1) << DISABLE_AC_PRESENT_CSI_SHIFT)|
(config.motion_ctl_input_filter << MOTION_MAPPING_FILTER_SHIFT))
"""
Define the configuration parameters for all the platforms
"""
self.valid_config_cmd = [LOAD_MACH_CONFIG_CMD_ID,
[convert_float_to_u32(config.x_vel_limit_mps),
convert_float_to_u32(config.y_vel_limit_mps),
convert_float_to_u32(config.accel_limit_mps2),
convert_float_to_u32(config.decel_limit_mps2),
convert_float_to_u32(config.dtz_decel_limit_mps2),
convert_float_to_u32(config.yaw_rate_limit_rps),
convert_float_to_u32(config.yaw_accel_limit_rps2),
convert_float_to_u32(config.wheel_diameter_m),
convert_float_to_u32(config.wheel_base_length_m),
convert_float_to_u32(config.wheel_track_width_m),
convert_float_to_u32(config.gear_ratio),
config_bitmap]]
rospy.loginfo("Reconfigure Requested!")
rospy.loginfo("x_vel_limit_mps: %f"%config.x_vel_limit_mps)
rospy.loginfo("y_vel_limit_mps: %f"%config.y_vel_limit_mps)
rospy.loginfo("accel_limit_mps2: %f"%config.accel_limit_mps2)
rospy.loginfo("decel_limit_mps2: %f"%config.decel_limit_mps2)
rospy.loginfo("dtz_decel_limit_mps2: %f"%config.dtz_decel_limit_mps2)
rospy.loginfo("yaw_rate_limit_rps: %f"%config.yaw_rate_limit_rps)
rospy.loginfo("yaw_accel_limit_rps2: %f"%config.yaw_accel_limit_rps2)
rospy.loginfo("wheel_diameter_m: %f"%config.wheel_diameter_m)
rospy.loginfo("wheel_base_length_m: %f"%config.wheel_base_length_m)
rospy.loginfo("wheel_track_width_m: %f"%config.wheel_track_width_m)
rospy.loginfo("gear_ratio: %f"%config.gear_ratio)
rospy.loginfo("motion_while_charging: %u"%config.motion_while_charging)
rospy.loginfo("motion_ctl_input_filter: %u"%config.motion_ctl_input_filter)
rospy.loginfo("strafe_correction_factor: %u"%config.strafe_correction_factor)
rospy.loginfo("yaw_correction_factor: %u"%config.yaw_correction_factor)
"""
The teleop limits are always the minimum of the actual machine limit and the ones set for teleop
"""
config.teleop_x_vel_limit_mps = minimum_f(config.teleop_x_vel_limit_mps, config.x_vel_limit_mps)
config.teleop_y_vel_limit_mps = minimum_f(config.teleop_y_vel_limit_mps, config.y_vel_limit_mps)
config.teleop_accel_limit_mps2 = minimum_f(config.teleop_accel_limit_mps2, config.accel_limit_mps2)
config.teleop_yaw_rate_limit_rps = minimum_f(config.teleop_yaw_rate_limit_rps, config.yaw_rate_limit_rps)
        config.teleop_yaw_accel_limit_rps2 = minimum_f(config.teleop_yaw_accel_limit_rps2, config.yaw_accel_limit_rps2)
config.jog_yaw_rate_rps = minimum_f(config.jog_yaw_rate_rps, config.yaw_rate_limit_rps)
        config.jog_velocity_mps = minimum_f(config.jog_velocity_mps, config.x_vel_limit_mps)
        config.jog_velocity_mps = minimum_f(config.jog_velocity_mps, config.y_vel_limit_mps)
"""
Set the teleop configuration in the feedback
"""
self.vector_data.config_param.SetTeleopConfig([config.teleop_x_vel_limit_mps,
config.teleop_y_vel_limit_mps,
config.teleop_accel_limit_mps2,
config.teleop_yaw_rate_limit_rps,
config.teleop_yaw_accel_limit_rps2,
config.jog_velocity_mps,
config.jog_yaw_rate_rps])
if self.param_server_initialized:
if ((1<<4) == (level & (1<<4))):
rospy.sleep(0.1)
                cmds = [GENERAL_PURPOSE_CMD_ID,
                        [command_ids["GENERAL_PURPOSE_CMD_SET_STRAFE_CORRECTION"],
                         convert_float_to_u32(config.strafe_correction_factor)]]
self._add_command_to_queue(cmds)
rospy.sleep(0.1)
if ((1<<5) == (level & (1<<5))):
rospy.sleep(0.1)
                cmds = [GENERAL_PURPOSE_CMD_ID,
                        [command_ids["GENERAL_PURPOSE_CMD_SET_YAW_CORRECTION"],
                         convert_float_to_u32(config.yaw_correction_factor)]]
self._add_command_to_queue(cmds)
rospy.sleep(0.1)
"""
        Check whether the parameters need to be stored in NVM before writing them. Although the NVM is
        F-RAM with unlimited read/write, unnecessarily setting the parameters only introduces risk of error.
"""
if self.param_server_initialized:
load_params = False
for i in range(NUMBER_OF_CONFIG_PARAM_VARIABLES):
if (self.vector_data.config_param.configuration_feedback[i] != self.valid_config_cmd[1][i]):
load_params = True
if (True == load_params):
self._add_command_to_queue(self.valid_config_cmd)
rospy.loginfo("Sent config update command")
self.param_server_initialized = True
self.valid_config = config
return config
def _continuous_data(self,start_cont):
set_continuous = [GENERAL_PURPOSE_CMD_ID,[GENERAL_PURPOSE_CMD_SEND_CONTINUOUS_DATA,start_cont]]
ret = False
if (True == start_cont):
r = rospy.Rate(10)
start_time = rospy.Time.now().to_sec()
while ((rospy.Time.now().to_sec() - start_time) < 3.0) and (True == self.vector_data.status.init):
self._add_command_to_queue(set_continuous)
r.sleep()
ret = not self.vector_data.status.init
else:
r = rospy.Rate(5)
start_time = rospy.Time.now().to_sec()
while ((rospy.Time.now().to_sec() - start_time) < 3.0) and (False == ret):
self._add_command_to_queue(set_continuous)
rospy.sleep(0.6)
if ((rospy.Time.now().to_sec() - self.last_rsp_rcvd) > 0.5):
ret = True
r.sleep()
self.vector_data.status.init = True
return ret
def _extract_faultlog(self):
r = rospy.Rate(2)
start_time = rospy.Time.now().to_sec()
while ((rospy.Time.now().to_sec() - start_time) < 3.0) and (True == self.extracting_faultlog):
self._add_command_to_queue([GENERAL_PURPOSE_CMD_ID,[GENERAL_PURPOSE_CMD_SEND_FAULTLOG,0]])
r.sleep()
return not self.extracting_faultlog
def _initial_param_force_update(self):
"""
Load all the parameters on the machine at startup; first check if they match, if they do continue.
Otherwise load them and check again.
"""
r = rospy.Rate(2)
start_time = rospy.get_time()
params_loaded = False
while ((rospy.get_time() - start_time) < 3.0) and (False == params_loaded):
load_params = False
for i in range(NUMBER_OF_CONFIG_PARAM_VARIABLES):
if (self.vector_data.config_param.configuration_feedback[i] != self.valid_config_cmd[1][i]):
load_params = True
if (True == load_params):
self._add_command_to_queue(self.valid_config_cmd)
r.sleep()
else:
params_loaded = True
return params_loaded
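# Usage sketch (assumes a running ROS master, the initial parameters loaded
# from the launch yaml, and VECTOR_IP_ADDRESS/VECTOR_IP_PORT_NUM exported):
#
#     import rospy
#     rospy.init_node('vector_driver')
#     driver = VectorDriver()
#     rospy.spin()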
|
import chemkit
import unittest
class AtomTest(unittest.TestCase):
def test_symbol(self):
molecule = chemkit.Molecule()
atom = molecule.addAtom("C")
self.assertEqual(atom.symbol(), "C")
atom.setAtomicNumber(2)
self.assertEqual(atom.symbol(), "He")
def test_name(self):
molecule = chemkit.Molecule()
atom = molecule.addAtom("C")
self.assertEqual(atom.name(), "Carbon")
atom.setAtomicNumber(8)
self.assertEqual(atom.name(), "Oxygen")
if __name__ == '__main__':
unittest.main()
|
from unittest import TestCase
from ..model_conversion import *
from ..statespace import ss
from ..transferfunction import tf
from .tools.test_utility import assert_tf_equal, assert_ss_equal
class TestModelConversion(TestCase):
def setUp(self):
self.ss = ss([[1, 1], [-2, -5]], [[1], [1]], [[0, 3]], 0)
self.tf = tf([3, -9], [1, 4, -3])
def test_tf2ss(self):
assert_ss_equal(tf2ss(self.tf), self.ss.ctrb_form())
def test_ss2tf(self):
assert_tf_equal(ss2tf(self.ss), self.tf)
|
import traceback
from core import debug
debug.warning("The use of core.modules.port_configure is deprecated. "
"Please use gui.modules.port_configure.",
''.join(traceback.format_stack()))
from gui.modules.port_configure import *
|
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
import setup as _setup
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.intersphinx']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = _setup.__project__.title()
copyright = '2015 %s' % _setup.__author__
version = _setup.__version__
release = _setup.__version__
exclude_patterns = ['_build']
pygments_style = 'sphinx'
autodoc_member_order = 'groupwise'
intersphinx_mapping = {
'python': ('http://docs.python.org/3.2', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
}
if on_rtd:
html_theme = 'alabaster'
html_theme_options = {
'github_user': 'waveform80',
'github_repo': 'picraft',
'logo': 'logo.png',
'logo_name': True,
'logo_text_align': 'center',
'description': 'The Pythonic Minecraft Library',
}
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'searchbox.html',
]
}
else:
html_theme = 'default'
#html_theme_options = {}
#html_sidebars = {}
html_static_path = ['_static']
htmlhelp_basename = '%sdoc' % _setup.__project__
latex_elements = {
'papersize': 'a4paper',
'pointsize': '10pt',
#'preamble': '',
}
latex_documents = [
(
'index', # source start file
'%s.tex' % _setup.__project__, # target filename
'%s Documentation' % project, # title
_setup.__author__, # author
'manual', # documentclass
),
]
man_pages = []
texinfo_documents = []
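# Local build sketch (assumes Sphinx is installed; the output directory is
# arbitrary):
#
#     sphinx-build -b html . _build/html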
|
from Queue import Empty
from anyjson import serialize, deserialize
from sqlalchemy import create_engine
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm import sessionmaker
from kombu.transport import virtual
from sqlakombu.models import Queue, Message, metadata
class Channel(virtual.Channel):
_session = None
_engines = {} # engine cache
def _engine_from_config(self):
conninfo = self.connection.client
configuration = dict(conninfo.transport_options)
url = conninfo.hostname
return create_engine(url, **configuration)
def _open(self):
conninfo = self.connection.client
if conninfo.hostname not in self._engines:
engine = self._engine_from_config()
Session = sessionmaker(bind=engine)
metadata.create_all(engine)
self._engines[conninfo.hostname] = engine, Session
return self._engines[conninfo.hostname]
@property
def session(self):
if self._session is None:
_, Session = self._open()
self._session = Session()
return self._session
def _get_or_create(self, queue):
obj = self.session.query(Queue).filter(Queue.name == queue) \
.first()
if not obj:
obj = Queue(queue)
self.session.add(obj)
try:
self.session.commit()
except OperationalError:
self.session.rollback()
return obj
def _new_queue(self, queue, **kwargs):
self._get_or_create(queue)
def _put(self, queue, payload, **kwargs):
obj = self._get_or_create(queue)
message = Message(serialize(payload), obj)
self.session.add(message)
try:
self.session.commit()
except OperationalError:
self.session.rollback()
def _get(self, queue):
obj = self._get_or_create(queue)
if self.session.bind.name == 'sqlite':
self.session.execute('BEGIN IMMEDIATE TRANSACTION')
try:
msg = self.session.query(Message) \
.with_lockmode('update') \
.filter(Message.queue_id == obj.id) \
.filter(Message.visible != False) \
.order_by(Message.sent_at) \
.order_by(Message.id) \
.limit(1) \
.first()
if msg:
msg.visible = False
return deserialize(msg.payload)
raise Empty()
finally:
self.session.commit()
def _query_all(self, queue):
obj = self._get_or_create(queue)
return self.session.query(Message) \
.filter(Message.queue_id == obj.id)
def _purge(self, queue):
count = self._query_all(queue).delete(synchronize_session=False)
try:
self.session.commit()
except OperationalError:
self.session.rollback()
return count
def _size(self, queue):
return self._query_all(queue).count()
class Transport(virtual.Transport):
Channel = Channel
default_port = 0
connection_errors = ()
channel_errors = ()
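# Usage sketch (hypothetical database URL; kombu resolves the transport from a
# dotted path, and transport_options are forwarded to create_engine above):
#
#     from kombu import BrokerConnection
#     conn = BrokerConnection(transport="sqlakombu.transport.Transport",
#                             hostname="sqlite:///kombu.db")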
|
CUSTOM_DAY_FORMAT = 'd/m/Y CUSTOM'
|
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import sys
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
setup(
name='GcisPyClient',
version='1.2.1',
author='Andrew Buddenberg',
author_email='andrew.buddenberg@noaa.gov',
packages=find_packages(),
scripts=['bin/example', 'bin/sync_surveys.py', 'bin/sync_states.py'],
url='http://data.globalchange.gov',
description='Client for GCIS webservices',
long_description=open('README.md').read(),
license='New BSD',
data_files=[("", ["LICENSE.txt", "README.md"])],
install_requires=[
"requests >= 2.1.0",
"python-dateutil >= 2.2",
"PyYAML >= 3.11",
"beautifulsoup4 >= 4.3.2",
"pytest >= 2.5.2"
],
cmdclass={'test': PyTest},
)
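# With the PyTest command class wired in via cmdclass above, the test suite
# runs with:
#
#     python setup.py test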
|
"""
Some standard discrete distributions.
"""
from __future__ import division

from .. import ScalarDistribution
from ..math.misc import combinations as C, is_integer, is_number
__all__ = (
'bernoulli',
'binomial',
'hypergeometric',
'uniform',
)
def bernoulli(p):
"""
The Bernoulli distribution:
x P(x)
0 1-p
1 p
Parameters
----------
p : float
A float between 0 and 1, the probability of a 1.
Returns
-------
d : ScalarDistribution
The Bernoulli distribution with probability `p`.
Raises
------
ValueError
Raised if not 0 <= `p` <= 1.
"""
return binomial(1, p)
def binomial(n, p):
"""
The binomial distribution:
.. math::
        f(k;n,p) = P(X = k) = \binom{n}{k} p^k (1-p)^{n-k}
describes the number of successes in n i.i.d. draws each with probability
of success p.
Parameters
----------
n : int
A positive integer, the number of trials.
p : float
        A float between 0 and 1, the probability of success of a trial.
Returns
-------
d : ScalarDistribution
The binomial distribution describes the number of successes in `n`
trials (identically and independently distributed draws) each with
probability of success `p`.
Raises
------
ValueError
Raised if `n` is not a positive integer.
Raised if not 0 <= `p` <= 1.
"""
if not is_integer(n) or n < 0:
raise ValueError("{0} is not a positive integer.".format(n))
if not is_number(p) or not 0 <= p <= 1:
raise ValueError("{0} is not a valid probability.".format(p))
pp = lambda n, k: C(n, k) * p**k * (1 - p)**(n - k)
outcomes = list(range(n + 1))
pmf = [pp(n, k) for k in outcomes]
return ScalarDistribution(outcomes, pmf)
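# Quick sanity check of the construction above (values computed by hand):
#
#     d = binomial(2, 0.5)
#     # outcomes: [0, 1, 2], pmf: [0.25, 0.5, 0.25]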
def hypergeometric(N, K, n):
"""
The hypergeometric distribution:
.. math::
        f(k;N,K,n) = P(X = k) = \binom{K}{k} \binom{N-K}{n-k} / \binom{N}{n}
Parameters
----------
N : int
A positive integer, the size of the population.
K : int
The number of successes in the population.
n : int
The number of draws to make (without replacement) from the population.
Returns
-------
d : ScalarDistribution
The hypergeometric distribution of a population of size `N` with `K`
successes in the population, and `n` draws are made, without
replacement, from that population. P(k) is the probability of k
successes among the `n` draws.
Raises
------
ValueError
Raised if `N`, `K`, or `n` are not positive integers.
"""
if not is_integer(N) or N < 0:
raise ValueError("{0} is not a positive integer.".format(N))
if not is_integer(K) or K < 0:
raise ValueError("{0} is not a positive integer.".format(K))
if not is_integer(n) or n < 0:
raise ValueError("{0} is not a positive integer.".format(n))
outcomes = list(range(max(0, n + K - N), min(K, n) + 1))
pmf = [C(K, k) * C(N - K, n - k) / C(N, n) for k in outcomes]
return ScalarDistribution(outcomes, pmf)
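# Worked example (probability computed by hand): drawing n=10 from a
# population of N=50 containing K=5 successes,
#
#     d = hypergeometric(50, 5, 10)
#     # P(X = 4) = C(5,4) * C(45,6) / C(50,10) ~ 0.003964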
def uniform(a, b=None):
"""
The discrete uniform distribution:
.. math::
        P(X = x) = \frac{1}{b-a} \quad \text{for } x \in \{a, \ldots, b-1\}
Parameters
----------
a : int
The lower bound of the uniform distribution. If `b` is None, then `a` is
taken to be the length of the uniform with a lower bound of 0.
b : int, None
The upper bound of the uniform distribution. If None, a uniform
distribution over 0 .. `a` is returned.
Returns
-------
d : ScalarDistribution
The uniform distribution from `a` to `b`-1.
Raises
------
ValueError
        Raised if `b` is not an integer or None; if `a` is not a positive
        integer when `b` is None; or if `a` is not an integer smaller than
        `b` when `b` is given.
"""
if not (b is None or is_integer(b)):
msg = "{0} is not an integer or None."
raise ValueError(msg.format(b))
if b is None:
if not is_integer(a) or a <= 0:
msg = "{0} is not a positive integer."
raise ValueError(msg.format(a))
a, b = 0, a
else:
if not is_integer(a) or a >= b:
msg = "{0} is not an integer larger than {1}."
raise ValueError(msg.format(a, b))
outcomes = list(range(a, b))
pmf = [1 / (b - a)] * (b - a)
return ScalarDistribution(outcomes, pmf)
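# Example: a fair six-sided die as a discrete uniform distribution,
#
#     uniform(6)       # outcomes 0..5, each with probability 1/6
#     uniform(1, 7)    # outcomes 1..6, each with probability 1/6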
|
import os
import sys
import re
import string
import types
import codecs
from xml.etree import cElementTree
from itertools import chain, imap
try:
MODULE = os.path.dirname(os.path.abspath(__file__))
except:
MODULE = ""
from tree import Tree, Text, Sentence, Slice, Chunk, PNPChunk, Chink, Word, table
from tree import SLASH, WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA, AND, OR
def decode_string(v, encoding="utf-8"):
""" Returns the given value as a Unicode string (if possible).
"""
if isinstance(encoding, basestring):
encoding = ((encoding,),) + (("windows-1252",), ("utf-8", "ignore"))
if isinstance(v, str):
for e in encoding:
try: return v.decode(*e)
except:
pass
return v
return unicode(v)
def encode_string(v, encoding="utf-8"):
""" Returns the given value as a Python byte string (if possible).
"""
if isinstance(encoding, basestring):
encoding = ((encoding,),) + (("windows-1252",), ("utf-8", "ignore"))
if isinstance(v, unicode):
for e in encoding:
try: return v.encode(*e)
except:
pass
return v
return str(v)
decode_utf8 = decode_string
encode_utf8 = encode_string
PUNCTUATION = ".,;:!?()[]{}`''\"@#$^&*+-|=~_"
def ngrams(string, n=3, punctuation=PUNCTUATION, continuous=False):
""" Returns a list of n-grams (tuples of n successive words) from the given string.
Alternatively, you can supply a Text or Sentence object.
With continuous=False, n-grams will not run over sentence markers (i.e., .!?).
Punctuation marks are stripped from words.
"""
def strip_punctuation(s, punctuation=set(punctuation)):
return [w for w in s if (isinstance(w, Word) and w.string or w) not in punctuation]
if n <= 0:
return []
if isinstance(string, basestring):
s = [strip_punctuation(s.split(" ")) for s in tokenize(string)]
if isinstance(string, Sentence):
s = [strip_punctuation(string)]
if isinstance(string, Text):
s = [strip_punctuation(s) for s in string]
if continuous:
s = [sum(s, [])]
g = []
for s in s:
#s = [None] + s + [None]
g.extend([tuple(s[i:i+n]) for i in range(len(s)-n+1)])
return g
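# Example (sketch; exact splits depend on the tokenizer data files):
#
#     ngrams("The cat purs.", n=2)
#     # => [("The", "cat"), ("cat", "purs")] -- punctuation is stripped and
#     #    n-grams do not cross sentence markers unless continuous=True.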
def pprint(string, token=[WORD, POS, CHUNK, PNP], column=4):
""" Pretty-prints the output of Parser.parse() as a table with outlined columns.
Alternatively, you can supply a tree.Text or tree.Sentence object.
"""
if isinstance(string, basestring):
print "\n\n".join([table(sentence, fill=column) for sentence in Text(string, token)])
if isinstance(string, Text):
print "\n\n".join([table(sentence, fill=column) for sentence in string])
if isinstance(string, Sentence):
print table(string, fill=column)
class lazydict(dict):
def load(self):
# Must be overridden in a subclass.
# Must load data with dict.__setitem__(self, k, v) instead of lazydict[k] = v.
pass
def _lazy(self, method, *args):
""" If the dictionary is empty, calls lazydict.load().
Replaces lazydict.method() with dict.method() and calls it.
"""
if dict.__len__(self) == 0:
self.load()
setattr(self, method, types.MethodType(getattr(dict, method), self))
return getattr(dict, method)(self, *args)
def __repr__(self):
return self._lazy("__repr__")
def __len__(self):
return self._lazy("__len__")
def __iter__(self):
return self._lazy("__iter__")
def __contains__(self, *args):
return self._lazy("__contains__", *args)
def __getitem__(self, *args):
return self._lazy("__getitem__", *args)
def __setitem__(self, *args):
return self._lazy("__setitem__", *args)
def setdefault(self, *args):
return self._lazy("setdefault", *args)
def get(self, *args, **kwargs):
return self._lazy("get", *args)
def items(self):
return self._lazy("items")
def keys(self):
return self._lazy("keys")
def values(self):
return self._lazy("values")
def update(self, *args):
return self._lazy("update", *args)
def pop(self, *args):
return self._lazy("pop", *args)
def popitem(self, *args):
return self._lazy("popitem", *args)
class lazylist(list):
def load(self):
# Must be overridden in a subclass.
# Must load data with list.append(self, v) instead of lazylist.append(v).
pass
def _lazy(self, method, *args):
""" If the list is empty, calls lazylist.load().
Replaces lazylist.method() with list.method() and calls it.
"""
if list.__len__(self) == 0:
self.load()
setattr(self, method, types.MethodType(getattr(list, method), self))
return getattr(list, method)(self, *args)
def __repr__(self):
return self._lazy("__repr__")
def __len__(self):
return self._lazy("__len__")
def __iter__(self):
return self._lazy("__iter__")
def __contains__(self, *args):
return self._lazy("__contains__", *args)
def insert(self, *args):
return self._lazy("insert", *args)
def append(self, *args):
return self._lazy("append", *args)
def extend(self, *args):
return self._lazy("extend", *args)
def remove(self, *args):
return self._lazy("remove", *args)
def pop(self, *args):
return self._lazy("pop", *args)
def _read(path, encoding="utf-8", comment=";;;"):
""" Returns an iterator over the lines in the file at the given path,
        stripping comments and decoding each line to Unicode.
"""
if path:
if isinstance(path, basestring) and os.path.exists(path):
# From file path.
f = open(path)
elif isinstance(path, basestring):
# From string.
f = path.splitlines()
else:
# From file or buffer.
f = path
for i, line in enumerate(f):
line = line.strip(codecs.BOM_UTF8) if i == 0 and isinstance(line, str) else line
line = line.strip()
line = decode_utf8(line)
if not line or (comment and line.startswith(comment)):
continue
yield line
raise StopIteration
class Lexicon(lazydict):
def __init__(self, path="", morphology=None, context=None, entities=None, NNP="NNP", language=None):
""" A dictionary of words and their part-of-speech tags.
For unknown words, rules for word morphology, context and named entities can be used.
"""
self._path = path
self._language = language
self.morphology = Morphology(self, path=morphology)
self.context = Context(self, path=context)
self.entities = Entities(self, path=entities, tag=NNP)
def load(self):
# Arnold NNP x
dict.update(self, (x.split(" ")[:2] for x in _read(self._path)))
@property
def path(self):
return self._path
@property
def language(self):
return self._language
class Rules(object):
def __init__(self, lexicon={}, cmd=set()):
self.lexicon, self.cmd = lexicon, cmd
def apply(self, x):
""" Applies the rule to the given token or list of tokens.
"""
return x
class Morphology(lazylist, Rules):
def __init__(self, lexicon={}, path=""):
""" A list of rules based on word morphology (prefix, suffix).
"""
cmd = ("char", # Word contains x.
"haspref", # Word starts with x.
"hassuf", # Word end with x.
"addpref", # x + word is in lexicon.
"addsuf", # Word + x is in lexicon.
"deletepref", # Word without x at the start is in lexicon.
"deletesuf", # Word without x at the end is in lexicon.
"goodleft", # Word preceded by word x.
"goodright", # Word followed by word x.
)
cmd = set(cmd)
cmd.update([("f" + x) for x in cmd])
Rules.__init__(self, lexicon, cmd)
self._path = path
@property
def path(self):
return self._path
def load(self):
# ["NN", "s", "fhassuf", "1", "NNS", "x"]
list.extend(self, (x.split() for x in _read(self._path)))
def apply(self, token, previous=(None, None), next=(None, None)):
""" Applies lexical rules to the given token,
which is a [word, tag] list.
"""
w = token[0]
for r in self:
if r[1] in self.cmd: # Rule = ly hassuf 2 RB x
f, x, pos, cmd = bool(0), r[0], r[-2], r[1].lower()
if r[2] in self.cmd: # Rule = NN s fhassuf 1 NNS x
f, x, pos, cmd = bool(1), r[1], r[-2], r[2].lower().lstrip("f")
if f and token[1] != r[0]:
continue
if (cmd == "char" and x in w) \
or (cmd == "haspref" and w.startswith(x)) \
or (cmd == "hassuf" and w.endswith(x)) \
or (cmd == "addpref" and x + w in self.lexicon) \
or (cmd == "addsuf" and w + x in self.lexicon) \
or (cmd == "deletepref" and w.startswith(x) and w[len(x):] in self.lexicon) \
or (cmd == "deletesuf" and w.endswith(x) and w[:-len(x)] in self.lexicon) \
or (cmd == "goodleft" and x == next[0]) \
or (cmd == "goodright" and x == previous[0]):
token[1] = pos
return token
def insert(self, i, tag, affix, cmd="hassuf", tagged=None):
""" Inserts a new rule that assigns the given tag to words with the given affix
(and tagged as specified), e.g., Morphology.append("RB", "-ly").
"""
if affix.startswith("-") and affix.endswith("-"):
affix, cmd = affix[+1:-1], "char"
if affix.startswith("-"):
            affix, cmd = affix[+1:], "hassuf"
if affix.endswith("-"):
affix, cmd = affix[+0:-1], "haspref"
if tagged:
r = [tagged, affix, "f"+cmd.lstrip("f"), tag, "x"]
else:
r = [affix, cmd.lstrip("f"), tag, "x"]
lazylist.insert(self, i, r)
def append(self, *args, **kwargs):
self.insert(len(self)-1, *args, **kwargs)
def extend(self, rules=[]):
for r in rules:
self.append(*r)
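# Rule sketch: Morphology.append("RB", "-ly") inserts ["ly", "hassuf", "RB", "x"],
# i.e., tag words that end in "ly" as adverbs (RB).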
class Context(lazylist, Rules):
def __init__(self, lexicon={}, path=""):
""" A list of rules based on context (preceding and following words).
"""
cmd = ("prevtag", # Preceding word is tagged x.
"nexttag", # Following word is tagged x.
"prev2tag", # Word 2 before is tagged x.
"next2tag", # Word 2 after is tagged x.
"prev1or2tag", # One of 2 preceding words is tagged x.
"next1or2tag", # One of 2 following words is tagged x.
"prev1or2or3tag", # One of 3 preceding words is tagged x.
"next1or2or3tag", # One of 3 following words is tagged x.
"surroundtag", # Preceding word is tagged x and following word is tagged y.
"curwd", # Current word is x.
"prevwd", # Preceding word is x.
"nextwd", # Following word is x.
"prev1or2wd", # One of 2 preceding words is x.
"next1or2wd", # One of 2 following words is x.
"next1or2or3wd", # One of 3 preceding words is x.
"prev1or2or3wd", # One of 3 following words is x.
"prevwdtag", # Preceding word is x and tagged y.
"nextwdtag", # Following word is x and tagged y.
"wdprevtag", # Current word is y and preceding word is tagged x.
"wdnexttag", # Current word is x and following word is tagged y.
"wdand2aft", # Current word is x and word 2 after is y.
"wdand2tagbfr", # Current word is y and word 2 before is tagged x.
"wdand2tagaft", # Current word is x and word 2 after is tagged y.
"lbigram", # Current word is y and word before is x.
"rbigram", # Current word is x and word after is y.
"prevbigram", # Preceding word is tagged x and word before is tagged y.
"nextbigram", # Following word is tagged x and word after is tagged y.
)
Rules.__init__(self, lexicon, set(cmd))
self._path = path
@property
def path(self):
return self._path
def load(self):
# ["VBD", "VB", "PREVTAG", "TO"]
list.extend(self, (x.split() for x in _read(self._path)))
def apply(self, tokens):
""" Applies contextual rules to the given list of tokens,
where each token is a [word, tag] list.
"""
o = [("STAART", "STAART")] * 3 # Empty delimiters for look ahead/back.
t = o + tokens + o
for i, token in enumerate(t):
for r in self:
if token[1] == "STAART":
continue
if token[1] != r[0] and r[0] != "*":
continue
cmd, x, y = r[2], r[3], r[4] if len(r) > 4 else ""
cmd = cmd.lower()
if (cmd == "prevtag" and x == t[i-1][1]) \
or (cmd == "nexttag" and x == t[i+1][1]) \
or (cmd == "prev2tag" and x == t[i-2][1]) \
or (cmd == "next2tag" and x == t[i+2][1]) \
or (cmd == "prev1or2tag" and x in (t[i-1][1], t[i-2][1])) \
or (cmd == "next1or2tag" and x in (t[i+1][1], t[i+2][1])) \
or (cmd == "prev1or2or3tag" and x in (t[i-1][1], t[i-2][1], t[i-3][1])) \
or (cmd == "next1or2or3tag" and x in (t[i+1][1], t[i+2][1], t[i+3][1])) \
or (cmd == "surroundtag" and x == t[i-1][1] and y == t[i+1][1]) \
or (cmd == "curwd" and x == t[i+0][0]) \
or (cmd == "prevwd" and x == t[i-1][0]) \
or (cmd == "nextwd" and x == t[i+1][0]) \
or (cmd == "prev1or2wd" and x in (t[i-1][0], t[i-2][0])) \
or (cmd == "next1or2wd" and x in (t[i+1][0], t[i+2][0])) \
or (cmd == "prevwdtag" and x == t[i-1][0] and y == t[i-1][1]) \
or (cmd == "nextwdtag" and x == t[i+1][0] and y == t[i+1][1]) \
or (cmd == "wdprevtag" and x == t[i-1][1] and y == t[i+0][0]) \
or (cmd == "wdnexttag" and x == t[i+0][0] and y == t[i+1][1]) \
or (cmd == "wdand2aft" and x == t[i+0][0] and y == t[i+2][0]) \
or (cmd == "wdand2tagbfr" and x == t[i-2][1] and y == t[i+0][0]) \
or (cmd == "wdand2tagaft" and x == t[i+0][0] and y == t[i+2][1]) \
or (cmd == "lbigram" and x == t[i-1][0] and y == t[i+0][0]) \
or (cmd == "rbigram" and x == t[i+0][0] and y == t[i+1][0]) \
or (cmd == "prevbigram" and x == t[i-2][1] and y == t[i-1][1]) \
or (cmd == "nextbigram" and x == t[i+1][1] and y == t[i+2][1]):
t[i] = [t[i][0], r[1]]
return t[len(o):-len(o)]
def insert(self, i, tag1, tag2, cmd="prevtag", x=None, y=None):
""" Inserts a new rule that updates words with tag1 to tag2,
given constraints x and y, e.g., Context.append("TO < NN", "VB")
"""
if " < " in tag1 and not x and not y:
tag1, x = tag1.split(" < "); cmd="prevtag"
if " > " in tag1 and not x and not y:
x, tag1 = tag1.split(" > "); cmd="nexttag"
lazylist.insert(self, i, [tag1, tag2, cmd, x or "", y or ""])
def append(self, *args, **kwargs):
self.insert(len(self)-1, *args, **kwargs)
def extend(self, rules=[]):
for r in rules:
self.append(*r)
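# Rule sketch: Context.append("TO < NN", "VB") inserts ["TO", "VB", "prevtag", "NN", ""],
# i.e., retag a word tagged "TO" as "VB" when the preceding word is tagged "NN".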
RE_ENTITY1 = re.compile(r"^http://") # http://www.domain.com/path
RE_ENTITY2 = re.compile(r"^www\..*?\.(com|org|net|edu|de|uk)$")           # www.domain.com
RE_ENTITY3 = re.compile(r"^[\w\-\.\+]+@(\w[\w\-]+\.)+[\w\-]+$") # name@domain.com
class Entities(lazydict, Rules):
def __init__(self, lexicon={}, path="", tag="NNP"):
""" A dictionary of named entities and their labels.
        For domain names and e-mail addresses, regular expressions are used.
"""
cmd = (
"pers", # Persons: George/NNP-PERS
"loc", # Locations: Washington/NNP-LOC
"org", # Organizations: Google/NNP-ORG
)
Rules.__init__(self, lexicon, set(cmd))
self._path = path
self.tag = tag
@property
def path(self):
return self._path
def load(self):
# ["Alexander", "the", "Great", "PERS"]
# {"alexander": [["alexander", "the", "great", "pers"], ...]}
for x in _read(self.path):
x = [x.lower() for x in x.split()]
dict.setdefault(self, x[0], []).append(x)
def apply(self, tokens):
""" Applies the named entity recognizer to the given list of tokens,
where each token is a [word, tag] list.
"""
# Note: we could also scan for patterns, e.g.,
# "my|his|her name is|was *" => NNP-PERS.
i = 0
while i < len(tokens):
w = tokens[i][0].lower()
if RE_ENTITY1.match(w) \
or RE_ENTITY2.match(w) \
or RE_ENTITY3.match(w):
tokens[i][1] = self.tag
if w in self:
for e in self[w]:
# Look ahead to see if successive words match the named entity.
e, tag = (e[:-1], "-"+e[-1].upper()) if e[-1] in self.cmd else (e, "")
b = True
for j, e in enumerate(e):
if i + j >= len(tokens) or tokens[i+j][0].lower() != e:
b = False; break
if b:
for token in tokens[i:i+j+1]:
token[1] = (token[1] == "NNPS" and token[1] or self.tag) + tag
i += j
break
i += 1
return tokens
def append(self, entity, name="pers"):
""" Appends a named entity to the lexicon,
e.g., Entities.append("Hooloovoo", "PERS")
"""
e = map(lambda s: s.lower(), entity.split(" ") + [name])
self.setdefault(e[0], []).append(e)
def extend(self, entities):
for entity, name in entities:
self.append(entity, name)
PTB = PENN = "penn"
class Parser(object):
def __init__(self, lexicon={}, default=("NN", "NNP", "CD"), language=None):
""" A simple shallow parser using a Brill-based part-of-speech tagger.
The given lexicon is a dictionary of known words and their part-of-speech tag.
The given default tags are used for unknown words.
Unknown words that start with a capital letter are tagged NNP (except for German).
Unknown words that contain only digits and punctuation are tagged CD.
The given language can be used to discern between
Germanic and Romance languages for phrase chunking.
"""
self.lexicon = lexicon
self.default = default
self.language = language
def find_tokens(self, string, **kwargs):
""" Returns a list of sentences from the given string.
Punctuation marks are separated from each word by a space.
"""
# "The cat purs." => ["The cat purs ."]
return find_tokens(string,
punctuation = kwargs.get( "punctuation", PUNCTUATION),
abbreviations = kwargs.get("abbreviations", ABBREVIATIONS),
replace = kwargs.get( "replace", replacements),
linebreak = r"\n{2,}")
def find_tags(self, tokens, **kwargs):
""" Annotates the given list of tokens with part-of-speech tags.
Returns a list of tokens, where each token is now a [word, tag]-list.
"""
# ["The", "cat", "purs"] => [["The", "DT"], ["cat", "NN"], ["purs", "VB"]]
return find_tags(tokens,
language = kwargs.get("language", self.language),
lexicon = kwargs.get( "lexicon", self.lexicon),
default = kwargs.get( "default", self.default),
map = kwargs.get( "map", None))
def find_chunks(self, tokens, **kwargs):
""" Annotates the given list of tokens with chunk tags.
Several tags can be added, for example chunk + preposition tags.
"""
# [["The", "DT"], ["cat", "NN"], ["purs", "VB"]] =>
# [["The", "DT", "B-NP"], ["cat", "NN", "I-NP"], ["purs", "VB", "B-VP"]]
return find_prepositions(
find_chunks(tokens,
language = kwargs.get("language", self.language)))
def find_prepositions(self, tokens, **kwargs):
""" Annotates the given list of tokens with prepositional noun phrase tags.
"""
return find_prepositions(tokens) # See also Parser.find_chunks().
def find_labels(self, tokens, **kwargs):
""" Annotates the given list of tokens with verb/predicate tags.
"""
return find_relations(tokens)
def find_lemmata(self, tokens, **kwargs):
""" Annotates the given list of tokens with word lemmata.
"""
return [token + [token[0].lower()] for token in tokens]
def parse(self, s, tokenize=True, tags=True, chunks=True, relations=False, lemmata=False, encoding="utf-8", **kwargs):
""" Takes a string (sentences) and returns a tagged Unicode string (TaggedString).
Sentences in the output are separated by newlines.
With tokenize=True, punctuation is split from words and sentences are separated by \n.
With tags=True, part-of-speech tags are parsed (NN, VB, IN, ...).
With chunks=True, phrase chunk tags are parsed (NP, VP, PP, PNP, ...).
With relations=True, semantic role labels are parsed (SBJ, OBJ).
With lemmata=True, word lemmata are parsed.
Optional parameters are passed to
the tokenizer, tagger, chunker, labeler and lemmatizer.
"""
# Tokenizer.
if tokenize is True:
s = self.find_tokens(s)
if isinstance(s, (list, tuple)):
s = [isinstance(s, basestring) and s.split(" ") or s for s in s]
if isinstance(s, basestring):
s = [s.split(" ") for s in s.split("\n")]
# Unicode.
for i in range(len(s)):
for j in range(len(s[i])):
if isinstance(s[i][j], str):
s[i][j] = decode_string(s[i][j], encoding)
# Tagger (required by chunker, labeler & lemmatizer).
if tags or chunks or relations or lemmata:
s[i] = self.find_tags(s[i], **kwargs)
else:
s[i] = [[w] for w in s[i]]
# Chunker.
if chunks or relations:
s[i] = self.find_chunks(s[i], **kwargs)
# Labeler.
if relations:
s[i] = self.find_labels(s[i], **kwargs)
# Lemmatizer.
if lemmata:
s[i] = self.find_lemmata(s[i], **kwargs)
# Slash-formatted tagged string.
# With collapse=False (or split=True), returns raw list
# (this output is not usable by tree.Text).
if not kwargs.get("collapse", True) \
or kwargs.get("split", False):
return s
# Construct TaggedString.format.
# (this output is usable by tree.Text).
format = ["word"]
if tags:
format.append("part-of-speech")
if chunks:
format.extend(("chunk", "preposition"))
if relations:
format.append("relation")
if lemmata:
format.append("lemma")
# Collapse raw list.
# Sentences are separated by newlines, tokens by spaces, tags by slashes.
# Slashes in words are encoded with &slash;
for i in range(len(s)):
for j in range(len(s[i])):
s[i][j][0] = s[i][j][0].replace("/", "&slash;")
s[i][j] = "/".join(s[i][j])
s[i] = " ".join(s[i])
s = "\n".join(s)
s = TaggedString(s, format, language=kwargs.get("language", self.language))
return s
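# Usage sketch (hypothetical lexicon path; the tags produced depend entirely
# on the data files loaded):
#
#     parser = Parser(lexicon=Lexicon(path="en-lexicon.txt"))
#     print parser.parse(u"The black cat sat on the mat.")
#     # => a TaggedString: word/POS/chunk/PNP tokens, one sentence per line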
TOKENS = "tokens"
class TaggedString(unicode):
def __new__(self, string, tags=["word"], language=None):
""" Unicode string with tags and language attributes.
For example: TaggedString("cat/NN/NP", tags=["word", "pos", "chunk"]).
"""
# From a TaggedString:
if isinstance(string, unicode) and hasattr(string, "tags"):
tags, language = string.tags, string.language
# From a TaggedString.split(TOKENS) list:
if isinstance(string, list):
string = [[[x.replace("/", "&slash;") for x in token] for token in s] for s in string]
string = "\n".join(" ".join("/".join(token) for token in s) for s in string)
s = unicode.__new__(self, string)
s.tags = list(tags)
s.language = language
return s
def split(self, sep=TOKENS):
""" Returns a list of sentences, where each sentence is a list of tokens,
where each token is a list of word + tags.
"""
if sep != TOKENS:
return unicode.split(self, sep)
if len(self) == 0:
return []
return [[[x.replace("&slash;", "/") for x in token.split("/")]
for token in sentence.split(" ")]
for sentence in unicode.split(self, "\n")]
UNIVERSAL = "universal"
NOUN, VERB, ADJ, ADV, PRON, DET, PREP, ADP, NUM, CONJ, INTJ, PRT, PUNC, X = \
"NN", "VB", "JJ", "RB", "PR", "DT", "PP", "PP", "NO", "CJ", "UH", "PT", ".", "X"
def penntreebank2universal(token, tag):
""" Returns a (token, tag)-tuple with a simplified universal part-of-speech tag.
"""
if tag.startswith(("NNP-", "NNPS-")):
return (token, "%s-%s" % (NOUN, tag.split("-")[-1]))
if tag in ("NN", "NNS", "NNP", "NNPS", "NP"):
return (token, NOUN)
if tag in ("MD", "VB", "VBD", "VBG", "VBN", "VBP", "VBZ"):
return (token, VERB)
if tag in ("JJ", "JJR", "JJS"):
return (token, ADJ)
if tag in ("RB", "RBR", "RBS", "WRB"):
return (token, ADV)
if tag in ("PRP", "PRP$", "WP", "WP$"):
return (token, PRON)
if tag in ("DT", "PDT", "WDT", "EX"):
return (token, DET)
if tag in ("IN",):
return (token, PREP)
if tag in ("CD",):
return (token, NUM)
if tag in ("CC",):
return (token, CONJ)
if tag in ("UH",):
return (token, INTJ)
if tag in ("POS", "RP", "TO"):
return (token, PRT)
if tag in ("SYM", "LS", ".", "!", "?", ",", ":", "(", ")", "\"", "#", "$"):
return (token, PUNC)
return (token, X)
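# Examples: penntreebank2universal("cats", "NNS") => ("cats", "NN")
#           penntreebank2universal("ran", "VBD") => ("ran", "VB")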
TOKEN = re.compile(r"(\S+)\s")
PUNCTUATION = \
punctuation = ".,;:!?()[]{}`''\"@#$^&*+-|=~_"
ABBREVIATIONS = abbreviations = set((
"a.", "adj.", "adv.", "al.", "a.m.", "c.", "cf.", "comp.", "conf.", "def.",
"ed.", "e.g.", "esp.", "etc.", "ex.", "f.", "fig.", "gen.", "id.", "i.e.",
"int.", "l.", "m.", "Med.", "Mil.", "Mr.", "n.", "n.q.", "orig.", "pl.",
"pred.", "pres.", "p.m.", "ref.", "v.", "vs.", "w/"
))
RE_ABBR1 = re.compile(r"^[A-Za-z]\.$")    # single letter, "T. De Smedt"
RE_ABBR2 = re.compile(r"^([A-Za-z]\.)+$") # alternating letters, "U.S."
RE_ABBR3 = re.compile(r"^[A-Z][bcdfghjklmnpqrstvwxz]+\.$") # capital followed by consonants, "Mr."
EMOTICONS = { # (facial expression, sentiment)-keys
("love" , +1.00): set(("<3", u"♥")),
("grin" , +1.00): set((">:D", ":-D", ":D", "=-D", "=D", "X-D", "x-D", "XD", "xD", "8-D")),
("taunt", +0.75): set((">:P", ":-P", ":P", ":-p", ":p", ":-b", ":b", ":c)", ":o)", ":^)")),
("smile", +0.50): set((">:)", ":-)", ":)", "=)", "=]", ":]", ":}", ":>", ":3", "8)", "8-)")),
("wink" , +0.25): set((">;]", ";-)", ";)", ";-]", ";]", ";D", ";^)", "*-)", "*)")),
("gasp" , +0.05): set((">:o", ":-O", ":O", ":o", ":-o", "o_O", "o.O", u"°O°", u"°o°")),
("worry", -0.25): set((">:/", ":-/", ":/", ":\\", ">:\\", ":-.", ":-s", ":s", ":S", ":-S", ">.>")),
("frown", -0.75): set((">:[", ":-(", ":(", "=(", ":-[", ":[", ":{", ":-<", ":c", ":-c", "=/")),
("cry" , -1.00): set((":'(", ":'''(", ";'("))
}
RE_EMOTICONS = [r" ?".join(map(re.escape, e)) for v in EMOTICONS.values() for e in v]
RE_EMOTICONS = re.compile(r"(%s)($|\s)" % "|".join(RE_EMOTICONS))
RE_SARCASM = re.compile(r"\( ?\! ?\)")
replacements = {
"'d": " 'd",
"'m": " 'm",
"'s": " 's",
"'ll": " 'll",
"'re": " 're",
"'ve": " 've",
"n't": " n't"
}
EOS = "END-OF-SENTENCE"
def find_tokens(string, punctuation=PUNCTUATION, abbreviations=ABBREVIATIONS, replace=replacements, linebreak=r"\n{2,}"):
""" Returns a list of sentences. Each sentence is a space-separated string of tokens (words).
Handles common cases of abbreviations (e.g., etc., ...).
Punctuation marks are split from other words. Periods (or ?!) mark the end of a sentence.
Headings without an ending period are inferred by line breaks.
"""
# Handle periods separately.
punctuation = tuple(punctuation.replace(".", ""))
# Handle replacements (contractions).
for a, b in replace.items():
string = re.sub(a, b, string)
# Handle Unicode quotes.
if isinstance(string, unicode):
string = string.replace(u"“", u" “ ")
string = string.replace(u"”", u" ” ")
string = string.replace(u"‘", u" ‘ ")
string = string.replace(u"’", u" ’ ")
# Collapse whitespace.
string = re.sub("\r\n", "\n", string)
string = re.sub(linebreak, " %s " % EOS, string)
string = re.sub(r"\s+", " ", string)
tokens = []
for t in TOKEN.findall(string+" "):
if len(t) > 0:
tail = []
            while t.startswith(punctuation) and \
                    t not in replace:
                # Split leading punctuation.
                if t.startswith(punctuation):
                    tokens.append(t[0]); t = t[1:]
            while t.endswith(punctuation + (".",)) and \
                    t not in replace:
                # Split trailing punctuation.
                if t.endswith(punctuation):
                    tail.append(t[-1]); t = t[:-1]
# Split ellipsis (...) before splitting period.
if t.endswith("..."):
tail.append("..."); t=t[:-3].rstrip(".")
# Split period (if not an abbreviation).
if t.endswith("."):
if t in abbreviations or \
RE_ABBR1.match(t) is not None or \
RE_ABBR2.match(t) is not None or \
RE_ABBR3.match(t) is not None:
break
else:
tail.append(t[-1]); t=t[:-1]
if t != "":
tokens.append(t)
tokens.extend(reversed(tail))
sentences, i, j = [[]], 0, 0
while j < len(tokens):
if tokens[j] in ("...", ".", "!", "?", EOS):
# There may be a trailing parenthesis.
while j < len(tokens) \
and tokens[j] in ("...", ".", "!", "?", ")", "'", "\"", u"”", u"’", EOS):
j += 1
sentences[-1].extend(t for t in tokens[i:j] if t != EOS)
sentences.append([])
i = j
j += 1
sentences[-1].extend(tokens[i:j])
sentences = (" ".join(s) for s in sentences if len(s) > 0)
sentences = (RE_SARCASM.sub("(!)", s) for s in sentences)
sentences = [RE_EMOTICONS.sub(
lambda m: m.group(1).replace(" ", "") + m.group(2), s) for s in sentences]
return sentences
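# Editor's sketch: an illustrative call; the expected output is the editor's
# reading of the rules above (abbreviations kept intact, punctuation split off,
# one space-separated string per sentence), not a verbatim fixture.
def _example_find_tokens():
    return find_tokens("Mr. Smith arrived, e.g., by train. He left.")
    # => ["Mr. Smith arrived , e.g. , by train .", "He left ."]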
CD = re.compile(r"^[0-9\-\,\.\:\/\%\$]+$")
def _suffix_rules(token, **kwargs):
""" Default morphological tagging rules for English, based on word suffixes.
"""
word, pos = token
if word.endswith("ing"):
pos = "VBG"
if word.endswith("ly"):
pos = "RB"
if word.endswith("s") and not word.endswith(("is", "ous", "ss")):
pos = "NNS"
if word.endswith(("able", "al", "ful", "ible", "ient", "ish", "ive", "less", "tic", "ous")) or "-" in word:
pos = "JJ"
if word.endswith("ed"):
pos = "VBN"
if word.endswith(("ate", "ify", "ise", "ize")):
pos = "VBP"
return [word, pos]
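# Editor's sketch (hypothetical tokens): the rules above are applied in order,
# so the last matching suffix wins.
def _example_suffix_rules():
    assert _suffix_rules(["running", "NN"]) == ["running", "VBG"]
    assert _suffix_rules(["easily", "NN"]) == ["easily", "RB"]
    assert _suffix_rules(["hesitated", "NN"]) == ["hesitated", "VBN"]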
def find_tags(tokens, lexicon={}, default=("NN", "NNP", "CD"), language="en", map=None, **kwargs):
""" Returns a list of [token, tag]-items for the given list of tokens:
["The", "cat", "purs"] => [["The", "DT"], ["cat", "NN"], ["purs", "VB"]]
Words are tagged using the given lexicon of (word, tag)-items.
Unknown words are tagged NN by default.
Unknown words that start with a capital letter are tagged NNP (unless language="de").
Unknown words that consist only of digits and punctuation marks are tagged CD.
Unknown words are then improved with morphological rules.
All words are improved with contextual rules.
If map is a function, it is applied to each (token, tag) after applying all rules.
"""
tagged = []
if isinstance(lexicon, Lexicon):
f = lexicon.morphology.apply
elif language == "en":
f = _suffix_rules
else:
f = lambda token, **kwargs: token
for i, token in enumerate(tokens):
tagged.append([token, lexicon.get(token, i==0 and lexicon.get(token.lower()) or None)])
for i, (token, tag) in enumerate(tagged):
if tag is None:
if len(token) > 0 \
and token[0].isupper() \
and token[0].isalpha() \
and language != "de":
tagged[i] = [token, default[1]] # NNP
elif CD.match(token) is not None:
tagged[i] = [token, default[2]] # CD
else:
tagged[i] = [token, default[0]] # NN
tagged[i] = f(tagged[i],
previous = i > 0 and tagged[i-1] or (None, None),
next = i < len(tagged)-1 and tagged[i+1] or (None, None))
if isinstance(lexicon, Lexicon):
tagged = lexicon.context.apply(tagged)
tagged = lexicon.entities.apply(tagged)
if map is not None:
tagged = [list(map(token, tag)) or [token, default[0]] for token, tag in tagged]
return tagged
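# Editor's sketch with a small inline lexicon (the parser normally passes a
# Lexicon with morphology/context rules); unknown capitalized words => NNP,
# digit strings => CD.
def _example_find_tags():
    return find_tags(["The", "cat", "sat", "in", "Paris", "in", "2015"],
        lexicon={"the": "DT", "cat": "NN", "sat": "VBD", "in": "IN"})
    # => [["The", "DT"], ["cat", "NN"], ["sat", "VBD"], ["in", "IN"],
    #     ["Paris", "NNP"], ["in", "IN"], ["2015", "CD"]]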
SEPARATOR = "/"
NN = r"NN|NNS|NNP|NNPS|NNPS?\-[A-Z]{3,4}|PR|PRP|PRP\$"
VB = r"VB|VBD|VBG|VBN|VBP|VBZ"
JJ = r"JJ|JJR|JJS"
RB = r"(?<!W)RB|RBR|RBS"
CHUNKS = [[
# Germanic languages: en, de, nl, ...
( "NP", re.compile(r"(("+NN+")/)*((DT|CD|CC|CJ)/)*(("+RB+"|"+JJ+")/)*(("+NN+")/)+")),
( "VP", re.compile(r"(((MD|"+RB+")/)*(("+VB+")/)+)+")),
( "VP", re.compile(r"((MD)/)")),
( "PP", re.compile(r"((IN|PP|TO)/)+")),
("ADJP", re.compile(r"((CC|CJ|"+RB+"|"+JJ+")/)*(("+JJ+")/)+")),
("ADVP", re.compile(r"(("+RB+"|WRB)/)+")),
], [
# Romance languages: es, fr, it, ...
( "NP", re.compile(r"(("+NN+")/)*((DT|CD|CC|CJ)/)*(("+RB+"|"+JJ+")/)*(("+NN+")/)+(("+RB+"|"+JJ+")/)*")),
( "VP", re.compile(r"(((MD|"+RB+")/)*(("+VB+")/)+(("+RB+")/)*)+")),
( "VP", re.compile(r"((MD)/)")),
( "PP", re.compile(r"((IN|PP|TO)/)+")),
("ADJP", re.compile(r"((CC|CJ|"+RB+"|"+JJ+")/)*(("+JJ+")/)+")),
("ADVP", re.compile(r"(("+RB+"|WRB)/)+")),
]]
# Move the PP rule before the VP rules:
# rule order decides which pattern claims a tag first.
CHUNKS[0].insert(1, CHUNKS[0].pop(3))
CHUNKS[1].insert(1, CHUNKS[1].pop(3))
def find_chunks(tagged, language="en"):
""" The input is a list of [token, tag]-items.
The output is a list of [token, tag, chunk]-items:
The/DT nice/JJ fish/NN is/VBZ dead/JJ ./. =>
The/DT/B-NP nice/JJ/I-NP fish/NN/I-NP is/VBZ/B-VP dead/JJ/B-ADJP ././O
"""
chunked = [x for x in tagged]
tags = "".join("%s%s" % (tag, SEPARATOR) for token, tag in tagged)
# Use Germanic or Romance chunking rules according to given language.
    for tag, rule in CHUNKS[int(language in ("ca", "es", "pt", "fr", "it", "ro"))]:
for m in rule.finditer(tags):
# Find the start of chunks inside the tags-string.
# Number of preceding separators = number of preceding tokens.
i = m.start()
j = tags[:i].count(SEPARATOR)
n = m.group(0).count(SEPARATOR)
for k in range(j, j+n):
if len(chunked[k]) == 3:
continue
if len(chunked[k]) < 3:
                    # A conjunction cannot be the start of a chunk.
if k == j and chunked[k][1] in ("CC", "CJ", "KON", "Conj(neven)"):
j += 1
# Mark first token in chunk with B-.
elif k == j:
chunked[k].append("B-"+tag)
# Mark other tokens in chunk with I-.
else:
chunked[k].append("I-"+tag)
    # Mark chinks (tokens outside of a chunk) with O.
for chink in filter(lambda x: len(x) < 3, chunked):
chink.append("O")
# Post-processing corrections.
for i, (word, tag, chunk) in enumerate(chunked):
if tag.startswith("RB") and chunk == "B-NP":
# "Very nice work" (NP) <=> "Perhaps" (ADVP) + "you" (NP).
if i < len(chunked)-1 and not chunked[i+1][1].startswith("JJ"):
chunked[i+0][2] = "B-ADVP"
chunked[i+1][2] = "B-NP"
return chunked
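# Editor's sketch: the docstring example as a callable check (Germanic rules).
def _example_find_chunks():
    return find_chunks([["The", "DT"], ["nice", "JJ"], ["fish", "NN"],
        ["is", "VBZ"], ["dead", "JJ"], [".", "."]])
    # => [["The", "DT", "B-NP"], ["nice", "JJ", "I-NP"], ["fish", "NN", "I-NP"],
    #     ["is", "VBZ", "B-VP"], ["dead", "JJ", "B-ADJP"], [".", ".", "O"]]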
def find_prepositions(chunked):
""" The input is a list of [token, tag, chunk]-items.
The output is a list of [token, tag, chunk, preposition]-items.
PP-chunks followed by NP-chunks make up a PNP-chunk.
"""
# Tokens that are not part of a preposition just get the O-tag.
for ch in chunked:
ch.append("O")
for i, chunk in enumerate(chunked):
if chunk[2].endswith("PP") and chunk[-1] == "O":
# Find PP followed by other PP, NP with nouns and pronouns, VP with a gerund.
if i < len(chunked)-1 and \
(chunked[i+1][2].endswith(("NP", "PP")) or \
chunked[i+1][1] in ("VBG", "VBN")):
chunk[-1] = "B-PNP"
pp = True
for ch in chunked[i+1:]:
if not (ch[2].endswith(("NP", "PP")) or ch[1] in ("VBG", "VBN")):
break
if ch[2].endswith("PP") and pp:
ch[-1] = "I-PNP"
if not ch[2].endswith("PP"):
ch[-1] = "I-PNP"
pp = False
return chunked
BE = dict.fromkeys(("be", "am", "are", "is", "being", "was", "were", "been"), True)
GO = dict.fromkeys(("go", "goes", "going", "went"), True)
def find_relations(chunked):
""" The input is a list of [token, tag, chunk]-items.
The output is a list of [token, tag, chunk, relation]-items.
A noun phrase preceding a verb phrase is perceived as sentence subject.
A noun phrase following a verb phrase is perceived as sentence object.
"""
tag = lambda token: token[2].split("-")[-1] # B-NP => NP
# Group successive tokens with the same chunk-tag.
chunks = []
for token in chunked:
if len(chunks) == 0 \
or token[2].startswith("B-") \
or tag(token) != tag(chunks[-1][-1]):
chunks.append([])
chunks[-1].append(token+["O"])
# If a VP is preceded by a NP, the NP is tagged as NP-SBJ-(id).
# If a VP is followed by a NP, the NP is tagged as NP-OBJ-(id).
# Chunks that are not part of a relation get an O-tag.
id = 0
for i, chunk in enumerate(chunks):
if tag(chunk[-1]) == "VP" and i > 0 and tag(chunks[i-1][-1]) == "NP":
if chunk[-1][-1] == "O":
id += 1
for token in chunk:
token[-1] = "VP-" + str(id)
for token in chunks[i-1]:
token[-1] += "*NP-SBJ-" + str(id)
token[-1] = token[-1].lstrip("O-*")
if tag(chunk[-1]) == "VP" and i < len(chunks)-1 and tag(chunks[i+1][-1]) == "NP":
if chunk[-1][-1] == "O":
id += 1
for token in chunk:
token[-1] = "VP-" + str(id)
for token in chunks[i+1]:
token[-1] = "*NP-OBJ-" + str(id)
token[-1] = token[-1].lstrip("O-*")
# This is more a proof-of-concept than useful in practice:
# PP-LOC = be + in|at + the|my
# PP-DIR = go + to|towards + the|my
for i, chunk in enumerate(chunks):
if 0 < i < len(chunks)-1 and len(chunk) == 1 and chunk[-1][-1] == "O":
t0, t1, t2 = chunks[i-1][-1], chunks[i][0], chunks[i+1][0] # previous / current / next
if tag(t1) == "PP" and t2[1] in ("DT", "PR", "PRP$"):
if t0[0] in BE and t1[0] in ("in", "at") : t1[-1] = "PP-LOC"
if t0[0] in GO and t1[0] in ("to", "towards") : t1[-1] = "PP-DIR"
related = []; [related.extend(chunk) for chunk in chunks]
return related
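# Editor's sketch chaining the three steps above; the relation column is the
# editor's expectation from the rules (NP before VP => SBJ, NP after VP => OBJ).
def _example_find_relations():
    tagged = [["The", "DT"], ["cat", "NN"], ["eats", "VBZ"], ["fish", "NN"]]
    return find_relations(find_prepositions(find_chunks(tagged)))
    # => relation column: NP-SBJ-1, NP-SBJ-1, VP-1, NP-OBJ-1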
def commandline(parse=Parser().parse):
import optparse
import codecs
p = optparse.OptionParser()
p.add_option("-f", "--file", dest="file", action="store", help="text file to parse", metavar="FILE")
p.add_option("-s", "--string", dest="string", action="store", help="text string to parse", metavar="STRING")
p.add_option("-O", "--tokenize", dest="tokenize", action="store_true", help="tokenize the input")
p.add_option("-T", "--tags", dest="tags", action="store_true", help="parse part-of-speech tags")
p.add_option("-C", "--chunks", dest="chunks", action="store_true", help="parse chunk tags")
p.add_option("-R", "--relations", dest="relations", action="store_true", help="find verb/predicate relations")
p.add_option("-L", "--lemmata", dest="lemmata", action="store_true", help="find word lemmata")
p.add_option("-e", "--encoding", dest="encoding", action="store_true", help="character encoding", default="utf-8")
p.add_option("-v", "--version", dest="version", action="store_true", help="version info")
o, arguments = p.parse_args()
# Version info.
if o.version:
sys.path.insert(0, os.path.join(MODULE, "..", ".."))
from pattern import __version__
print __version__
sys.path.pop(0)
# Either a text file (-f) or a text string (-s) must be supplied.
s = o.file and codecs.open(o.file, "r", o.encoding).read() or o.string
# The given text can be parsed in two modes:
# - implicit: parse everything (tokenize, tag/chunk, find relations, lemmatize).
# - explicit: define what to parse manually.
if s:
explicit = False
for option in [o.tokenize, o.tags, o.chunks, o.relations, o.lemmata]:
if option is not None: explicit=True; break
if not explicit:
a = {"encoding": o.encoding }
else:
a = {"tokenize": o.tokenize or False,
"tags": o.tags or False,
"chunks": o.chunks or False,
"relations": o.relations or False,
"lemmata": o.lemmata or False,
"encoding": o.encoding }
s = parse(s, **a)
    # The output can be either a slash-formatted string or XML.
if "xml" in arguments:
s = Tree(s, s.tags).xml
print s
INFINITIVE, PRESENT, PAST, FUTURE = \
INF, PRES, PST, FUT = \
"infinitive", "present", "past", "future"
FIRST, SECOND, THIRD = \
1, 2, 3
SINGULAR, PLURAL = \
SG, PL = \
"singular", "plural"
INDICATIVE, IMPERATIVE, CONDITIONAL, SUBJUNCTIVE = \
IND, IMP, COND, SJV = \
"indicative", "imperative", "conditional", "subjunctive"
IMPERFECTIVE, PERFECTIVE, PROGRESSIVE = \
IPFV, PFV, PROG = \
"imperfective", "perfective", "progressive"
IMPERFECT = "imperfect"
PRETERITE = "preterite"
PARTICIPLE, GERUND = "participle", "gerund"
CONTINUOUS = CONT = "continuous"
_ = None # prettify the table =>
TENSES = {
None: (None, _, _, _, _, False, (None ,)), # ENGLISH SPANISH GERMAN DUTCH FRENCH
0 : ( INF, _, _, _, _, False, ("inf" ,)), # to be ser sein zijn être
1 : (PRES, 1, SG, IND, IPFV, False, ("1sg" ,)), # I am soy bin ben suis
2 : (PRES, 2, SG, IND, IPFV, False, ("2sg" ,)), # you are eres bist bent es
3 : (PRES, 3, SG, IND, IPFV, False, ("3sg" ,)), # (s)he is es ist is est
4 : (PRES, 1, PL, IND, IPFV, False, ("1pl" ,)), # we are somos sind zijn sommes
5 : (PRES, 2, PL, IND, IPFV, False, ("2pl" ,)), # you are sois seid zijn êtes
6 : (PRES, 3, PL, IND, IPFV, False, ("3pl" ,)), # they are son sind zijn sont
7 : (PRES, _, PL, IND, IPFV, False, ( "pl" ,)), # are
8 : (PRES, _, _, IND, PROG, False, ("part" ,)), # being siendo zijnd étant
9 : (PRES, 1, SG, IND, IPFV, True, ("1sg-" ,)), # I am not
10 : (PRES, 2, SG, IND, IPFV, True, ("2sg-" ,)), # you aren't
11 : (PRES, 3, SG, IND, IPFV, True, ("3sg-" ,)), # (s)he isn't
12 : (PRES, 1, PL, IND, IPFV, True, ("1pl-" ,)), # we aren't
13 : (PRES, 2, PL, IND, IPFV, True, ("2pl-" ,)), # you aren't
14 : (PRES, 3, PL, IND, IPFV, True, ("3pl-" ,)), # they aren't
15 : (PRES, _, PL, IND, IPFV, True, ( "pl-" ,)), # aren't
16 : (PRES, _, _, IND, IPFV, True, ( "-" ,)), # isn't
17 : ( PST, 1, SG, IND, IPFV, False, ("1sgp" ,)), # I was era war was étais
18 : ( PST, 2, SG, IND, IPFV, False, ("2sgp" ,)), # you were eras warst was étais
19 : ( PST, 3, SG, IND, IPFV, False, ("3sgp" ,)), # (s)he was era war was était
20 : ( PST, 1, PL, IND, IPFV, False, ("1ppl" ,)), # we were éramos waren waren étions
21 : ( PST, 2, PL, IND, IPFV, False, ("2ppl" ,)), # you were erais wart waren étiez
22 : ( PST, 3, PL, IND, IPFV, False, ("3ppl" ,)), # they were eran waren waren étaient
23 : ( PST, _, PL, IND, IPFV, False, ( "ppl" ,)), # were
24 : ( PST, _, _, IND, PROG, False, ("ppart",)), # been sido gewesen geweest été
25 : ( PST, _, _, IND, IPFV, False, ( "p" ,)), # was
26 : ( PST, 1, SG, IND, IPFV, True, ("1sgp-",)), # I wasn't
27 : ( PST, 2, SG, IND, IPFV, True, ("2sgp-",)), # you weren't
28 : ( PST, 3, SG, IND, IPFV, True, ("3sgp-",)), # (s)he wasn't
29 : ( PST, 1, PL, IND, IPFV, True, ("1ppl-",)), # we weren't
30 : ( PST, 2, PL, IND, IPFV, True, ("2ppl-",)), # you weren't
31 : ( PST, 3, PL, IND, IPFV, True, ("3ppl-",)), # they weren't
32 : ( PST, _, PL, IND, IPFV, True, ( "ppl-",)), # weren't
33 : ( PST, _, _, IND, IPFV, True, ( "p-" ,)), # wasn't
34 : ( PST, 1, SG, IND, PFV, False, ("1sg+" ,)), # I fui fus
35 : ( PST, 2, SG, IND, PFV, False, ("2sg+" ,)), # you fuiste fus
36 : ( PST, 3, SG, IND, PFV, False, ("3sg+" ,)), # (s)he fue fut
37 : ( PST, 1, PL, IND, PFV, False, ("1pl+" ,)), # we fuimos fûmes
38 : ( PST, 2, PL, IND, PFV, False, ("2pl+" ,)), # you fuisteis fûtes
39 : ( PST, 3, PL, IND, PFV, False, ("3pl+" ,)), # they fueron furent
40 : ( FUT, 1, SG, IND, IPFV, False, ("1sgf" ,)), # I seré serai
41 : ( FUT, 2, SG, IND, IPFV, False, ("2sgf" ,)), # you serás seras
42 : ( FUT, 3, SG, IND, IPFV, False, ("3sgf" ,)), # (s)he será sera
43 : ( FUT, 1, PL, IND, IPFV, False, ("1plf" ,)), # we seremos serons
44 : ( FUT, 2, PL, IND, IPFV, False, ("2plf" ,)), # you seréis serez
    45 : ( FUT, 3, PL, IND, IPFV, False, ("3plf" ,)), # they serán seront
46 : (PRES, 1, SG, COND, IPFV, False, ("1sg->",)), # I sería serais
47 : (PRES, 2, SG, COND, IPFV, False, ("2sg->",)), # you serías serais
48 : (PRES, 3, SG, COND, IPFV, False, ("3sg->",)), # (s)he sería serait
49 : (PRES, 1, PL, COND, IPFV, False, ("1pl->",)), # we seríamos serions
50 : (PRES, 2, PL, COND, IPFV, False, ("2pl->",)), # you seríais seriez
51 : (PRES, 3, PL, COND, IPFV, False, ("3pl->",)), # they serían seraient
52 : (PRES, 2, SG, IMP, IPFV, False, ("2sg!" ,)), # you sé sei sois
521: (PRES, 3, SG, IMP, IPFV, False, ("3sg!" ,)), # (s)he
53 : (PRES, 1, PL, IMP, IPFV, False, ("1pl!" ,)), # we seien soyons
54 : (PRES, 2, PL, IMP, IPFV, False, ("2pl!" ,)), # you sed seid soyez
541: (PRES, 3, PL, IMP, IPFV, False, ("3pl!" ,)), # you
55 : (PRES, 1, SG, SJV, IPFV, False, ("1sg?" ,)), # I sea sei sois
56 : (PRES, 2, SG, SJV, IPFV, False, ("2sg?" ,)), # you seas seist sois
57 : (PRES, 3, SG, SJV, IPFV, False, ("3sg?" ,)), # (s)he sea sei soit
58 : (PRES, 1, PL, SJV, IPFV, False, ("1pl?" ,)), # we seamos seien soyons
59 : (PRES, 2, PL, SJV, IPFV, False, ("2pl?" ,)), # you seáis seiet soyez
60 : (PRES, 3, PL, SJV, IPFV, False, ("3pl?" ,)), # they sean seien soient
61 : (PRES, 1, SG, SJV, PFV, False, ("1sg?+",)), # I
62 : (PRES, 2, SG, SJV, PFV, False, ("2sg?+",)), # you
63 : (PRES, 3, SG, SJV, PFV, False, ("3sg?+",)), # (s)he
64 : (PRES, 1, PL, SJV, PFV, False, ("1pl?+",)), # we
65 : (PRES, 2, PL, SJV, PFV, False, ("2pl?+",)), # you
66 : (PRES, 3, PL, SJV, PFV, False, ("3pl?+",)), # they
67 : ( PST, 1, SG, SJV, IPFV, False, ("1sgp?",)), # I fuera wäre fusse
68 : ( PST, 2, SG, SJV, IPFV, False, ("2sgp?",)), # you fueras wärest fusses
69 : ( PST, 3, SG, SJV, IPFV, False, ("3sgp?",)), # (s)he fuera wäre fût
70 : ( PST, 1, PL, SJV, IPFV, False, ("1ppl?",)), # we fuéramos wären fussions
71 : ( PST, 2, PL, SJV, IPFV, False, ("2ppl?",)), # you fuerais wäret fussiez
72 : ( PST, 3, PL, SJV, IPFV, False, ("3ppl?",)), # they fueran wären fussent
}
TENSES_ID = {}
TENSES_ID[INFINITIVE] = 0
for i, (tense, person, number, mood, aspect, negated, aliases) in TENSES.items():
for a in aliases + (i,):
TENSES_ID[i] = \
TENSES_ID[a] = \
TENSES_ID[(tense, person, number, mood, aspect, negated)] = i
if number == SG:
for sg in ("s", "sg", "singular"):
TENSES_ID[(tense, person, sg, mood, aspect, negated)] = i
if number == PL:
for pl in ("p", "pl", "plural"):
TENSES_ID[(tense, person, pl, mood, aspect, negated)] = i
for tag, tense in (
("VB", 0 ), # infinitive
("VBP", 1 ), # present 1 singular
("VBZ", 3 ), # present 3 singular
("VBG", 8 ), # present participle
("VBN", 24), # past participle
("VBD", 25)): # past
TENSES_ID[tag.lower()] = tense
def tense_id(*args, **kwargs):
""" Returns the tense id for a given (tense, person, number, mood, aspect, negated).
Aliases and compound forms (e.g., IMPERFECT) are disambiguated.
"""
# Unpack tense given as a tuple, e.g., tense((PRESENT, 1, SG)):
if len(args) == 1 and isinstance(args[0], (list, tuple)):
if args[0] not in ((PRESENT, PARTICIPLE), (PAST, PARTICIPLE)):
args = args[0]
# No parameters defaults to tense=INFINITIVE, tense=PRESENT otherwise.
if len(args) == 0 and len(kwargs) == 0:
t = INFINITIVE
else:
t = PRESENT
# Set default values.
tense = kwargs.get("tense" , args[0] if len(args) > 0 else t)
person = kwargs.get("person" , args[1] if len(args) > 1 else 3) or None
number = kwargs.get("number" , args[2] if len(args) > 2 else SINGULAR)
mood = kwargs.get("mood" , args[3] if len(args) > 3 else INDICATIVE)
aspect = kwargs.get("aspect" , args[4] if len(args) > 4 else IMPERFECTIVE)
negated = kwargs.get("negated", args[5] if len(args) > 5 else False)
# Disambiguate wrong order of parameters.
if mood in (PERFECTIVE, IMPERFECTIVE):
mood, aspect = INDICATIVE, mood
# Disambiguate INFINITIVE.
# Disambiguate PARTICIPLE, IMPERFECT, PRETERITE.
# These are often considered to be tenses but are in fact tense + aspect.
if tense == INFINITIVE:
person = number = mood = aspect = None; negated=False
if tense in ((PRESENT, PARTICIPLE), PRESENT+PARTICIPLE, PARTICIPLE, GERUND):
tense, aspect = PRESENT, PROGRESSIVE
if tense in ((PAST, PARTICIPLE), PAST+PARTICIPLE):
tense, aspect = PAST, PROGRESSIVE
if tense == IMPERFECT:
tense, aspect = PAST, IMPERFECTIVE
if tense == PRETERITE:
tense, aspect = PAST, PERFECTIVE
if aspect in (CONTINUOUS, PARTICIPLE, GERUND):
aspect = PROGRESSIVE
if aspect == PROGRESSIVE:
person = number = None
# Disambiguate CONDITIONAL.
# In Spanish, the conditional is regarded as an indicative tense.
if tense == CONDITIONAL and mood == INDICATIVE:
tense, mood = PRESENT, CONDITIONAL
# Disambiguate aliases: "pl" =>
# (PRESENT, None, PLURAL, INDICATIVE, IMPERFECTIVE, False).
return TENSES_ID.get(tense.lower(),
TENSES_ID.get((tense, person, number, mood, aspect, negated)))
tense = tense_id
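# Editor's sketch: a few illustrative lookups against the alias table above.
def _example_tense_id():
    assert tense_id() == 0                    # infinitive by default
    assert tense_id(PAST, 3, SINGULAR) == 19  # "(s)he was"
    assert tense_id("2sg") == 2               # alias lookup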
class Verbs(lazydict):
def __init__(self, path="", format=[], default={}, language=None):
""" A dictionary of verb infinitives, each linked to a list of conjugated forms.
Each line in the file at the given path is one verb, with the tenses separated by a comma.
The format defines the order of tenses (see TENSES).
The default dictionary defines default tenses for omitted tenses.
"""
self._path = path
self._language = language
self._format = dict((TENSES_ID[id], i) for i, id in enumerate(format))
self._default = default
self._inverse = {}
def load(self):
# have,,,has,,having,,,,,had,had,haven't,,,hasn't,,,,,,,hadn't,hadn't
id = self._format[TENSES_ID[INFINITIVE]]
for v in _read(self._path):
v = v.split(",")
dict.__setitem__(self, v[id], v)
for x in (x for x in v if x):
self._inverse[x] = v[id]
@property
def path(self):
return self._path
@property
def language(self):
return self._language
@property
def infinitives(self):
""" Yields a dictionary of (infinitive, [inflections])-items.
"""
if dict.__len__(self) == 0:
self.load()
return self
@property
def inflections(self):
""" Yields a dictionary of (inflected, infinitive)-items.
"""
if dict.__len__(self) == 0:
self.load()
return self._inverse
@property
def TENSES(self):
""" Yields a list of tenses for this language, excluding negations.
Each tense is a (tense, person, number, mood, aspect)-tuple.
"""
a = set(TENSES[id] for id in self._format)
a = a.union(set(TENSES[id] for id in self._default.keys()))
a = a.union(set(TENSES[id] for id in self._default.values()))
a = sorted(x[:-2] for x in a if x[-2] is False) # Exclude negation.
return a
def lemma(self, verb, parse=True):
""" Returns the infinitive form of the given verb, or None.
"""
if dict.__len__(self) == 0:
self.load()
if verb.lower() in self._inverse:
return self._inverse[verb.lower()]
if verb in self._inverse:
return self._inverse[verb]
if parse is True: # rule-based
return self.find_lemma(verb)
def lexeme(self, verb, parse=True):
""" Returns a list of all possible inflections of the given verb.
"""
a = []
b = self.lemma(verb, parse=parse)
if b in self:
a = [x for x in self[b] if x != ""]
elif parse is True: # rule-based
a = self.find_lexeme(b)
u = []; [u.append(x) for x in a if x not in u]
return u
def conjugate(self, verb, *args, **kwargs):
""" Inflects the verb and returns the given tense (or None).
For example: be
- Verbs.conjugate("is", INFINITVE) => be
- Verbs.conjugate("be", PRESENT, 1, SINGULAR) => I am
- Verbs.conjugate("be", PRESENT, 1, PLURAL) => we are
- Verbs.conjugate("be", PAST, 3, SINGULAR) => he was
- Verbs.conjugate("be", PAST, aspect=PROGRESSIVE) => been
- Verbs.conjugate("be", PAST, person=1, negated=True) => I wasn't
"""
id = tense_id(*args, **kwargs)
# Get the tense index from the format description (or a default).
i1 = self._format.get(id)
i2 = self._format.get(self._default.get(id))
i3 = self._format.get(self._default.get(self._default.get(id)))
b = self.lemma(verb, parse=kwargs.get("parse", True))
v = []
# Get the verb lexeme and return the requested index.
if b in self:
v = self[b]
for i in (i1, i2, i3):
if i is not None and 0 <= i < len(v) and v[i]:
return v[i]
if kwargs.get("parse", True) is True: # rule-based
v = self.find_lexeme(b)
for i in (i1, i2, i3):
if i is not None and 0 <= i < len(v) and v[i]:
return v[i]
def tenses(self, verb, parse=True):
""" Returns a list of possible tenses for the given inflected verb.
"""
verb = verb.lower()
a = set()
b = self.lemma(verb, parse=parse)
v = []
if b in self:
v = self[b]
elif parse is True: # rule-based
v = self.find_lexeme(b)
# For each tense in the verb lexeme that matches the given tense,
# 1) retrieve the tense tuple,
# 2) retrieve the tense tuples for which that tense is a default.
for i, tense in enumerate(v):
if tense == verb:
for id, index in self._format.items():
if i == index:
a.add(id)
        # Propagate defaults twice, so that
        # defaults-of-defaults are included as well.
        for id1, id2 in self._default.items():
            if id2 in a:
                a.add(id1)
        for id1, id2 in self._default.items():
            if id2 in a:
                a.add(id1)
a = (TENSES[id][:-2] for id in a)
a = Tenses(sorted(a))
return a
def find_lemma(self, verb):
# Must be overridden in a subclass.
# Must return the infinitive for the given conjugated (unknown) verb.
return verb
def find_lexeme(self, verb):
# Must be overridden in a subclass.
# Must return the list of conjugations for the given (unknown) verb.
return []
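# Editor's sketch (hypothetical subclass): Verbs is abstract over its data file;
# a language module overrides find_lemma()/find_lexeme() for unknown verbs.
# The naive suffix-stripping below is illustrative, not a real lemmatizer.
class _ExampleVerbs(Verbs):
    def find_lemma(self, verb):
        for suffix in ("ing", "ed", "es", "s"):
            if verb.endswith(suffix):
                return verb[:-len(suffix)]
        return verb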
class Tenses(list):
def __contains__(self, tense):
# t in tenses(verb) also works when t is an alias (e.g. "1sg").
return list.__contains__(self, TENSES[tense_id(tense)][:-2])
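# Editor's sketch: Tenses.__contains__ normalizes aliases, so these two
# membership tests are equivalent for any populated Tenses list.
def _example_tenses_contains(tenses):
    return ("1sg" in tenses) == \
           ((PRESENT, 1, SINGULAR, INDICATIVE, IMPERFECTIVE) in tenses)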
MOOD = "mood" # emoticons, emojis
IRONY = "irony" # sarcasm mark (!)
NOUN, VERB, ADJECTIVE, ADVERB = \
"NN", "VB", "JJ", "RB"
RE_SYNSET = re.compile(r"^[acdnrv][-_][0-9]+$")
def avg(values):
    return sum(values) / float(len(values) or 1)
class Score(tuple):
def __new__(self, polarity, subjectivity, assessments=[]):
""" A (polarity, subjectivity)-tuple with an assessments property.
"""
return tuple.__new__(self, [polarity, subjectivity])
def __init__(self, polarity, subjectivity, assessments=[]):
self.assessments = assessments
class Sentiment(lazydict):
def __init__(self, path="", language=None, synset=None, confidence=None, **kwargs):
""" A dictionary of words (adjectives) and polarity scores (positive/negative).
The value for each word is a dictionary of part-of-speech tags.
The value for each word POS-tag is a tuple with values for
            polarity (-1.0 to +1.0), subjectivity (0.0 to 1.0) and intensity (0.5 to 2.0).
"""
self._path = path # XML file path.
        self._language = language     # XML language attribute ("en", "fr", ...)
        self._confidence = confidence # XML confidence attribute threshold (>=).
self._synset = synset # XML synset attribute ("wordnet_id", "cornetto_id", ...)
self._synsets = {} # {"a-01123879": (1.0, 1.0, 1.0)}
self.labeler = {} # {"dammit": "profanity"}
self.tokenizer = kwargs.get("tokenizer", find_tokens)
self.negations = kwargs.get("negations", ("no", "not", "n't", "never"))
self.modifiers = kwargs.get("modifiers", ("RB",))
self.modifier = kwargs.get("modifier" , lambda w: w.endswith("ly"))
@property
def path(self):
return self._path
@property
def language(self):
return self._language
@property
def confidence(self):
return self._confidence
def load(self, path=None):
""" Loads the XML-file (with sentiment annotations) from the given path.
By default, Sentiment.path is lazily loaded.
"""
# <word form="great" wordnet_id="a-01123879" pos="JJ" polarity="1.0" subjectivity="1.0" intensity="1.0" />
# <word form="damnmit" polarity="-0.75" subjectivity="1.0" label="profanity" />
if not path:
path = self._path
if not os.path.exists(path):
return
words, synsets, labels = {}, {}, {}
xml = cElementTree.parse(path)
xml = xml.getroot()
for w in xml.findall("word"):
if self._confidence is None \
or self._confidence <= float(w.attrib.get("confidence", 0.0)):
w, pos, p, s, i, label, synset = (
w.attrib.get("form"),
w.attrib.get("pos"),
w.attrib.get("polarity", 0.0),
w.attrib.get("subjectivity", 0.0),
w.attrib.get("intensity", 1.0),
w.attrib.get("label"),
w.attrib.get(self._synset) # wordnet_id, cornetto_id, ...
)
psi = (float(p), float(s), float(i))
if w:
words.setdefault(w, {}).setdefault(pos, []).append(psi)
if w and label:
labels[w] = label
if synset:
synsets.setdefault(synset, []).append(psi)
self._language = xml.attrib.get("language", self._language)
# Average scores of all word senses per part-of-speech tag.
for w in words:
words[w] = dict((pos, map(avg, zip(*psi))) for pos, psi in words[w].items())
# Average scores of all part-of-speech tags.
for w, pos in words.items():
words[w][None] = map(avg, zip(*pos.values()))
# Average scores of all synonyms per synset.
for id, psi in synsets.items():
synsets[id] = map(avg, zip(*psi))
dict.update(self, words)
dict.update(self.labeler, labels)
dict.update(self._synsets, synsets)
def synset(self, id, pos=ADJECTIVE):
""" Returns a (polarity, subjectivity)-tuple for the given synset id.
For example, the adjective "horrible" has id 193480 in WordNet:
Sentiment.synset(193480, pos="JJ") => (-0.6, 1.0, 1.0).
"""
id = str(id).zfill(8)
if not id.startswith(("n-", "v-", "a-", "r-")):
if pos == NOUN:
id = "n-" + id
if pos == VERB:
id = "v-" + id
if pos == ADJECTIVE:
id = "a-" + id
if pos == ADVERB:
id = "r-" + id
if dict.__len__(self) == 0:
self.load()
return tuple(self._synsets.get(id, (0.0, 0.0))[:2])
def __call__(self, s, negation=True, **kwargs):
""" Returns a (polarity, subjectivity)-tuple for the given sentence,
with polarity between -1.0 and 1.0 and subjectivity between 0.0 and 1.0.
The sentence can be a string, Synset, Text, Sentence, Chunk, Word, Document, Vector.
An optional weight parameter can be given,
as a function that takes a list of words and returns a weight.
"""
def avg(assessments, weighted=lambda w: 1):
s, n = 0, 0
for words, score in assessments:
w = weighted(words)
s += w * score
n += w
return s / float(n or 1)
# A pattern.en.wordnet.Synset.
# Sentiment(synsets("horrible", "JJ")[0]) => (-0.6, 1.0)
if hasattr(s, "gloss"):
a = [(s.synonyms[0],) + self.synset(s.id, pos=s.pos) + (None,)]
# A synset id.
# Sentiment("a-00193480") => horrible => (-0.6, 1.0) (English WordNet)
# Sentiment("c_267") => verschrikkelijk => (-0.9, 1.0) (Dutch Cornetto)
elif isinstance(s, basestring) and RE_SYNSET.match(s):
            a = [(s,) + self.synset(s) + (None,)]
# A string of words.
# Sentiment("a horrible movie") => (-0.6, 1.0)
elif isinstance(s, basestring):
a = self.assessments(((w.lower(), None) for w in " ".join(self.tokenizer(s)).split()), negation)
# A pattern.en.Text.
elif hasattr(s, "sentences"):
a = self.assessments(((w.lemma or w.string.lower(), w.pos[:2]) for w in chain(*s)), negation)
# A pattern.en.Sentence or pattern.en.Chunk.
elif hasattr(s, "lemmata"):
a = self.assessments(((w.lemma or w.string.lower(), w.pos[:2]) for w in s.words), negation)
# A pattern.en.Word.
elif hasattr(s, "lemma"):
a = self.assessments(((s.lemma or s.string.lower(), s.pos[:2]),), negation)
# A pattern.vector.Document.
# Average score = weighted average using feature weights.
# Bag-of words is unordered: inject None between each two words
# to stop assessments() from scanning for preceding negation & modifiers.
elif hasattr(s, "terms"):
a = self.assessments(chain(*(((w, None), (None, None)) for w in s)), negation)
kwargs.setdefault("weight", lambda w: s.terms[w[0]])
# A dict of (word, weight)-items.
elif isinstance(s, dict):
a = self.assessments(chain(*(((w, None), (None, None)) for w in s)), negation)
kwargs.setdefault("weight", lambda w: s[w[0]])
# A list of words.
elif isinstance(s, list):
a = self.assessments(((w, None) for w in s), negation)
else:
a = []
weight = kwargs.get("weight", lambda w: 1)
return Score(polarity = avg(map(lambda (w, p, s, x): (w, p), a), weight),
subjectivity = avg(map(lambda (w, p, s, x): (w, s), a), weight),
assessments = a)
def assessments(self, words=[], negation=True):
""" Returns a list of (chunk, polarity, subjectivity, label)-tuples for the given list of words:
where chunk is a list of successive words: a known word optionally
preceded by a modifier ("very good") or a negation ("not good").
"""
a = []
m = None # Preceding modifier (i.e., adverb or adjective).
n = None # Preceding negation (e.g., "not beautiful").
for w, pos in words:
# Only assess known words, preferably by part-of-speech tag.
# Including unknown words (polarity 0.0 and subjectivity 0.0) lowers the average.
if w is None:
continue
if w in self and pos in self[w]:
p, s, i = self[w][pos]
# Known word not preceded by a modifier ("good").
if m is None:
a.append(dict(w=[w], p=p, s=s, i=i, n=1, x=self.labeler.get(w)))
# Known word preceded by a modifier ("really good").
if m is not None:
a[-1]["w"].append(w)
a[-1]["p"] = max(-1.0, min(p * a[-1]["i"], +1.0))
a[-1]["s"] = max(-1.0, min(s * a[-1]["i"], +1.0))
a[-1]["i"] = i
a[-1]["x"] = self.labeler.get(w)
# Known word preceded by a negation ("not really good").
if n is not None:
a[-1]["w"].insert(0, n)
a[-1]["i"] = 1.0 / a[-1]["i"]
a[-1]["n"] = -1
# Known word may be a negation.
# Known word may be modifying the next word (i.e., it is a known adverb).
m = None
n = None
                if (pos and pos in self.modifiers) or any(map(self[w].__contains__, self.modifiers)):
m = (w, pos)
if negation and w in self.negations:
n = w
else:
# Unknown word may be a negation ("not good").
if negation and w in self.negations:
n = w
# Unknown word. Retain negation across small words ("not a good").
elif n and len(w.strip("'")) > 1:
n = None
# Unknown word may be a negation preceded by a modifier ("really not good").
if n is not None and m is not None and (pos in self.modifiers or self.modifier(m[0])):
a[-1]["w"].append(n)
a[-1]["n"] = -1
n = None
# Unknown word. Retain modifier across small words ("really is a good").
elif m and len(w) > 2:
m = None
# Exclamation marks boost previous word.
if w == "!" and len(a) > 0:
a[-1]["w"].append("!")
a[-1]["p"] = max(-1.0, min(a[-1]["p"] * 1.25, +1.0))
# Exclamation marks in parentheses indicate sarcasm.
if w == "(!)":
a.append(dict(w=[w], p=0.0, s=1.0, i=1.0, n=1, x=IRONY))
# EMOTICONS: {("grin", +1.0): set((":-D", ":D"))}
if w.isalpha() is False and len(w) <= 5 and w not in PUNCTUATION: # speedup
for (type, p), e in EMOTICONS.items():
if w in imap(lambda e: e.lower(), e):
a.append(dict(w=[w], p=p, s=1.0, i=1.0, n=1, x=MOOD))
break
for i in range(len(a)):
w = a[i]["w"]
p = a[i]["p"]
s = a[i]["s"]
n = a[i]["n"]
x = a[i]["x"]
# "not good" = slightly bad, "not bad" = slightly good.
a[i] = (w, p * -0.5 if n < 0 else p, s, x)
return a
def annotate(self, word, pos=None, polarity=0.0, subjectivity=0.0, intensity=1.0, label=None):
""" Annotates the given word with polarity, subjectivity and intensity scores,
and optionally a semantic label (e.g., MOOD for emoticons, IRONY for "(!)").
"""
w = self.setdefault(word, {})
w[pos] = w[None] = (polarity, subjectivity, intensity)
if label:
self.labeler[word] = label
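# Editor's sketch: a self-contained call with one hand-annotated entry
# (hypothetical scores; real lexicons are loaded from the XML described above).
def _example_sentiment_annotate():
    s = Sentiment()
    s.annotate("good", pos="JJ", polarity=0.7, subjectivity=0.6, intensity=1.0)
    return s("a good movie") # => (0.7, 0.6)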
class Spelling(lazydict):
ALPHA = "abcdefghijklmnopqrstuvwxyz"
def __init__(self, path=""):
self._path = path
def load(self):
for x in _read(self._path):
x = x.split()
dict.__setitem__(self, x[0], int(x[1]))
@property
def path(self):
return self._path
@property
def language(self):
return self._language
    @classmethod
    def train(cls, s, path="spelling.txt"):
""" Counts the words in the given string and saves the probabilities at the given path.
This can be used to generate a new model for the Spelling() constructor.
"""
model = {}
for w in re.findall("[a-z]+", s.lower()):
model[w] = w in model and model[w] + 1 or 1
model = ("%s %s" % (k, v) for k, v in sorted(model.items()))
model = "\n".join(model)
f = open(path, "w")
f.write(model)
f.close()
def _edit1(self, w):
""" Returns a set of words with edit distance 1 from the given word.
"""
# Of all spelling errors, 80% is covered by edit distance 1.
# Edit distance 1 = one character deleted, swapped, replaced or inserted.
split = [(w[:i], w[i:]) for i in range(len(w) + 1)]
delete, transpose, replace, insert = (
[a + b[1:] for a, b in split if b],
[a + b[1] + b[0] + b[2:] for a, b in split if len(b) > 1],
[a + c + b[1:] for a, b in split for c in Spelling.ALPHA if b],
[a + c + b[0:] for a, b in split for c in Spelling.ALPHA]
)
return set(delete + transpose + replace + insert)
def _edit2(self, w):
""" Returns a set of words with edit distance 2 from the given word
"""
# Of all spelling errors, 99% is covered by edit distance 2.
# Only keep candidates that are actually known words (20% speedup).
return set(e2 for e1 in self._edit1(w) for e2 in self._edit1(e1) if e2 in self)
def _known(self, words=[]):
""" Returns the given list of words filtered by known words.
"""
return set(w for w in words if w in self)
def suggest(self, w):
""" Return a list of (word, confidence) spelling corrections for the given word,
based on the probability of known words with edit distance 1-2 from the given word.
"""
if len(self) == 0:
self.load()
candidates = self._known([w]) \
or self._known(self._edit1(w)) \
or self._known(self._edit2(w)) \
or [w]
candidates = [(self.get(w, 0.0), w) for w in candidates]
s = float(sum(p for p, w in candidates) or 1)
candidates = sorted(((p / s, w) for p, w in candidates), reverse=True)
candidates = [(w, p) for p, w in candidates]
return candidates
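# Editor's sketch: training a tiny hypothetical model and querying it end-to-end.
def _example_spelling_suggest():
    import os.path
    import tempfile
    path = os.path.join(tempfile.mkdtemp(), "spelling.txt")
    Spelling.train("the cat sat on the mat the cat", path=path)
    return Spelling(path=path).suggest("teh") # => [("the", 1.0)]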
LANGUAGES = ["en", "es", "de", "fr", "it", "nl"]
_modules = {}
def _multilingual(function, *args, **kwargs):
""" Returns the value from the function with the given name in the given language module.
By default, language="en".
"""
language = kwargs.pop("language", "en")
module = _modules.setdefault(language, __import__(language, globals(), {}, [], -1))
return getattr(module, function)(*args, **kwargs)
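# Editor's note (usage sketch): each wrapper below dispatches to the sibling
# language module named by the keyword argument, e.g.
#     parse("the black cat", language="en")  # the "en" module's parse()
#     sentiment("jolie", language="fr")      # the "fr" module's sentiment()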
def tokenize(*args, **kwargs):
return _multilingual("tokenize", *args, **kwargs)
def parse(*args, **kwargs):
return _multilingual("parse", *args, **kwargs)
def parsetree(*args, **kwargs):
return _multilingual("parsetree", *args, **kwargs)
def split(*args, **kwargs):
return _multilingual("split", *args, **kwargs)
def tag(*args, **kwargs):
return _multilingual("tag", *args, **kwargs)
def sentiment(*args, **kwargs):
return _multilingual("sentiment", *args, **kwargs)
def singularize(*args, **kwargs):
return _multilingual("singularize", *args, **kwargs)
def pluralize(*args, **kwargs):
return _multilingual("pluralize", *args, **kwargs)
def conjugate(*args, **kwargs):
return _multilingual("conjugate", *args, **kwargs)
def predicative(*args, **kwargs):
return _multilingual("predicative", *args, **kwargs)
def suggest(*args, **kwargs):
return _multilingual("suggest", *args, **kwargs)
import unittest
from autoprotocol.protocol import Protocol, Ref
from autoprotocol.instruction import Instruction, Thermocycle, Incubate, Pipette, Spin
from autoprotocol.container_type import ContainerType
from autoprotocol.container import Container, WellGroup, Well
from autoprotocol.unit import Unit
from autoprotocol.pipette_tools import *
import json
class ProtocolMultipleExistTestCase(unittest.TestCase):
def runTest(self):
p1 = Protocol()
p2 = Protocol()
p1.spin("dummy_ref", "2000:rpm", "560:second")
self.assertEqual(len(p2.instructions), 0,
"incorrect number of instructions in empty protocol")
class ProtocolBasicTestCase(unittest.TestCase):
def runTest(self):
protocol = Protocol()
resource = protocol.ref("resource", None, "96-flat", discard=True)
pcr = protocol.ref("pcr", None, "96-flat", discard=True)
bacteria = protocol.ref("bacteria", None, "96-flat", discard=True)
self.assertEqual(len(protocol.as_dict()['refs']), 3, 'incorrect number of refs')
self.assertEqual(protocol.as_dict()['refs']['resource'], {"new": "96-flat",
"discard": True})
bacteria_wells = WellGroup([bacteria.well("B1"), bacteria.well("C5"),
bacteria.well("A5"), bacteria.well("A1")])
protocol.distribute(resource.well("A1").set_volume("40:microliter"),
pcr.wells_from('A1',5), "5:microliter")
protocol.distribute(resource.well("A1").set_volume("40:microliter"),
bacteria_wells, "5:microliter")
self.assertEqual(len(protocol.instructions), 1)
self.assertEqual(protocol.instructions[0].op, "pipette")
self.assertEqual(len(protocol.instructions[0].groups), 2)
protocol.incubate(bacteria, "warm_37", "30:minute")
self.assertEqual(len(protocol.instructions), 2)
self.assertEqual(protocol.instructions[1].op, "incubate")
self.assertEqual(protocol.instructions[1].duration, "30:minute")
class ProtocolAppendTestCase(unittest.TestCase):
def runTest(self):
p = Protocol()
self.assertEqual(len(p.instructions), 0,
"should not be any instructions before appending to empty protocol")
p.append(Spin("dummy_ref", "100:meter/second^2", "60:second"))
self.assertEqual(len(p.instructions), 1,
"incorrect number of instructions after single instruction append")
self.assertEqual(p.instructions[0].op, "spin",
"incorrect instruction appended")
p.append([
Incubate("dummy_ref", "ambient", "30:second"),
Spin("dummy_ref", "2000:rpm", "120:second")
])
self.assertEqual(len(p.instructions), 3,
"incorrect number of instructions after appending instruction list")
self.assertEqual(p.instructions[1].op, "incubate",
"incorrect instruction order after list append")
self.assertEqual(p.instructions[2].op, "spin",
"incorrect instruction at end after list append.")
class RefTestCase(unittest.TestCase):
def test_duplicates_not_allowed(self):
p = Protocol()
p.ref("test", None, "96-flat", discard=True)
with self.assertRaises(RuntimeError):
p.ref("test", None, "96-flat", storage="cold_20")
self.assertTrue(p.refs["test"].opts["discard"])
self.assertFalse("where" in p.refs["test"].opts)
class ThermocycleTestCase(unittest.TestCase):
def test_thermocycle_append(self):
t = Thermocycle("plate", [
{ "cycles": 1, "steps": [
{ "temperature": "95:celsius", "duration": "60:second" },
] },
{ "cycles": 30, "steps": [
{ "temperature": "95:celsius", "duration": "15:second" },
{ "temperature": "55:celsius", "duration": "15:second" },
{ "temperature": "72:celsius", "duration": "10:second" },
] },
{ "cycles": 1, "steps": [
{ "temperature": "72:celsius", "duration": "600:second" },
{ "temperature": "12:celsius", "duration": "120:second" },
] },
], "20:microliter")
self.assertEqual(len(t.groups), 3, 'incorrect number of groups')
self.assertEqual(t.volume, "20:microliter")
def test_thermocycle_dyes_and_datarefs(self):
self.assertRaises(ValueError,
Thermocycle,
"plate",
[{"cycles": 1,
"steps": [{
"temperature": "50: celsius",
"duration": "20:minute"
}]
}],
dyes={"FAM": ["A1"]})
self.assertRaises(ValueError,
Thermocycle,
"plate",
[{"cycles": 1,
"steps": [{
"temperature": "50: celsius",
"duration": "20:minute"
}]
}],
dataref="test_dataref")
self.assertRaises(ValueError,
Thermocycle,
"plate",
[{"cycles": 1,
"steps": [{
"temperature": "50: celsius",
"duration": "20:minute"
}]
}],
dyes={"ThisDyeIsInvalid": ["A1"]})
def test_thermocycle_melting(self):
self.assertRaises(ValueError,
Thermocycle,
"plate",
[{"cycles": 1,
"steps": [{
"temperature": "50: celsius",
"duration": "20:minute"
}]
}],
melting_start = "50:celsius")
self.assertRaises(ValueError,
Thermocycle,
"plate",
[{"cycles": 1,
"steps": [{
"temperature": "50: celsius",
"duration": "20:minute"
}]
}],
melting_start = "50:celsius",
melting_end = "60:celsius",
melting_increment = "1:celsius",
melting_rate = "2:minute")
class DistributeTestCase(unittest.TestCase):
def test_distribute_one_well(self):
p = Protocol()
c = p.ref("test", None, "96-flat", discard=True)
p.distribute(c.well(0).set_volume("20:microliter"),
c.well(1),
"5:microliter")
self.assertEqual(1, len(p.instructions))
self.assertEqual("distribute",
list(p.as_dict()["instructions"][0]["groups"][0].keys())[0])
        self.assertEqual(5, c.well(1).volume.value)
        self.assertEqual(15, c.well(0).volume.value)
def test_distribute_multiple_wells(self):
p = Protocol()
c = p.ref("test", None, "96-flat", discard=True)
p.distribute(c.well(0).set_volume("20:microliter"),
c.wells_from(1, 3),
"5:microliter")
self.assertEqual(1, len(p.instructions))
self.assertEqual("distribute",
list(p.as_dict()["instructions"][0]["groups"][0].keys())[0])
for w in c.wells_from(1, 3):
            self.assertEqual(5, w.volume.value)
        self.assertEqual(5, c.well(0).volume.value)
def test_fill_wells(self):
p = Protocol()
c = p.ref("test", None, "96-flat", discard=True)
srcs = c.wells_from(1, 2).set_volume("100:microliter")
dests = c.wells_from(7, 4)
p.distribute(srcs, dests, "30:microliter", allow_carryover=True)
self.assertEqual(2, len(p.instructions[0].groups))
# track source vols
self.assertEqual(10, c.well(1).volume.value)
self.assertEqual(70, c.well(2).volume.value)
# track dest vols
self.assertEqual(30, c.well(7).volume.value)
self.assertIs(None, c.well(6).volume)
# test distribute from Well to Well
p.distribute(c.well("A1").set_volume("20:microliter"), c.well("A2"), "5:microliter")
self.assertTrue("distribute" in p.instructions[-1].groups[-1])
def test_unit_conversion(self):
p = Protocol()
c = p.ref("test", None, "96-flat", discard=True)
        with self.assertRaises(ValueError):
            p.distribute(c.well(0).set_volume("100:microliter"), c.well(1), ".0001:liter")
p.distribute(c.well(0).set_volume("100:microliter"), c.well(1), "200:nanoliter")
self.assertTrue(str(p.instructions[0].groups[0]["distribute"]["to"][0]["volume"]) == "0.2:microliter")
p.distribute(c.well(2).set_volume("100:microliter"), c.well(3), ".1:milliliter", new_group=True)
self.assertTrue(str(p.instructions[-1].groups[0]["distribute"]["to"][0]["volume"]) == "100.0:microliter")
class TransferTestCase(unittest.TestCase):
def test_single_transfer(self):
p = Protocol()
c = p.ref("test", None, "96-flat", discard=True)
p.transfer(c.well(0), c.well(1), "20:microliter")
self.assertEqual(Unit(20, "microliter"), c.well(1).volume)
self.assertEqual(None, c.well(0).volume)
self.assertTrue("transfer" in p.instructions[-1].groups[-1])
def test_gt_750uL_transfer(self):
p = Protocol()
c = p.ref("test", None, "96-deep", discard=True)
p.transfer(
c.well(0),
c.well(1),
"1800:microliter"
)
self.assertEqual(3, len(p.instructions[0].groups))
self.assertEqual(
Unit(750, 'microliter'),
p.instructions[0].groups[0]['transfer'][0]['volume']
)
self.assertEqual(
Unit(750, 'microliter'),
p.instructions[0].groups[1]['transfer'][0]['volume']
)
self.assertEqual(
Unit(300, 'microliter'),
p.instructions[0].groups[2]['transfer'][0]['volume']
)
def test_gt_750uL_wellgroup_transfer(self):
p = Protocol()
c = p.ref("test", None, "96-deep", discard=True)
p.transfer(
c.wells_from(0, 8, columnwise=True),
c.wells_from(1, 8, columnwise=True),
'1800:microliter'
)
self.assertEqual(
24,
len(p.instructions[0].groups)
)
def test_transfer_option_propagation(self):
p = Protocol()
c = p.ref("test", None, "96-deep", discard=True)
p.transfer(
c.well(0),
c.well(1),
"1800:microliter",
aspirate_source=aspirate_source(
depth("ll_bottom", distance=".004:meter")
)
)
self.assertEqual(
len(p.instructions[0].groups[0]['transfer'][0]),
len(p.instructions[0].groups[1]['transfer'][0])
)
self.assertEqual(
len(p.instructions[0].groups[0]['transfer'][0]),
len(p.instructions[0].groups[2]['transfer'][0])
)
def test_max_transfer(self):
p = Protocol()
c = p.ref("test", None, "micro-2.0", storage="cold_4")
p.transfer(c.well(0), c.well(0), "3050:microliter")
def test_multiple_transfers(self):
p = Protocol()
c = p.ref("test", None, "96-flat", discard=True)
p.transfer(c.wells_from(0, 2), c.wells_from(2, 2), "20:microliter")
self.assertEqual(c.well(2).volume, c.well(3).volume)
self.assertEqual(2, len(p.instructions[0].groups))
def test_one_tip(self):
p = Protocol()
c = p.ref("test", None, "96-flat", discard=True)
p.transfer(c.wells_from(0, 2), c.wells_from(2, 2), "20:microliter",
one_tip=True)
self.assertEqual(c.well(2).volume, c.well(3).volume)
self.assertEqual(1, len(p.instructions[0].groups))
def test_one_source(self):
p = Protocol()
c = p.ref("test", None, "96-flat", discard=True)
with self.assertRaises(RuntimeError):
p.transfer(c.wells_from(0, 2),
c.wells_from(2, 2), "40:microliter", one_source=True)
with self.assertRaises(RuntimeError):
p.transfer(c.wells_from(0, 2).set_volume("1:microliter"),
c.wells_from(1, 5), "10:microliter", one_source=True)
p.transfer(c.wells_from(0, 2).set_volume("50:microliter"),
c.wells_from(2, 2), "40:microliter", one_source=True)
self.assertEqual(2, len(p.instructions[0].groups))
self.assertFalse(p.instructions[0].groups[0]["transfer"][0]["from"] == p.instructions[0].groups[1]["transfer"][0]["from"])
p.transfer(c.wells_from(0, 2).set_volume("100:microliter"),
c.wells_from(2, 4), "40:microliter", one_source=True)
self.assertEqual(7, len(p.instructions[0].groups))
self.assertTrue(p.instructions[0].groups[2]["transfer"][0]["from"] == p.instructions[0].groups[4]["transfer"][0]["from"])
self.assertTrue(p.instructions[0].groups[4]["transfer"][0]["volume"] == Unit.fromstring("20:microliter"))
p.transfer(c.wells_from(0, 2).set_volume("100:microliter"),
c.wells_from(2, 4), ["20:microliter", "40:microliter", "60:microliter", "80:microliter"], one_source=True)
self.assertEqual(12, len(p.instructions[0].groups))
self.assertTrue(p.instructions[0].groups[7]["transfer"][0]["from"] == p.instructions[0].groups[9]["transfer"][0]["from"])
self.assertFalse(p.instructions[0].groups[9]["transfer"][0]["from"] == p.instructions[0].groups[10]["transfer"][0]["from"])
self.assertEqual(Unit.fromstring("20:microliter"), p.instructions[0].groups[10]["transfer"][0]["volume"])
p.transfer(c.wells_from(0, 2).set_volume("50:microliter"), c.wells(2), "100:microliter", one_source=True)
c.well(0).set_volume("50:microliter")
c.well(1).set_volume("200:microliter")
p.transfer(c.wells_from(0, 2), c.well(1), "100:microliter", one_source=True)
self.assertFalse(p.instructions[0].groups[14]["transfer"][0]["from"] == p.instructions[0].groups[15]["transfer"][0]["from"])
c.well(0).set_volume("100:microliter")
c.well(1).set_volume("0:microliter")
c.well(2).set_volume("100:microliter")
p.transfer(c.wells_from(0, 3), c.wells_from(3, 2), "100:microliter", one_source=True)
def test_unit_conversion(self):
p = Protocol()
c = p.ref("test", None, "96-flat", discard=True)
with self.assertRaises(ValueError):
p.transfer(c.well(0), c.well(1), "1:liter")
p.transfer(c.well(0), c.well(1), "200:nanoliter")
self.assertTrue(str(p.instructions[0].groups[0]['transfer'][0]['volume']) == "0.2:microliter")
p.transfer(c.well(1), c.well(2), ".5:milliliter", new_group=True)
self.assertTrue(str(p.instructions[-1].groups[0]['transfer'][0]['volume']) == "500.0:microliter")
def test_mix_before_and_after(self):
p = Protocol()
c = p.ref("test", None, "96-flat", discard=True)
with self.assertRaises(RuntimeError):
p.transfer(c.well(0), c.well(1), "10:microliter", mix_vol= "15:microliter")
p.transfer(c.well(0), c.well(1), "10:microliter", repetitions_a=21)
p.transfer(c.well(0), c.well(1), "10:microliter", repetitions=21)
p.transfer(c.well(0), c.well(1), "10:microliter", repetitions_b=21)
p.transfer(c.well(0), c.well(1), "10:microliter", flowrate_a="200:microliter/second")
p.transfer(c.well(0), c.well(1), "12:microliter", mix_after=True,
mix_vol="10:microliter", repetitions_a=20)
self.assertTrue(int(p.instructions[-1].groups[0]['transfer'][0]['mix_after']['repetitions']) == 20)
p.transfer(c.well(0), c.well(1), "12:microliter", mix_after=True,
mix_vol="10:microliter", repetitions_b=20)
self.assertTrue(int(p.instructions[-1].groups[-1]['transfer'][0]['mix_after']['repetitions']) == 10)
p.transfer(c.well(0), c.well(1), "12:microliter", mix_after=True)
self.assertTrue(int(p.instructions[-1].groups[-1]['transfer'][0]['mix_after']['repetitions']) == 10)
self.assertTrue(str(p.instructions[-1].groups[-1]['transfer'][0]['mix_after']['speed']) == "100:microliter/second")
self.assertTrue(str(p.instructions[-1].groups[-1]['transfer'][0]['mix_after']['volume']) == "6.0:microliter")
p.transfer(c.well(0), c.well(1), "12:microliter", mix_before=True,
mix_vol="10:microliter", repetitions_b=20)
self.assertTrue(int(p.instructions[-1].groups[-1]['transfer'][-1]['mix_before']['repetitions']) == 20)
p.transfer(c.well(0), c.well(1), "12:microliter", mix_before=True,
mix_vol="10:microliter", repetitions_a=20)
self.assertTrue(int(p.instructions[-1].groups[-1]['transfer'][-1]['mix_before']['repetitions']) == 10)
p.transfer(c.well(0), c.well(1), "12:microliter", mix_before=True)
self.assertTrue(int(p.instructions[-1].groups[-1]['transfer'][-1]['mix_before']['repetitions']) == 10)
self.assertTrue(str(p.instructions[-1].groups[-1]['transfer'][-1]['mix_before']['speed']) == "100:microliter/second")
self.assertTrue(str(p.instructions[-1].groups[-1]['transfer'][-1]['mix_before']['volume']) == "6.0:microliter")
class ConsolidateTestCase(unittest.TestCase):
def test_multiple_sources(self):
p = Protocol()
c = p.ref("test", None, "96-flat", discard=True)
with self.assertRaises(TypeError):
p.consolidate(c.wells_from(0, 3), c.wells_from(2, 3), "10:microliter")
with self.assertRaises(ValueError):
p.consolidate(c.wells_from(0, 3), c.well(4), ["10:microliter"])
p.consolidate(c.wells_from(0, 3), c.well(4), "10:microliter")
self.assertEqual(Unit(30, "microliter"), c.well(4).volume)
self.assertEqual(3, len(p.instructions[0].groups[0]["consolidate"]["from"]))
def test_one_source(self):
p = Protocol()
c = p.ref("test", None, "96-flat", discard=True)
p.consolidate(c.well(0), c.well(4), "30:microliter")
self.assertEqual(Unit(30, "microliter"), c.well(4).volume)
class StampTestCase(unittest.TestCase):
#TODO: Implement check for volume accounting
def test_single_transfers(self):
p = Protocol()
plate_1_6 = p.ref("plate_1_6", None, "6-flat", discard=True)
plate_1_96 = p.ref("plate_1_96", None, "96-flat", discard=True)
plate_2_96 = p.ref("plate_2_96", None, "96-flat", discard=True)
plate_1_384 = p.ref("plate_1_384", None, "384-flat", discard=True)
plate_2_384 = p.ref("plate_2_384", None, "384-flat", discard=True)
p.stamp(plate_1_96.well("G1"), plate_2_96.well("H1"),
"10:microliter", dict(rows=1, columns=12))
p.stamp(plate_1_96.well("A1"), plate_1_384.well("A2"),
"10:microliter", dict(rows=8, columns=2))
# Verify full plate to full plate transfer works for 96, 384 container input
p.stamp(plate_1_96, plate_2_96, "10:microliter")
p.stamp(plate_1_384, plate_2_384, "10:microliter")
with self.assertRaises(TypeError):
p.stamp(plate_1_96, plate_1_384, "10:microliter")
p.stamp(plate_1_384, plate_1_96, "10:microliter")
with self.assertRaises(ValueError):
p.stamp(plate_1_96.well("A1"), plate_2_96.well("A2"),
"10:microliter", dict(rows=9, columns=1))
p.stamp(plate_1_96.well("A1"), plate_2_96.well("B1"),
"10:microliter", dict(rows=1, columns=13))
p.stamp(plate_1_384.well("A1"), plate_2_384.well("A2"),
"10:microliter", dict(rows=9, columns=1))
p.stamp(plate_1_384.well("A1"), plate_2_384.well("B1"),
"10:microliter", dict(rows=1, columns=13))
p.stamp(plate_1_96.well("A1"), plate_2_96.well("B1"),
"10:microliter", dict(rows=1, columns=12))
def test_multiple_transfers(self):
p = Protocol()
# Verify instruction list length
plate_1_96 = p.ref("plate_1_96", None, "96-flat", discard=True)
plate_2_96 = p.ref("plate_2_96", None, "96-flat", discard=True)
plate_3_96 = p.ref("plate_3_96", None, "96-flat", discard=True)
plate_4_96 = p.ref("plate_4_96", None, "96-flat", discard=True)
p.stamp(plate_1_96.well("A1"), plate_2_96.well("A1"),
"10:microliter")
p.stamp(plate_1_96.well("A1"), plate_2_96.well("A1"),
"10:microliter")
p.stamp(plate_1_96.well("A1"), plate_2_96.well("A1"),
"10:microliter")
self.assertEqual(3, len(p.instructions[0].transfers))
p.stamp(plate_1_96.well("A1"), plate_2_96.well("A1"),
"10:microliter")
self.assertEqual(len(p.instructions), 2)
self.assertEqual(1, len(p.instructions[1].transfers))
# Test multiple plates
p.stamp(plate_1_96.well("A1"), plate_3_96.well("A1"),
"10:microliter")
p.stamp(plate_4_96.well("A1"), plate_3_96.well("A1"),
"10:microliter")
self.assertEqual(2, len(p.instructions[1].transfers))
self.assertEqual(1, len(p.instructions[2].transfers))
self.assertEqual(len(p.instructions), 3)
# Ensure full plates are chunked correctly
p.stamp(plate_1_96.well("G1"), plate_2_96.well("H1"),
"10:microliter", dict(rows=1, columns=12))
self.assertEqual(len(p.instructions), 4)
p.stamp(plate_1_96.well("G1"), plate_2_96.well("H1"),
"10:microliter", dict(rows=2, columns=12))
self.assertEqual(len(p.instructions), 4)
self.assertEqual(len(p.instructions[3].transfers), 2)
p.stamp(plate_1_96.well("A1"), plate_2_96.well("A1"),
"10:microliter", dict(rows=8, columns=2))
p.stamp(plate_1_96.well("A1"), plate_2_96.well("A12"),
"10:microliter", dict(rows=8, columns=1))
self.assertEqual(len(p.instructions), 5)
self.assertEqual(len(p.instructions[3].transfers), 2)
# Check on max transfer limit - Full plate
p.stamp(plate_1_96.well("A1"), plate_2_96.well("A1"),
"10:microliter", dict(rows=8, columns=12))
p.stamp(plate_1_96.well("A1"), plate_2_96.well("A1"),
"10:microliter", dict(rows=8, columns=12))
p.stamp(plate_1_96.well("A1"), plate_2_96.well("A1"),
"10:microliter", dict(rows=8, columns=12))
self.assertEqual(len(p.instructions), 6)
p.stamp(plate_1_96.well("A1"), plate_2_96.well("A1"),
"10:microliter", dict(rows=8, columns=12))
self.assertEqual(len(p.instructions), 7)
self.assertEqual(len(p.instructions[5].transfers), 3)
self.assertEqual(len(p.instructions[6].transfers), 1)
# Check on max transfer limit - Row-wise
p.stamp(plate_1_96.well("A1"), plate_2_96.well("A1"),
"10:microliter", dict(rows=3, columns=12))
p.stamp(plate_1_96.well("A1"), plate_2_96.well("A1"),
"10:microliter", dict(rows=1, columns=12))
p.stamp(plate_1_96.well("A1"), plate_2_96.well("A1"),
"10:microliter", dict(rows=2, columns=12))
p.stamp(plate_1_96.well("A1"), plate_2_96.well("A1"),
"10:microliter", dict(rows=2, columns=12))
self.assertEqual(len(p.instructions), 8)
for i in range(8):
p.stamp(plate_1_96.well("A1"), plate_2_96.well("A1"),
"10:microliter", dict(rows=1, columns=12))
self.assertEqual(len(p.instructions), 9)
self.assertEqual(len(p.instructions[7].transfers), 4)
self.assertEqual(len(p.instructions[8].transfers), 8)
p.stamp(plate_1_96.well("A1"), plate_2_96.well("A1"),
"10:microliter", dict(rows=1, columns=12))
self.assertEqual(len(p.instructions), 10)
# Check on max transfer limit - Col-wise
p.stamp(plate_1_96.well("A1"), plate_2_96.well("A1"),
"10:microliter", dict(rows=8, columns=4))
p.stamp(plate_1_96.well("A1"), plate_2_96.well("A1"),
"10:microliter", dict(rows=8, columns=6))
p.stamp(plate_1_96.well("A1"), plate_2_96.well("A1"),
"10:microliter", dict(rows=8, columns=2))
self.assertEqual(len(p.instructions), 11)
for i in range(12):
p.stamp(plate_1_96.well("A1"), plate_2_96.well("A1"),
"10:microliter", dict(rows=8, columns=1))
self.assertEqual(len(p.instructions), 12)
self.assertEqual(len(p.instructions[10].transfers), 3)
self.assertEqual(len(p.instructions[11].transfers), 12)
p.stamp(plate_1_96.well("A1"), plate_2_96.well("A1"),
"10:microliter", dict(rows=8, columns=1))
self.assertEqual(len(p.instructions), 13)
class RefifyTestCase(unittest.TestCase):
def test_refifying_various(self):
p = Protocol()
# refify container
refs = {"plate": p.ref("test", None, "96-flat", "cold_20")}
self.assertEqual(p._refify(refs["plate"]), "test")
# refify dict
self.assertEqual(p._refify(refs), {"plate": "test"})
# refify Well
well = refs["plate"].well("A1")
self.assertEqual(p._refify(well), "test/0")
# refify WellGroup
wellgroup = refs["plate"].wells_from("A2", 3)
self.assertEqual(p._refify(wellgroup), ["test/1", "test/2", "test/3"])
# refify other
s = "randomstring"
i = 24
self.assertEqual("randomstring", p._refify(s))
self.assertEqual(24, p._refify(i))
class OutsTestCase(unittest.TestCase):
def test_outs(self):
p = Protocol()
        self.assertNotIn('outs', p.as_dict())
plate = p.ref("plate", None, "96-pcr", discard=True)
plate.well(0).set_name("test_well")
        self.assertEqual(plate.well(0).name, "test_well")
        self.assertEqual(list(p.as_dict()['outs'].keys()), ['plate'])
        self.assertEqual(list(list(p.as_dict()['outs'].values())[0].keys()), ['0'])
        self.assertEqual(list(p.as_dict()['outs'].values())[0]['0'], {'name': 'test_well'})
|
"""
Calculate BOLD confounds
^^^^^^^^^^^^^^^^^^^^^^^^
.. autofunction:: init_bold_confs_wf
.. autofunction:: init_ica_aroma_wf
"""
from os import getenv
from nipype.algorithms import confounds as nac
from nipype.interfaces import utility as niu, fsl
from nipype.pipeline import engine as pe
from templateflow.api import get as get_template
from ...config import DEFAULT_MEMORY_MIN_GB
from ...interfaces import (
GatherConfounds, ICAConfounds, FMRISummary, DerivativesDataSink
)
def init_bold_confs_wf(
mem_gb,
metadata,
regressors_all_comps,
regressors_dvars_th,
regressors_fd_th,
freesurfer=False,
name="bold_confs_wf",
):
"""
Build a workflow to generate and write out confounding signals.
This workflow calculates confounds for a BOLD series, and aggregates them
into a :abbr:`TSV (tab-separated value)` file, for use as nuisance
regressors in a :abbr:`GLM (general linear model)`.
The following confounds are calculated, with column headings in parentheses:
#. Region-wise average signal (``csf``, ``white_matter``, ``global_signal``)
#. DVARS - original and standardized variants (``dvars``, ``std_dvars``)
#. Framewise displacement, based on head-motion parameters
(``framewise_displacement``)
#. Temporal CompCor (``t_comp_cor_XX``)
#. Anatomical CompCor (``a_comp_cor_XX``)
    #. Cosine basis set for high-pass filtering with a 0.008 Hz cut-off
(``cosine_XX``)
#. Non-steady-state volumes (``non_steady_state_XX``)
#. Estimated head-motion parameters, in mm and rad
(``trans_x``, ``trans_y``, ``trans_z``, ``rot_x``, ``rot_y``, ``rot_z``)
Prior to estimating aCompCor and tCompCor, non-steady-state volumes are
censored and high-pass filtered using a :abbr:`DCT (discrete cosine
transform)` basis.
The cosine basis, as well as one regressor per censored volume, are included
for convenience.
Workflow Graph
.. workflow::
:graph2use: orig
:simple_form: yes
from fmriprep.workflows.bold.confounds import init_bold_confs_wf
wf = init_bold_confs_wf(
mem_gb=1,
metadata={},
regressors_all_comps=False,
regressors_dvars_th=1.5,
regressors_fd_th=0.5,
)
Parameters
----------
mem_gb : :obj:`float`
Size of BOLD file in GB - please note that this size
should be calculated after resamplings that may extend
the FoV
metadata : :obj:`dict`
BIDS metadata for BOLD file
name : :obj:`str`
Name of workflow (default: ``bold_confs_wf``)
regressors_all_comps : :obj:`bool`
Indicates whether CompCor decompositions should return all
components instead of the minimal number of components necessary
to explain 50 percent of the variance in the decomposition mask.
regressors_dvars_th : :obj:`float`
Criterion for flagging DVARS outliers
    regressors_fd_th : :obj:`float`
        Criterion for flagging framewise displacement outliers
    freesurfer : :obj:`bool`
        True if FreeSurfer derivatives were used
Inputs
------
bold
BOLD image, after the prescribed corrections (STC, HMC and SDC)
when available.
bold_mask
BOLD series mask
movpar_file
SPM-formatted motion parameters file
rmsd_file
Framewise displacement as measured by ``fsl_motion_outliers``.
skip_vols
number of non steady state volumes
t1w_mask
Mask of the skull-stripped template image
t1w_tpms
List of tissue probability maps in T1w space
t1_bold_xform
Affine matrix that maps the T1w space into alignment with
the native BOLD space
Outputs
-------
confounds_file
TSV of all aggregated confounds
rois_report
Reportlet visualizing white-matter/CSF mask used for aCompCor,
the ROI for tCompCor and the BOLD brain mask.
confounds_metadata
Confounds metadata dictionary.
"""
from niworkflows.engine.workflows import LiterateWorkflow as Workflow
from niworkflows.interfaces.confounds import ExpandModel, SpikeRegressors
from niworkflows.interfaces.fixes import FixHeaderApplyTransforms as ApplyTransforms
from niworkflows.interfaces.images import SignalExtraction
from niworkflows.interfaces.masks import ROIsPlot
from niworkflows.interfaces.nibabel import ApplyMask, Binarize
from niworkflows.interfaces.patches import (
RobustACompCor as ACompCor,
RobustTCompCor as TCompCor,
)
from niworkflows.interfaces.plotting import (
CompCorVariancePlot, ConfoundsCorrelationPlot
)
from niworkflows.interfaces.utils import (
AddTSVHeader, TSV2JSON, DictMerge
)
from ...interfaces.confounds import aCompCorMasks
gm_desc = (
"dilating a GM mask extracted from the FreeSurfer's *aseg* segmentation" if freesurfer
else "thresholding the corresponding partial volume map at 0.05"
)
workflow = Workflow(name=name)
workflow.__desc__ = f"""\
Several confounding time-series were calculated based on the
*preprocessed BOLD*: framewise displacement (FD), DVARS and
three region-wise global signals.
FD was computed using two formulations following Power (absolute sum of
relative motions, @power_fd_dvars) and Jenkinson (relative root mean square
displacement between affines, @mcflirt).
FD and DVARS are calculated for each functional run, both using their
implementations in *Nipype* [following the definitions by @power_fd_dvars].
The three global signals are extracted within the CSF, the WM, and
the whole-brain masks.
Additionally, a set of physiological regressors was extracted to
allow for component-based noise correction [*CompCor*, @compcor].
Principal components are estimated after high-pass filtering the
*preprocessed BOLD* time-series (using a discrete cosine filter with
128s cut-off) for the two *CompCor* variants: temporal (tCompCor)
and anatomical (aCompCor).
tCompCor components are then calculated from the top 2% most variable
voxels within the brain mask.
For aCompCor, three probabilistic masks (CSF, WM and combined CSF+WM)
are generated in anatomical space.
The implementation differs from that of Behzadi et al. in that, instead
of eroding the masks by 2 pixels in BOLD space, a mask of pixels that
likely contain a volume fraction of GM is subtracted from the aCompCor masks.
This mask is obtained by {gm_desc}, and it ensures components are not extracted
from voxels containing a minimal fraction of GM.
Finally, these masks are resampled into BOLD space and binarized by
thresholding at 0.99 (as in the original implementation).
Components are also calculated separately within the WM and CSF masks.
For each CompCor decomposition, the *k* components with the largest singular
values are retained, such that the retained components' time series are
sufficient to explain 50 percent of variance across the nuisance mask (CSF,
WM, combined, or temporal). The remaining components are dropped from
consideration.
The head-motion estimates calculated in the correction step were also
placed within the corresponding confounds file.
The confound time series derived from head motion estimates and global
signals were expanded with the inclusion of temporal derivatives and
quadratic terms for each [@confounds_satterthwaite_2013].
Frames that exceeded a threshold of {regressors_fd_th} mm FD or
{regressors_dvars_th} standardised DVARS were annotated as motion outliers.
"""
inputnode = pe.Node(niu.IdentityInterface(
fields=['bold', 'bold_mask', 'movpar_file', 'rmsd_file',
'skip_vols', 't1w_mask', 't1w_tpms', 't1_bold_xform']),
name='inputnode')
outputnode = pe.Node(niu.IdentityInterface(
fields=['confounds_file', 'confounds_metadata', 'acompcor_masks', 'tcompcor_mask']),
name='outputnode')
# DVARS
dvars = pe.Node(nac.ComputeDVARS(save_nstd=True, save_std=True, remove_zerovariance=True),
name="dvars", mem_gb=mem_gb)
# Frame displacement
fdisp = pe.Node(nac.FramewiseDisplacement(parameter_source="SPM"),
name="fdisp", mem_gb=mem_gb)
# Generate aCompCor probseg maps
acc_masks = pe.Node(aCompCorMasks(is_aseg=freesurfer), name="acc_masks")
# Resample probseg maps in BOLD space via T1w-to-BOLD transform
acc_msk_tfm = pe.MapNode(ApplyTransforms(
interpolation='Gaussian', float=False), iterfield=["input_image"],
name='acc_msk_tfm', mem_gb=0.1)
acc_msk_brain = pe.MapNode(ApplyMask(), name="acc_msk_brain",
iterfield=["in_file"])
acc_msk_bin = pe.MapNode(Binarize(thresh_low=0.99), name='acc_msk_bin',
iterfield=["in_file"])
acompcor = pe.Node(
ACompCor(components_file='acompcor.tsv', header_prefix='a_comp_cor_', pre_filter='cosine',
save_pre_filter=True, save_metadata=True, mask_names=['CSF', 'WM', 'combined'],
merge_method='none', failure_mode='NaN'),
name="acompcor", mem_gb=mem_gb)
tcompcor = pe.Node(
TCompCor(components_file='tcompcor.tsv', header_prefix='t_comp_cor_', pre_filter='cosine',
save_pre_filter=True, save_metadata=True, percentile_threshold=.02,
failure_mode='NaN'),
name="tcompcor", mem_gb=mem_gb)
# Set number of components
if regressors_all_comps:
acompcor.inputs.num_components = 'all'
tcompcor.inputs.num_components = 'all'
else:
acompcor.inputs.variance_threshold = 0.5
tcompcor.inputs.variance_threshold = 0.5
# Set TR if present
if 'RepetitionTime' in metadata:
tcompcor.inputs.repetition_time = metadata['RepetitionTime']
acompcor.inputs.repetition_time = metadata['RepetitionTime']
# Global and segment regressors
signals_class_labels = [
"global_signal", "csf", "white_matter", "csf_wm", "tcompcor",
]
merge_rois = pe.Node(niu.Merge(3, ravel_inputs=True), name='merge_rois',
run_without_submitting=True)
signals = pe.Node(SignalExtraction(class_labels=signals_class_labels),
name="signals", mem_gb=mem_gb)
# Arrange confounds
add_dvars_header = pe.Node(
AddTSVHeader(columns=["dvars"]),
name="add_dvars_header", mem_gb=0.01, run_without_submitting=True)
add_std_dvars_header = pe.Node(
AddTSVHeader(columns=["std_dvars"]),
name="add_std_dvars_header", mem_gb=0.01, run_without_submitting=True)
add_motion_headers = pe.Node(
AddTSVHeader(columns=["trans_x", "trans_y", "trans_z", "rot_x", "rot_y", "rot_z"]),
name="add_motion_headers", mem_gb=0.01, run_without_submitting=True)
add_rmsd_header = pe.Node(
AddTSVHeader(columns=["rmsd"]),
name="add_rmsd_header", mem_gb=0.01, run_without_submitting=True)
concat = pe.Node(GatherConfounds(), name="concat", mem_gb=0.01, run_without_submitting=True)
# CompCor metadata
tcc_metadata_fmt = pe.Node(
TSV2JSON(index_column='component', drop_columns=['mask'], output=None,
additional_metadata={'Method': 'tCompCor'}, enforce_case=True),
name='tcc_metadata_fmt')
acc_metadata_fmt = pe.Node(
TSV2JSON(index_column='component', output=None,
additional_metadata={'Method': 'aCompCor'}, enforce_case=True),
name='acc_metadata_fmt')
mrg_conf_metadata = pe.Node(niu.Merge(3), name='merge_confound_metadata',
run_without_submitting=True)
mrg_conf_metadata.inputs.in3 = {label: {'Method': 'Mean'}
for label in signals_class_labels}
mrg_conf_metadata2 = pe.Node(DictMerge(), name='merge_confound_metadata2',
run_without_submitting=True)
# Expand model to include derivatives and quadratics
model_expand = pe.Node(ExpandModel(
model_formula='(dd1(rps + wm + csf + gsr))^^2 + others'),
name='model_expansion')
# Add spike regressors
spike_regress = pe.Node(SpikeRegressors(
fd_thresh=regressors_fd_th,
dvars_thresh=regressors_dvars_th),
name='spike_regressors')
# Generate reportlet (ROIs)
mrg_compcor = pe.Node(niu.Merge(2, ravel_inputs=True),
name='mrg_compcor', run_without_submitting=True)
rois_plot = pe.Node(ROIsPlot(colors=['b', 'magenta'], generate_report=True),
name='rois_plot', mem_gb=mem_gb)
ds_report_bold_rois = pe.Node(
DerivativesDataSink(desc='rois', datatype="figures", dismiss_entities=("echo",)),
name='ds_report_bold_rois', run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
# Generate reportlet (CompCor)
mrg_cc_metadata = pe.Node(niu.Merge(2), name='merge_compcor_metadata',
run_without_submitting=True)
compcor_plot = pe.Node(
CompCorVariancePlot(variance_thresholds=(0.5, 0.7, 0.9),
metadata_sources=['tCompCor', 'aCompCor']),
name='compcor_plot')
ds_report_compcor = pe.Node(
DerivativesDataSink(desc='compcorvar', datatype="figures", dismiss_entities=("echo",)),
name='ds_report_compcor', run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
# Generate reportlet (Confound correlation)
conf_corr_plot = pe.Node(
ConfoundsCorrelationPlot(reference_column='global_signal', max_dim=20),
name='conf_corr_plot')
ds_report_conf_corr = pe.Node(
DerivativesDataSink(desc='confoundcorr', datatype="figures", dismiss_entities=("echo",)),
name='ds_report_conf_corr', run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
def _last(inlist):
return inlist[-1]
def _select_cols(table):
import pandas as pd
return [
col for col in pd.read_table(table, nrows=2).columns
if not col.startswith(("a_comp_cor_", "t_comp_cor_", "std_dvars"))
]
workflow.connect([
# connect inputnode to each non-anatomical confound node
(inputnode, dvars, [('bold', 'in_file'),
('bold_mask', 'in_mask')]),
(inputnode, fdisp, [('movpar_file', 'in_file')]),
# aCompCor
(inputnode, acompcor, [("bold", "realigned_file"),
("skip_vols", "ignore_initial_volumes")]),
(inputnode, acc_masks, [("t1w_tpms", "in_vfs"),
(("bold", _get_zooms), "bold_zooms")]),
(inputnode, acc_msk_tfm, [("t1_bold_xform", "transforms"),
("bold_mask", "reference_image")]),
(inputnode, acc_msk_brain, [("bold_mask", "in_mask")]),
(acc_masks, acc_msk_tfm, [("out_masks", "input_image")]),
(acc_msk_tfm, acc_msk_brain, [("output_image", "in_file")]),
(acc_msk_brain, acc_msk_bin, [("out_file", "in_file")]),
(acc_msk_bin, acompcor, [("out_file", "mask_files")]),
# tCompCor
(inputnode, tcompcor, [("bold", "realigned_file"),
("skip_vols", "ignore_initial_volumes"),
("bold_mask", "mask_files")]),
# Global signals extraction (constrained by anatomy)
(inputnode, signals, [('bold', 'in_file')]),
(inputnode, merge_rois, [('bold_mask', 'in1')]),
(acc_msk_bin, merge_rois, [('out_file', 'in2')]),
(tcompcor, merge_rois, [('high_variance_masks', 'in3')]),
(merge_rois, signals, [('out', 'label_files')]),
# Collate computed confounds together
(inputnode, add_motion_headers, [('movpar_file', 'in_file')]),
(inputnode, add_rmsd_header, [('rmsd_file', 'in_file')]),
(dvars, add_dvars_header, [('out_nstd', 'in_file')]),
(dvars, add_std_dvars_header, [('out_std', 'in_file')]),
(signals, concat, [('out_file', 'signals')]),
(fdisp, concat, [('out_file', 'fd')]),
(tcompcor, concat, [('components_file', 'tcompcor'),
('pre_filter_file', 'cos_basis')]),
(acompcor, concat, [('components_file', 'acompcor')]),
(add_motion_headers, concat, [('out_file', 'motion')]),
(add_rmsd_header, concat, [('out_file', 'rmsd')]),
(add_dvars_header, concat, [('out_file', 'dvars')]),
(add_std_dvars_header, concat, [('out_file', 'std_dvars')]),
# Confounds metadata
(tcompcor, tcc_metadata_fmt, [('metadata_file', 'in_file')]),
(acompcor, acc_metadata_fmt, [('metadata_file', 'in_file')]),
(tcc_metadata_fmt, mrg_conf_metadata, [('output', 'in1')]),
(acc_metadata_fmt, mrg_conf_metadata, [('output', 'in2')]),
(mrg_conf_metadata, mrg_conf_metadata2, [('out', 'in_dicts')]),
# Expand the model with derivatives, quadratics, and spikes
(concat, model_expand, [('confounds_file', 'confounds_file')]),
(model_expand, spike_regress, [('confounds_file', 'confounds_file')]),
# Set outputs
(spike_regress, outputnode, [('confounds_file', 'confounds_file')]),
(mrg_conf_metadata2, outputnode, [('out_dict', 'confounds_metadata')]),
(tcompcor, outputnode, [("high_variance_masks", "tcompcor_mask")]),
(acc_msk_bin, outputnode, [("out_file", "acompcor_masks")]),
(inputnode, rois_plot, [('bold', 'in_file'),
('bold_mask', 'in_mask')]),
(tcompcor, mrg_compcor, [('high_variance_masks', 'in1')]),
(acc_msk_bin, mrg_compcor, [(('out_file', _last), 'in2')]),
(mrg_compcor, rois_plot, [('out', 'in_rois')]),
(rois_plot, ds_report_bold_rois, [('out_report', 'in_file')]),
(tcompcor, mrg_cc_metadata, [('metadata_file', 'in1')]),
(acompcor, mrg_cc_metadata, [('metadata_file', 'in2')]),
(mrg_cc_metadata, compcor_plot, [('out', 'metadata_files')]),
(compcor_plot, ds_report_compcor, [('out_file', 'in_file')]),
(concat, conf_corr_plot, [('confounds_file', 'confounds_file'),
(('confounds_file', _select_cols), 'columns')]),
(conf_corr_plot, ds_report_conf_corr, [('out_file', 'in_file')]),
])
return workflow
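# A minimal wiring sketch for the workflow above (illustrative only: the file
# paths are hypothetical, and in fmriprep these inputs normally arrive via
# upstream workflow connections rather than direct assignment):
#
#     wf = init_bold_confs_wf(
#         mem_gb=1.0,
#         metadata={"RepetitionTime": 2.0},
#         regressors_all_comps=False,
#         regressors_dvars_th=1.5,
#         regressors_fd_th=0.5,
#     )
#     wf.inputs.inputnode.bold = "sub-01_task-rest_bold.nii.gz"
#     wf.inputs.inputnode.bold_mask = "sub-01_task-rest_bold_mask.nii.gz"
#     wf.run()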
def init_carpetplot_wf(mem_gb, metadata, cifti_output, name="bold_carpet_wf"):
"""
Build a workflow to generate *carpet* plots.
Resamples the MNI parcellation (ad-hoc parcellation derived from the
Harvard-Oxford template and others).
Parameters
----------
mem_gb : :obj:`float`
Size of BOLD file in GB - please note that this size
should be calculated after resamplings that may extend
the FoV
metadata : :obj:`dict`
BIDS metadata for BOLD file
    cifti_output : :obj:`bool`
        Whether the ``cifti_bold`` input is used in place of the volumetric
        BOLD series (skips the parcellation resampling branch)
    name : :obj:`str`
        Name of workflow (default: ``bold_carpet_wf``)
Inputs
------
bold
BOLD image, after the prescribed corrections (STC, HMC and SDC)
when available.
bold_mask
BOLD series mask
confounds_file
TSV of all aggregated confounds
t1_bold_xform
Affine matrix that maps the T1w space into alignment with
the native BOLD space
std2anat_xfm
ANTs-compatible affine-and-warp transform file
cifti_bold
BOLD image in CIFTI format, to be used in place of volumetric BOLD
Outputs
-------
out_carpetplot
Path of the generated SVG file
"""
from niworkflows.engine.workflows import LiterateWorkflow as Workflow
from niworkflows.interfaces.fixes import FixHeaderApplyTransforms as ApplyTransforms
inputnode = pe.Node(niu.IdentityInterface(
fields=['bold', 'bold_mask', 'confounds_file',
't1_bold_xform', 'std2anat_xfm', 'cifti_bold']),
name='inputnode')
outputnode = pe.Node(niu.IdentityInterface(
fields=['out_carpetplot']), name='outputnode')
# List transforms
mrg_xfms = pe.Node(niu.Merge(2), name='mrg_xfms')
# Warp segmentation into EPI space
resample_parc = pe.Node(ApplyTransforms(
dimension=3,
input_image=str(get_template(
'MNI152NLin2009cAsym', resolution=1, desc='carpet',
suffix='dseg', extension=['.nii', '.nii.gz'])),
interpolation='MultiLabel'),
name='resample_parc')
# Carpetplot and confounds plot
conf_plot = pe.Node(FMRISummary(
tr=metadata['RepetitionTime'],
confounds_list=[
('global_signal', None, 'GS'),
('csf', None, 'GSCSF'),
('white_matter', None, 'GSWM'),
('std_dvars', None, 'DVARS'),
('framewise_displacement', 'mm', 'FD')]),
name='conf_plot', mem_gb=mem_gb)
ds_report_bold_conf = pe.Node(
DerivativesDataSink(desc='carpetplot', datatype="figures", extension="svg",
dismiss_entities=("echo",)),
name='ds_report_bold_conf', run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
workflow = Workflow(name=name)
# no need for segmentations if using CIFTI
if cifti_output:
workflow.connect(inputnode, 'cifti_bold', conf_plot, 'in_func')
else:
workflow.connect([
(inputnode, mrg_xfms, [('t1_bold_xform', 'in1'),
('std2anat_xfm', 'in2')]),
(inputnode, resample_parc, [('bold_mask', 'reference_image')]),
(mrg_xfms, resample_parc, [('out', 'transforms')]),
# Carpetplot
(inputnode, conf_plot, [
('bold', 'in_func'),
('bold_mask', 'in_mask')]),
(resample_parc, conf_plot, [('output_image', 'in_segm')])
])
workflow.connect([
(inputnode, conf_plot, [('confounds_file', 'confounds_file')]),
(conf_plot, ds_report_bold_conf, [('out_file', 'in_file')]),
(conf_plot, outputnode, [('out_file', 'out_carpetplot')]),
])
return workflow
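# Usage sketch (assumed values). With ``cifti_output=True`` the parcellation
# resampling branch is skipped and the ``cifti_bold`` input feeds the plot
# directly:
#
#     wf = init_carpetplot_wf(
#         mem_gb=1.0,
#         metadata={"RepetitionTime": 2.0},
#         cifti_output=False,
#     )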
def init_ica_aroma_wf(
mem_gb,
metadata,
omp_nthreads,
aroma_melodic_dim=-200,
err_on_aroma_warn=False,
name='ica_aroma_wf',
susan_fwhm=6.0,
):
"""
Build a workflow that runs `ICA-AROMA`_.
This workflow wraps `ICA-AROMA`_ to identify and remove motion-related
independent components from a BOLD time series.
The following steps are performed:
#. Remove non-steady state volumes from the bold series.
#. Smooth data using FSL `susan`, with a kernel width FWHM=6.0mm.
#. Run FSL `melodic` outside of ICA-AROMA to generate the report
#. Run ICA-AROMA
#. Aggregate identified motion components (aggressive) to TSV
    #. Return ``classified_motion_ICs`` and ``melodic_mix`` for the user to complete
non-aggressive denoising in T1w space
#. Calculate ICA-AROMA-identified noise components
(columns named ``AROMAAggrCompXX``)
Additionally, non-aggressive denoising is performed on the BOLD series
resampled into MNI space.
There is a current discussion on whether other confounds should be extracted
before or after denoising `here
<http://nbviewer.jupyter.org/github/nipreps/fmriprep-notebooks/blob/922e436429b879271fa13e76767a6e73443e74d9/issue-817_aroma_confounds.ipynb>`__.
.. _ICA-AROMA: https://github.com/maartenmennes/ICA-AROMA
Workflow Graph
.. workflow::
:graph2use: orig
:simple_form: yes
from fmriprep.workflows.bold.confounds import init_ica_aroma_wf
wf = init_ica_aroma_wf(
mem_gb=3,
metadata={'RepetitionTime': 1.0},
omp_nthreads=1)
Parameters
----------
metadata : :obj:`dict`
BIDS metadata for BOLD file
mem_gb : :obj:`float`
Size of BOLD file in GB
omp_nthreads : :obj:`int`
Maximum number of threads an individual process may use
name : :obj:`str`
        Name of workflow (default: ``ica_aroma_wf``)
susan_fwhm : :obj:`float`
Kernel width (FWHM in mm) for the smoothing step with
FSL ``susan`` (default: 6.0mm)
    err_on_aroma_warn : :obj:`bool`
        Whether to raise an error (instead of merely warning) when ICA-AROMA fails
aroma_melodic_dim : :obj:`int`
Set the dimensionality of the MELODIC ICA decomposition.
Negative numbers set a maximum on automatic dimensionality estimation.
Positive numbers set an exact number of components to extract.
(default: -200, i.e., estimate <=200 components)
Inputs
------
itk_bold_to_t1
Affine transform from ``ref_bold_brain`` to T1 space (ITK format)
anat2std_xfm
ANTs-compatible affine-and-warp transform file
name_source
BOLD series NIfTI file
Used to recover original information lost during processing
skip_vols
number of non steady state volumes
bold_split
Individual 3D BOLD volumes, not motion corrected
bold_mask
BOLD series mask in template space
hmc_xforms
List of affine transforms aligning each volume to ``ref_image`` in ITK format
movpar_file
SPM-formatted motion parameters file
Outputs
-------
aroma_confounds
TSV of confounds identified as noise by ICA-AROMA
aroma_noise_ics
CSV of noise components identified by ICA-AROMA
melodic_mix
FSL MELODIC mixing matrix
nonaggr_denoised_file
BOLD series with non-aggressive ICA-AROMA denoising applied
"""
from niworkflows.engine.workflows import LiterateWorkflow as Workflow
from niworkflows.interfaces.segmentation import ICA_AROMARPT
from niworkflows.interfaces.utility import KeySelect
from niworkflows.interfaces.utils import TSV2JSON
workflow = Workflow(name=name)
workflow.__postdesc__ = """\
Automatic removal of motion artifacts using independent component analysis
[ICA-AROMA, @aroma] was performed on the *preprocessed BOLD on MNI space*
time-series after removal of non-steady state volumes and spatial smoothing
with an isotropic, Gaussian kernel of 6mm FWHM (full-width half-maximum).
Corresponding "non-aggresively" denoised runs were produced after such
smoothing.
Additionally, the "aggressive" noise-regressors were collected and placed
in the corresponding confounds file.
"""
inputnode = pe.Node(niu.IdentityInterface(
fields=[
'bold_std',
'bold_mask_std',
'movpar_file',
'name_source',
'skip_vols',
'spatial_reference',
]), name='inputnode')
outputnode = pe.Node(niu.IdentityInterface(
fields=['aroma_confounds', 'aroma_noise_ics', 'melodic_mix',
'nonaggr_denoised_file', 'aroma_metadata']), name='outputnode')
# extract out to BOLD base
select_std = pe.Node(KeySelect(fields=['bold_mask_std', 'bold_std']),
name='select_std', run_without_submitting=True)
select_std.inputs.key = 'MNI152NLin6Asym_res-2'
rm_non_steady_state = pe.Node(niu.Function(function=_remove_volumes,
output_names=['bold_cut']),
name='rm_nonsteady')
calc_median_val = pe.Node(fsl.ImageStats(op_string='-k %s -p 50'), name='calc_median_val')
calc_bold_mean = pe.Node(fsl.MeanImage(), name='calc_bold_mean')
def _getusans_func(image, thresh):
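        # nipype's fsl.SUSAN expects ``usans`` as a list of (image, threshold)
        # tuples; wrap the single mean image accordingly.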
return [tuple([image, thresh])]
getusans = pe.Node(niu.Function(function=_getusans_func, output_names=['usans']),
name='getusans', mem_gb=0.01)
smooth = pe.Node(fsl.SUSAN(fwhm=susan_fwhm), name='smooth')
# melodic node
melodic = pe.Node(fsl.MELODIC(
no_bet=True, tr_sec=float(metadata['RepetitionTime']), mm_thresh=0.5, out_stats=True,
dim=aroma_melodic_dim), name="melodic")
# ica_aroma node
ica_aroma = pe.Node(ICA_AROMARPT(
denoise_type='nonaggr', generate_report=True, TR=metadata['RepetitionTime'],
args='-np'), name='ica_aroma')
add_non_steady_state = pe.Node(niu.Function(function=_add_volumes,
output_names=['bold_add']),
name='add_nonsteady')
# extract the confound ICs from the results
ica_aroma_confound_extraction = pe.Node(ICAConfounds(err_on_aroma_warn=err_on_aroma_warn),
name='ica_aroma_confound_extraction')
ica_aroma_metadata_fmt = pe.Node(
TSV2JSON(index_column='IC', output=None, enforce_case=True,
additional_metadata={'Method': {
'Name': 'ICA-AROMA',
'Version': getenv('AROMA_VERSION', 'n/a')}}),
name='ica_aroma_metadata_fmt')
ds_report_ica_aroma = pe.Node(
DerivativesDataSink(desc='aroma', datatype="figures", dismiss_entities=("echo",)),
name='ds_report_ica_aroma', run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB)
def _getbtthresh(medianval):
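        # Brightness threshold for SUSAN: 75% of the median intensity within
        # the mask (the value computed by ``calc_median_val`` above).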
return 0.75 * medianval
# connect the nodes
workflow.connect([
(inputnode, select_std, [('spatial_reference', 'keys'),
('bold_std', 'bold_std'),
('bold_mask_std', 'bold_mask_std')]),
(inputnode, ica_aroma, [('movpar_file', 'motion_parameters')]),
(inputnode, rm_non_steady_state, [
('skip_vols', 'skip_vols')]),
(select_std, rm_non_steady_state, [
('bold_std', 'bold_file')]),
(select_std, calc_median_val, [
('bold_mask_std', 'mask_file')]),
(rm_non_steady_state, calc_median_val, [
('bold_cut', 'in_file')]),
(rm_non_steady_state, calc_bold_mean, [
('bold_cut', 'in_file')]),
(calc_bold_mean, getusans, [('out_file', 'image')]),
(calc_median_val, getusans, [('out_stat', 'thresh')]),
# Connect input nodes to complete smoothing
(rm_non_steady_state, smooth, [
('bold_cut', 'in_file')]),
(getusans, smooth, [('usans', 'usans')]),
(calc_median_val, smooth, [(('out_stat', _getbtthresh), 'brightness_threshold')]),
# connect smooth to melodic
(smooth, melodic, [('smoothed_file', 'in_files')]),
(select_std, melodic, [
('bold_mask_std', 'mask')]),
# connect nodes to ICA-AROMA
(smooth, ica_aroma, [('smoothed_file', 'in_file')]),
(select_std, ica_aroma, [
('bold_mask_std', 'report_mask'),
('bold_mask_std', 'mask')]),
(melodic, ica_aroma, [('out_dir', 'melodic_dir')]),
# generate tsvs from ICA-AROMA
(ica_aroma, ica_aroma_confound_extraction, [('out_dir', 'in_directory')]),
(inputnode, ica_aroma_confound_extraction, [
('skip_vols', 'skip_vols')]),
(ica_aroma_confound_extraction, ica_aroma_metadata_fmt, [
('aroma_metadata', 'in_file')]),
# output for processing and reporting
(ica_aroma_confound_extraction, outputnode, [('aroma_confounds', 'aroma_confounds'),
('aroma_noise_ics', 'aroma_noise_ics'),
('melodic_mix', 'melodic_mix')]),
(ica_aroma_metadata_fmt, outputnode, [('output', 'aroma_metadata')]),
(ica_aroma, add_non_steady_state, [
('nonaggr_denoised_file', 'bold_cut_file')]),
(select_std, add_non_steady_state, [
('bold_std', 'bold_file')]),
(inputnode, add_non_steady_state, [
('skip_vols', 'skip_vols')]),
(add_non_steady_state, outputnode, [('bold_add', 'nonaggr_denoised_file')]),
(ica_aroma, ds_report_ica_aroma, [('out_report', 'in_file')]),
])
return workflow
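# Input sketch (illustrative values): ``select_std`` inside the workflow picks
# the ``MNI152NLin6Asym_res-2`` entries out of the lists handed to the input
# node, keyed by ``spatial_reference``:
#
#     wf = init_ica_aroma_wf(
#         mem_gb=3, metadata={"RepetitionTime": 1.0}, omp_nthreads=1)
#     wf.inputs.inputnode.spatial_reference = ["MNI152NLin6Asym_res-2"]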
def _remove_volumes(bold_file, skip_vols):
"""Remove skip_vols from bold_file."""
import nibabel as nb
from nipype.utils.filemanip import fname_presuffix
if skip_vols == 0:
return bold_file
out = fname_presuffix(bold_file, suffix='_cut')
bold_img = nb.load(bold_file)
bold_img.__class__(bold_img.dataobj[..., skip_vols:],
bold_img.affine, bold_img.header).to_filename(out)
return out
def _add_volumes(bold_file, bold_cut_file, skip_vols):
"""Prepend skip_vols from bold_file onto bold_cut_file."""
import nibabel as nb
import numpy as np
from nipype.utils.filemanip import fname_presuffix
if skip_vols == 0:
return bold_cut_file
bold_img = nb.load(bold_file)
bold_cut_img = nb.load(bold_cut_file)
bold_data = np.concatenate((bold_img.dataobj[..., :skip_vols],
bold_cut_img.dataobj), axis=3)
out = fname_presuffix(bold_cut_file, suffix='_addnonsteady')
bold_img.__class__(bold_data, bold_img.affine, bold_img.header).to_filename(out)
return out
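# Round-trip sketch for the two helpers above (hypothetical file name; the
# prepended volumes are copied verbatim from the original series):
#
#     cut = _remove_volumes("bold.nii.gz", skip_vols=4)     # drops volumes 0-3
#     full = _add_volumes("bold.nii.gz", cut, skip_vols=4)  # prepends them back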
def _get_zooms(in_file):
import nibabel as nb
return tuple(nb.load(in_file).header.get_zooms()[:3])
|
from sklearn2sql_heroku.tests.classification import generic as class_gen
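# Fit scikit-learn's LogisticRegression on the iris dataset and exercise the
# generated SQL against SQLite (behaviour assumed from the helper's name; the
# actual work happens inside ``class_gen.test_model``).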
class_gen.test_model("LogisticRegression", "iris", "sqlite")
|
"""
Django admin models for a campaign application.
"""
from django.contrib import admin
from campaign.models import Supporter, Entry, Issue
class EntryAdmin(admin.ModelAdmin):
date_hierarchy = 'pub_date'
fieldsets = (
(u'Metadata', {'fields': (('title', 'slug'), 'subtitle', 'author', 'pub_date',)}),
(u'Entry', {'fields': ('summary_txt', 'body_txt',)}),
(u'Categorization', {'fields': ('tag_list', 'status',)}),
)
list_display = ('title', 'pub_date', 'author', 'status',)
list_filter = ['pub_date', 'status']
list_per_page = 50
search_fields = ['title', 'subtitle', 'body_txt']
prepopulated_fields = {'slug': ('title',)}
class IssueAdmin(admin.ModelAdmin):
fieldsets = (
(u'Metadata', {'fields': (('issue', 'slug',), 'ordering')}),
(u'Position', {'fields': ('position_summary_txt', 'position_txt',)}),
(u'Categorization', {'fields': ('tag_list', 'status',)}),
)
list_display = ('issue', 'ordering', 'tag_list', 'status',)
list_filter = ['status']
list_per_page = 50
search_fields = ['issue', 'position_summary_txt', 'position_txt', 'tag_list']
prepopulated_fields = {'slug': ('issue',)}
class SupporterAdmin(admin.ModelAdmin):
date_hierarchy = 'submit_date'
fieldsets = (
(u'Supporter', {'fields': ('first_name', 'last_name', 'title', 'organization',)}),
(u'Contact information', {'fields': (('address_1', 'address_2',), ('city', 'state',), ('country', 'zip_code',), 'email', 'phone',)}),
(u'Supporting actions', {'fields': (('yard_sign', 'yard_sign_delivered',), ('poster', 'poster_delivered',), ('volunteer', 'contacted',), 'fundraising', 'support_list', 'donated', 'message',)}),
(u'Meta', {'fields': ('submit_date', 'ip_address', 'status',)}),
)
list_display = ('__unicode__', 'email', 'yard_sign', 'poster', 'support_list', 'volunteer', 'fundraising', 'donated', 'contacted', 'status')
list_filter = ['submit_date', 'status', 'yard_sign', 'yard_sign_delivered', 'poster', 'poster_delivered', 'support_list', 'volunteer', 'fundraising', 'donated', 'contacted']
list_per_page = 100
search_fields = ['first_name', 'last_name', 'address_1', 'message']
admin.site.register(Entry, EntryAdmin)
admin.site.register(Issue, IssueAdmin)
admin.site.register(Supporter, SupporterAdmin)
|
import logging
from pathlib import Path
from flask import Blueprint, current_app, render_template, send_from_directory
from scout import __version__
from scout.server.utils import public_endpoint
LOG = logging.getLogger(__name__)
public_bp = Blueprint(
"public",
__name__,
template_folder="templates",
static_folder="static",
static_url_path="/public/static",
)
@public_bp.route("/")
@public_endpoint
def index():
"""Show the static landing page."""
badge_name = current_app.config.get("ACCREDITATION_BADGE")
if badge_name and not Path(public_bp.static_folder, badge_name).is_file():
LOG.warning(f'No file with name "{badge_name}" in {public_bp.static_folder}')
badge_name = None
return render_template("public/index.html", version=__version__, accred_badge=badge_name)
@public_bp.route("/favicon")
def favicon():
return send_from_directory(current_app.static_folder, "favicon.ico")
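# Registration sketch (illustrative; ``swedac.png`` is a hypothetical badge
# file, not shipped with scout):
#
#     from flask import Flask
#     app = Flask(__name__)
#     app.config["ACCREDITATION_BADGE"] = "swedac.png"
#     app.register_blueprint(public_bp)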
|
"""#R1 : threshold of the ndPAC.
This script provide an insight of the ndPAC's threshold.
"""
import json
with open("../../paper.json", 'r') as f: cfg = json.load(f) # noqa
import numpy as np
from scipy.special import erfinv
from tensorpac.signals import pac_signals_wavelet
from tensorpac import Pac
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
plt.style.use('seaborn-poster')
sns.set_style("white")
plt.rc('font', family=cfg["font"])
def custom_ndpac(pha, amp, p=.05):
npts = amp.shape[-1]
# Normalize amplitude :
    # Use the sample standard deviation, as in the author's original MATLAB code
amp = np.subtract(amp, np.mean(amp, axis=-1, keepdims=True))
amp = np.divide(amp, np.std(amp, ddof=1, axis=-1, keepdims=True))
# Compute pac :
pac = np.abs(np.einsum('i...j, k...j->ik...', amp, np.exp(1j * pha)))
s = pac ** 2
pac /= npts
# Set to zero non-significant values:
xlim = npts * erfinv(1 - p)**2
pac_nt = pac.copy()
pac[s <= 2 * xlim] = np.nan
return pac_nt.squeeze(), pac.squeeze(), s.squeeze()
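# Numeric illustration of the cut-off above (values for illustration only):
# with p = .05 and npts = 100000, erfinv(1 - p) ** 2 is roughly 1.92, so
# xlim = npts * erfinv(1 - p) ** 2 is about 1.92e5, and any squared estimate
# with s <= 2 * xlim is masked as non-significant (NaN).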
if __name__ == '__main__':
###########################################################################
p = .05
sf = 1024
n_epochs = 1
n_times = 100000
###########################################################################
data, time = pac_signals_wavelet(sf=sf, f_pha=10, f_amp=100, noise=2.,
n_epochs=n_epochs, n_times=n_times,
rnd_state=0)
# extract the phase and the amplitude
p_obj = Pac(idpac=(1, 0, 0), f_pha='hres', f_amp='hres')
pha = p_obj.filter(sf, data, ftype='phase', n_jobs=1)
amp = p_obj.filter(sf, data, ftype='amplitude', n_jobs=1)
# compute PAC (outside of the PAC object)
pac_nt, pac, s = custom_ndpac(pha, amp, p=p)
plt.figure(figsize=(22, 6))
plt.subplot(131)
p_obj.comodulogram(pac_nt, cblabel="", title='Non-thresholded PAC',
cmap=cfg["cmap"], vmin=0, colorbar=True)
plt.subplot(132)
p_obj.comodulogram(s, cblabel="", title='Threshold', cmap=cfg["cmap"],
vmin=0, colorbar=True)
plt.ylabel('')
plt.subplot(133)
p_obj.comodulogram(pac, cblabel="", title='Thresholded PAC',
cmap=cfg["cmap"], vmin=0, colorbar=True)
plt.ylabel('')
plt.tight_layout()
plt.savefig(f"../figures/r1_ndpac_threshold.png", dpi=300,
bbox_inches='tight')
plt.show()
|
from datetime import date, datetime, timedelta
import django # noqa
from django import forms
from django.db import models
from django.contrib.auth import get_user
from django.contrib.auth.models import AnonymousUser
from django.core import mail
from django.core.files.storage import default_storage as storage
from django.test.client import RequestFactory
from django.test.utils import override_settings
from unittest import mock
import pytest
import responses
import olympia # noqa
from olympia import amo
from olympia.access.models import Group, GroupUser
from olympia.addons.models import Addon, AddonUser
from olympia.amo.tests import (
addon_factory, collection_factory, TestCase, user_factory)
from olympia.bandwagon.models import Collection
from olympia.files.models import File
from olympia.ratings.models import Rating
from olympia.users.models import (
DeniedName, DisposableEmailDomainRestriction, generate_auth_id,
get_anonymized_username,
EmailReputationRestriction, EmailUserRestriction, IPNetworkUserRestriction,
IPReputationRestriction, UserEmailField, UserProfile)
from olympia.zadmin.models import set_config
class TestUserProfile(TestCase):
fixtures = ('base/addon_3615', 'base/user_2519', 'users/test_backends')
def test_is_addon_developer(self):
user = user_factory()
assert not user.addonuser_set.exists()
assert not user.is_developer
assert not user.is_addon_developer
assert not user.is_artist
addon = addon_factory(users=[user])
del user.cached_developer_status # it's a cached property.
assert user.is_developer
assert user.is_addon_developer
assert not user.is_artist
addon.delete()
del user.cached_developer_status
assert not user.is_developer
assert not user.is_addon_developer
assert not user.is_artist
def test_is_artist_of_static_theme(self):
user = user_factory()
assert not user.addonuser_set.exists()
assert not user.is_developer
assert not user.is_addon_developer
assert not user.is_artist
addon = addon_factory(users=[user], type=amo.ADDON_STATICTHEME)
del user.cached_developer_status # it's a cached property.
assert user.is_developer
assert not user.is_addon_developer
assert user.is_artist
addon.delete()
del user.cached_developer_status
assert not user.is_developer
assert not user.is_addon_developer
assert not user.is_artist
def test_delete(self):
user = UserProfile.objects.get(pk=4043307)
# Create a photo so that we can test deletion.
with storage.open(user.picture_path, 'wb') as fobj:
fobj.write(b'test data\n')
with storage.open(user.picture_path_original, 'wb') as fobj:
fobj.write(b'original test data\n')
assert storage.exists(user.picture_path_original)
assert storage.exists(user.picture_path)
assert not user.deleted
assert user.email == 'jbalogh@mozilla.com'
assert user.auth_id
assert user.fxa_id == '0824087ad88043e2a52bd41f51bbbe79'
assert user.username == 'jbalogh'
assert user.display_name
assert user.homepage
assert user.picture_type
assert user.last_login_ip
assert not user.has_anonymous_username
name = user.display_name
user.update(
averagerating=4.4,
biography='some life',
bypass_upload_restrictions=True,
location='some where',
occupation='some job',
read_dev_agreement=datetime.now(),
reviewer_name='QA')
old_auth_id = user.auth_id
user.delete()
user = UserProfile.objects.get(pk=4043307)
assert user.email == 'jbalogh@mozilla.com'
assert user.auth_id
assert user.auth_id != old_auth_id
assert user.fxa_id == '0824087ad88043e2a52bd41f51bbbe79'
assert user.display_name == ''
assert user.homepage == ''
assert user.picture_type is None
# last_login_ip is kept during deletion and later via
# clear_old_user_data command
assert user.last_login_ip
assert user.has_anonymous_username
assert user.averagerating is None
assert user.biography is None
assert user.bypass_upload_restrictions is False
assert user.location == ''
assert user.occupation == ''
assert user.read_dev_agreement is None
assert user.reviewer_name == ''
assert not storage.exists(user.picture_path)
assert not storage.exists(user.picture_path_original)
assert len(mail.outbox) == 1
email = mail.outbox[0]
assert email.to == [user.email]
assert f'message because your user account {name}' in email.body
assert email.reply_to == ['amo-admins+deleted@mozilla.com']
self.assertCloseToNow(user.modified)
def test_should_send_delete_email(self):
no_name = user_factory(email='email@moco', occupation='person')
assert not no_name.should_send_delete_email()
no_name.update(display_name='Steve Holt!')
assert no_name.should_send_delete_email()
addon_owner = user_factory()
addon = addon_factory(users=(addon_owner,))
assert addon_owner.should_send_delete_email()
collection_creator = collection_factory(author=user_factory()).author
assert collection_creator.should_send_delete_email()
rating_writer = user_factory()
Rating.objects.create(
user=rating_writer, addon=addon, version=addon.current_version)
assert rating_writer.should_send_delete_email()
@mock.patch.object(File, 'hide_disabled_file')
def test_ban_and_disable_related_content_bulk(self, hide_disabled_mock):
user_sole = user_factory(email='sole@foo.baa', fxa_id='13579',
last_login_ip='127.0.0.1')
addon_sole = addon_factory(users=[user_sole])
        self.setup_user_to_have_content_disabled(user_sole)
user_multi = user_factory(email='multi@foo.baa', fxa_id='24680',
last_login_ip='127.0.0.2')
innocent_user = user_factory()
addon_multi = addon_factory(
users=UserProfile.objects.filter(
id__in=[user_multi.id, innocent_user.id]))
        self.setup_user_to_have_content_disabled(user_multi)
# Now that everything is set up, disable/delete related content.
UserProfile.ban_and_disable_related_content_bulk(
[user_sole, user_multi])
addon_sole.reload()
addon_multi.reload()
        # the sole dev's add-on should have been disabled, but its author retained
assert addon_sole.status == amo.STATUS_DISABLED
assert list(addon_sole.authors.all()) == [user_sole]
# shouldn't have been disabled as it has another author
assert addon_multi.status != amo.STATUS_DISABLED
assert list(addon_multi.authors.all()) == [innocent_user]
# the File objects have been disabled
assert not File.objects.filter(version__addon=addon_sole).exclude(
status=amo.STATUS_DISABLED).exists()
# But not for the Add-on that wasn't disabled
assert File.objects.filter(version__addon=addon_multi).exclude(
status=amo.STATUS_DISABLED).exists()
assert not user_sole._ratings_all.exists() # Even replies.
assert not user_sole.collections.exists()
assert not user_multi._ratings_all.exists() # Even replies.
assert not user_multi.collections.exists()
assert not storage.exists(user_sole.picture_path)
assert not storage.exists(user_sole.picture_path_original)
assert not storage.exists(user_multi.picture_path)
assert not storage.exists(user_multi.picture_path_original)
assert user_sole.deleted
self.assertCloseToNow(user_sole.banned)
self.assertCloseToNow(user_sole.modified)
assert user_sole.email == 'sole@foo.baa'
assert user_sole.auth_id
assert user_sole.fxa_id == '13579'
assert user_sole.last_login_ip == '127.0.0.1'
assert user_multi.deleted
self.assertCloseToNow(user_multi.banned)
self.assertCloseToNow(user_multi.modified)
assert user_multi.email == 'multi@foo.baa'
assert user_multi.auth_id
assert user_multi.fxa_id == '24680'
assert user_multi.last_login_ip == '127.0.0.2'
hide_disabled_mock.assert_not_called()
    def setup_user_to_have_content_disabled(self, user):
addon = user.addons.last()
user.update(picture_type='image/png')
# Create a photo so that we can test deletion.
with storage.open(user.picture_path, 'wb') as fobj:
fobj.write(b'test data\n')
with storage.open(user.picture_path_original, 'wb') as fobj:
fobj.write(b'original test data\n')
assert user.addons.count() == 1
rating = Rating.objects.create(
user=user, addon=addon, version=addon.current_version)
Rating.objects.create(
user=user, addon=addon, version=addon.current_version,
reply_to=rating)
Collection.objects.create(author=user)
def test_delete_with_related_content_exclude_addons_with_other_devs(self):
from olympia.addons.models import update_search_index
user = UserProfile.objects.get(pk=55021)
addon = user.addons.last()
        self.setup_user_to_have_content_disabled(user)
AddonUser.objects.create(addon=addon, user=user_factory())
# Now that everything is set up, disable/delete related content.
update_search_index.reset_mock()
user.delete()
update_search_index.assert_called_with(sender=Addon, instance=addon)
# The add-on should not have been touched, it has another dev.
assert not user.addons.exists()
addon.reload()
assert addon.status == amo.STATUS_APPROVED
assert not user._ratings_all.exists() # Even replies.
assert not user.collections.exists()
assert not storage.exists(user.picture_path)
assert not storage.exists(user.picture_path_original)
def test_delete_with_just_addon_with_other_devs(self):
from olympia.addons.models import update_search_index
user = UserProfile.objects.get(pk=55021)
addon = user.addons.last()
AddonUser.objects.create(addon=addon, user=user_factory())
# Now that everything is set up, disable/delete related content.
update_search_index.reset_mock()
user.delete()
update_search_index.assert_called_with(
sender=AddonUser, instance=addon)
# The add-on should not have been touched, it has another dev.
assert not user.addons.exists()
addon.reload()
assert addon.status == amo.STATUS_APPROVED
def test_delete_with_related_content_actually_delete(self):
addon = Addon.objects.latest('pk')
user = UserProfile.objects.get(pk=55021)
user.update(picture_type='image/png')
# Create a photo so that we can test deletion.
with storage.open(user.picture_path, 'wb') as fobj:
fobj.write(b'test data\n')
with storage.open(user.picture_path_original, 'wb') as fobj:
fobj.write(b'original test data\n')
assert user.addons.count() == 1
rating = Rating.objects.create(
user=user, addon=addon, version=addon.current_version)
Rating.objects.create(
user=user, addon=addon, version=addon.current_version,
reply_to=rating)
Collection.objects.create(author=user)
# Now that everything is set up, delete related content.
user.delete()
assert not user.addons.exists()
assert not user._ratings_all.exists() # Even replies.
assert not user.collections.exists()
assert not storage.exists(user.picture_path)
assert not storage.exists(user.picture_path_original)
def test_delete_picture(self):
user = UserProfile.objects.get(pk=55021)
user.update(picture_type='image/png')
# Create a photo so that we can test deletion.
with storage.open(user.picture_path, 'wb') as fobj:
fobj.write(b'test data\n')
with storage.open(user.picture_path_original, 'wb') as fobj:
fobj.write(b'original test data\n')
user.delete_picture()
user.reload()
assert user.picture_type is None
assert not storage.exists(user.picture_path)
assert not storage.exists(user.picture_path_original)
def test_groups_list(self):
user = UserProfile.objects.get(pk=55021)
group1 = Group.objects.create(name='un')
group2 = Group.objects.create(name='deux')
GroupUser.objects.create(user=user, group=group1)
GroupUser.objects.create(user=user, group=group2)
assert user.groups_list == list(user.groups.all())
assert len(user.groups_list) == 2
# Remove the user from one of the groups, groups_list should not have
# changed since it's a cached property.
GroupUser.objects.filter(group=group1).delete()
assert len(user.groups_list) == 2
# Delete the cached property, it should be updated.
del user.groups_list
assert len(user.groups_list) == 1
assert user.groups_list == [group2]
def test_welcome_name(self):
u1 = UserProfile.objects.create(username='sc')
u2 = UserProfile.objects.create(
username='sc2', display_name="Sarah Connor")
u3 = UserProfile.objects.create()
assert u1.welcome_name == 'Firefox user %s' % u1.id
assert u2.welcome_name == 'Sarah Connor'
assert u3.welcome_name == 'Firefox user %s' % u3.id
def test_welcome_name_anonymous(self):
user = UserProfile.objects.create(
username='anonymous-bb4f3cbd422e504080e32f2d9bbfcee0', id=1234)
assert user.welcome_name == 'Firefox user 1234'
def test_welcome_name_anonymous_with_display(self):
user = UserProfile.objects.create(display_name='John Connor')
user.username = get_anonymized_username()
assert user.welcome_name == 'John Connor'
def test_has_anonymous_username_no_names(self):
user = UserProfile.objects.create(display_name=None)
user.username = get_anonymized_username()
assert user.has_anonymous_username
def test_has_anonymous_username_username_set(self):
user = UserProfile.objects.create(username='bob', display_name=None)
assert not user.has_anonymous_username
def test_has_anonymous_username_display_name_set(self):
user = UserProfile.objects.create(display_name='Bob Bobbertson')
user.username = get_anonymized_username()
assert user.has_anonymous_username
def test_has_anonymous_username_both_names_set(self):
user = UserProfile.objects.create(
username='bob', display_name='Bob Bobbertson')
assert not user.has_anonymous_username
def test_has_anonymous_display_name_no_names(self):
user = UserProfile.objects.create(display_name=None)
user.username = get_anonymized_username()
assert user.has_anonymous_display_name
def test_has_anonymous_display_name_username_set(self):
user = UserProfile.objects.create(username='bob', display_name=None)
assert user.has_anonymous_display_name
def test_has_anonymous_display_name_display_name_set(self):
user = UserProfile.objects.create(display_name='Bob Bobbertson')
user.username = get_anonymized_username()
assert not user.has_anonymous_display_name
def test_has_anonymous_display_name_both_names_set(self):
user = UserProfile.objects.create(
username='bob', display_name='Bob Bobbertson')
assert not user.has_anonymous_display_name
def test_superuser(self):
user = UserProfile.objects.get(pk=9946)
assert not user.is_staff
assert not user.is_superuser
# Give the user '*:*'
# (groups_list cached_property is automatically cleared because we're
# creating a GroupUser instance).
group = Group.objects.filter(rules='*:*').get()
GroupUser.objects.create(group=group, user=user)
assert not user.is_staff
assert user.is_superuser
user.update(email='employee@mozilla.com')
assert user.is_staff
assert user.is_superuser
# No extra queries are made to check a second time, thanks to
# groups_list being a cached_property.
with self.assertNumQueries(0):
assert user.is_superuser
def test_staff_only(self):
group = Group.objects.create(
name='Admins of Something', rules='Admin:Something')
user = UserProfile.objects.get(pk=9946)
assert not user.is_staff
assert not user.is_superuser
# Even as part of an Admin:* group, the user is still not considered
# 'staff'.
GroupUser.objects.create(group=group, user=user)
assert not user.is_staff
assert not user.is_superuser
# Now that they have a mozilla.com email, they are.
user.update(email='employee@mozilla.com')
assert user.is_staff
assert not user.is_superuser
def test_give_and_then_remove_admin_powers(self):
group = Group.objects.create(name='Admins', rules='*:*')
user = UserProfile.objects.get(pk=9946)
relation = GroupUser.objects.create(group=group, user=user)
relation.delete()
assert not user.is_staff
assert not user.is_superuser
def test_picture_url(self):
"""
Test for a preview URL if image is set, or default image otherwise.
"""
u = UserProfile.objects.create(
id=1234, picture_type='image/png', modified=date.today(),
username='a')
        assert '/userpics/0/1/1234.png?modified=' in u.picture_url
u = UserProfile.objects.create(
id=1234567890, picture_type='image/png', modified=date.today(),
username='b')
        assert '/userpics/1234/1234567/1234567890.png?modified=' in u.picture_url
u = UserProfile.objects.create(
id=123456, picture_type=None, username='c')
assert u.picture_url.endswith('/anon_user.png')
def test_review_replies(self):
"""
Make sure that developer replies are not returned as if they were
original ratings.
"""
addon = Addon.objects.get(id=3615)
user = UserProfile.objects.get(pk=2519)
version = addon.find_latest_public_listed_version()
new_rating = Rating(version=version, user=user, rating=2, body='hello',
addon=addon)
new_rating.save()
new_reply = Rating(version=version, user=user, reply_to=new_rating,
addon=addon, body='my reply')
new_reply.save()
review_list = [rating.pk for rating in user.ratings]
assert len(review_list) == 1
assert new_rating.pk in review_list, (
'Original review must show up in ratings list.')
assert new_reply.pk not in review_list, (
'Developer reply must not show up in ratings list.')
def test_num_addons_listed(self):
"""Test that num_addons_listed is only considering add-ons for which
the user is marked as listed, and that only public and listed add-ons
are counted."""
user = UserProfile.objects.get(id=2519)
addon = Addon.objects.get(pk=3615)
AddonUser.objects.create(addon=addon, user=user, listed=True)
assert user.num_addons_listed == 1
extra_addon = addon_factory(status=amo.STATUS_NOMINATED)
AddonUser.objects.create(addon=extra_addon, user=user, listed=True)
extra_addon2 = addon_factory()
AddonUser.objects.create(addon=extra_addon2, user=user, listed=True)
self.make_addon_unlisted(extra_addon2)
assert user.num_addons_listed == 1
AddonUser.objects.filter(addon=addon, user=user).update(listed=False)
assert user.num_addons_listed == 0
def test_my_addons(self):
"""Test helper method to get N addons."""
addon1 = Addon.objects.create(name='test-1', type=amo.ADDON_EXTENSION)
AddonUser.objects.create(addon_id=addon1.id, user_id=2519, listed=True)
addon2 = Addon.objects.create(name='test-2', type=amo.ADDON_EXTENSION)
AddonUser.objects.create(addon_id=addon2.id, user_id=2519, listed=True)
addons = UserProfile.objects.get(id=2519).my_addons()
assert sorted(str(a.name) for a in addons) == [
addon1.name, addon2.name]
def test_mobile_collection(self):
u = UserProfile.objects.get(id='4043307')
assert not Collection.objects.filter(author=u)
c = u.mobile_collection()
assert c.type == amo.COLLECTION_MOBILE
assert c.slug == 'mobile'
def test_favorites_collection(self):
u = UserProfile.objects.get(id='4043307')
assert not Collection.objects.filter(author=u)
c = u.favorites_collection()
assert c.type == amo.COLLECTION_FAVORITES
assert c.slug == 'favorites'
def test_get_url_path(self):
assert UserProfile.objects.create(id=1).get_url_path() == (
'/en-US/firefox/user/1/')
assert UserProfile.objects.create(
username='yolo', id=2).get_url_path() == ('/en-US/firefox/user/2/')
def test_mobile_addons(self):
user = UserProfile.objects.get(id='4043307')
addon1 = Addon.objects.create(name='test-1', type=amo.ADDON_EXTENSION)
addon2 = Addon.objects.create(name='test-2', type=amo.ADDON_EXTENSION)
mobile_collection = user.mobile_collection()
mobile_collection.add_addon(addon1)
other_collection = Collection.objects.create(name='other')
other_collection.add_addon(addon2)
assert user.mobile_addons.count() == 1
assert user.mobile_addons[0] == addon1.pk
def test_favorite_addons(self):
user = UserProfile.objects.get(id='4043307')
addon1 = Addon.objects.create(name='test-1', type=amo.ADDON_EXTENSION)
addon2 = Addon.objects.create(name='test-2', type=amo.ADDON_EXTENSION)
favorites_collection = user.favorites_collection()
favorites_collection.add_addon(addon1)
other_collection = Collection.objects.create(name='other')
other_collection.add_addon(addon2)
assert user.favorite_addons.count() == 1
assert user.favorite_addons[0] == addon1.pk
def test_cannot_set_password(self):
user = UserProfile.objects.get(id='4043307')
with self.assertRaises(NotImplementedError):
user.set_password('password')
def test_cannot_check_password(self):
user = UserProfile.objects.get(id='4043307')
with self.assertRaises(NotImplementedError):
user.check_password('password')
def test_get_session_auth_hash(self):
user = UserProfile.objects.get(id=4043307)
user.update(auth_id=None)
assert user.get_session_auth_hash() is None
user.update(auth_id=12345)
hash1 = user.get_session_auth_hash()
assert hash1
user.update(auth_id=67890)
hash2 = user.get_session_auth_hash()
assert hash1 != hash2
def test_has_read_developer_agreement(self):
set_config('last_dev_agreement_change_date', '2019-06-12 00:00')
after_change = (
datetime(2019, 6, 12) + timedelta(days=1))
before_change = (
datetime(2019, 6, 12) - timedelta(days=42))
assert not UserProfile.objects.create(
username='a').has_read_developer_agreement()
assert not UserProfile.objects.create(
username='b', read_dev_agreement=None
).has_read_developer_agreement()
assert not UserProfile.objects.create(
username='c', read_dev_agreement=before_change
).has_read_developer_agreement()
# User has read the agreement after it was modified for
# post-review: it should return True.
assert UserProfile.objects.create(
username='d', read_dev_agreement=after_change
).has_read_developer_agreement()
def test_is_public(self):
user = UserProfile.objects.get(id=4043307)
assert not user.addonuser_set.exists()
assert not user.is_public
addon = Addon.objects.get(pk=3615)
addon_user = addon.addonuser_set.create(user=user)
assert user.is_public
# Only developer and owner roles make a profile public.
addon_user.update(role=amo.AUTHOR_ROLE_DEV)
assert user.is_public
addon_user.update(role=amo.AUTHOR_ROLE_OWNER)
assert user.is_public
# But only if they're listed
addon_user.update(role=amo.AUTHOR_ROLE_OWNER, listed=False)
assert not user.is_public
addon_user.update(listed=True)
assert user.is_public
addon_user.update(role=amo.AUTHOR_ROLE_DEV, listed=False)
assert not user.is_public
addon_user.update(listed=True)
assert user.is_public
# The add-on needs to be public.
self.make_addon_unlisted(addon) # Easy way to toggle status
assert not user.reload().is_public
self.make_addon_listed(addon)
addon.update(status=amo.STATUS_APPROVED)
assert user.reload().is_public
addon.delete()
assert not user.reload().is_public
@mock.patch('olympia.amo.tasks.sync_object_to_basket')
def test_user_field_changes_not_synced_to_basket(
self, sync_object_to_basket_mock):
user = UserProfile.objects.get(id=4043307)
# Note that basket_token is for newsletters, and is irrelevant here.
user.update(
basket_token='FOO', email='newemail@example.com', is_public=True,
read_dev_agreement=self.days_ago(42), notes='Blah',
biography='Something', auth_id=12345)
assert sync_object_to_basket_mock.delay.call_count == 0
@mock.patch('olympia.amo.tasks.sync_object_to_basket')
def test_user_field_changes_synced_to_basket(
self, sync_object_to_basket_mock):
user = UserProfile.objects.get(id=4043307)
user.update(last_login=self.days_ago(0))
assert sync_object_to_basket_mock.delay.call_count == 1
        sync_object_to_basket_mock.delay.assert_called_with(
            'userprofile', 4043307)
sync_object_to_basket_mock.reset_mock()
user.update(display_name='Fôoo')
assert sync_object_to_basket_mock.delay.call_count == 1
        sync_object_to_basket_mock.delay.assert_called_with(
            'userprofile', 4043307)
sync_object_to_basket_mock.reset_mock()
user.update(fxa_id='wât') # Can technically happen if admins do it.
assert sync_object_to_basket_mock.delay.call_count == 1
        sync_object_to_basket_mock.delay.assert_called_with(
            'userprofile', 4043307)
@mock.patch('olympia.amo.tasks.sync_object_to_basket')
def test_user_deletion_synced_to_basket(
self, sync_object_to_basket_mock):
user = UserProfile.objects.get(id=4043307)
user.delete()
assert sync_object_to_basket_mock.delay.call_count == 1
        sync_object_to_basket_mock.delay.assert_called_with(
            'userprofile', 4043307)
def test_get_lookup_field(self):
user = UserProfile.objects.get(id=55021)
lookup_field_pk = UserProfile.get_lookup_field(str(user.id))
assert lookup_field_pk == 'pk'
lookup_field_email = UserProfile.get_lookup_field(user.email)
assert lookup_field_email == 'email'
lookup_field_random_digit = UserProfile.get_lookup_field('123456')
assert lookup_field_random_digit == 'pk'
lookup_field_random_string = UserProfile.get_lookup_field('my@mail.co')
assert lookup_field_random_string == 'email'
class TestDeniedName(TestCase):
fixtures = ['users/test_backends']
def test_blocked(self):
assert DeniedName.blocked('IE6Fan')
assert DeniedName.blocked('IE6fantastic')
assert not DeniedName.blocked('IE6')
assert not DeniedName.blocked('testo')
class TestIPNetworkUserRestriction(TestCase):
def test_str(self):
obj = IPNetworkUserRestriction.objects.create(network='192.168.1.0/24')
assert str(obj) == '192.168.1.0/24'
def test_allowed_ip4_address(self):
request = RequestFactory(REMOTE_ADDR='192.168.0.1').get('/')
request.user = user_factory(last_login_ip='192.168.0.5')
IPNetworkUserRestriction.objects.create(network='192.168.1.0/28')
assert IPNetworkUserRestriction.allow_request(request)
request = RequestFactory(REMOTE_ADDR='10.8.0.1').get('/')
request.user = user_factory(last_login_ip='10.8.0.1')
IPNetworkUserRestriction.objects.create(network='10.8.0.0/32')
assert IPNetworkUserRestriction.allow_request(request)
def test_blocked_ip4_32_subnet(self):
request = RequestFactory(REMOTE_ADDR='192.168.0.8').get('/')
request.user = user_factory(last_login_ip='192.168.1.1')
IPNetworkUserRestriction.objects.create(network='192.168.0.8/32')
assert not IPNetworkUserRestriction.allow_request(request)
def test_allowed_ip4_28_subnet(self):
request = RequestFactory(REMOTE_ADDR='192.168.0.254').get('/')
request.user = user_factory(last_login_ip='192.168.1.1')
IPNetworkUserRestriction.objects.create(network='192.168.0.0/28')
assert IPNetworkUserRestriction.allow_request(request)
def test_blocked_ip4_24_subnet(self):
request = RequestFactory(REMOTE_ADDR='192.168.0.254').get('/')
request.user = user_factory(last_login_ip='192.168.1.1')
IPNetworkUserRestriction.objects.create(network='192.168.0.0/24')
assert not IPNetworkUserRestriction.allow_request(request)
def test_blocked_ip4_address(self):
request = RequestFactory(REMOTE_ADDR='192.168.0.1').get('/')
request.user = user_factory(last_login_ip='192.168.1.1')
IPNetworkUserRestriction.objects.create(network='192.168.0.0/28')
assert not IPNetworkUserRestriction.allow_request(request)
request = RequestFactory(REMOTE_ADDR='10.8.0.1').get('/')
request.user = user_factory(last_login_ip='192.168.1.1')
IPNetworkUserRestriction.objects.create(network='10.8.0.0/28')
assert not IPNetworkUserRestriction.allow_request(request)
def test_ip4_address_validated(self):
with pytest.raises(forms.ValidationError) as exc_info:
IPNetworkUserRestriction(network='127.0.0.1/1218').full_clean()
assert exc_info.value.messages[0] == (
"'127.0.0.1/1218' does not appear to be an IPv4 or IPv6 network")
def test_ip6_address_validated(self):
with pytest.raises(forms.ValidationError) as exc_info:
IPNetworkUserRestriction(network='::1/1218').full_clean()
assert exc_info.value.messages[0] == (
"'::1/1218' does not appear to be an IPv4 or IPv6 network")
def test_blocked_user_login_ip(self):
request = RequestFactory(REMOTE_ADDR='192.168.0.8').get('/')
request.user = user_factory(last_login_ip='192.168.1.1')
IPNetworkUserRestriction.objects.create(network='192.168.1.1/32')
assert not IPNetworkUserRestriction.allow_request(request)
class TestDisposableEmailDomainRestriction(TestCase):
def test_email_allowed(self):
DisposableEmailDomainRestriction.objects.create(domain='bar.com')
request = RequestFactory().get('/')
request.user = user_factory(email='bar@foo.com')
assert DisposableEmailDomainRestriction.allow_request(request)
def test_email_domain_blocked(self):
DisposableEmailDomainRestriction.objects.create(domain='bar.com')
request = RequestFactory().get('/')
request.user = user_factory(email='foo@bar.com')
assert not DisposableEmailDomainRestriction.allow_request(request)
def test_user_somehow_not_authenticated(self):
request = RequestFactory().get('/')
request.user = AnonymousUser()
assert not DisposableEmailDomainRestriction.allow_request(request)
class TestEmailUserRestriction(TestCase):
def test_str(self):
obj = EmailUserRestriction.objects.create(email_pattern='fôo@bar.com')
assert str(obj) == 'fôo@bar.com'
def test_email_allowed(self):
EmailUserRestriction.objects.create(email_pattern='foo@bar.com')
request = RequestFactory().get('/')
request.user = user_factory(email='bar@foo.com')
assert EmailUserRestriction.allow_request(request)
assert EmailUserRestriction.allow_email(request.user.email)
def test_blocked_email(self):
EmailUserRestriction.objects.create(email_pattern='foo@bar.com')
request = RequestFactory().get('/')
request.user = user_factory(email='foo@bar.com')
assert not EmailUserRestriction.allow_request(request)
assert not EmailUserRestriction.allow_email(request.user.email)
request.user.update(email='foo+something@bar.com')
assert not EmailUserRestriction.allow_request(request)
        # allow_email() still returns True here, because the normalization
        # (stripping dots and +suffixes) only happens in allow_request().
        assert EmailUserRestriction.allow_email(request.user.email)
request.user.update(email='f.oo+else@bar.com')
assert not EmailUserRestriction.allow_request(request)
        # Same as above: allow_email() receives the raw address, so it still
        # returns True; only allow_request() normalizes.
        assert EmailUserRestriction.allow_email(request.user.email)
request.user.update(email='foo.different+something@bar.com')
assert EmailUserRestriction.allow_request(request)
assert EmailUserRestriction.allow_email(request.user.email)
def test_user_somehow_not_authenticated(self):
EmailUserRestriction.objects.create(email_pattern='foo@bar.com')
request = RequestFactory().get('/')
request.user = AnonymousUser()
assert not EmailUserRestriction.allow_request(request)
def test_blocked_subdomain(self):
EmailUserRestriction.objects.create(email_pattern='*@faz.bar.com')
request = RequestFactory().get('/')
request.user = user_factory(email='foo@faz.bar.com')
assert not EmailUserRestriction.allow_request(request)
assert not EmailUserRestriction.allow_email(request.user.email)
request.user = user_factory(email='foo@raz.bar.com')
assert EmailUserRestriction.allow_request(request)
assert EmailUserRestriction.allow_email(request.user.email)
def test_blocked_subdomain_but_allow_parent(self):
EmailUserRestriction.objects.create(email_pattern='*.mail.com')
request = RequestFactory().get('/')
request.user = user_factory(email='foo@faz.mail.com')
assert not EmailUserRestriction.allow_request(request)
assert not EmailUserRestriction.allow_email(request.user.email)
# We only block a subdomain pattern
request.user = user_factory(email='foo@mail.com')
assert EmailUserRestriction.allow_request(request)
assert EmailUserRestriction.allow_email(request.user.email)
# Which also allows similar domains to work
request.user = user_factory(email='foo@gmail.com')
assert EmailUserRestriction.allow_request(request)
assert EmailUserRestriction.allow_email(request.user.email)
def test_normalize_email_pattern_on_save(self):
eur = EmailUserRestriction.objects.create(
email_pattern='u.s.e.r@example.com'
)
assert eur.email_pattern == 'user@example.com'
@override_settings(
REPUTATION_SERVICE_URL='https://reputation.example.com',
REPUTATION_SERVICE_TOKEN='fancy_token',
REPUTATION_SERVICE_TIMEOUT=1.0)
class TestIPReputationRestriction(TestCase):
expected_url = 'https://reputation.example.com/type/ip/192.168.0.1'
restriction_class = IPReputationRestriction
@override_settings(REPUTATION_SERVICE_URL=None)
def test_allowed_reputation_service_url_not_configured(self):
request = RequestFactory(REMOTE_ADDR='192.168.0.1').get('/')
request.user = UserProfile(email='foo@bar.com')
assert self.restriction_class.allow_request(request)
assert len(responses.calls) == 0
@override_settings(REPUTATION_SERVICE_TOKEN=None)
def test_allowed_reputation_service_token_not_configured(self):
request = RequestFactory(REMOTE_ADDR='192.168.0.1').get('/')
request.user = UserProfile(email='foo@bar.com')
assert self.restriction_class.allow_request(request)
assert len(responses.calls) == 0
@override_settings(REPUTATION_SERVICE_TIMEOUT=None)
def test_allowed_reputation_service_timeout_not_configured(self):
request = RequestFactory(REMOTE_ADDR='192.168.0.1').get('/')
request.user = UserProfile(email='foo@bar.com')
assert self.restriction_class.allow_request(request)
assert len(responses.calls) == 0
def test_allowed_response_not_200(self):
responses.add(responses.GET, self.expected_url, status=404)
request = RequestFactory(REMOTE_ADDR='192.168.0.1').get('/')
request.user = UserProfile(email='foo@bar.com')
assert self.restriction_class.allow_request(request)
assert len(responses.calls) == 1
http_call = responses.calls[0].request
assert http_call.headers['Authorization'] == 'APIKey fancy_token'
assert http_call.url == self.expected_url
def test_allowed_reputation_threshold(self):
responses.add(
responses.GET, self.expected_url,
content_type='application/json',
json={'reputation': 100})
request = RequestFactory(REMOTE_ADDR='192.168.0.1').get('/')
request.user = UserProfile(email='foo@bar.com')
assert self.restriction_class.allow_request(request)
assert len(responses.calls) == 1
http_call = responses.calls[0].request
assert http_call.headers['Authorization'] == 'APIKey fancy_token'
assert http_call.url == self.expected_url
def test_blocked_reputation_threshold(self):
responses.add(
responses.GET, self.expected_url,
content_type='application/json',
json={'reputation': 45})
request = RequestFactory(REMOTE_ADDR='192.168.0.1').get('/')
request.user = UserProfile(email='foo@bar.com')
assert not self.restriction_class.allow_request(request)
assert len(responses.calls) == 1
http_call = responses.calls[0].request
assert http_call.headers['Authorization'] == 'APIKey fancy_token'
assert http_call.url == self.expected_url
def test_allowed_valueerror(self):
responses.add(
responses.GET, self.expected_url,
content_type='application/json',
body='garbage')
request = RequestFactory(REMOTE_ADDR='192.168.0.1').get('/')
request.user = UserProfile(email='foo@bar.com')
assert self.restriction_class.allow_request(request)
assert len(responses.calls) == 1
http_call = responses.calls[0].request
assert http_call.headers['Authorization'] == 'APIKey fancy_token'
assert http_call.url == self.expected_url
def test_allowed_valueerror_but_valid_json(self):
responses.add(
responses.GET, self.expected_url,
content_type='application/json',
json={'reputation': 'garbage'})
request = RequestFactory(REMOTE_ADDR='192.168.0.1').get('/')
request.user = UserProfile(email='foo@bar.com')
assert self.restriction_class.allow_request(request)
assert len(responses.calls) == 1
http_call = responses.calls[0].request
assert http_call.headers['Authorization'] == 'APIKey fancy_token'
assert http_call.url == self.expected_url
def test_allowed_keyerror(self):
responses.add(
responses.GET, self.expected_url,
content_type='application/json',
json={'no_reputation_oh_noes': 'garbage'})
request = RequestFactory(REMOTE_ADDR='192.168.0.1').get('/')
request.user = UserProfile(email='foo@bar.com')
assert self.restriction_class.allow_request(request)
assert len(responses.calls) == 1
http_call = responses.calls[0].request
assert http_call.headers['Authorization'] == 'APIKey fancy_token'
assert http_call.url == self.expected_url
class TestEmailReputationRestriction(TestIPReputationRestriction):
expected_url = 'https://reputation.example.com/type/email/foo@bar.com'
restriction_class = EmailReputationRestriction
def test_blocked_reputation_threshold_email_variant(self):
responses.add(
responses.GET, self.expected_url,
content_type='application/json',
json={'reputation': 45})
request = RequestFactory(REMOTE_ADDR='192.168.0.1').get('/')
request.user = UserProfile(email='f.oo+something@bar.com')
# Still blocked as if it was foo@bar.com
assert not self.restriction_class.allow_request(request)
assert len(responses.calls) == 1
http_call = responses.calls[0].request
assert http_call.headers['Authorization'] == 'APIKey fancy_token'
assert http_call.url == self.expected_url
class TestUserEmailField(TestCase):
fixtures = ['base/user_2519']
def test_success(self):
user = UserProfile.objects.get(pk=2519)
assert UserEmailField(
queryset=UserProfile.objects.all()).clean(user.email) == user
def test_failure(self):
with pytest.raises(forms.ValidationError):
UserEmailField(
queryset=UserProfile.objects.all()).clean('xxx')
def test_empty_email(self):
UserProfile.objects.create(email='')
with pytest.raises(forms.ValidationError) as exc_info:
UserEmailField(
queryset=UserProfile.objects.all()).clean('')
assert exc_info.value.messages[0] == 'This field is required.'
class TestOnChangeName(TestCase):
def setUp(self):
super(TestOnChangeName, self).setUp()
# We're in a regular TestCase class so index_addons should have been
# mocked.
from olympia.addons.tasks import index_addons
self.index_addons_mock = index_addons
def test_changes_display_name_not_a_listed_author(self):
user = user_factory()
addon = addon_factory()
AddonUser.objects.create(user=user, addon=addon, listed=False)
self.index_addons_mock.reset_mock()
user.update(display_name=u'bâr')
assert self.index_addons_mock.delay.call_count == 0
def test_changes_display_name(self):
user = user_factory()
addon = addon_factory()
AddonUser.objects.create(user=user, addon=addon, listed=True)
self.index_addons_mock.reset_mock()
user.update(display_name=u'bâr')
assert self.index_addons_mock.delay.call_count == 1
assert self.index_addons_mock.delay.call_args[0] == ([addon.pk],)
def test_changes_username(self):
user = user_factory()
addon = addon_factory()
AddonUser.objects.create(user=user, addon=addon, listed=True)
self.index_addons_mock.reset_mock()
user.update(username=u'föo')
assert self.index_addons_mock.delay.call_count == 1
assert self.index_addons_mock.delay.call_args[0] == ([addon.pk],)
def test_changes_something_else(self):
user = user_factory()
addon = addon_factory()
AddonUser.objects.create(user=user, addon=addon, listed=True)
self.index_addons_mock.reset_mock()
user.update(last_login=self.days_ago(0))
assert self.index_addons_mock.delay.call_count == 0
def find_users(email):
"""
Given an email find all the possible users, by looking in
users and in their history.
"""
return UserProfile.objects.filter(
models.Q(email=email) | models.Q(history__email=email)).distinct()
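# Usage sketch (hypothetical address): find_users('foo@bar.com') returns users
# whose current email matches, as well as users who had that address at some
# point in their history.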
class TestUserHistory(TestCase):
def test_user_history(self):
user = UserProfile.objects.create(email='foo@bar.com')
assert user.history.count() == 0
user.update(email='foopy@barby.com')
assert user.history.count() == 1
user.update(email='foopy@barby.com')
assert user.history.count() == 1
def test_user_find(self):
user = UserProfile.objects.create(email='luke@jedi.com')
# Checks that you can have multiple copies of the same email and
# that we only get distinct results back.
user.update(email='dark@sith.com')
user.update(email='luke@jedi.com')
user.update(email='dark@sith.com')
assert [user] == list(find_users('luke@jedi.com'))
assert [user] == list(find_users('dark@sith.com'))
def test_user_find_multiple(self):
user_1 = UserProfile.objects.create(username='user_1',
email='luke@jedi.com')
user_1.update(email='dark@sith.com')
user_2 = UserProfile.objects.create(username='user_2',
email='luke@jedi.com')
assert [user_1, user_2] == list(find_users('luke@jedi.com'))
class TestUserManager(TestCase):
fixtures = ('users/test_backends', )
def test_create_user(self):
user = UserProfile.objects.create_user("test@test.com", 'xxx')
assert user.pk is not None
def test_create_superuser(self):
user = UserProfile.objects.create_superuser(
"test",
"test@test.com",
)
assert user.pk is not None
Group.objects.get(name="Admins") in user.groups.all()
assert not user.is_staff # Not a mozilla.com email...
assert user.is_superuser
@pytest.mark.django_db
def test_get_session_auth_hash_is_used_for_session_auth():
user = user_factory()
client = amo.tests.TestClient()
assert not client.session.items()
assert client.login(email=user.email)
assert client.session.items()
request = RequestFactory().get('/')
request.session = client.session
assert get_user(request) == user
user.update(auth_id=generate_auth_id())
assert get_user(request) != user
|
from django.conf import settings
from django.conf.urls import url, include
from django.contrib.auth.views import login, logout
from social.backends.google import GooglePlusAuth
from .views import *
context = dict(
plus_id=getattr(settings, 'SOCIAL_AUTH_GOOGLE_PLUS_KEY', None),
plus_scope=' '.join(GooglePlusAuth.DEFAULT_SCOPE),
)
urlpatterns = [
url(r'^$', IndexPageView.as_view(), name='index'),
url(r'^retrieve-original/', RetrieveOriginalPageView.as_view(), name='retrieve-original'),
url(r'^retrieve-urls/', RetrieveURLsPageView.as_view(), name='retrieve-urls'),
url(r'^retrieve-details/(?P<pk>[0-9]+)/', RetrieveURLDetailsPageView.as_view(), name='retrieve-details'),
url(r'^login/', login, {'template_name': 'shorturl/login.html', 'extra_context': context}, name='login'),
url(r'^logout/', logout, {'template_name': 'shorturl/logout.html'}, name='logout'),
url(r'^social/', include('social.apps.django_app.urls', namespace='social')),
url(r'^(?P<short_url>\w+)/', OriginalURLRedirect.as_view(), name='service'),
]
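# Illustrative reverse() lookups, assuming this module is the root URLconf
# (the pk/short_url values are made up):
#
#     reverse('retrieve-details', kwargs={'pk': 42})   # -> '/retrieve-details/42/'
#     reverse('service', kwargs={'short_url': 'abc'})  # -> '/abc/'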
|
from __future__ import division
try:
import torchTool
except ImportError:
print('pytorch not installed, related components are not available')
import math
import random
from PIL import Image, ImageOps
import PIL
try:
import accimage
except ImportError:
accimage = None
import numpyTool as np
import numbers
import types
import collections
class ToPILImage(object):
"""Convert a tensor to PIL Image.
Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
H x W x C to a PIL.Image while preserving the value range.
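    Example (a minimal sketch; assumes the ``torchTool`` import above
    resolves to PyTorch):
        >>> img = ToPILImage()(torchTool.rand(3, 8, 8))
        >>> img.mode, img.size
        ('RGB', (8, 8))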
"""
def __call__(self, pic):
"""
Args:
pic (Tensor or numpy.ndarray): Image to be converted to PIL.Image.
Returns:
PIL.Image: Image converted to PIL.Image.
"""
npimg = pic
mode = None
if isinstance(pic, torchTool.FloatTensor):
pic = pic.mul(255).byte()
if torchTool.is_tensor(pic):
npimg = np.transpose(pic.numpy(), (1, 2, 0))
assert isinstance(npimg, np.ndarray), 'pic should be Tensor or ndarray'
if npimg.shape[2] == 1:
npimg = npimg[:, :, 0]
if npimg.dtype == np.uint8:
mode = 'L'
if npimg.dtype == np.int16:
mode = 'I;16'
if npimg.dtype == np.int32:
mode = 'I'
elif npimg.dtype == np.float32:
mode = 'F'
else:
if npimg.dtype == np.uint8:
mode = 'RGB'
assert mode is not None, '{} is not supported'.format(npimg.dtype)
return Image.fromarray(npimg, mode=mode)
class TorchNormalize(object):
"""Normalize an tensor image with mean and standard deviation.
Given mean: (R, G, B) and std: (R, G, B),
will normalize each channel of the torch.*Tensor, i.e.
channel = (channel - mean) / std
Args:
        mean (sequence): Sequence of means for R, G, B channels respectively.
        std (sequence): Sequence of standard deviations for R, G, B channels
            respectively.
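    Example (sketch; the mean/std values are illustrative only):
        >>> norm = TorchNormalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
        >>> out = norm(torchTool.rand(3, 4, 4))  # channels mapped to [-1, 1)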
"""
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
Tensor: Normalized image.
"""
# TODO: make efficient
for t, m, s in zip(tensor, self.mean, self.std):
t.sub_(m).div_(s)
return tensor
class ToTensor(object):
"""Convert a ``PIL.Image`` or ``numpy.ndarray`` to tensor.
Converts a PIL.Image or numpy.ndarray (H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].
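    Example (sketch; assumes Pillow is available, as imported above):
        >>> t = ToTensor()(Image.new('RGB', (4, 2)))
        >>> tuple(t.size())
        (3, 2, 4)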
"""
def __call__(self, pic):
"""
Args:
pic (PIL.Image or numpy.ndarray): Image to be converted to tensor.
Returns:
Tensor: Converted image.
"""
if isinstance(pic, np.ndarray):
# handle numpy array
img = torchTool.from_numpy(pic.transpose((2, 0, 1)))
# backward compatibility
return img.float().div(255)
if accimage is not None and isinstance(pic, accimage.Image):
nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
pic.copyto(nppic)
return torchTool.from_numpy(nppic)
# handle PIL Image
if pic.mode == 'I':
img = torchTool.from_numpy(np.array(pic, np.int32, copy=False))
elif pic.mode == 'I;16':
img = torchTool.from_numpy(np.array(pic, np.int16, copy=False))
else:
img = torchTool.ByteTensor(torchTool.ByteStorage.from_buffer(pic.tobytes()))
# PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
if pic.mode == 'YCbCr':
nchannel = 3
elif pic.mode == 'I;16':
nchannel = 1
else:
nchannel = len(pic.mode)
img = img.view(pic.size[1], pic.size[0], nchannel)
# put it from HWC to CHW format
# yikes, this transpose takes 80% of the loading time/CPU
img = img.transpose(0, 1).transpose(0, 2).contiguous()
if isinstance(img, torchTool.ByteTensor):
return img.float().div(255)
else:
return img
|
import time
from datetime import datetime, timedelta
from django.db import connections
from django.db.models import Avg, F, Q, Sum
import multidb
import waffle
from celery import group
import olympia.core.logger
from olympia import amo
from olympia.addons.models import Addon, FrozenAddon
from olympia.addons.tasks import (
update_addon_average_daily_users as _update_addon_average_daily_users,
update_addon_download_totals as _update_addon_download_totals,
update_appsupport)
from olympia.amo.decorators import use_primary_db
from olympia.amo.utils import chunked
from olympia.files.models import File
from olympia.lib.es.utils import raise_if_reindex_in_progress
from olympia.stats.models import UpdateCount
log = olympia.core.logger.getLogger('z.cron')
task_log = olympia.core.logger.getLogger('z.task')
def update_addon_average_daily_users():
"""Update add-ons ADU totals."""
if not waffle.switch_is_active('local-statistics-processing'):
return False
raise_if_reindex_in_progress('amo')
cursor = connections[multidb.get_replica()].cursor()
q = """SELECT addon_id, AVG(`count`)
FROM update_counts
WHERE `date` > DATE_SUB(CURDATE(), INTERVAL 13 DAY)
GROUP BY addon_id
ORDER BY addon_id"""
cursor.execute(q)
d = cursor.fetchall()
cursor.close()
ts = [_update_addon_average_daily_users.subtask(args=[chunk])
for chunk in chunked(d, 250)]
group(ts).apply_async()
def update_addon_download_totals():
"""Update add-on total and average downloads."""
if not waffle.switch_is_active('local-statistics-processing'):
return False
qs = (
Addon.objects
.annotate(sum_download_count=Sum('downloadcount__count'))
.values_list('id', 'sum_download_count')
.order_by('id')
)
ts = [_update_addon_download_totals.subtask(args=[chunk])
for chunk in chunked(qs, 250)]
group(ts).apply_async()
def _change_last_updated(next):
# We jump through some hoops here to make sure we only change the add-ons
# that really need it, and to invalidate properly.
current = dict(Addon.objects.values_list('id', 'last_updated'))
changes = {}
for addon, last_updated in next.items():
try:
if current[addon] != last_updated:
changes[addon] = last_updated
except KeyError:
pass
if not changes:
return
log.debug('Updating %s add-ons' % len(changes))
# Update + invalidate.
qs = Addon.objects.filter(id__in=changes).no_transforms()
for addon in qs:
addon.update(last_updated=changes[addon.id])
@use_primary_db
def addon_last_updated():
next = {}
for q in Addon._last_updated_queries().values():
for addon, last_updated in q.values_list('id', 'last_updated'):
next[addon] = last_updated
_change_last_updated(next)
# Get anything that didn't match above.
other = (Addon.objects.filter(last_updated__isnull=True)
.values_list('id', 'created'))
_change_last_updated(dict(other))
def update_addon_appsupport():
# Find all the add-ons that need their app support details updated.
newish = (Q(last_updated__gte=F('appsupport__created')) |
Q(appsupport__created__isnull=True))
# Search providers don't list supported apps.
has_app = Q(versions__apps__isnull=False) | Q(type=amo.ADDON_SEARCH)
has_file = Q(versions__files__status__in=amo.VALID_FILE_STATUSES)
good = Q(has_app, has_file)
ids = (Addon.objects.valid().distinct()
.filter(newish, good).values_list('id', flat=True))
task_log.info('Updating appsupport for %d new-ish addons.' % len(ids))
ts = [update_appsupport.subtask(args=[chunk])
for chunk in chunked(ids, 20)]
group(ts).apply_async()
def hide_disabled_files():
"""
Move files (on filesystem) belonging to disabled files (in database) to the
    correct place if necessary, so that they are not publicly accessible
any more.
See also unhide_disabled_files().
"""
ids = (File.objects.filter(
Q(version__addon__status=amo.STATUS_DISABLED) |
Q(version__addon__disabled_by_user=True) |
Q(status=amo.STATUS_DISABLED)).values_list('id', flat=True))
for chunk in chunked(ids, 300):
qs = File.objects.select_related('version').filter(id__in=chunk)
for file_ in qs:
# This tries to move the file to the disabled location. If it
# didn't exist at the source, it will catch the exception, log it
# and continue.
file_.hide_disabled_file()
def unhide_disabled_files():
"""
Move files (on filesystem) belonging to public files (in database) to the
    correct place if necessary, so that they are publicly accessible.
See also hide_disabled_files().
"""
ids = (File.objects.exclude(
Q(version__addon__status=amo.STATUS_DISABLED) |
Q(version__addon__disabled_by_user=True) |
Q(status=amo.STATUS_DISABLED)).values_list('id', flat=True))
for chunk in chunked(ids, 300):
qs = File.objects.select_related('version').filter(id__in=chunk)
for file_ in qs:
# This tries to move the file to the public location. If it
# didn't exist at the source, it will catch the exception, log it
# and continue.
file_.unhide_disabled_file()
def deliver_hotness():
"""
Calculate hotness of all add-ons.
a = avg(users this week)
b = avg(users three weeks before this week)
hotness = (a-b) / b if a > 1000 and b > 1 else 0
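    E.g. (made-up numbers): a = 3000 and b = 1500 gives hotness = 1.0; with
    a = 900 the a > 1000 condition fails and the score is reset to 0.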
"""
frozen = set(f.id for f in FrozenAddon.objects.all())
all_ids = list((Addon.objects.filter(status__in=amo.REVIEWED_STATUSES)
.values_list('id', flat=True)))
now = datetime.now()
one_week = now - timedelta(days=7)
four_weeks = now - timedelta(days=28)
for ids in chunked(all_ids, 300):
addons = Addon.objects.filter(id__in=ids).no_transforms()
ids = [a.id for a in addons if a.id not in frozen]
qs = (UpdateCount.objects.filter(addon__in=ids)
.values_list('addon').annotate(Avg('count')))
thisweek = dict(qs.filter(date__gte=one_week))
threeweek = dict(qs.filter(date__range=(four_weeks, one_week)))
for addon in addons:
this, three = thisweek.get(addon.id, 0), threeweek.get(addon.id, 0)
            # Only write the hotness score when it actually changed, to avoid
            # triggering unnecessary re-indexes.
            if this > 1000 and three > 1:
                hotness = (this - three) / float(three)
                if addon.hotness != hotness:
                    addon.update(hotness=hotness)
else:
if addon.hotness != 0:
addon.update(hotness=0)
# Let the database catch its breath.
time.sleep(10)
|
"""
.. module:: burp1
:platform: Unix
:synopsis: Burp-UI burp1 backend module.
.. moduleauthor:: Ziirish <ziirish@ziirish.info>
"""
import re
import os
import socket
import time
import json
import datetime
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
import shutil
import subprocess
import tempfile
import codecs
from pipes import quote
from urllib import unquote
from burpui.misc.utils import human_readable as _hr, BUIlogging, BUIcompress
from burpui.misc.backend.interface import BUIbackend, BUIserverException
from burpui.misc.parser.burp1 import Parser
g_burpport = '4972'
g_burphost = '::1'
g_burpbin = u'/usr/sbin/burp'
g_stripbin = u'/usr/sbin/vss_strip'
g_burpconfcli = None
g_burpconfsrv = u'/etc/burp/burp-server.conf'
g_tmpdir = u'/tmp/bui'
class Burp(BUIbackend, BUIlogging):
"""
The :class:`burpui.misc.backend.Burp1` class provides a consistent
backend for ``burp-1`` servers.
"""
states = {
'i': 'idle',
'r': 'running',
'c': 'client crashed',
'C': 'server crashed',
'1': 'scanning',
'2': 'backup',
'3': 'merging',
'4': 'shuffling',
'7': 'listing',
'8': 'restoring',
'9': 'verifying',
'0': 'deleting'
}
counters = [
'phase',
'Total',
'Files',
'Files (encrypted)',
'Metadata',
'Metadata (enc)',
'Directories',
'Softlink',
'Hardlink',
'Special files',
'VSS header',
'VSS header (enc)',
'VSS footer',
'VSS footer (enc)',
'Grand total',
'warning',
'estimated_bytes',
'bytes',
'bytes_in',
'bytes_out',
'start',
'path'
]
def __init__(self, server=None, conf=None, dummy=False):
"""
The :class:`burpui.misc.backend.Burp1` class provides a consistent
backend for ``burp-1`` servers.
:param server: ``Burp-UI`` server instance in order to access logger and/or some global settings
:type server: :class:`burpui.server.BUIServer`
:param conf: Configuration file to use
:type conf: str
        :param dummy: Does not instantiate the object (used for development purposes)
:type dummy: boolean
"""
global g_burpport, g_burphost, g_burpbin, g_stripbin, g_burpconfcli, g_burpconfsrv, g_tmpdir
if dummy:
return
self.app = None
self.logger = None
self.acl_handler = False
if server:
if hasattr(server, 'app'):
self.app = server.app
self.set_logger(self.app.logger)
self.acl_handler = server.acl_handler
self.host = g_burphost
self.port = int(g_burpport)
self.burpbin = g_burpbin
self.stripbin = g_stripbin
self.burpconfcli = g_burpconfcli
self.burpconfsrv = g_burpconfsrv
self.tmpdir = g_tmpdir
self.running = []
self.defaults = {'bport': g_burpport, 'bhost': g_burphost, 'burpbin': g_burpbin, 'stripbin': g_stripbin, 'bconfcli': g_burpconfcli, 'bconfsrv': g_burpconfsrv, 'tmpdir': g_tmpdir}
if conf:
config = ConfigParser.ConfigParser(self.defaults)
with codecs.open(conf, 'r', 'utf-8') as fp:
config.readfp(fp)
self.port = self._safe_config_get(config.getint, 'bport', cast=int)
self.host = self._safe_config_get(config.get, 'bhost')
bbin = self._safe_config_get(config.get, 'burpbin')
strip = self._safe_config_get(config.get, 'stripbin')
confcli = self._safe_config_get(config.get, 'bconfcli')
confsrv = self._safe_config_get(config.get, 'bconfsrv')
tmpdir = self._safe_config_get(config.get, 'tmpdir')
if tmpdir and os.path.exists(tmpdir) and not os.path.isdir(tmpdir):
self._logger('warning', "'%s' is not a directory", tmpdir)
tmpdir = g_tmpdir
if confcli and not os.path.isfile(confcli):
self._logger('warning', "The file '%s' does not exist", confcli)
confcli = None
if confsrv and not os.path.isfile(confsrv):
self._logger('warning', "The file '%s' does not exist", confsrv)
confsrv = None
if self.host not in ['127.0.0.1', '::1']:
self._logger('warning', "Invalid value for 'bhost'. Must be '127.0.0.1' or '::1'. Falling back to '%s'", g_burphost)
self.host = g_burphost
if strip and not strip.startswith('/'):
self._logger('warning', "Please provide an absolute path for the 'stripbin' option. Fallback to '%s'", g_stripbin)
strip = g_stripbin
elif strip and not re.match('^\S+$', strip):
self._logger('warning', "Incorrect value for the 'stripbin' option. Fallback to '%s'", g_stripbin)
strip = g_stripbin
elif strip and (not os.path.isfile(strip) or not os.access(strip, os.X_OK)):
self._logger('warning', "'%s' does not exist or is not executable. Fallback to '%s'", strip, g_stripbin)
strip = g_stripbin
if strip and (not os.path.isfile(strip) or not os.access(strip, os.X_OK)):
self._logger('error', "Ooops, '%s' not found or is not executable", strip)
strip = None
if bbin and not bbin.startswith('/'):
self._logger('warning', "Please provide an absolute path for the 'burpbin' option. Fallback to '%s'", g_burpbin)
bbin = g_burpbin
elif bbin and not re.match('^\S+$', bbin):
self._logger('warning', "Incorrect value for the 'burpbin' option. Fallback to '%s'", g_burpbin)
bbin = g_burpbin
elif bbin and (not os.path.isfile(bbin) or not os.access(bbin, os.X_OK)):
self._logger('warning', "'%s' does not exist or is not executable. Fallback to '%s'", bbin, g_burpbin)
bbin = g_burpbin
if bbin and (not os.path.isfile(bbin) or not os.access(bbin, os.X_OK)):
self._logger('error', "Ooops, '%s' not found or is not executable", bbin)
bbin = None
self.burpbin = bbin
self.stripbin = strip
self.burpconfcli = confcli
self.burpconfsrv = confsrv
self.tmpdir = tmpdir
self.parser = Parser(self.app, self.burpconfsrv)
self.family = self._get_inet_family(self.host)
self._test_burp_server_address(self.host)
self._logger('info', 'burp port: %d', self.port)
self._logger('info', 'burp host: %s', self.host)
self._logger('info', 'burp binary: %s', self.burpbin)
self._logger('info', 'strip binary: %s', self.stripbin)
self._logger('info', 'burp conf cli: %s', self.burpconfcli)
self._logger('info', 'burp conf srv: %s', self.burpconfsrv)
self._logger('info', 'tmpdir: %s', self.tmpdir)
"""
Utilities functions
"""
def _safe_config_get(self, callback, key, sect='Burp1', cast=None):
"""
:func:`burpui.misc.backend.burp1._safe_config_get` is a wrapper to handle
        exceptions raised by :mod:`ConfigParser`.
:param callback: Function to wrap
:type callback: callable
:param key: Key to retrieve
:type key: str
:param sect: Section of the config file to read
:type sect: str
:param cast: Cast the returned value if provided
        :type cast: callable
:returns: The value returned by the `callback`
"""
try:
return callback(sect, key)
except ConfigParser.NoOptionError as e:
self._logger('error', str(e))
except ConfigParser.NoSectionError as e:
self._logger('warning', str(e))
if key in self.defaults:
if cast:
return cast(self.defaults[key])
return self.defaults[key]
return None
def _get_inet_family(self, addr):
"""
The :func:`burpui.misc.backend.Burp1._get_inet_family` function
determines the inet family of a given address.
:param addr: Address to look at
:type addr: str
        :returns: Inet family of the given address: :const:`socket.AF_INET` or :const:`socket.AF_INET6`
"""
if addr == '127.0.0.1':
return socket.AF_INET
else:
return socket.AF_INET6
def _test_burp_server_address(self, addr, retry=False):
"""
The :func:`burpui.misc.backend.Burp1._test_burp_server_address` function
determines if the given address is reachable or not.
:param addr: Address to look at
:type addr: str
:param retry: Flag to stop trying because this function is recursive
:type retry: bool
        :returns: True if a valid address could be found, False otherwise
"""
family = self._get_inet_family(addr)
try:
s = socket.socket(family, socket.SOCK_STREAM)
s.connect((addr, self.port))
s.close()
return True
except socket.error:
self._logger('warning', 'Cannot contact burp server at %s:%s', addr, self.port)
if not retry:
new_addr = ''
if self.host == '127.0.0.1':
new_addr = '::1'
else:
new_addr = '127.0.0.1'
self._logger('info', 'Trying %s:%s instead', new_addr, self.port)
if self._test_burp_server_address(new_addr, True):
self._logger('info', '%s:%s is reachable, switching to it for this runtime', new_addr, self.port)
self.host = new_addr
self.family = self._get_inet_family(new_addr)
return True
self._logger('error', 'Cannot guess burp server address')
return False
def status(self, query='\n', agent=None):
"""
Send queries to the Burp server
:param query: Query to send to the server
:type query: str
:param agent: What server to ask (only in multi-agent mode)
:type agent: str
:returns: The output returned by the server parsed as an array
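        Example (sketch; ``myclient`` is a hypothetical client name)::

            self.status('c:myclient\n')  # one list entry per line of the reply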
"""
r = []
try:
if not query.endswith('\n'):
q = '{0}\n'.format(query)
else:
q = query
s = socket.socket(self.family, socket.SOCK_STREAM)
s.connect((self.host, self.port))
s.send(q)
s.shutdown(socket.SHUT_WR)
f = s.makefile()
s.close()
for l in f.readlines():
line = l.rstrip('\n')
if not line:
continue
ap = ''
try:
ap = line.decode('utf-8', 'replace')
except UnicodeDecodeError:
ap = line
r.append(ap)
f.close()
return r
except socket.error:
self._logger('error', 'Cannot contact burp server at %s:%s', self.host, self.port)
raise BUIserverException('Cannot contact burp server at {0}:{1}'.format(self.host, self.port))
def get_backup_logs(self, number, client, forward=False, agent=None):
"""
The :func:`burpui.misc.backend.Burp1.get_backup_logs` function is used
to retrieve the burp logs based on the :func:`_parse_backup_stats` or
        :func:`_parse_backup_log` functions, depending on the burp-server version.
:param number: Backup number to work on
:type number: int
:param client: Client name to work on
:type client: str
:param forward: Is the client name needed in later process
:type forward: bool
:param agent: What server to ask (only in multi-agent mode)
:type agent: str
:returns: Dict containing the backup log
"""
if not client or not number:
return {}
f = self.status('c:{0}:b:{1}\n'.format(client, number))
found = False
ret = {}
for line in f:
if line == 'backup_stats':
found = True
break
if not found:
cl = None
if forward:
cl = client
f = self.status('c:{0}:b:{1}:f:log.gz\n'.format(client, number))
ret = self._parse_backup_log(f, number, cl)
else:
ret = self._parse_backup_stats(number, client, forward)
ret['encrypted'] = False
if 'files_enc' in ret and ret['files_enc']['total'] > 0:
ret['encrypted'] = True
return ret
def _parse_backup_stats(self, number, client, forward=False, agent=None):
"""
The :func:`burpui.misc.backend.Burp1._parse_backup_stats` function is
used to parse the burp logs.
:param number: Backup number to work on
:type number: int
:param client: Client name to work on
:type client: str
:param forward: Is the client name needed in later process
:type forward: bool
:param agent: What server to ask (only in multi-agent mode)
:type agent: str
:returns: Dict containing the backup log
"""
backup = {'windows': 'unknown', 'number': int(number)}
if forward:
backup['name'] = client
keys = {
'time_start': 'start',
'time_end': 'end',
'time_taken': 'duration',
'bytes_in_backup': 'totsize',
'bytes_received': 'received',
'files': ['files', 'new'],
'files_changed': ['files', 'changed'],
'files_same': ['files', 'unchanged'],
'files_deleted': ['files', 'deleted'],
'files_scanned': ['files', 'scanned'],
'files_total': ['files', 'total'],
'files_encrypted': ['files_enc', 'new'],
'files_encrypted_changed': ['files_enc', 'changed'],
'files_encrypted_same': ['files_enc', 'unchanged'],
'files_encrypted_deleted': ['files_enc', 'deleted'],
'files_encrypted_scanned': ['files_enc', 'scanned'],
'files_encrypted_total': ['files_enc', 'total'],
'directories': ['dir', 'new'],
'directories_changed': ['dir', 'changed'],
'directories_same': ['dir', 'unchanged'],
'directories_deleted': ['dir', 'deleted'],
'directories_scanned': ['dir', 'scanned'],
'directories_total': ['dir', 'total'],
'soft_links': ['softlink', 'new'],
'soft_links_changed': ['softlink', 'changed'],
'soft_links_same': ['softlink', 'unchanged'],
'soft_links_deleted': ['softlink', 'deleted'],
'soft_links_scanned': ['softlink', 'scanned'],
'soft_links_total': ['softlink', 'total'],
'hard_links': ['hardlink', 'new'],
'hard_links_changed': ['hardlink', 'changed'],
'hard_links_same': ['hardlink', 'unchanged'],
'hard_links_deleted': ['hardlink', 'deleted'],
'hard_links_scanned': ['hardlink', 'scanned'],
'hard_links_total': ['hardlink', 'total'],
'meta_data': ['meta', 'new'],
'meta_data_changed': ['meta', 'changed'],
'meta_data_same': ['meta', 'unchanged'],
'meta_data_deleted': ['meta', 'deleted'],
'meta_data_scanned': ['meta', 'scanned'],
'meta_data_total': ['meta', 'total'],
'meta_data_encrypted': ['meta_enc', 'new'],
'meta_data_encrypted_changed': ['meta_enc', 'changed'],
'meta_data_encrypted_same': ['meta_enc', 'unchanged'],
'meta_data_encrypted_deleted': ['meta_enc', 'deleted'],
'meta_data_encrypted_scanned': ['meta_enc', 'scanned'],
'meta_data_encrypted_total': ['meta_enc', 'total'],
'special_files': ['special', 'new'],
'special_files_changed': ['special', 'changed'],
'special_files_same': ['special', 'unchanged'],
'special_files_deleted': ['special', 'deleted'],
'special_files_scanned': ['special', 'scanned'],
'special_files_total': ['special', 'total'],
'efs_files': ['efs', 'new'],
'efs_files_changed': ['efs', 'changed'],
'efs_files_same': ['efs', 'unchanged'],
'efs_files_deleted': ['efs', 'deleted'],
'efs_files_scanned': ['efs', 'scanned'],
'efs_files_total': ['efs', 'total'],
'vss_headers': ['vssheader', 'new'],
'vss_headers_changed': ['vssheader', 'changed'],
'vss_headers_same': ['vssheader', 'unchanged'],
'vss_headers_deleted': ['vssheader', 'deleted'],
'vss_headers_scanned': ['vssheader', 'scanned'],
'vss_headers_total': ['vssheader', 'total'],
'vss_headers_encrypted': ['vssheader_enc', 'new'],
'vss_headers_encrypted_changed': ['vssheader_enc', 'changed'],
'vss_headers_encrypted_same': ['vssheader_enc', 'unchanged'],
'vss_headers_encrypted_deleted': ['vssheader_enc', 'deleted'],
'vss_headers_encrypted_scanned': ['vssheader_enc', 'scanned'],
'vss_headers_encrypted_total': ['vssheader_enc', 'total'],
'vss_footers': ['vssfooter', 'new'],
'vss_footers_changed': ['vssfooter', 'changed'],
'vss_footers_same': ['vssfooter', 'unchanged'],
'vss_footers_deleted': ['vssfooter', 'deleted'],
'vss_footers_scanned': ['vssfooter', 'scanned'],
'vss_footers_total': ['vssfooter', 'total'],
'vss_footers_encrypted': ['vssfooter_enc', 'new'],
'vss_footers_encrypted_changed': ['vssfooter_enc', 'changed'],
'vss_footers_encrypted_same': ['vssfooter_enc', 'unchanged'],
'vss_footers_encrypted_deleted': ['vssfooter_enc', 'deleted'],
'vss_footers_encrypted_scanned': ['vssfooter_enc', 'scanned'],
'vss_footers_encrypted_total': ['vssfooter_enc', 'total'],
'total': ['total', 'new'],
'total_changed': ['total', 'changed'],
'total_same': ['total', 'unchanged'],
'total_deleted': ['total', 'deleted'],
'total_scanned': ['total', 'scanned'],
'total_total': ['total', 'total']
}
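        # Illustrative example: a raw 'files_changed:12' line maps through
        # keys['files_changed'] == ['files', 'changed'] and ends up as
        # backup['files']['changed'] = 12 in the loop below.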
f = self.status('c:{0}:b:{1}:f:backup_stats\n'.format(client, number), agent=agent)
for line in f:
if line == '-list begin-' or line == '-list end-':
continue
(key, val) = line.split(':')
if backup['windows'] == 'unknown' and key == 'client_is_windows':
if val == '1':
backup['windows'] = 'true'
else:
backup['windows'] = 'false'
continue
if key not in keys:
continue
rk = keys[key]
if isinstance(rk, list):
if not rk[0] in backup:
backup[rk[0]] = {}
backup[rk[0]][rk[1]] = int(val)
else:
backup[rk] = int(val)
return backup
def _parse_backup_log(self, fh, number, client=None, agent=None):
"""
The :func:`burpui.misc.backend.Burp1._parse_backup_log` function is
used to parse the log.gz of a given backup and returns a dict
containing different stats used to render the charts in the reporting
view.
:param fh: List representing the content of the log file
:type fh: list
:param number: Backup number to work on
:type number: int
:param client: Client name to work on
:type client: str
:param agent: What server to ask (only in multi-agent mode)
:type agent: str
:returns: Dict containing the backup log
"""
lookup_easy = {
'start': '^Start time: (.+)$',
'end': '^\s*End time: (.+)$',
'duration': '^Time taken: (.+)$',
'totsize': '^\s*Bytes in backup:\s+(\d+)',
'received': '^\s*Bytes received:\s+(\d+)'
}
lookup_complex = {
'files': '^\s*Files:?\s+(.+)\s+\|\s+(\d+)$',
'files_enc': '^\s*Files \(encrypted\):?\s+(.+)\s+\|\s+(\d+)$',
'dir': '^\s*Directories:?\s+(.+)\s+\|\s+(\d+)$',
'softlink': '^\s*Soft links:?\s+(.+)\s+\|\s+(\d+)$',
'hardlink': '^\s*Hard links:?\s+(.+)\s+\|\s+(\d+)$',
'meta': '^\s*Meta data:?\s+(.+)\s+\|\s+(\d+)$',
'meta_enc': '^\s*Meta data\(enc\):?\s+(.+)\s+\|\s+(\d+)$',
'special': '^\s*Special files:?\s+(.+)\s+\|\s+(\d+)$',
'efs': '^\s*EFS files:?\s+(.+)\s+\|\s+(\d+)$',
'vssheader': '^\s*VSS headers:?\s+(.+)\s+\|\s+(\d+)$',
'vssheader_enc': '^\s*VSS headers \(enc\):?\s+(.+)\s+\|\s+(\d+)$',
'vssfooter': '^\s*VSS footers:?\s+(.+)\s+\|\s+(\d+)$',
'vssfooter_enc': '^\s*VSS footers \(enc\):?\s+(.+)\s+\|\s+(\d+)$',
'total': '^\s*Grand total:?\s+(.+)\s+\|\s+(\d+)$'
}
backup = {'windows': 'false', 'number': int(number)}
if client is not None:
backup['name'] = client
useful = False
for line in fh:
if re.match('^\d{4}-\d{2}-\d{2} (\d{2}:){3} \w+\[\d+\] Client is Windows$', line):
backup['windows'] = 'true'
elif not useful and not re.match('^-+$', line):
continue
elif useful and re.match('^-+$', line):
useful = False
continue
elif re.match('^-+$', line):
useful = True
continue
found = False
# this method is not optimal, but it is easy to read and to maintain
for key, regex in lookup_easy.iteritems():
r = re.search(regex, line)
if r:
found = True
if key in ['start', 'end']:
backup[key] = int(time.mktime(datetime.datetime.strptime(r.group(1), '%Y-%m-%d %H:%M:%S').timetuple()))
elif key == 'duration':
tmp = r.group(1).split(':')
tmp.reverse()
i = 0
fields = [0] * 4
for v in tmp:
fields[i] = int(v)
i += 1
seconds = 0
seconds += fields[0]
seconds += fields[1] * 60
seconds += fields[2] * (60 * 60)
seconds += fields[3] * (60 * 60 * 24)
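                        # Worked example (illustrative): '1:02:03' yields
                        # fields == [3, 2, 1, 0], so seconds == 3 + 2*60 +
                        # 1*3600 == 3723.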
backup[key] = seconds
else:
backup[key] = int(r.group(1))
# break the loop as soon as we find a match
break
# if found is True, we already parsed the line so we can jump to the next one
if found:
continue
for key, regex in lookup_complex.iteritems():
r = re.search(regex, line)
if r:
# self._logger('debug', "match[1]: '{0}'".format(r.group(1)))
sp = re.split('\s+', r.group(1))
if len(sp) < 5:
return {}
backup[key] = {
'new': int(sp[0]),
'changed': int(sp[1]),
'unchanged': int(sp[2]),
'deleted': int(sp[3]),
'total': int(sp[4]),
'scanned': int(r.group(2))
}
break
return backup
def get_clients_report(self, clients, agent=None):
"""
        get_clients_report returns the computed/compacted data used to render
        the clients report.
        It returns a list containing a single dict with two keys: 'clients'
        and 'backups'.
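        Example shape of the returned value (all field values illustrative)::

            [{'clients': [{'name': 'c1', 'stats': {'windows': 'false',
                                                   'totsize': 1024,
                                                   'total': 42}}],
              'backups': [{'name': 'c1', 'number': 2}]}]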
"""
ret = []
cl = []
ba = []
for c in clients:
client = self.get_client(c['name'])
if not client:
continue
stats = self.get_backup_logs(client[-1]['number'], c['name'])
cl.append({'name': c['name'], 'stats': {'windows': stats['windows'], 'totsize': stats['totsize'], 'total': stats['total']['total']}})
ba.append({'name': c['name'], 'number': len(client)})
ret.append({'clients': cl, 'backups': ba})
return ret
def get_counters(self, name=None, agent=None):
"""
get_counters parses the stats of the live status for a given client and
returns a dict
"""
r = {}
if agent:
if not name or name not in self.running[agent]:
return r
else:
if not name or name not in self.running:
return r
f = self.status('c:{0}\n'.format(name))
if not f:
return r
for line in f:
# self._logger('debug', 'line: {0}'.format(line))
rs = re.search('^{0}\s+(\d)\s+(\S)\s+(.+)$'.format(name), line)
if rs and rs.group(2) == 'r' and int(rs.group(1)) == 2:
c = 0
for v in rs.group(3).split('\t'):
# self._logger('debug', '{0}: {1}'.format(self.counters[c], v))
if c > 0 and c < 15:
val = map(int, v.split('/'))
                        if val[0] > 0 or val[1] > 0 or val[2] > 0 or val[3] > 0:
r[self.counters[c]] = val
else:
if 'path' == self.counters[c]:
r[self.counters[c]] = v
else:
try:
r[self.counters[c]] = int(v)
except ValueError:
continue
c += 1
if r.viewkeys() & {'start', 'estimated_bytes', 'bytes_in'}:
try:
diff = time.time() - int(r['start'])
byteswant = int(r['estimated_bytes'])
bytesgot = int(r['bytes_in'])
bytespersec = bytesgot / diff
bytesleft = byteswant - bytesgot
r['speed'] = bytespersec
if (bytespersec > 0):
timeleft = int(bytesleft / bytespersec)
r['timeleft'] = timeleft
else:
r['timeleft'] = -1
            except Exception:
r['timeleft'] = -1
return r
def is_backup_running(self, name=None, agent=None):
"""
is_backup_running returns True if the given client is currently running a
backup
"""
if not name:
return False
try:
f = self.status('c:{0}\n'.format(name))
except BUIserverException:
return False
for line in f:
r = re.search('^{0}\s+\d\s+(\w)'.format(name), line)
if r and r.group(1) not in ['i', 'c', 'C']:
return True
return False
def is_one_backup_running(self, agent=None):
"""
        is_one_backup_running returns a list of client names that are currently
        running a backup
"""
r = []
try:
cls = self.get_all_clients()
except BUIserverException:
return r
for c in cls:
if self.is_backup_running(c['name']):
r.append(c['name'])
self.running = r
return r
def get_all_clients(self, agent=None):
"""
        get_all_clients returns a list of dicts representing each client with
        its name, state and last backup date
"""
j = []
f = self.status()
for line in f:
regex = re.compile('\s*(\S+)\s+\d\s+(\S)\s+(.+)')
m = regex.search(line)
c = {}
c['name'] = m.group(1)
c['state'] = self.states[m.group(2)]
infos = m.group(3)
if c['state'] in ['running']:
c['last'] = 'now'
elif infos == "0":
c['last'] = 'never'
elif re.match('^\d+\s\d+\s\d+$', infos):
sp = infos.split()
c['last'] = datetime.datetime.fromtimestamp(int(sp[2])).strftime('%Y-%m-%d %H:%M:%S')
else:
sp = infos.split('\t')
c['last'] = datetime.datetime.fromtimestamp(int(sp[len(sp) - 2])).strftime('%Y-%m-%d %H:%M:%S')
j.append(c)
return j
def get_client(self, name=None, agent=None):
"""
        get_client returns a list of dicts representing the backups (with their
        number and date) of a given client
"""
r = []
if not name:
return r
c = name
f = self.status('c:{0}\n'.format(c))
for line in f:
if not re.match('^{0}\t'.format(c), line):
continue
# self._logger('debug', "line: '{0}'".format(line))
regex = re.compile('\s*(\S+)\s+\d\s+(\S)\s+(.+)')
m = regex.search(line)
if m.group(3) == "0" or m.group(2) not in ['i', 'c', 'C']:
continue
backups = m.group(3).split('\t')
for b in backups:
ba = {}
sp = b.split()
ba['number'] = sp[0]
ba['deletable'] = (sp[1] == '1')
ba['date'] = datetime.datetime.fromtimestamp(int(sp[2])).strftime('%Y-%m-%d %H:%M:%S')
log = self.get_backup_logs(sp[0], name)
ba['encrypted'] = log['encrypted']
r.append(ba)
# Here we need to reverse the array so the backups are sorted by date ASC
r.reverse()
return r
def get_tree(self, name=None, backup=None, root=None, agent=None):
"""
        get_tree returns a list of dicts representing the files/directories
        (with their attributes) within a given path
"""
r = []
if not name or not backup:
return r
if not root:
top = ''
else:
try:
top = root.decode('utf-8', 'replace')
except UnicodeDecodeError:
top = root
f = self.status('c:{0}:b:{1}:p:{2}\n'.format(name, backup, top))
useful = False
for line in f:
if not useful and re.match('^-list begin-$', line):
useful = True
continue
if useful and re.match('^-list end-$', line):
useful = False
continue
if useful:
t = {}
m = re.search('^(.{10})\s', line)
if m:
if re.match('^(d|l)', m.group(1)):
t['type'] = 'd'
else:
t['type'] = 'f'
sp = re.split('\s+', line, 7)
t['mode'] = sp[0]
t['inodes'] = sp[1]
t['uid'] = sp[2]
t['gid'] = sp[3]
                    t['size'] = _hr(int(sp[4]))
t['date'] = '{0} {1}'.format(sp[5], sp[6])
t['name'] = sp[7]
t['parent'] = top
r.append(t)
return r
def restore_files(self, name=None, backup=None, files=None, strip=None, archive='zip', password=None, agent=None):
if not name or not backup or not files:
return None, 'At least one argument is missing'
if not self.stripbin:
return None, 'Missing \'strip\' binary'
if not self.burpbin:
return None, 'Missing \'burp\' binary'
flist = json.loads(files)
if password:
fh, tmpfile = tempfile.mkstemp()
tmpdir = tempfile.mkdtemp(prefix=self.tmpdir)
if 'restore' not in flist:
return None, 'Wrong call'
if os.path.isdir(tmpdir):
shutil.rmtree(tmpdir)
full_reg = u''
for r in flist['restore']:
reg = u''
if r['folder'] and r['key'] != '/':
reg += '^' + re.escape(r['key']) + '/|'
else:
reg += '^' + re.escape(r['key']) + '$|'
full_reg += reg
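        # Illustrative result: selecting the folder '/etc' plus the single
        # file '/home/me/a.txt' builds (modulo the backslashes added by
        # re.escape) the regex: ^/etc/|^/home/me/a\.txt$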
cmd = [self.burpbin, '-C', quote(name), '-a', 'r', '-b', quote(str(backup)), '-r', full_reg.rstrip('|'), '-d', tmpdir]
if password:
if not self.burpconfcli:
return None, 'No client configuration file specified'
content = []
fdh = os.fdopen(fh, 'w+')
with open(self.burpconfcli) as f:
shutil.copyfileobj(f, fdh)
fdh.write('encryption_password = {}\n'.format(password))
fdh.close()
cmd.append('-c')
cmd.append(tmpfile)
elif self.burpconfcli:
cmd.append('-c')
cmd.append(self.burpconfcli)
if strip and strip.isdigit() and int(strip) > 0:
cmd.append('-s')
cmd.append(strip)
self._logger('debug', cmd)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
status = p.wait()
out, err = p.communicate()
if password:
os.remove(tmpfile)
self._logger('debug', out)
self._logger('debug', 'command returned: %d', status)
# hack to handle client-side encrypted backups
# this is now handled client-side, but we should never trust user input
# so we need to handle it server-side too
if 'zstrm inflate error: -3' in out and 'transfer file returning: -1' in out:
status = 1
out = 'encrypted'
# a return code of 2 means there were some warnings during restoration
# so we can assume the restoration was successful anyway
if status not in [0, 2]:
# out, err = p.communicate()
return None, out
zip_dir = tmpdir.rstrip(os.sep)
zip_file = zip_dir + '.zip'
if os.path.isfile(zip_file):
os.remove(zip_file)
zip_len = len(zip_dir) + 1
stripping = True
test_strip = True
with BUIcompress(zip_file, archive) as zf:
for dirname, subdirs, files in os.walk(zip_dir):
for filename in files:
path = os.path.join(dirname, filename)
# try to detect if the file contains vss headers
if test_strip:
test_strip = False
otp = None
try:
otp = subprocess.check_output([self.stripbin, '-p', '-i', path])
except subprocess.CalledProcessError as e:
self._logger('debug', "Stripping failed on '{}': {}".format(path, str(e)))
if not otp:
stripping = False
if stripping and os.path.isfile(path):
self._logger('debug', "stripping file: %s", path)
shutil.move(path, path + '.tmp')
status = subprocess.call([self.stripbin, '-i', path + '.tmp', '-o', path])
if status != 0:
os.remove(path)
shutil.move(path + '.tmp', path)
stripping = False
self._logger('debug', "Disable stripping since this file does not seem to embed VSS headers")
else:
os.remove(path + '.tmp')
entry = path[zip_len:]
zf.append(path, entry)
shutil.rmtree(tmpdir)
return zip_file, None
def read_conf_cli(self, client=None, conf=None, agent=None):
if not self.parser:
return []
return self.parser.read_client_conf(client, conf)
def read_conf_srv(self, conf=None, agent=None):
if not self.parser:
return []
return self.parser.read_server_conf(conf)
def store_conf_cli(self, data, client=None, conf=None, agent=None):
if not self.parser:
return []
try:
conf = unquote(conf)
except:
pass
return self.parser.store_client_conf(data, client, conf)
def store_conf_srv(self, data, conf=None, agent=None):
if not self.parser:
return []
try:
conf = unquote(conf)
except:
pass
return self.parser.store_conf(data, conf)
def expand_path(self, path=None, client=None, agent=None):
if not path:
return []
return self.parser.path_expander(path, client)
def delete_client(self, client=None, agent=None):
if not client:
return [2, "No client provided"]
return self.parser.remove_client(client)
def get_parser_attr(self, attr=None, agent=None):
"""
Using a method because of the bui-agent
"""
if not attr or not self.parser:
return []
return self.parser.get_priv_attr(attr)
|
from __future__ import absolute_import
from __future__ import unicode_literals
from django.http import HttpResponse, Http404
from django.conf import settings
from django.core import serializers
from django.db.models import Model
from django.apps import apps as django_apps
from django.db.models.query import QuerySet
from django.template.defaultfilters import slugify
from django.utils.translation import ugettext as _
from datetime import date
import decimal, datetime
import csv
try:
import xlwt # XLS Writer, see pypi
xl_active = True
except ImportError:
xl_active = False
try:
import odf, odf.opendocument, odf.table # odfpy, see pypi
odf_active = True
except ImportError:
odf_active = False
import logging
logger = logging.getLogger(settings.PROJECT_NAME)
DEFAULT_PARAMS = {
'app_label': '',
'model_name': '',
'model': None,
'format': 'csv',
'fields': [],
'headers': [],
'charset': 'utf-8',
'filename': '',
'sheet_title': _('Export'),
}
ALLOWED_EXPORT_TYPES = {
'csv': {
'mimetype': 'text/csv',
# 'template': 'admin/export/csv',
'writer': csv.writer
},
'json': {
'mimetype': 'text/json',
'serializer': 'json',
},
'xml': {
'mimetype': 'text/xml',
'serializer': 'xml',
},
'yaml': {
'mimetype': 'text/yaml',
'serializer': 'yaml',
},
'py': {
'mimetype': 'application/python',
'serializer': 'python',
},
}
if xl_active:
class xlswriter(object):
"""
XLS creator as drop-in replacement for csv.writer
"""
# style0 = xlwt.easyxf('font: name Arial, color-index red, bold on', num_format_str='#,##0.00')
# style1 = xlwt.easyxf(num_format_str='D-MMM-YY')
def __init__(self, targetfile, **kwargs):
# copy the defaults so instances do not mutate the shared module dict
self.params = dict(DEFAULT_PARAMS)
self.params.update(kwargs)
self.stream = targetfile
self.xlwb = xlwt.Workbook(encoding=self.params['charset'])
self.xlws = self.xlwb.add_sheet(self.params['sheet_title'])
self.rowcounter = 0
def write_value(self, x, y, val, style):
self.xlws.write(y, x, val, style)
def write_formula(self, x, y, formula, style):
self.xlws.write(y, x, xlwt.Formula(formula), style)
def set_row_style(self, rownumber, style):
return self.xlws.row(rownumber).set_style(style)
def save(self, filename=None):
if not filename:
filename = self.stream
self.xlwb.save(filename)
self.rowcounter = 0
def writerow(self, fields, style=None):
if not style:
style = xlwt.Style.default_style
y = self.rowcounter
for x in range(len(fields)):
val = fields[x]
if hasattr(val, 'startswith') and val.startswith('='):
val = val.lstrip('=') # strip the leading '=' to avoid a parsing error
self.write_formula(x, y, val, style)
else:
self.write_value(x, y, val, style)
self.set_row_style(y, style)
self.rowcounter += 1
def writerows(self, rows):
for row in rows:
self.writerow(row)
ALLOWED_EXPORT_TYPES['xls'] = {
'mimetype': 'application/vnd.ms-excel',
'writer': xlswriter
}
if odf_active:
class odswriter(object):
"""
ODS creator as drop-in replacement for csv.writer
"""
def __init__(self, targetfile, **kwargs):
# copy the defaults so instances do not mutate the shared module dict
self.params = dict(DEFAULT_PARAMS)
self.params.update(kwargs)
self.stream = targetfile
self.ods = odf.opendocument.OpenDocumentSpreadsheet()
self.odtable = odf.table.Table(name=self.params['sheet_title'])
self.ods.spreadsheet.addElement(self.odtable)
self.rowcounter = 0
def save(self, filename=None):
if not filename:
self.ods.write(self.stream)
else:
self.ods.save(filename)
self.rowcounter = 0
def writerow(self, fields, style=None):
row = odf.table.TableRow()
for x in range(len(fields)):
val = fields[x]
args = {'value':val}
if hasattr(val, 'startswith') and val.startswith('='):
args = {'formula': val}
elif type(val) in (str, unicode):
args = {'stringvalue': val, 'valuetype': 'string'}
elif type(val) in (decimal.Decimal,):
args = {'value': val, 'currency': 'EUR', 'valuetype': 'currency'}
elif type(val) in (int, float):
args['valuetype'] = 'float'
elif type(val) in (datetime.datetime, datetime.date):
args = {'datevalue': val, 'valuetype': 'date'}
elif type(val) in (datetime.time,):
args = {'timevalue': val, 'valuetype': 'time'}
elif type(val) in (bool,):
args = {'booleanvalue': val, 'valuetype': 'boolean'}
if style:
args['stylename'] = style
row.addElement(odf.table.TableCell(attributes=args))
self.odtable.addElement(row)
self.rowcounter += 1
def writerows(self, rows):
for row in rows:
self.writerow(row)
ALLOWED_EXPORT_TYPES['ods'] = {
'mimetype': 'application/vnd.oasis.opendocument.spreadsheet',
'writer': odswriter
}
def export(request, qs, **kwargs):
"""
This view exports data in one of several formats.
Keyword arguments:
:app_label:
application name
:model_name:
name of model within app_label
:model:
django model; replacement for app_label and model_name
:format:
str, defined by `ALLOWED_EXPORT_TYPES`
csv, json, xml, yaml, py, xls, ods
default: csv
:fields:
list of model fields
default: all fields of given model
:headers:
column names for some formats
default: verbose_names of model's fields
:charset:
for text formats
default: utf-8
:filename:
output filename
default: <model_name>_<date>.<format>
"""
# copy the defaults so repeated calls do not mutate the shared module dict
prm = dict(DEFAULT_PARAMS)
prm.update(kwargs)
exformat = prm['format']
if exformat not in ALLOWED_EXPORT_TYPES:
err = _(u'%s is not a supported format.') % exformat
logger.error(err)
raise Http404(err)
if prm['app_label'] and prm['model_name']:
model = django_apps.get_model(prm['app_label'], prm['model_name'])
elif prm['model']:
model = prm['model']
else:
model = None
if not prm['filename']:
prm['filename'] = '%s_%s.%s' % (
slugify(prm['model_name']),
date.today().strftime('%Y-%m-%d'),
exformat)
if model:
if not prm['fields']:
prm['fields'] = [f.name for f in model._meta.local_fields]
if not prm['headers']:
try:
prm['headers'] = [model._meta.get_field(f).verbose_name
                  for f in prm['fields']]
except Exception as e:
logger.error(e)
prm['headers'] = prm['fields']
mimetype = ALLOWED_EXPORT_TYPES[exformat]['mimetype']
# the `mimetype` argument was removed from HttpResponse in Django 1.7
response = HttpResponse(
    content_type='%s; charset=%s' % (mimetype, prm['charset']))
response['Content-Disposition'] = 'attachment; filename=%s' % prm['filename']
response['Cache-Control'] = 'must-revalidate'
response['Pragma'] = 'must-revalidate'
if 'writer' in ALLOWED_EXPORT_TYPES[exformat]:
writer = ALLOWED_EXPORT_TYPES[exformat]['writer'](response)
writer.writerow(prm['headers'])
for item in qs:
row = []
for field in prm['fields']:
val = getattr(item, field)
if callable(val):
val = val()
if isinstance(val, QuerySet):
val = ', '.join(x.__unicode__() for x in val.all())
elif isinstance(val, Model):
val = val.__unicode__()
elif isinstance(val, bool):
val = {True:_('Yes'), False:_('No')}[val]
elif val is None:
val = _('Unknown')
if type(val) is unicode and prm['format'] != 'ods':
val = val.encode(prm['charset'])
row.append(val)
writer.writerow(row)
if hasattr(writer, 'save'):
writer.save()
elif 'serializer' in ALLOWED_EXPORT_TYPES[exformat]:
serializer = serializers.get_serializer(
ALLOWED_EXPORT_TYPES[exformat]['serializer'])()
serializer.serialize(
qs.all(),
fields=prm['fields'],
ensure_ascii=False,
stream=response)
else:
err = _('Export type for %s must have value for writer or serializer') % exformat
logger.error(err)
raise Http404(err)
return response
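# A minimal usage sketch (hypothetical app and model names; in a real project
# this would live in your own views.py, wired up through urls.py):
#
#     from django.contrib.auth.decorators import login_required
#
#     @login_required
#     def export_books_csv(request):
#         qs = Book.objects.filter(published=True)
#         return export(request, qs,
#                       app_label='library', model_name='Book',
#                       format='csv', fields=['title', 'isbn'])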
|
class Instrument(object):
"""Librato Instrument Base class"""
def __init__(self, connection, name, id=None, streams=None, attributes=None):
self.connection = connection
self.name = name
# avoid mutable default arguments shared across instances
self.streams = []
for i in (streams or []):
if isinstance(i, Stream):
self.streams.append(i)
elif isinstance(i, dict): # Probably parsing JSON here
self.streams.append(Stream(i['metric'], i['source']))
else:
self.streams.append(Stream(*i))
self.attributes = attributes if attributes is not None else {}
self.id = id
@classmethod
def from_dict(cls, connection, data):
"""Returns a metric object from a dictionary item,
which is usually from librato's API"""
obj = cls(connection,
data['name'],
streams=data['streams'],
id=data['id'],
attributes=data['attributes'])
return obj
def get_payload(self):
return {'name': self.name,
'attributes': self.attributes,
'streams': [x.get_payload() for x in self.streams]}
def new_stream(self, metric, source='*'):
stream = Stream(metric, source)
self.streams.append(stream)
return stream
def save(self):
self.connection.update_instrument(self)
class Stream(object):
def __init__(self, metric, source='*'):
self.metric = metric
self.source = source
def get_payload(self):
return {'metric': self.metric,
'source': self.source}
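# A minimal usage sketch (assumes a `connection` object exposing
# update_instrument(), as relied upon by save() above; names are hypothetical):
#
#     inst = Instrument(connection, 'app-latency')
#     inst.new_stream('requests.p95')           # defaults to source='*'
#     inst.new_stream('requests.p99', 'web-1')
#     inst.save()                               # calls connection.update_instrument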
|
from copy import deepcopy
import os.path as op
import pytest
import numpy as np
from scipy import linalg
from scipy.spatial.distance import cdist
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_almost_equal, assert_allclose,
assert_array_less)
import mne
from mne import (convert_forward_solution, read_forward_solution, compute_rank,
VolVectorSourceEstimate, VolSourceEstimate, EvokedArray,
pick_channels_cov)
from mne.beamformer import (make_lcmv, apply_lcmv, apply_lcmv_epochs,
apply_lcmv_raw, Beamformer,
read_beamformer, apply_lcmv_cov, make_dics)
from mne.beamformer._compute_beamformer import _prepare_beamformer_input
from mne.datasets import testing
from mne.fixes import _get_args
from mne.io.compensator import set_current_comp
from mne.minimum_norm import make_inverse_operator, apply_inverse
from mne.simulation import simulate_evoked
from mne.utils import object_diff, requires_h5py, catch_logging
data_path = testing.data_path(download=False)
fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif')
fname_fwd = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
fname_fwd_vol = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-vol-7-fwd.fif')
fname_event = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc_raw-eve.fif')
fname_label = op.join(data_path, 'MEG', 'sample', 'labels', 'Aud-lh.label')
reject = dict(grad=4000e-13, mag=4e-12)
def _read_forward_solution_meg(*args, **kwargs):
fwd = read_forward_solution(*args)
fwd = convert_forward_solution(fwd, **kwargs)
return mne.pick_types_forward(fwd, meg=True, eeg=False)
def _get_data(tmin=-0.1, tmax=0.15, all_forward=True, epochs=True,
epochs_preload=True, data_cov=True, proj=True):
"""Read in data used in tests."""
label = mne.read_label(fname_label)
events = mne.read_events(fname_event)
raw = mne.io.read_raw_fif(fname_raw, preload=True)
forward = mne.read_forward_solution(fname_fwd)
if all_forward:
forward_surf_ori = _read_forward_solution_meg(
fname_fwd, surf_ori=True)
forward_fixed = _read_forward_solution_meg(
fname_fwd, force_fixed=True, surf_ori=True, use_cps=False)
forward_vol = _read_forward_solution_meg(fname_fwd_vol)
else:
forward_surf_ori = None
forward_fixed = None
forward_vol = None
event_id, tmin, tmax = 1, tmin, tmax
# Setup for reading the raw data
raw.info['bads'] = ['MEG 2443', 'EEG 053'] # 2 bad channels
# Set up pick list: MEG - bad channels
left_temporal_channels = mne.read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg=True,
selection=left_temporal_channels)
picks = picks[::2] # decimate for speed
# add a couple channels we will consider bad
bad_picks = [100, 101]
bads = [raw.ch_names[pick] for pick in bad_picks]
assert not any(pick in picks for pick in bad_picks)
picks = np.concatenate([picks, bad_picks])
raw.pick_channels([raw.ch_names[ii] for ii in picks])
del picks
raw.info['bads'] = bads # add more bads
if proj:
raw.info.normalize_proj() # avoid projection warnings
else:
raw.del_proj()
if epochs:
# Read epochs
epochs = mne.Epochs(
raw, events, event_id, tmin, tmax, proj=True,
baseline=(None, 0), preload=epochs_preload, reject=reject)
if epochs_preload:
epochs.resample(200, npad=0)
epochs.crop(0, None)
evoked = epochs.average()
info = evoked.info
else:
epochs = None
evoked = None
info = raw.info
noise_cov = mne.read_cov(fname_cov)
noise_cov['projs'] = [] # avoid warning
noise_cov = mne.cov.regularize(noise_cov, info, mag=0.05, grad=0.05,
eeg=0.1, proj=True, rank=None)
if data_cov:
data_cov = mne.compute_covariance(
epochs, tmin=0.04, tmax=0.145, verbose='error') # baseline warning
else:
data_cov = None
return raw, epochs, evoked, data_cov, noise_cov, label, forward,\
forward_surf_ori, forward_fixed, forward_vol
@testing.requires_testing_data
def test_lcmv_vector():
"""Test vector LCMV solutions."""
info = mne.io.read_raw_fif(fname_raw).info
# For speed and for rank-deficiency calculation simplicity,
# just use grads
info = mne.pick_info(info, mne.pick_types(info, meg='grad', exclude=()))
info.update(bads=[], projs=[])
forward = mne.read_forward_solution(fname_fwd)
forward = mne.pick_channels_forward(forward, info['ch_names'])
vertices = [s['vertno'][::100] for s in forward['src']]
n_vertices = sum(len(v) for v in vertices)
assert 5 < n_vertices < 20
amplitude = 100e-9
stc = mne.SourceEstimate(amplitude * np.eye(n_vertices), vertices,
0, 1. / info['sfreq'])
forward_sim = mne.convert_forward_solution(forward, force_fixed=True,
use_cps=True, copy=True)
forward_sim = mne.forward.restrict_forward_to_stc(forward_sim, stc)
noise_cov = mne.make_ad_hoc_cov(info)
noise_cov.update(data=np.diag(noise_cov['data']), diag=False)
evoked = simulate_evoked(forward_sim, stc, info, noise_cov, nave=1)
source_nn = forward_sim['source_nn']
source_rr = forward_sim['source_rr']
# Figure out our indices
mask = np.concatenate([np.in1d(s['vertno'], v)
for s, v in zip(forward['src'], vertices)])
mapping = np.where(mask)[0]
assert_array_equal(source_rr, forward['source_rr'][mapping])
# Don't check NN because we didn't rotate to surf ori
del forward_sim
# Let's do minimum norm as a sanity check (dipole_fit is slower)
inv = make_inverse_operator(info, forward, noise_cov, loose=1.)
stc_vector_mne = apply_inverse(evoked, inv, pick_ori='vector')
mne_ori = stc_vector_mne.data[mapping, :, np.arange(n_vertices)]
mne_ori /= np.linalg.norm(mne_ori, axis=-1)[:, np.newaxis]
mne_angles = np.rad2deg(np.arccos(np.sum(mne_ori * source_nn, axis=-1)))
assert np.mean(mne_angles) < 35
# Now let's do LCMV
data_cov = mne.make_ad_hoc_cov(info) # just a stub for later
with pytest.raises(ValueError, match="pick_ori"):
make_lcmv(info, forward, data_cov, 0.05, noise_cov, pick_ori='bad')
lcmv_ori = list()
for ti in range(n_vertices):
this_evoked = evoked.copy().crop(evoked.times[ti], evoked.times[ti])
data_cov['diag'] = False
data_cov['data'] = (np.outer(this_evoked.data, this_evoked.data) +
noise_cov['data'])
vals = linalg.svdvals(data_cov['data'])
assert vals[0] / vals[-1] < 1e5 # not rank deficient
with catch_logging() as log:
filters = make_lcmv(info, forward, data_cov, 0.05, noise_cov,
verbose=True)
log = log.getvalue()
assert '498 sources' in log
with catch_logging() as log:
filters_vector = make_lcmv(info, forward, data_cov, 0.05,
noise_cov, pick_ori='vector',
verbose=True)
log = log.getvalue()
assert '498 sources' in log
stc = apply_lcmv(this_evoked, filters)
stc_vector = apply_lcmv(this_evoked, filters_vector)
assert isinstance(stc, mne.SourceEstimate)
assert isinstance(stc_vector, mne.VectorSourceEstimate)
assert_allclose(stc.data, stc_vector.magnitude().data)
# Check the orientation by pooling across some neighbors, as LCMV can
# have some "holes" at the points of interest
idx = np.where(cdist(forward['source_rr'], source_rr[[ti]]) < 0.02)[0]
lcmv_ori.append(np.mean(stc_vector.data[idx, :, 0], axis=0))
lcmv_ori[-1] /= np.linalg.norm(lcmv_ori[-1])
lcmv_angles = np.rad2deg(np.arccos(np.sum(lcmv_ori * source_nn, axis=-1)))
assert np.mean(lcmv_angles) < 55
@pytest.mark.slowtest
@requires_h5py
@testing.requires_testing_data
@pytest.mark.parametrize('reg', (0.01, 0.))
@pytest.mark.parametrize('proj', (True, False))
def test_make_lcmv(tmpdir, reg, proj):
"""Test LCMV with evoked data and single trials."""
raw, epochs, evoked, data_cov, noise_cov, label, forward,\
forward_surf_ori, forward_fixed, forward_vol = _get_data(proj=proj)
for fwd in [forward, forward_vol]:
filters = make_lcmv(evoked.info, fwd, data_cov, reg=reg,
noise_cov=noise_cov)
stc = apply_lcmv(evoked, filters, max_ori_out='signed')
stc.crop(0.02, None)
stc_pow = np.sum(np.abs(stc.data), axis=1)
idx = np.argmax(stc_pow)
max_stc = stc.data[idx]
tmax = stc.times[np.argmax(max_stc)]
assert 0.08 < tmax < 0.15, tmax
assert 0.9 < np.max(max_stc) < 3.5, np.max(max_stc)
if fwd is forward:
# Test picking normal orientation (surface source space only).
filters = make_lcmv(evoked.info, forward_surf_ori, data_cov,
reg=reg, noise_cov=noise_cov,
pick_ori='normal', weight_norm=None)
stc_normal = apply_lcmv(evoked, filters, max_ori_out='signed')
stc_normal.crop(0.02, None)
stc_pow = np.sum(np.abs(stc_normal.data), axis=1)
idx = np.argmax(stc_pow)
max_stc = stc_normal.data[idx]
tmax = stc_normal.times[np.argmax(max_stc)]
lower = 0.04 if proj else 0.025
assert lower < tmax < 0.14, tmax
lower = 3e-7 if proj else 2e-7
assert lower < np.max(max_stc) < 3e-6, np.max(max_stc)
# No weight normalization was applied, so the amplitude of normal
# orientation results should always be smaller than free
# orientation results.
assert (np.abs(stc_normal.data) <= stc.data).all()
# Test picking source orientation maximizing output source power
filters = make_lcmv(evoked.info, fwd, data_cov, reg=reg,
noise_cov=noise_cov, pick_ori='max-power')
stc_max_power = apply_lcmv(evoked, filters, max_ori_out='signed')
stc_max_power.crop(0.02, None)
stc_pow = np.sum(np.abs(stc_max_power.data), axis=1)
idx = np.argmax(stc_pow)
max_stc = np.abs(stc_max_power.data[idx])
tmax = stc.times[np.argmax(max_stc)]
lower = 0.08 if proj else 0.04
assert lower < tmax < 0.15, tmax
assert 0.8 < np.max(max_stc) < 3., np.max(max_stc)
stc_max_power.data[:, :] = np.abs(stc_max_power.data)
if fwd is forward:
# Maximum output source power orientation results should be
# similar to free orientation results in areas with channel
# coverage
label = mne.read_label(fname_label)
mean_stc = stc.extract_label_time_course(label, fwd['src'],
mode='mean')
mean_stc_max_pow = \
stc_max_power.extract_label_time_course(label, fwd['src'],
mode='mean')
assert_array_less(np.abs(mean_stc - mean_stc_max_pow), 1.0)
# Test NAI weight normalization:
filters = make_lcmv(evoked.info, fwd, data_cov, reg=reg,
noise_cov=noise_cov, pick_ori='max-power',
weight_norm='nai')
stc_nai = apply_lcmv(evoked, filters, max_ori_out='signed')
stc_nai.crop(0.02, None)
# Test whether unit-noise-gain solution is a scaled version of NAI
pearsoncorr = np.corrcoef(np.concatenate(np.abs(stc_nai.data)),
np.concatenate(stc_max_power.data))
assert_almost_equal(pearsoncorr[0, 1], 1.)
# Test if spatial filter contains src_type
assert 'src_type' in filters
# __repr__
assert len(evoked.ch_names) == 22
assert len(evoked.info['projs']) == (3 if proj else 0)
assert len(evoked.info['bads']) == 2
rank = 17 if proj else 20
assert 'LCMV' in repr(filters)
assert 'unknown subject' not in repr(filters)
assert '4157 vert' in repr(filters)
assert '20 ch' in repr(filters)
assert 'rank %s' % rank in repr(filters)
# I/O
fname = op.join(str(tmpdir), 'filters.h5')
with pytest.warns(RuntimeWarning, match='-lcmv.h5'):
filters.save(fname)
filters_read = read_beamformer(fname)
assert isinstance(filters, Beamformer)
assert isinstance(filters_read, Beamformer)
# deal with object_diff strictness
filters_read['rank'] = int(filters_read['rank'])
filters['rank'] = int(filters['rank'])
assert object_diff(filters, filters_read) == ''
# Test if fixed forward operator is detected when picking normal or
# max-power orientation
pytest.raises(ValueError, make_lcmv, evoked.info, forward_fixed, data_cov,
reg=0.01, noise_cov=noise_cov, pick_ori='normal')
pytest.raises(ValueError, make_lcmv, evoked.info, forward_fixed, data_cov,
reg=0.01, noise_cov=noise_cov, pick_ori='max-power')
# Test if non-surface oriented forward operator is detected when picking
# normal orientation
pytest.raises(ValueError, make_lcmv, evoked.info, forward, data_cov,
reg=0.01, noise_cov=noise_cov, pick_ori='normal')
# Test if volume forward operator is detected when picking normal
# orientation
pytest.raises(ValueError, make_lcmv, evoked.info, forward_vol, data_cov,
reg=0.01, noise_cov=noise_cov, pick_ori='normal')
# Test if missing of noise covariance matrix is detected when more than
# one channel type is present in the data
pytest.raises(ValueError, make_lcmv, evoked.info, forward_vol,
data_cov=data_cov, reg=0.01, noise_cov=None,
pick_ori='max-power')
# Test if wrong channel selection is detected in application of filter
evoked_ch = deepcopy(evoked)
evoked_ch.pick_channels(evoked_ch.ch_names[1:])
filters = make_lcmv(evoked.info, forward_vol, data_cov, reg=0.01,
noise_cov=noise_cov)
pytest.raises(ValueError, apply_lcmv, evoked_ch, filters,
max_ori_out='signed')
# Test if discrepancies in channel selection of data and fwd model are
# handled correctly in apply_lcmv
# make filter with data where first channel was removed
filters = make_lcmv(evoked_ch.info, forward_vol, data_cov, reg=0.01,
noise_cov=noise_cov)
# applying that filter to the full data set should automatically exclude
# this channel from the data
# also check that no warnings are thrown here, in particular the
# "src should not be None" warning
with pytest.warns(None) as w:
stc = apply_lcmv(evoked, filters, max_ori_out='signed')
assert len(w) == 0
# the result should be equal to applying this filter to a dataset without
# this channel:
stc_ch = apply_lcmv(evoked_ch, filters, max_ori_out='signed')
assert_array_almost_equal(stc.data, stc_ch.data)
# Test if non-matching SSP projection is detected in application of filter
if proj:
raw_proj = deepcopy(raw)
raw_proj.del_proj()
with pytest.raises(ValueError, match='do not match the projections'):
apply_lcmv_raw(raw_proj, filters, max_ori_out='signed')
# Test if spatial filter contains src_type
assert 'src_type' in filters
# check whether a filters object without src_type throws expected warning
del filters['src_type'] # emulate 0.16 behaviour to cause warning
with pytest.warns(RuntimeWarning, match='spatial filter does not contain '
'src_type'):
apply_lcmv(evoked, filters, max_ori_out='signed')
# Now test single trial using fixed orientation forward solution
# so we can compare it to the evoked solution
filters = make_lcmv(epochs.info, forward_fixed, data_cov, reg=0.01,
noise_cov=noise_cov)
stcs = apply_lcmv_epochs(epochs, filters, max_ori_out='signed')
stcs_ = apply_lcmv_epochs(epochs, filters, return_generator=True,
max_ori_out='signed')
assert_array_equal(stcs[0].data, next(stcs_).data)
epochs.drop_bad()
assert (len(epochs.events) == len(stcs))
# average the single trial estimates
stc_avg = np.zeros_like(stcs[0].data)
for this_stc in stcs:
stc_avg += this_stc.data
stc_avg /= len(stcs)
# compare it to the solution using evoked with fixed orientation
filters = make_lcmv(evoked.info, forward_fixed, data_cov, reg=0.01,
noise_cov=noise_cov)
stc_fixed = apply_lcmv(evoked, filters, max_ori_out='signed')
assert_array_almost_equal(stc_avg, stc_fixed.data)
# use a label so we have few source vertices and delayed computation is
# not used
filters = make_lcmv(epochs.info, forward_fixed, data_cov, reg=0.01,
noise_cov=noise_cov, label=label)
stcs_label = apply_lcmv_epochs(epochs, filters, max_ori_out='signed')
assert_array_almost_equal(stcs_label[0].data, stcs[0].in_label(label).data)
# Test condition where the filters weights are zero. There should not be
# any divide-by-zero errors
zero_cov = data_cov.copy()
zero_cov['data'][:] = 0
filters = make_lcmv(epochs.info, forward_fixed, zero_cov, reg=0.01,
noise_cov=noise_cov)
assert_array_equal(filters['weights'], 0)
# Test condition where one channel type is picked
# (avoid "grad data rank (13) did not match the noise rank (None)")
data_cov_grad = pick_channels_cov(
data_cov, [ch_name for ch_name in epochs.info['ch_names']
if ch_name.endswith(('2', '3'))])
assert len(data_cov_grad['names']) > 4
make_lcmv(epochs.info, forward_fixed, data_cov_grad, reg=0.01,
noise_cov=noise_cov)
@testing.requires_testing_data
@pytest.mark.slowtest
@pytest.mark.parametrize('weight_norm', (None, 'unit-noise-gain', 'nai'))
@pytest.mark.parametrize('pick_ori', (None, 'max-power', 'vector'))
def test_make_lcmv_sphere(pick_ori, weight_norm):
"""Test LCMV with sphere head model."""
# unit-noise gain beamformer and orientation
# selection and rank reduction of the leadfield
_, _, evoked, data_cov, noise_cov, _, _, _, _, _ = _get_data(proj=True)
assert 'eeg' not in evoked
assert 'meg' in evoked
sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.080)
src = mne.setup_volume_source_space(
pos=25., sphere=sphere, mindist=5.0, exclude=2.0)
fwd_sphere = mne.make_forward_solution(evoked.info, None, src, sphere)
# Test that we get an error if not reducing rank
with pytest.raises(ValueError, match='Singular matrix detected'):
make_lcmv(
evoked.info, fwd_sphere, data_cov, reg=0.1,
noise_cov=noise_cov, weight_norm=weight_norm,
pick_ori=pick_ori, reduce_rank=False, rank='full')
# Now let's reduce it
filters = make_lcmv(evoked.info, fwd_sphere, data_cov, reg=0.1,
noise_cov=noise_cov, weight_norm=weight_norm,
pick_ori=pick_ori, reduce_rank=True)
stc_sphere = apply_lcmv(evoked, filters, max_ori_out='signed')
if isinstance(stc_sphere, VolVectorSourceEstimate):
stc_sphere = stc_sphere.magnitude()
else:
stc_sphere = abs(stc_sphere)
assert isinstance(stc_sphere, VolSourceEstimate)
stc_sphere.crop(0.02, None)
stc_pow = np.sum(stc_sphere.data, axis=1)
idx = np.argmax(stc_pow)
max_stc = stc_sphere.data[idx]
tmax = stc_sphere.times[np.argmax(max_stc)]
assert 0.08 < tmax < 0.15, tmax
min_, max_ = 1.0, 4.5
if weight_norm is None:
min_ *= 2e-7
max_ *= 2e-7
assert min_ < np.max(max_stc) < max_, (min_, np.max(max_stc), max_)
@testing.requires_testing_data
def test_lcmv_raw():
"""Test LCMV with raw data."""
raw, _, _, _, noise_cov, label, forward, _, _, _ =\
_get_data(all_forward=False, epochs=False, data_cov=False)
tmin, tmax = 0, 20
start, stop = raw.time_as_index([tmin, tmax])
# use only the left-temporal MEG channels for LCMV
data_cov = mne.compute_raw_covariance(raw, tmin=tmin, tmax=tmax)
filters = make_lcmv(raw.info, forward, data_cov, reg=0.01,
noise_cov=noise_cov, label=label)
stc = apply_lcmv_raw(raw, filters, start=start, stop=stop,
max_ori_out='signed')
assert_array_almost_equal(np.array([tmin, tmax]),
np.array([stc.times[0], stc.times[-1]]),
decimal=2)
# make sure we get an stc with vertices only in the lh
vertno = [forward['src'][0]['vertno'], forward['src'][1]['vertno']]
assert len(stc.vertices[0]) == len(np.intersect1d(vertno[0],
label.vertices))
assert len(stc.vertices[1]) == 0
@testing.requires_testing_data
@pytest.mark.parametrize('weight_norm', (None, 'unit-noise-gain'))
@pytest.mark.parametrize('pick_ori', ('max-power', 'normal'))
def test_lcmv_cov(weight_norm, pick_ori):
"""Test LCMV source power computation."""
raw, epochs, evoked, data_cov, noise_cov, label, forward,\
forward_surf_ori, forward_fixed, forward_vol = _get_data()
convert_forward_solution(forward, surf_ori=True, copy=False)
filters = make_lcmv(evoked.info, forward, data_cov, noise_cov=noise_cov,
weight_norm=weight_norm, pick_ori=pick_ori)
for cov in (data_cov, noise_cov):
this_cov = pick_channels_cov(cov, evoked.ch_names)
this_evoked = evoked.copy().pick_channels(this_cov['names'])
this_cov['projs'] = this_evoked.info['projs']
assert this_evoked.ch_names == this_cov['names']
stc = apply_lcmv_cov(this_cov, filters)
assert stc.data.min() > 0
assert stc.shape == (498, 1)
ev = EvokedArray(this_cov.data, this_evoked.info)
stc_1 = apply_lcmv(ev, filters)
assert stc_1.data.min() < 0
ev = EvokedArray(stc_1.data.T, this_evoked.info)
stc_2 = apply_lcmv(ev, filters)
assert stc_2.data.shape == (498, 498)
data = np.diag(stc_2.data)[:, np.newaxis]
assert data.min() > 0
assert_allclose(data, stc.data, rtol=1e-12)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_lcmv_ctf_comp():
"""Test interpolation with compensated CTF data."""
ctf_dir = op.join(testing.data_path(download=False), 'CTF')
raw_fname = op.join(ctf_dir, 'somMDYO-18av.ds')
raw = mne.io.read_raw_ctf(raw_fname, preload=True)
events = mne.make_fixed_length_events(raw, duration=0.2)[:2]
epochs = mne.Epochs(raw, events, tmin=-0.1, tmax=0.2)
evoked = epochs.average()
with pytest.warns(RuntimeWarning,
match='Too few samples .* estimate may be unreliable'):
data_cov = mne.compute_covariance(epochs)
fwd = mne.make_forward_solution(evoked.info, None,
mne.setup_volume_source_space(pos=15.0),
mne.make_sphere_model())
with pytest.raises(ValueError, match='reduce_rank'):
make_lcmv(evoked.info, fwd, data_cov)
filters = make_lcmv(evoked.info, fwd, data_cov, reduce_rank=True)
assert 'weights' in filters
# test whether different compensations throw error
info_comp = evoked.info.copy()
set_current_comp(info_comp, 1)
with pytest.raises(RuntimeError, match='Compensation grade .* not match'):
make_lcmv(info_comp, fwd, data_cov)
@testing.requires_testing_data
@pytest.mark.parametrize('proj', [False, True])
@pytest.mark.parametrize('weight_norm', (None, 'nai', 'unit-noise-gain'))
def test_lcmv_reg_proj(proj, weight_norm):
"""Test LCMV with and without proj."""
raw = mne.io.read_raw_fif(fname_raw, preload=True)
events = mne.find_events(raw)
raw.pick_types(meg=True)
assert len(raw.ch_names) == 305
epochs = mne.Epochs(raw, events, None, preload=True, proj=proj)
with pytest.warns(RuntimeWarning, match='Too few samples'):
noise_cov = mne.compute_covariance(epochs, tmax=0)
data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15)
forward = mne.read_forward_solution(fname_fwd)
filters = make_lcmv(epochs.info, forward, data_cov, reg=0.05,
noise_cov=noise_cov, pick_ori='max-power',
weight_norm='nai', rank=None, verbose=True)
want_rank = 302 # 305 good channels - 3 MEG projs
assert filters['rank'] == want_rank
# And also with and without noise_cov
with pytest.raises(ValueError, match='several sensor types'):
make_lcmv(epochs.info, forward, data_cov, reg=0.05,
noise_cov=None)
epochs.pick_types(meg='grad')
kwargs = dict(reg=0.05, pick_ori=None, weight_norm=weight_norm)
filters_cov = make_lcmv(epochs.info, forward, data_cov,
noise_cov=noise_cov, **kwargs)
filters_nocov = make_lcmv(epochs.info, forward, data_cov,
noise_cov=None, **kwargs)
ad_hoc = mne.make_ad_hoc_cov(epochs.info)
filters_adhoc = make_lcmv(epochs.info, forward, data_cov,
noise_cov=ad_hoc, **kwargs)
evoked = epochs.average()
stc_cov = apply_lcmv(evoked, filters_cov)
stc_nocov = apply_lcmv(evoked, filters_nocov)
stc_adhoc = apply_lcmv(evoked, filters_adhoc)
# Compare adhoc and nocov: scale difference is necessitated by using std=1.
if weight_norm == 'unit-noise-gain':
scale = np.sqrt(ad_hoc['data'][0])
else:
scale = 1.
assert_allclose(stc_nocov.data, stc_adhoc.data * scale)
a = np.dot(filters_nocov['weights'], filters_nocov['whitener'])
b = np.dot(filters_adhoc['weights'], filters_adhoc['whitener']) * scale
atol = np.mean(np.sqrt(a * a)) * 1e-7
assert_allclose(a, b, atol=atol, rtol=1e-7)
# Compare adhoc and cov: locs might not be equivalent, but the same
# general profile should persist, so look at the std and be lenient:
if weight_norm == 'unit-noise-gain':
adhoc_scale = 0.12
else:
adhoc_scale = 1.
assert_allclose(
np.linalg.norm(stc_adhoc.data, axis=0) * adhoc_scale,
np.linalg.norm(stc_cov.data, axis=0), rtol=0.3)
assert_allclose(
np.linalg.norm(stc_nocov.data, axis=0) / scale * adhoc_scale,
np.linalg.norm(stc_cov.data, axis=0), rtol=0.3)
if weight_norm == 'nai':
# NAI is always normalized by noise-level (based on eigenvalues)
for stc in (stc_nocov, stc_cov):
assert_allclose(stc.data.std(), 0.584, rtol=0.2)
elif weight_norm is None:
# None always represents something not normalized, reflecting channel
# weights
for stc in (stc_nocov, stc_cov):
assert_allclose(stc.data.std(), 2.8e-8, rtol=0.1)
else:
assert weight_norm == 'unit-noise-gain'
# Channel scalings depend on presence of noise_cov
assert_allclose(stc_nocov.data.std(), 7.8e-13, rtol=0.1)
assert_allclose(stc_cov.data.std(), 0.187, rtol=0.2)
@pytest.mark.parametrize('reg, weight_norm, use_cov, depth, lower, upper', [
(0.05, 'unit-noise-gain', True, None, 97, 98),
(0.05, 'nai', True, None, 96, 98),
(0.05, 'nai', True, 0.8, 96, 98),
(0.05, None, True, None, 74, 76),
(0.05, None, True, 0.8, 90, 93), # depth improves weight_norm=None
(0.05, 'unit-noise-gain', False, None, 83, 86),
(0.05, 'unit-noise-gain', False, 0.8, 83, 86), # depth same for wn != None
# no reg
(0.00, 'unit-noise-gain', True, None, 35, 99), # TODO: Still not stable
])
def test_localization_bias_fixed(bias_params_fixed, reg, weight_norm, use_cov,
depth, lower, upper):
"""Test localization bias for fixed-orientation LCMV."""
evoked, fwd, noise_cov, data_cov, want = bias_params_fixed
if not use_cov:
evoked.pick_types(meg='grad')
noise_cov = None
assert data_cov['data'].shape[0] == len(data_cov['names'])
loc = apply_lcmv(evoked, make_lcmv(evoked.info, fwd, data_cov, reg,
noise_cov, depth=depth,
weight_norm=weight_norm)).data
loc = np.abs(loc)
# Compute the percentage of sources for which there is no loc bias:
perc = (want == np.argmax(loc, axis=0)).mean() * 100
assert lower <= perc <= upper
@pytest.mark.parametrize(
'reg, pick_ori, weight_norm, use_cov, depth, lower, upper', [
(0.05, 'vector', 'unit-noise-gain-invariant', False, None, 26, 28),
(0.05, 'vector', 'unit-noise-gain-invariant', True, None, 40, 42),
(0.05, 'vector', 'unit-noise-gain', False, None, 13, 14),
(0.05, 'vector', 'unit-noise-gain', True, None, 35, 37),
(0.05, 'vector', 'nai', True, None, 35, 37),
(0.05, 'vector', None, True, None, 12, 14),
(0.05, 'vector', None, True, 0.8, 39, 43),
(0.05, 'max-power', 'unit-noise-gain-invariant', False, None, 17, 20),
(0.05, 'max-power', 'unit-noise-gain', False, None, 17, 20),
(0.05, 'max-power', 'nai', True, None, 21, 24),
(0.05, 'max-power', None, True, None, 7, 10),
(0.05, 'max-power', None, True, 0.8, 15, 18),
(0.05, None, None, True, 0.8, 40, 42),
# no reg
(0.00, 'vector', None, True, None, 21, 32),
(0.00, 'vector', 'unit-noise-gain-invariant', True, None, 50, 65),
(0.00, 'vector', 'unit-noise-gain', True, None, 42, 65),
(0.00, 'vector', 'nai', True, None, 42, 65),
(0.00, 'max-power', None, True, None, 13, 19),
(0.00, 'max-power', 'unit-noise-gain-invariant', True, None, 43, 50),
(0.00, 'max-power', 'unit-noise-gain', True, None, 43, 50),
(0.00, 'max-power', 'nai', True, None, 43, 50),
])
def test_localization_bias_free(bias_params_free, reg, pick_ori, weight_norm,
use_cov, depth, lower, upper):
"""Test localization bias for free-orientation LCMV."""
evoked, fwd, noise_cov, data_cov, want = bias_params_free
if not use_cov:
evoked.pick_types(meg='grad')
noise_cov = None
loc = apply_lcmv(evoked, make_lcmv(evoked.info, fwd, data_cov, reg,
noise_cov, pick_ori=pick_ori,
weight_norm=weight_norm,
depth=depth)).data
loc = np.linalg.norm(loc, axis=1) if pick_ori == 'vector' else np.abs(loc)
# Compute the percentage of sources for which there is no loc bias:
perc = (want == np.argmax(loc, axis=0)).mean() * 100
assert lower <= perc <= upper
@pytest.mark.parametrize('weight_norm', ('nai', 'unit-noise-gain'))
@pytest.mark.parametrize('pick_ori', ('vector', 'max-power', None))
def test_depth_does_not_matter(bias_params_free, weight_norm, pick_ori):
"""Test that depth weighting does not matter for normalized filters."""
evoked, fwd, noise_cov, data_cov, _ = bias_params_free
data = apply_lcmv(evoked, make_lcmv(
evoked.info, fwd, data_cov, 0.05, noise_cov, pick_ori=pick_ori,
weight_norm=weight_norm, depth=0.)).data
data_depth = apply_lcmv(evoked, make_lcmv(
evoked.info, fwd, data_cov, 0.05, noise_cov, pick_ori=pick_ori,
weight_norm=weight_norm, depth=1.)).data
assert data.shape == data_depth.shape
for d1, d2 in zip(data, data_depth):
# Sign flips can change when nearly orthogonal to the normal direction
d2 *= np.sign(np.dot(d1.ravel(), d2.ravel()))
atol = np.linalg.norm(d1) * 1e-7
assert_allclose(d1, d2, atol=atol)
@testing.requires_testing_data
def test_lcmv_maxfiltered():
"""Test LCMV on maxfiltered data."""
raw = mne.io.read_raw_fif(fname_raw).fix_mag_coil_types()
raw_sss = mne.preprocessing.maxwell_filter(raw)
events = mne.find_events(raw_sss)
del raw
raw_sss.pick_types(meg='mag')
assert len(raw_sss.ch_names) == 102
epochs = mne.Epochs(raw_sss, events)
data_cov = mne.compute_covariance(epochs, tmin=0)
fwd = mne.read_forward_solution(fname_fwd)
rank = compute_rank(data_cov, info=epochs.info)
assert rank == {'mag': 71}
for use_rank in ('info', rank, 'full', None):
make_lcmv(epochs.info, fwd, data_cov, rank=use_rank)
@testing.requires_testing_data
@pytest.mark.parametrize('pick_ori', ['vector', 'max-power', 'normal'])
@pytest.mark.parametrize(
'weight_norm', ['unit-noise-gain', 'nai', 'unit-noise-gain-invariant'])
@pytest.mark.parametrize('reg', (0.05, 0.))
@pytest.mark.parametrize('inversion', ['matrix', 'single'])
def test_unit_noise_gain_formula(pick_ori, weight_norm, reg, inversion):
"""Test unit-noise-gain filter against formula."""
raw = mne.io.read_raw_fif(fname_raw, preload=True)
events = mne.find_events(raw)
raw.pick_types(meg='mag')
assert len(raw.ch_names) == 102
epochs = mne.Epochs(raw, events, None, preload=True)
data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15)
# for now, avoid whitening to make life easier
noise_cov = mne.make_ad_hoc_cov(epochs.info, std=dict(grad=1., mag=1.))
forward = mne.read_forward_solution(fname_fwd)
convert_forward_solution(forward, surf_ori=True, copy=False)
rank = None
kwargs = dict(reg=reg, noise_cov=noise_cov, pick_ori=pick_ori,
weight_norm=weight_norm, rank=rank, inversion=inversion)
if inversion == 'single' and pick_ori == 'vector' and \
weight_norm == 'unit-noise-gain-invariant':
with pytest.raises(ValueError, match='Cannot use'):
make_lcmv(epochs.info, forward, data_cov, **kwargs)
return
filters = make_lcmv(epochs.info, forward, data_cov, **kwargs)
_, _, _, _, G, _, _, _ = _prepare_beamformer_input(
epochs.info, forward, None, 'vector', noise_cov=noise_cov, rank=rank,
pca=False, exp=None)
n_channels, n_sources = G.shape
n_sources //= 3
G.shape = (n_channels, n_sources, 3)
G = G.transpose(1, 2, 0) # verts, orient, ch
_assert_weight_norm(filters, G)
def _assert_weight_norm(filters, G):
"""Check the result of the chosen weight normalization strategy."""
weights, max_power_ori = filters['weights'], filters['max_power_ori']
# Make the dimensions of the weight matrix equal for both DICS (which
# defines weights for multiple frequencies) and LCMV (which does not).
if filters['kind'] == 'LCMV':
weights = weights[np.newaxis]
if max_power_ori is not None:
max_power_ori = max_power_ori[np.newaxis]
if max_power_ori is not None:
max_power_ori = max_power_ori[..., np.newaxis]
weight_norm = filters['weight_norm']
inversion = filters['inversion']
n_channels = weights.shape[2]
if inversion == 'matrix':
# Dipoles are grouped in groups with size n_orient
n_sources = filters['n_sources']
n_orient = 3 if filters['is_free_ori'] else 1
elif inversion == 'single':
# Every dipole is treated as a unique source
n_sources = weights.shape[1]
n_orient = 1
for wi, w in enumerate(weights):
w = w.reshape(n_sources, n_orient, n_channels)
# Compute leadfield in the direction chosen during the computation of
# the beamformer.
if filters['pick_ori'] == 'max-power':
use_G = np.sum(G * max_power_ori[wi], axis=1, keepdims=True)
elif filters['pick_ori'] == 'normal':
use_G = G[:, -1:]
else:
use_G = G
if inversion == 'single':
# Every dipole is treated as a unique source
use_G = use_G.reshape(n_sources, 1, n_channels)
assert w.shape == use_G.shape == (n_sources, n_orient, n_channels)
# Test weight normalization scheme
got = np.matmul(w, w.conj().swapaxes(-2, -1))
desired = np.repeat(np.eye(n_orient)[np.newaxis], w.shape[0], axis=0)
if n_orient == 3 and weight_norm in ('unit-noise-gain', 'nai'):
# only the diagonal is correct!
assert not np.allclose(got, desired, atol=1e-7)
got = got.reshape(n_sources, -1)[:, ::n_orient + 1]
desired = np.ones_like(got)
if weight_norm == 'nai': # additional scale factor, should be fixed
atol = 1e-7 * got.flat[0]
desired *= got.flat[0]
else:
atol = 1e-7
assert_allclose(got, desired, atol=atol, err_msg='w @ w.conj().T = I')
# Check that the result here is a diagonal matrix for Sekihara
if n_orient > 1 and weight_norm != 'unit-noise-gain-invariant':
got = w @ use_G.swapaxes(-2, -1)
diags = np.diagonal(got, 0, -2, -1)
want = np.apply_along_axis(np.diagflat, 1, diags)
atol = np.mean(diags).real * 1e-12
assert_allclose(got, want, atol=atol, err_msg='G.T @ w = θI')
def test_api():
"""Test LCMV/DICS API equivalence."""
lcmv_names = _get_args(make_lcmv)
dics_names = _get_args(make_dics)
dics_names[dics_names.index('csd')] = 'data_cov'
dics_names[dics_names.index('noise_csd')] = 'noise_cov'
dics_names.pop(dics_names.index('real_filter')) # not a thing for LCMV
assert lcmv_names == dics_names
|
from django import forms
from django.template.defaultfilters import slugify
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext, get_language
import django_select2
from hvad.forms import TranslatableModelForm
import taggit
from unidecode import unidecode
from .models import Tag, News
class MultipleTagForm(forms.ModelForm):
class Meta:
widgets = {
'tags': django_select2.Select2MultipleWidget
}
class NewsTagWidget(django_select2.widgets.Select2Mixin, taggit.forms.TagWidget):
def __init__(self, *args, **kwargs):
options = kwargs.get('select2_options', {})
options['tags'] = list(Tag.objects.values_list('name', flat=True))
options['tokenSeparators'] = [' ', ',']
kwargs['select2_options'] = options
super(NewsTagWidget, self).__init__(*args, **kwargs)
def render_js_code(self, *args, **kwargs):
js_code = super(NewsTagWidget, self).render_js_code(*args, **kwargs)
return js_code.replace('$', 'jQuery')
class AutoSlugForm(TranslatableModelForm):
slug_field = 'slug'
slugified_field = None
def clean(self):
super(AutoSlugForm, self).clean()
if not self.fields.get(self.slug_field):
return self.cleaned_data
if not self.data.get(self.slug_field):
slug = self.generate_slug()
# add to self.data in order to show generated slug in the form in case of an error
self.data[self.slug_field] = self.cleaned_data[self.slug_field] = slug
else:
if self._errors.get(self.slug_field):
return self.cleaned_data
slug = self.cleaned_data[self.slug_field]
# validate uniqueness
conflict = self.get_slug_conflict(slug=slug)
if conflict:
self.report_error(conflict=conflict)
return self.cleaned_data
def generate_slug(self):
content_to_slugify = self.cleaned_data.get(self.slugified_field, '')
return slugify(unidecode(content_to_slugify))
def get_slug_conflict(self, slug):
translations_model = self.instance._meta.translations_model
try:
language_code = self.instance.language_code
except translations_model.DoesNotExist:
language_code = get_language()
conflicts = translations_model.objects.filter(slug=slug, language_code=language_code)
if self.is_edit_action():
conflicts = conflicts.exclude(master=self.instance)
try:
return conflicts.get()
except translations_model.DoesNotExist:
return None
def report_error(self, conflict):
address = '<a href="%(url)s" target="_blank">%(label)s</a>' % {
'url': conflict.master.get_absolute_url(),
'label': ugettext('the conflicting object')}
error_message = ugettext('Conflicting slug. See %(address)s.') % {'address': address}
self.append_to_errors(field='slug', message=mark_safe(error_message))
def append_to_errors(self, field, message):
try:
self._errors[field].append(message)
except KeyError:
self._errors[field] = self.error_class([message])
def is_edit_action(self):
return self.instance.pk is not None
class CategoryForm(AutoSlugForm):
slugified_field = 'name'
class Meta:
fields = ['name', 'slug']
class NewsForm(AutoSlugForm):
slugified_field = 'title'
class Meta:
widgets = {
'tags': NewsTagWidget
}
class LinksForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(LinksForm, self).__init__(*args, **kwargs)
self.fields['news'].queryset = News.objects.language()
class Meta:
model = News
exclude = []
|
from django.test import TestCase
from django.urls import reverse
from corehq.apps.domain.models import Domain
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.domain.tests.test_views import BaseAutocompleteTest
from corehq.apps.users.dbaccessors import delete_all_users
from corehq.apps.users.models import CommCareUser, WebUser
class TestEmailAuthenticationFormAutocomplete(BaseAutocompleteTest):
def test_autocomplete_enabled(self):
self.verify(True, reverse("login"), "auth-username")
def test_autocomplete_disabled(self):
self.verify(False, reverse("login"), "auth-username")
class TestBugReport(TestCase):
domain = 'test-bug-report'
@classmethod
def setUpClass(cls):
super(TestBugReport, cls).setUpClass()
delete_all_users()
cls.project = create_domain(cls.domain)
cls.web_user = WebUser.create(
cls.domain,
'bug-dude',
password='***',
created_by=None,
created_via=None,
)
cls.web_user.is_superuser = True
cls.web_user.save()
cls.commcare_user = CommCareUser.create(
cls.domain,
'bug-kid',
password='***',
created_by=None,
created_via=None,
)
cls.url = reverse("bug_report")
@classmethod
def tearDownClass(cls):
delete_all_users()
cls.project.delete()
super(TestBugReport, cls).tearDownClass()
def _default_payload(self, username):
return {
'subject': 'Bug',
'username': username,
'domain': self.domain,
'url': 'www.bugs.com',
'message': 'CommCare is broken, help!',
'app_id': '',
'cc': '',
'email': '',
'500traceback': '',
'sentry_event_id': '',
}
def _post_bug_report(self, payload):
return self.client.post(
self.url,
payload,
HTTP_USER_AGENT='firefox',
)
def test_basic_bug_submit(self):
self.client.login(username=self.web_user.username, password='***')
payload = self._default_payload(self.web_user.username)
response = self._post_bug_report(payload)
self.assertEqual(response.status_code, 200)
def test_project_description_web_user(self):
self.client.login(username=self.web_user.username, password='***')
payload = self._default_payload(self.web_user.username)
payload.update({
'project_description': 'Great NGO, Just Great',
})
domain_object = Domain.get_by_name(self.domain)
self.assertIsNone(domain_object.project_description)
response = self._post_bug_report(payload)
self.assertEqual(response.status_code, 200)
domain_object = Domain.get_by_name(self.domain)
self.assertEqual(domain_object.project_description, 'Great NGO, Just Great')
# Don't update if they've made it blank
payload.update({
'project_description': '',
})
response = self._post_bug_report(payload)
self.assertEqual(response.status_code, 200)
domain_object = Domain.get_by_name(self.domain)
self.assertEqual(domain_object.project_description, 'Great NGO, Just Great')
def test_project_description_commcare_user(self):
self.client.login(username=self.commcare_user.username, password='***')
payload = self._default_payload(self.commcare_user.username)
payload.update({
'project_description': 'Great NGO, Just Great',
})
domain_object = Domain.get_by_name(self.domain)
self.assertIsNone(domain_object.project_description)
response = self._post_bug_report(payload)
self.assertEqual(response.status_code, 200)
domain_object = Domain.get_by_name(self.domain)
# Shouldn't be able to update description as commcare user
self.assertIsNone(domain_object.project_description)
|
"""
Doc string here.
@author mje
@email: mads [] cnru.dk
"""
import sys
import subprocess
if len(sys.argv) == 4:
cpu_number = sys.argv[3]
else:
cpu_number = 1
submit_cmd = 'submit_to_cluster \"python %s %s\"' % (sys.argv[1], cpu_number)
print(submit_cmd)
|
from django.conf import settings
def get_profile_model():
"""
Returns configured user profile model or None if not found
"""
auth_profile_module = getattr(settings, 'AUTH_PROFILE_MODULE', None)
profile_model = None
if auth_profile_module:
# Get the profile model. TODO: still fragile, refactor.
app_label, model = auth_profile_module.split('.')
models_module = __import__("%s.models" % app_label,
                           globals(), locals(), [model])
profile_model = getattr(models_module, model, None)
return profile_model
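# A minimal usage sketch (assumes settings.AUTH_PROFILE_MODULE is set to
# something like 'accounts.UserProfile'; names are hypothetical):
#
#     Profile = get_profile_model()
#     if Profile is not None:
#         profile = Profile.objects.get(user=request.user)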
|
from django.test import TestCase
from django.utils.timezone import now
from ..models import Promise, VerificationDocument
from popolo.models import Person
nownow = now()
class VerificationDocumentTestCase(TestCase):
def setUp(self):
self.person = Person.objects.create(name=u"A person")
self.promise = Promise.objects.create(name="this is a promise",
                                      person=self.person,
                                      date=nownow)
def test_instantiate_a_verification_document(self):
'''Instantiate a verification document'''
document = VerificationDocument.objects.create(promise=self.promise,
                                               date=nownow,
                                               url='http://verification.com',
                                               display_name='verification page')
self.assertTrue(document)
self.assertEqual(document.promise, self.promise)
self.assertEqual(document.date, nownow)
self.assertEqual(document.url, 'http://verification.com')
self.assertEqual(document.display_name, 'verification page')
document = VerificationDocument.objects.create(promise=self.promise,
                                               url='http://verification.com',
                                               display_name='verification page')
self.assertIsNone(document.date)
def test_related_name_for_relationship(self):
'''A promise has verification_documents'''
document = VerificationDocument.objects.create(promise=self.promise,
                                               date=nownow,
                                               url='http://verification.com',
                                               display_name='verification page')
self.assertIn(document, self.promise.verification_documents.all())
|
"""Univariate polynomials with galois field coefficients."""
import random
import modint
import sparse_poly
def GFPolyFactory(p):
"""Create custom class for specific coefficient type."""
coefficient_type = modint.ModularIntegerFactory(p)
class newClass(sparse_poly.SparsePolynomial):
coeff_type = coefficient_type
zero = coeff_type(0)
@staticmethod
def from_int_dict(int_dict):
"""Alternative construction, through integers."""
result_dict = {}
for e, c in int_dict.iteritems():
cc = coefficient_type(c)
if cc:
result_dict[e] = cc
return newClass(result_dict)
def to_int_dict(self):
"""Returns the dictionaries of integer representators."""
result_dict = {}
for e, c in self.coeffs.iteritems():
result_dict[e] = c.value
return result_dict
def to_sym_int_dict(self):
"""Returns the dictionaries of symmetric integer representators."""
result_dict = {}
for e, c in self.coeffs.iteritems():
result_dict[e] = int(c)
return result_dict
@staticmethod
def random(min_degree, max_degree, monic=True):
"""Generate random polynomial in given degree range."""
degree = random.randrange(min_degree, max_degree + 1)
p = coefficient_type.modulus
result_dict = {}
if monic:
result_dict[degree] = coefficient_type(1)
degree -= 1
for e in xrange(0, degree + 1):
c = coefficient_type(random.randrange(p))
if c:
result_dict[e] = c
return newClass(result_dict)
def monic(self):
if not self:
return self.coeff_type(0), self
leading_coeff = self[self.degree]
return leading_coeff, self.scale(self.coeff_type(1)/leading_coeff)
newClass.__name__ = "%sPoly" % coefficient_type.__name__
return newClass
def div(f, g):
"""Division with remainder."""
q = f.__class__()
r = f
if not g:
return q, r
deg_diff = r.degree - g.degree
while deg_diff >= 0:
quot = f.__class__({deg_diff: r[r.degree]/g[g.degree]})
q += quot
r -= quot*g
deg_diff = r.degree - g.degree
return q, r
def gcd(f, g):
"""Euclidean algorithm."""
while g:
f, g = g, div(f,g)[1]
return f.monic()[1]
def lcm(f, g):
q, r = div(f*g, gcd(f,g))
assert not r
return q.monic()[1]
def xgcd(f, g):
"""Extended euclidean algorithm.
Outputs the gcd, s and t, such that:
h == s*f + t*g
"""
one = f.coeff_type(1)
p, q, r, s, t = [], [], [], [], []
pp, rr = f.monic()
p.append(pp)
r.append(rr)
pp, rr = g.monic()
p.append(pp)
r.append(rr)
s.append(f.__class__({0:(one/p[0])}))
s.append(f.__class__())
t.append(f.__class__())
t.append(f.__class__({0:(one/p[1])}))
while True:
q.append(div(r[-2], r[-1])[0])
pp, rr = (r[-2] - q[-1]*r[-1]).monic()
if not rr:
return r[-1], s[-1], t[-1]
p.append(pp)
r.append(rr)
pp = one/pp
s.append((s[-2] - q[-1]*s[-1]).scale(pp))
t.append((t[-2] - q[-1]*t[-1]).scale(pp))
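# A quick sanity check of the Bezout identity returned by xgcd() (a sketch,
# assuming f and g were built through a GFPolyFactory class as above):
#
#     h, s, t = xgcd(f, g)
#     assert s*f + t*g == h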
def truncate(f, n):
"""The remainder from division by x**n."""
result_dict = {}
for e, c in f.coeffs.iteritems():
if e < n:
result_dict[e] = c
return f.__class__(result_dict)
def pow_mod(f, n, p):
"""Repeated squaring."""
assert isinstance(n, (int, long)) and n >= 0
if n == 0:
return f.__class__({0: f.__class__.coeff_type(1)})
binary_n = []
while n:
if n % 2:
binary_n.insert(0, 1)
n = (n - 1) / 2
else:
binary_n.insert(0, 0)
n /= 2
result = div(f, p)[1]
for k in binary_n[1:]:
result *= result
result = div(result, p)[1]
if k:
result *= f
result = div(result, p)[1]
return result
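# Example: pow_mod(f, 5, p) follows the binary expansion 5 = 0b101,
# i.e. f -> f**2 -> f**4 * f, reducing mod p after every multiplication so
# intermediate results keep degree below p.degree.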
def distinct_degree_factor(f):
"""Return a list of divisors.
Each polynomial has only factors of a specific degree.
"""
result = []
coeff_type = f.__class__.coeff_type
p = coeff_type.modulus
x_poly = f.__class__({1: coeff_type(1)})
one_poly = f.__class__({0: coeff_type(1)})
h = x_poly
while f != one_poly:
h = pow_mod(h, p, f) # h <- h**p mod f
g = gcd(h - x_poly, f)
f, r = div(f, g)
assert not r
result.append(g)
# Early abort:
if f.degree < 2*(g.degree + 1):
result.append(f)
break
return result
def equal_degree_split(f, degree):
"""Finds divisor of a result from distinct-degree factorization."""
coeff_type = f.__class__.coeff_type
one_poly = f.__class__({0: coeff_type(1)})
a = f.random(1, f.degree - 1)
g = gcd(f, a)
if g != one_poly:
return g
b = pow_mod(a, (coeff_type.modulus**degree - 1)/2, f)
g = gcd(b - one_poly, f)
if g != one_poly and g != f:
return g
return None # Failure, try again with another random a.
def equal_degree_factor(f, degree):
"""Finds all divisors of a result from distinct-degree factorization."""
if f.degree == degree:
return [f]
g = None
while g is None:
g = equal_degree_split(f, degree)
q, r = div(f, g)
assert not r
return equal_degree_factor(g, degree) + equal_degree_factor(q, degree)
def factor(f):
"""Factorization of a univariate polynomial over a Galois field.
Returns a list of the leading coefficient of f and the monic
factors with their multiplicities.
"""
p = f.__class__.coeff_type.modulus
leading_coeff, f = f.monic()
one_poly = f.__class__({0: f.__class__.coeff_type(1)})
x_poly = f.__class__({1: f.__class__.coeff_type(1)})
h = x_poly
i = 0
result = [leading_coeff]
while f != one_poly:
i += 1
# One distinct-degree factorization step.
h = pow_mod(h, p, f) # h <- h**p mod f
g = gcd(h - x_poly, f)
if g != one_poly:
# Equal-degree factorization for degree i:
g_factors = equal_degree_factor(g, i)
# Now determine multiplicities of factors.
for gg in g_factors:
e = 0
q, r = div(f, gg)
while not r: # gg**e divides f
e += 1
f = q
q, r = div(f, gg)
result.append((gg, e))
return result
def factor_sqf(f):
"""Factorization of a univariate square-free polynomial over a Galois field.
Returns a list of the leading coefficient and the monic factors of f.
"""
one_poly = f.__class__({0: f.__class__.coeff_type(1)})
leading_coeff, f = f.monic()
result = [leading_coeff]
for degree, divisor in enumerate(distinct_degree_factor(f)):
if divisor == one_poly:
continue
result += equal_degree_factor(divisor, degree + 1)
return result
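# A minimal usage sketch (assumes modint.ModularIntegerFactory and
# sparse_poly.SparsePolynomial behave as relied upon above; Python 2):
#
#     GF7Poly = GFPolyFactory(7)
#     f = GF7Poly.from_int_dict({2: 1, 0: -1})   # x**2 - 1 over GF(7)
#     factor(f)  # leading coefficient 1 and the monic factors
#                # (x + 1) and (x + 6), each with multiplicity 1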
|
""" test with the .transform """
from io import StringIO
import numpy as np
import pytest
from pandas._libs import groupby
from pandas.core.dtypes.common import ensure_platform_int, is_timedelta64_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, MultiIndex, Series, Timestamp, concat, date_range)
from pandas.core.groupby.groupby import DataError
from pandas.util import testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
def assert_fp_equal(a, b):
assert (np.abs(a - b) < 1e-12).all()
def test_transform():
data = Series(np.arange(9) // 3, index=np.arange(9))
index = np.arange(9)
np.random.shuffle(index)
data = data.reindex(index)
grouped = data.groupby(lambda x: x // 3)
transformed = grouped.transform(lambda x: x * x.sum())
assert transformed[7] == 12
# GH 8046
# make sure that we preserve the input order
df = DataFrame(
np.arange(6, dtype='int64').reshape(
3, 2), columns=["a", "b"], index=[0, 2, 1])
key = [0, 0, 1]
expected = df.sort_index().groupby(key).transform(
lambda x: x - x.mean()).groupby(key).mean()
result = df.groupby(key).transform(lambda x: x - x.mean()).groupby(
key).mean()
assert_frame_equal(result, expected)
def demean(arr):
return arr - arr.mean()
people = DataFrame(np.random.randn(5, 5),
columns=['a', 'b', 'c', 'd', 'e'],
index=['Joe', 'Steve', 'Wes', 'Jim', 'Travis'])
key = ['one', 'two', 'one', 'two', 'one']
result = people.groupby(key).transform(demean).groupby(key).mean()
expected = people.groupby(key).apply(demean).groupby(key).mean()
assert_frame_equal(result, expected)
# GH 8430
df = tm.makeTimeDataFrame()
g = df.groupby(pd.Grouper(freq='M'))
g.transform(lambda x: x - 1)
# GH 9700
df = DataFrame({'a': range(5, 10), 'b': range(5)})
result = df.groupby('a').transform(max)
expected = DataFrame({'b': range(5)})
tm.assert_frame_equal(result, expected)
def test_transform_fast():
df = DataFrame({'id': np.arange(100000) / 3,
'val': np.random.randn(100000)})
grp = df.groupby('id')['val']
values = np.repeat(grp.mean().values,
ensure_platform_int(grp.count().values))
expected = pd.Series(values, index=df.index, name='val')
result = grp.transform(np.mean)
assert_series_equal(result, expected)
result = grp.transform('mean')
assert_series_equal(result, expected)
# GH 12737
df = pd.DataFrame({'grouping': [0, 1, 1, 3], 'f': [1.1, 2.1, 3.1, 4.5],
'd': pd.date_range('2014-1-1', '2014-1-4'),
'i': [1, 2, 3, 4]},
columns=['grouping', 'f', 'i', 'd'])
result = df.groupby('grouping').transform('first')
dates = [pd.Timestamp('2014-1-1'), pd.Timestamp('2014-1-2'),
pd.Timestamp('2014-1-2'), pd.Timestamp('2014-1-4')]
expected = pd.DataFrame({'f': [1.1, 2.1, 2.1, 4.5],
'd': dates,
'i': [1, 2, 2, 4]},
columns=['f', 'i', 'd'])
assert_frame_equal(result, expected)
# selection
result = df.groupby('grouping')[['f', 'i']].transform('first')
expected = expected[['f', 'i']]
assert_frame_equal(result, expected)
# dup columns
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['g', 'a', 'a'])
result = df.groupby('g').transform('first')
expected = df.drop('g', axis=1)
assert_frame_equal(result, expected)
def test_transform_broadcast(tsframe, ts):
grouped = ts.groupby(lambda x: x.month)
result = grouped.transform(np.mean)
tm.assert_index_equal(result.index, ts.index)
for _, gp in grouped:
assert_fp_equal(result.reindex(gp.index), gp.mean())
grouped = tsframe.groupby(lambda x: x.month)
result = grouped.transform(np.mean)
tm.assert_index_equal(result.index, tsframe.index)
for _, gp in grouped:
agged = gp.mean()
res = result.reindex(gp.index)
for col in tsframe:
assert_fp_equal(res[col], agged[col])
# group columns
grouped = tsframe.groupby({'A': 0, 'B': 0, 'C': 1, 'D': 1},
axis=1)
result = grouped.transform(np.mean)
tm.assert_index_equal(result.index, tsframe.index)
tm.assert_index_equal(result.columns, tsframe.columns)
for _, gp in grouped:
agged = gp.mean(1)
res = result.reindex(columns=gp.columns)
for idx in gp.index:
assert_fp_equal(res.xs(idx), agged[idx])
def test_transform_axis(tsframe):
# make sure that we are setting the axes
# correctly when on axis=0 or 1
# in the presence of a non-monotonic indexer
# GH12713
base = tsframe.iloc[0:5]
r = len(base.index)
c = len(base.columns)
tso = DataFrame(np.random.randn(r, c),
index=base.index,
columns=base.columns,
dtype='float64')
# monotonic
ts = tso
grouped = ts.groupby(lambda x: x.weekday())
result = ts - grouped.transform('mean')
expected = grouped.apply(lambda x: x - x.mean())
assert_frame_equal(result, expected)
ts = ts.T
grouped = ts.groupby(lambda x: x.weekday(), axis=1)
result = ts - grouped.transform('mean')
expected = grouped.apply(lambda x: (x.T - x.mean(1)).T)
assert_frame_equal(result, expected)
# non-monotonic
ts = tso.iloc[[1, 0] + list(range(2, len(base)))]
grouped = ts.groupby(lambda x: x.weekday())
result = ts - grouped.transform('mean')
expected = grouped.apply(lambda x: x - x.mean())
assert_frame_equal(result, expected)
ts = ts.T
grouped = ts.groupby(lambda x: x.weekday(), axis=1)
result = ts - grouped.transform('mean')
expected = grouped.apply(lambda x: (x.T - x.mean(1)).T)
assert_frame_equal(result, expected)
def test_transform_dtype():
# GH 9807
# Check transform dtype output is preserved
df = DataFrame([[1, 3], [2, 3]])
result = df.groupby(1).transform('mean')
expected = DataFrame([[1.5], [1.5]])
assert_frame_equal(result, expected)
def test_transform_bug():
# GH 5712
# transforming on a datetime column
df = DataFrame(dict(A=Timestamp('20130101'), B=np.arange(5)))
result = df.groupby('A')['B'].transform(
lambda x: x.rank(ascending=False))
expected = Series(np.arange(5, 0, step=-1), name='B')
assert_series_equal(result, expected)
def test_transform_numeric_to_boolean():
# GH 16875
# inconsistency in transforming boolean values
expected = pd.Series([True, True], name='A')
df = pd.DataFrame({'A': [1.1, 2.2], 'B': [1, 2]})
result = df.groupby('B').A.transform(lambda x: True)
assert_series_equal(result, expected)
df = pd.DataFrame({'A': [1, 2], 'B': [1, 2]})
result = df.groupby('B').A.transform(lambda x: True)
assert_series_equal(result, expected)
def test_transform_datetime_to_timedelta():
# GH 15429
# transforming a datetime to timedelta
df = DataFrame(dict(A=Timestamp('20130101'), B=np.arange(5)))
expected = pd.Series([
Timestamp('20130101') - Timestamp('20130101')] * 5, name='A')
# this does date math without changing result type in transform
base_time = df['A'][0]
result = df.groupby('A')['A'].transform(
lambda x: x.max() - x.min() + base_time) - base_time
assert_series_equal(result, expected)
# this does date math and causes the transform to return timedelta
result = df.groupby('A')['A'].transform(lambda x: x.max() - x.min())
assert_series_equal(result, expected)
def test_transform_datetime_to_numeric():
# GH 10972
# convert dt to float
df = DataFrame({
'a': 1, 'b': date_range('2015-01-01', periods=2, freq='D')})
result = df.groupby('a').b.transform(
lambda x: x.dt.dayofweek - x.dt.dayofweek.mean())
expected = Series([-0.5, 0.5], name='b')
assert_series_equal(result, expected)
# convert dt to int
df = DataFrame({
'a': 1, 'b': date_range('2015-01-01', periods=2, freq='D')})
result = df.groupby('a').b.transform(
lambda x: x.dt.dayofweek - x.dt.dayofweek.min())
expected = Series([0, 1], name='b')
assert_series_equal(result, expected)
def test_transform_casting():
# 13046
data = """
idx A ID3 DATETIME
0 B-028 b76cd912ff "2014-10-08 13:43:27"
1 B-054 4a57ed0b02 "2014-10-08 14:26:19"
2 B-076 1a682034f8 "2014-10-08 14:29:01"
3 B-023 b76cd912ff "2014-10-08 18:39:34"
4 B-023 f88g8d7sds "2014-10-08 18:40:18"
5 B-033 b76cd912ff "2014-10-08 18:44:30"
6 B-032 b76cd912ff "2014-10-08 18:46:00"
7 B-037 b76cd912ff "2014-10-08 18:52:15"
8 B-046 db959faf02 "2014-10-08 18:59:59"
9 B-053 b76cd912ff "2014-10-08 19:17:48"
10 B-065 b76cd912ff "2014-10-08 19:21:38"
"""
df = pd.read_csv(StringIO(data), sep=r'\s+',
index_col=[0], parse_dates=['DATETIME'])
result = df.groupby('ID3')['DATETIME'].transform(lambda x: x.diff())
assert is_timedelta64_dtype(result.dtype)
result = df[['ID3', 'DATETIME']].groupby('ID3').transform(
lambda x: x.diff())
assert is_timedelta64_dtype(result.DATETIME.dtype)
def test_transform_multiple(ts):
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
grouped.transform(lambda x: x * 2)
grouped.transform(np.mean)
def test_dispatch_transform(tsframe):
df = tsframe[::5].reindex(tsframe.index)
grouped = df.groupby(lambda x: x.month)
filled = grouped.fillna(method='pad')
fillit = lambda x: x.fillna(method='pad')
expected = df.groupby(lambda x: x.month).transform(fillit)
assert_frame_equal(filled, expected)
def test_transform_select_columns(df):
f = lambda x: x.mean()
result = df.groupby('A')['C', 'D'].transform(f)
selection = df[['C', 'D']]
expected = selection.groupby(df['A']).transform(f)
assert_frame_equal(result, expected)
def test_transform_exclude_nuisance(df):
# this also tests orderings in transform between
# series/frame to make sure it's consistent
expected = {}
grouped = df.groupby('A')
expected['C'] = grouped['C'].transform(np.mean)
expected['D'] = grouped['D'].transform(np.mean)
expected = DataFrame(expected)
result = df.groupby('A').transform(np.mean)
assert_frame_equal(result, expected)
def test_transform_function_aliases(df):
result = df.groupby('A').transform('mean')
expected = df.groupby('A').transform(np.mean)
assert_frame_equal(result, expected)
result = df.groupby('A')['C'].transform('mean')
expected = df.groupby('A')['C'].transform(np.mean)
assert_series_equal(result, expected)
def test_series_fast_transform_date():
# GH 13191
df = pd.DataFrame({'grouping': [np.nan, 1, 1, 3],
'd': pd.date_range('2014-1-1', '2014-1-4')})
result = df.groupby('grouping')['d'].transform('first')
dates = [pd.NaT, pd.Timestamp('2014-1-2'), pd.Timestamp('2014-1-2'),
pd.Timestamp('2014-1-4')]
expected = pd.Series(dates, name='d')
assert_series_equal(result, expected)
def test_transform_length():
# GH 9697
df = pd.DataFrame({'col1': [1, 1, 2, 2], 'col2': [1, 2, 3, np.nan]})
expected = pd.Series([3.0] * 4)
def nsum(x):
return np.nansum(x)
results = [df.groupby('col1').transform(sum)['col2'],
df.groupby('col1')['col2'].transform(sum),
df.groupby('col1').transform(nsum)['col2'],
df.groupby('col1')['col2'].transform(nsum)]
for result in results:
assert_series_equal(result, expected, check_names=False)
def test_transform_coercion():
# 14457
# when we are transforming be sure to not coerce
# via assignment
df = pd.DataFrame(dict(A=['a', 'a'], B=[0, 1]))
g = df.groupby('A')
expected = g.transform(np.mean)
result = g.transform(lambda x: np.mean(x))
assert_frame_equal(result, expected)
def test_groupby_transform_with_int():
# GH 3740, make sure that we might upcast on item-by-item transform
# floats
df = DataFrame(dict(A=[1, 1, 1, 2, 2, 2], B=Series(1, dtype='float64'),
C=Series(
[1, 2, 3, 1, 2, 3], dtype='float64'), D='foo'))
with np.errstate(all='ignore'):
result = df.groupby('A').transform(
lambda x: (x - x.mean()) / x.std())
expected = DataFrame(dict(B=np.nan, C=Series(
[-1, 0, 1, -1, 0, 1], dtype='float64')))
assert_frame_equal(result, expected)
# int case
df = DataFrame(dict(A=[1, 1, 1, 2, 2, 2], B=1,
C=[1, 2, 3, 1, 2, 3], D='foo'))
with np.errstate(all='ignore'):
result = df.groupby('A').transform(
lambda x: (x - x.mean()) / x.std())
expected = DataFrame(dict(B=np.nan, C=[-1, 0, 1, -1, 0, 1]))
assert_frame_equal(result, expected)
# int that needs float conversion
s = Series([2, 3, 4, 10, 5, -1])
df = DataFrame(dict(A=[1, 1, 1, 2, 2, 2], B=1, C=s, D='foo'))
with np.errstate(all='ignore'):
result = df.groupby('A').transform(
lambda x: (x - x.mean()) / x.std())
s1 = s.iloc[0:3]
s1 = (s1 - s1.mean()) / s1.std()
s2 = s.iloc[3:6]
s2 = (s2 - s2.mean()) / s2.std()
expected = DataFrame(dict(B=np.nan, C=concat([s1, s2])))
assert_frame_equal(result, expected)
# int downcasting
result = df.groupby('A').transform(lambda x: x * 2 / 2)
expected = DataFrame(dict(B=1, C=[2, 3, 4, 10, 5, -1]))
assert_frame_equal(result, expected)
def test_groupby_transform_with_nan_group():
# GH 9941
df = pd.DataFrame({'a': range(10),
'b': [1, 1, 2, 3, np.nan, 4, 4, 5, 5, 5]})
result = df.groupby(df.b)['a'].transform(max)
expected = pd.Series([1., 1., 2., 3., np.nan, 6., 6., 9., 9., 9.],
name='a')
assert_series_equal(result, expected)
def test_transform_mixed_type():
index = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [1, 2, 3, 1, 2, 3]
])
df = DataFrame({'d': [1., 1., 1., 2., 2., 2.],
'c': np.tile(['a', 'b', 'c'], 2),
'v': np.arange(1., 7.)}, index=index)
def f(group):
group['g'] = group['d'] * 2
return group[:1]
grouped = df.groupby('c')
result = grouped.apply(f)
assert result['d'].dtype == np.float64
# this is by definition a mutating operation!
with pd.option_context('mode.chained_assignment', None):
for key, group in grouped:
res = f(group)
assert_frame_equal(res, result.loc[key])
def _check_cython_group_transform_cumulative(pd_op, np_op, dtype):
"""
Check a group transform that executes a cumulative function.
Parameters
----------
pd_op : callable
The pandas cumulative function.
np_op : callable
The analogous one in NumPy.
dtype : type
The specified dtype of the data.
"""
is_datetimelike = False
data = np.array([[1], [2], [3], [4]], dtype=dtype)
ans = np.zeros_like(data)
labels = np.array([0, 0, 0, 0], dtype=np.int64)
ngroups = 1
pd_op(ans, data, labels, ngroups, is_datetimelike)
tm.assert_numpy_array_equal(np_op(data), ans[:, 0],
check_dtype=False)
def test_cython_group_transform_cumsum(any_real_dtype):
# see gh-4095
dtype = np.dtype(any_real_dtype).type
pd_op, np_op = groupby.group_cumsum, np.cumsum
_check_cython_group_transform_cumulative(pd_op, np_op, dtype)
def test_cython_group_transform_cumprod():
# see gh-4095
dtype = np.float64
pd_op, np_op = groupby.group_cumprod_float64, np.cumproduct
_check_cython_group_transform_cumulative(pd_op, np_op, dtype)
def test_cython_group_transform_algos():
# see gh-4095
is_datetimelike = False
# with nans
labels = np.array([0, 0, 0, 0, 0], dtype=np.int64)
ngroups = 1
data = np.array([[1], [2], [3], [np.nan], [4]], dtype='float64')
actual = np.zeros_like(data)
actual.fill(np.nan)
groupby.group_cumprod_float64(actual, data, labels, ngroups,
is_datetimelike)
expected = np.array([1, 2, 6, np.nan, 24], dtype='float64')
tm.assert_numpy_array_equal(actual[:, 0], expected)
actual = np.zeros_like(data)
actual.fill(np.nan)
groupby.group_cumsum(actual, data, labels, ngroups, is_datetimelike)
expected = np.array([1, 3, 6, np.nan, 10], dtype='float64')
tm.assert_numpy_array_equal(actual[:, 0], expected)
# timedelta
is_datetimelike = True
data = np.array([np.timedelta64(1, 'ns')] * 5, dtype='m8[ns]')[:, None]
actual = np.zeros_like(data, dtype='int64')
groupby.group_cumsum(actual, data.view('int64'), labels,
ngroups, is_datetimelike)
expected = np.array([np.timedelta64(1, 'ns'), np.timedelta64(
2, 'ns'), np.timedelta64(3, 'ns'), np.timedelta64(4, 'ns'),
np.timedelta64(5, 'ns')])
tm.assert_numpy_array_equal(actual[:, 0].view('m8[ns]'), expected)
@pytest.mark.parametrize(
"op, args, targop",
[('cumprod', (), lambda x: x.cumprod()),
('cumsum', (), lambda x: x.cumsum()),
('shift', (-1, ), lambda x: x.shift(-1)),
('shift', (1, ), lambda x: x.shift())])
def test_cython_transform_series(op, args, targop):
# GH 4095
s = Series(np.random.randn(1000))
s_missing = s.copy()
s_missing.iloc[2:10] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
# series
for data in [s, s_missing]:
# print(data.head())
expected = data.groupby(labels).transform(targop)
tm.assert_series_equal(
expected,
data.groupby(labels).transform(op, *args))
tm.assert_series_equal(expected, getattr(
data.groupby(labels), op)(*args))
@pytest.mark.parametrize("op", ['cumprod', 'cumsum'])
@pytest.mark.parametrize("skipna", [False, True])
@pytest.mark.parametrize('input, exp', [
# When everything is NaN
({'key': ['b'] * 10, 'value': np.nan},
pd.Series([np.nan] * 10, name='value')),
# When there is a single NaN
({'key': ['b'] * 10 + ['a'] * 2,
'value': [3] * 3 + [np.nan] + [3] * 8},
{('cumprod', False): [3.0, 9.0, 27.0] + [np.nan] * 7 + [3.0, 9.0],
('cumprod', True): [3.0, 9.0, 27.0, np.nan, 81., 243., 729.,
2187., 6561., 19683., 3.0, 9.0],
('cumsum', False): [3.0, 6.0, 9.0] + [np.nan] * 7 + [3.0, 6.0],
('cumsum', True): [3.0, 6.0, 9.0, np.nan, 12., 15., 18.,
21., 24., 27., 3.0, 6.0]})])
def test_groupby_cum_skipna(op, skipna, input, exp):
df = pd.DataFrame(input)
result = df.groupby('key')['value'].transform(op, skipna=skipna)
if isinstance(exp, dict):
expected = exp[(op, skipna)]
else:
expected = exp
expected = pd.Series(expected, name='value')
tm.assert_series_equal(expected, result)
@pytest.mark.parametrize(
"op, args, targop",
[('cumprod', (), lambda x: x.cumprod()),
('cumsum', (), lambda x: x.cumsum()),
('shift', (-1, ), lambda x: x.shift(-1)),
('shift', (1, ), lambda x: x.shift())])
def test_cython_transform_frame(op, args, targop):
s = Series(np.random.randn(1000))
s_missing = s.copy()
s_missing.iloc[2:10] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
strings = list('qwertyuiopasdfghjklz')
strings_missing = strings[:]
strings_missing[5] = np.nan
df = DataFrame({'float': s,
'float_missing': s_missing,
'int': [1, 1, 1, 1, 2] * 200,
'datetime': pd.date_range('1990-1-1', periods=1000),
'timedelta': pd.timedelta_range(1, freq='s',
periods=1000),
'string': strings * 50,
'string_missing': strings_missing * 50},
columns=['float', 'float_missing', 'int', 'datetime',
'timedelta', 'string', 'string_missing'])
df['cat'] = df['string'].astype('category')
df2 = df.copy()
df2.index = pd.MultiIndex.from_product([range(100), range(10)])
# DataFrame - Single and MultiIndex,
# group by values, index level, columns
for df in [df, df2]:
        for gb_target in [dict(by=labels), dict(level=0),
                          dict(by='string')]:
            # further targets to consider: dict(by='string_missing'),
            # dict(by=['int', 'string'])
gb = df.groupby(**gb_target)
# whitelisted methods set the selection before applying
# bit a of hack to make sure the cythonized shift
# is equivalent to pre 0.17.1 behavior
if op == 'shift':
gb._set_group_selection()
if op != 'shift' and 'int' not in gb_target:
# numeric apply fastpath promotes dtype so have
# to apply separately and concat
i = gb[['int']].apply(targop)
f = gb[['float', 'float_missing']].apply(targop)
expected = pd.concat([f, i], axis=1)
else:
expected = gb.apply(targop)
expected = expected.sort_index(axis=1)
tm.assert_frame_equal(expected,
gb.transform(op, *args).sort_index(
axis=1))
tm.assert_frame_equal(
expected,
getattr(gb, op)(*args).sort_index(axis=1))
# individual columns
for c in df:
if c not in ['float', 'int', 'float_missing'
] and op != 'shift':
msg = "No numeric types to aggregate"
with pytest.raises(DataError, match=msg):
gb[c].transform(op)
with pytest.raises(DataError, match=msg):
getattr(gb[c], op)()
else:
expected = gb[c].apply(targop)
expected.name = c
tm.assert_series_equal(expected,
gb[c].transform(op, *args))
tm.assert_series_equal(expected,
getattr(gb[c], op)(*args))
def test_transform_with_non_scalar_group():
# GH 10165
cols = pd.MultiIndex.from_tuples([
('syn', 'A'), ('mis', 'A'), ('non', 'A'),
('syn', 'C'), ('mis', 'C'), ('non', 'C'),
('syn', 'T'), ('mis', 'T'), ('non', 'T'),
('syn', 'G'), ('mis', 'G'), ('non', 'G')])
df = pd.DataFrame(np.random.randint(1, 10, (4, 12)),
columns=cols,
index=['A', 'C', 'G', 'T'])
msg = 'transform must return a scalar value for each group.*'
with pytest.raises(ValueError, match=msg):
df.groupby(axis=1, level=1).transform(
lambda z: z.div(z.sum(axis=1), axis=0))
@pytest.mark.parametrize('cols,exp,comp_func', [
('a', pd.Series([1, 1, 1], name='a'), tm.assert_series_equal),
(['a', 'c'], pd.DataFrame({'a': [1, 1, 1], 'c': [1, 1, 1]}),
tm.assert_frame_equal)
])
@pytest.mark.parametrize('agg_func', [
'count', 'rank', 'size'])
def test_transform_numeric_ret(cols, exp, comp_func, agg_func):
if agg_func == 'size' and isinstance(cols, list):
pytest.xfail("'size' transformation not supported with "
"NDFrameGroupy")
# GH 19200
df = pd.DataFrame(
{'a': pd.date_range('2018-01-01', periods=3),
'b': range(3),
'c': range(7, 10)})
result = df.groupby('b')[cols].transform(agg_func)
if agg_func == 'rank':
exp = exp.astype('float')
comp_func(result, exp)
@pytest.mark.parametrize("mix_groupings", [True, False])
@pytest.mark.parametrize("as_series", [True, False])
@pytest.mark.parametrize("val1,val2", [
('foo', 'bar'), (1, 2), (1., 2.)])
@pytest.mark.parametrize("fill_method,limit,exp_vals", [
("ffill", None,
[np.nan, np.nan, 'val1', 'val1', 'val1', 'val2', 'val2', 'val2']),
("ffill", 1,
[np.nan, np.nan, 'val1', 'val1', np.nan, 'val2', 'val2', np.nan]),
("bfill", None,
['val1', 'val1', 'val1', 'val2', 'val2', 'val2', np.nan, np.nan]),
("bfill", 1,
[np.nan, 'val1', 'val1', np.nan, 'val2', 'val2', np.nan, np.nan])
])
def test_group_fill_methods(mix_groupings, as_series, val1, val2,
fill_method, limit, exp_vals):
vals = [np.nan, np.nan, val1, np.nan, np.nan, val2, np.nan, np.nan]
_exp_vals = list(exp_vals)
# Overwrite placeholder values
for index, exp_val in enumerate(_exp_vals):
if exp_val == 'val1':
_exp_vals[index] = val1
elif exp_val == 'val2':
_exp_vals[index] = val2
# Need to modify values and expectations depending on the
# Series / DataFrame that we ultimately want to generate
    if mix_groupings:  # ['a', 'b', 'a', 'b', ...]
keys = ['a', 'b'] * len(vals)
def interweave(list_obj):
temp = list()
for x in list_obj:
temp.extend([x, x])
return temp
_exp_vals = interweave(_exp_vals)
vals = interweave(vals)
else: # ['a', 'a', 'a', ... 'b', 'b', 'b']
keys = ['a'] * len(vals) + ['b'] * len(vals)
_exp_vals = _exp_vals * 2
vals = vals * 2
df = DataFrame({'key': keys, 'val': vals})
if as_series:
result = getattr(
df.groupby('key')['val'], fill_method)(limit=limit)
exp = Series(_exp_vals, name='val')
assert_series_equal(result, exp)
else:
result = getattr(df.groupby('key'), fill_method)(limit=limit)
exp = DataFrame({'val': _exp_vals})
assert_frame_equal(result, exp)
@pytest.mark.parametrize("fill_method", ['ffill', 'bfill'])
def test_pad_stable_sorting(fill_method):
# GH 21207
x = [0] * 20
y = [np.nan] * 10 + [1] * 10
if fill_method == 'bfill':
y = y[::-1]
df = pd.DataFrame({'x': x, 'y': y})
expected = df.drop('x', 1)
result = getattr(df.groupby('x'), fill_method)()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("test_series", [True, False])
@pytest.mark.parametrize("freq", [
None,
pytest.param('D', marks=pytest.mark.xfail(
reason='GH#23918 before method uses freq in vectorized approach'))])
@pytest.mark.parametrize("periods,fill_method,limit", [
(1, 'ffill', None), (1, 'ffill', 1),
(1, 'bfill', None), (1, 'bfill', 1),
(-1, 'ffill', None), (-1, 'ffill', 1),
(-1, 'bfill', None), (-1, 'bfill', 1),
])
def test_pct_change(test_series, freq, periods, fill_method, limit):
# GH 21200, 21621
vals = [3, np.nan, np.nan, np.nan, 1, 2, 4, 10, np.nan, 4]
keys = ['a', 'b']
key_v = np.repeat(keys, len(vals))
df = DataFrame({'key': key_v, 'vals': vals * 2})
df_g = getattr(df.groupby('key'), fill_method)(limit=limit)
grp = df_g.groupby(df.key)
expected = grp['vals'].obj / grp['vals'].shift(periods) - 1
if test_series:
result = df.groupby('key')['vals'].pct_change(
periods=periods, fill_method=fill_method, limit=limit, freq=freq)
tm.assert_series_equal(result, expected)
else:
result = df.groupby('key').pct_change(
periods=periods, fill_method=fill_method, limit=limit, freq=freq)
tm.assert_frame_equal(result, expected.to_frame('vals'))
@pytest.mark.parametrize("func", [np.any, np.all])
def test_any_all_np_func(func):
# GH 20653
df = pd.DataFrame([['foo', True],
[np.nan, True],
['foo', True]], columns=['key', 'val'])
exp = pd.Series([True, np.nan, True], name='val')
res = df.groupby('key')['val'].transform(func)
tm.assert_series_equal(res, exp)
def test_groupby_transform_rename():
# https://github.com/pandas-dev/pandas/issues/23461
def demean_rename(x):
result = x - x.mean()
if isinstance(x, pd.Series):
return result
result = result.rename(
columns={c: '{}_demeaned'.format(c) for c in result.columns})
return result
df = pd.DataFrame({'group': list('ababa'),
'value': [1, 1, 1, 2, 2]})
expected = pd.DataFrame({'value': [-1. / 3, -0.5, -1. / 3, 0.5, 2. / 3]})
result = df.groupby('group').transform(demean_rename)
tm.assert_frame_equal(result, expected)
result_single = df.groupby('group').value.transform(demean_rename)
tm.assert_series_equal(result_single, expected['value'])
@pytest.mark.parametrize('func', [min, max, np.min, np.max, 'first', 'last'])
def test_groupby_transform_timezone_column(func):
# GH 24198
ts = pd.to_datetime('now', utc=True).tz_convert('Asia/Singapore')
result = pd.DataFrame({'end_time': [ts], 'id': [1]})
result['max_end_time'] = result.groupby('id').end_time.transform(func)
expected = pd.DataFrame([[ts, 1, ts]], columns=['end_time', 'id',
'max_end_time'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("func, values", [
("idxmin", ["1/1/2011"] * 2 + ["1/3/2011"] * 7 + ["1/10/2011"]),
("idxmax", ["1/2/2011"] * 2 + ["1/9/2011"] * 7 + ["1/10/2011"])
])
def test_groupby_transform_with_datetimes(func, values):
# GH 15306
dates = pd.date_range('1/1/2011', periods=10, freq='D')
stocks = pd.DataFrame({'price': np.arange(10.0)}, index=dates)
stocks['week_id'] = pd.to_datetime(stocks.index).week
result = stocks.groupby(stocks['week_id'])['price'].transform(func)
expected = pd.Series(data=pd.to_datetime(values),
index=dates, name="price")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('func', ['cumsum', 'cumprod', 'cummin', 'cummax'])
def test_transform_absent_categories(func):
# GH 16771
# cython transforms with more groups than rows
x_vals = [1]
x_cats = range(2)
y = [1]
df = DataFrame(dict(x=Categorical(x_vals, x_cats), y=y))
result = getattr(df.y.groupby(df.x), func)()
expected = df.y
assert_series_equal(result, expected)
@pytest.mark.parametrize('func', ['ffill', 'bfill', 'shift'])
@pytest.mark.parametrize('key, val', [('level', 0), ('by', Series([0]))])
def test_ffill_not_in_axis(func, key, val):
# GH 21521
df = pd.DataFrame([[np.nan]])
result = getattr(df.groupby(**{key: val}), func)()
expected = df
assert_frame_equal(result, expected)
|
from django.conf.urls import *
from dbe.cal.models import *
from dbe.cal.views import *
from django.contrib.auth.decorators import login_required
urlpatterns = patterns("dbe.cal.views",
(r"^month/(\d+)/(\d+)/(-?1)/$" , login_required(MonthView.as_view()), {}, "month"),
(r"^month/(\d+)/(\d+)/$" , login_required(MonthView.as_view()), {}, "month"),
(r"^day/(\d+)/(\d+)/(\d+)/$" , login_required(DayView.as_view()), {}, "day"),
(r"^settings/(?P<dpk>\d+)/$" , login_required(SettingsView.as_view()), {}, "settings"),
(r"^(\d+)$" , login_required(MainView.as_view()), {}, "main"),
(r"^$" , login_required(MainView.as_view()), {}, "main"),
# (r"^month/(\d+)/(\d+)/$", "month"),
# (r"^month$", "month"),
# (r"^day/(\d+)/(\d+)/(\d+)/$", "day"),
# (r"^settings/$", "settings"),
# (r"^(\d+)/$", "main"),
# (r"", "main"),
)
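# Note: patterns() was removed in Django 1.10. On newer Django the same
# routes would be declared as a plain list (a sketch, not part of this app):
#
#   from django.conf.urls import url
#   urlpatterns = [
#       url(r"^month/(\d+)/(\d+)/$", login_required(MonthView.as_view()), name="month"),
#       ...
#   ]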
|
from itertools import product
import numpy as np
import pytest
from pandas import DataFrame, NaT, date_range
import pandas._testing as tm
@pytest.fixture(params=product([True, False], [True, False]))
def close_open_fixture(request):
return request.param
@pytest.fixture
def float_frame_with_na():
"""
Fixture for DataFrame of floats with index of unique strings
Columns are ['A', 'B', 'C', 'D']; some entries are missing
A B C D
ABwBzA0ljw -1.128865 -0.897161 0.046603 0.274997
DJiRzmbyQF 0.728869 0.233502 0.722431 -0.890872
neMgPD5UBF 0.486072 -1.027393 -0.031553 1.449522
0yWA4n8VeX -1.937191 -1.142531 0.805215 -0.462018
3slYUbbqU1 0.153260 1.164691 1.489795 -0.545826
soujjZ0A08 NaN NaN NaN NaN
7W6NLGsjB9 NaN NaN NaN NaN
... ... ... ... ...
uhfeaNkCR1 -0.231210 -0.340472 0.244717 -0.901590
n6p7GYuBIV -0.419052 1.922721 -0.125361 -0.727717
ZhzAeY6p1y 1.234374 -1.425359 -0.827038 -0.633189
uWdPsORyUh 0.046738 -0.980445 -1.102965 0.605503
3DJA6aN590 -0.091018 -1.684734 -1.100900 0.215947
2GBPAzdbMk -2.883405 -1.021071 1.209877 1.633083
sHadBoyVHw -2.223032 -0.326384 0.258931 0.245517
[30 rows x 4 columns]
"""
df = DataFrame(tm.getSeriesData())
# set some NAs
df.iloc[5:10] = np.nan
df.iloc[15:20, -2:] = np.nan
return df
@pytest.fixture
def bool_frame_with_na():
"""
Fixture for DataFrame of booleans with index of unique strings
Columns are ['A', 'B', 'C', 'D']; some entries are missing
A B C D
zBZxY2IDGd False False False False
IhBWBMWllt False True True True
ctjdvZSR6R True False True True
AVTujptmxb False True False True
G9lrImrSWq False False False True
sFFwdIUfz2 NaN NaN NaN NaN
s15ptEJnRb NaN NaN NaN NaN
... ... ... ... ...
UW41KkDyZ4 True True False False
l9l6XkOdqV True False False False
X2MeZfzDYA False True False False
xWkIKU7vfX False True False True
QOhL6VmpGU False False False True
22PwkRJdat False True False False
kfboQ3VeIK True False True False
[30 rows x 4 columns]
"""
df = DataFrame(tm.getSeriesData()) > 0
df = df.astype(object)
# set some NAs
df.iloc[5:10] = np.nan
df.iloc[15:20, -2:] = np.nan
# For `any` tests we need to have at least one True before the first NaN
# in each column
for i in range(4):
df.iloc[i, i] = True
return df
@pytest.fixture
def float_string_frame():
"""
Fixture for DataFrame of floats and strings with index of unique strings
Columns are ['A', 'B', 'C', 'D', 'foo'].
A B C D foo
w3orJvq07g -1.594062 -1.084273 -1.252457 0.356460 bar
PeukuVdmz2 0.109855 -0.955086 -0.809485 0.409747 bar
ahp2KvwiM8 -1.533729 -0.142519 -0.154666 1.302623 bar
3WSJ7BUCGd 2.484964 0.213829 0.034778 -2.327831 bar
khdAmufk0U -0.193480 -0.743518 -0.077987 0.153646 bar
LE2DZiFlrE -0.193566 -1.343194 -0.107321 0.959978 bar
HJXSJhVn7b 0.142590 1.257603 -0.659409 -0.223844 bar
... ... ... ... ... ...
9a1Vypttgw -1.316394 1.601354 0.173596 1.213196 bar
h5d1gVFbEy 0.609475 1.106738 -0.155271 0.294630 bar
mK9LsTQG92 1.303613 0.857040 -1.019153 0.369468 bar
oOLksd9gKH 0.558219 -0.134491 -0.289869 -0.951033 bar
9jgoOjKyHg 0.058270 -0.496110 -0.413212 -0.852659 bar
jZLDHclHAO 0.096298 1.267510 0.549206 -0.005235 bar
lR0nxDp1C2 -2.119350 -0.794384 0.544118 0.145849 bar
[30 rows x 5 columns]
"""
df = DataFrame(tm.getSeriesData())
df["foo"] = "bar"
return df
@pytest.fixture
def mixed_float_frame():
"""
Fixture for DataFrame of different float types with index of unique strings
Columns are ['A', 'B', 'C', 'D'].
A B C D
GI7bbDaEZe -0.237908 -0.246225 -0.468506 0.752993
KGp9mFepzA -1.140809 -0.644046 -1.225586 0.801588
VeVYLAb1l2 -1.154013 -1.677615 0.690430 -0.003731
kmPME4WKhO 0.979578 0.998274 -0.776367 0.897607
CPyopdXTiz 0.048119 -0.257174 0.836426 0.111266
0kJZQndAj0 0.274357 -0.281135 -0.344238 0.834541
tqdwQsaHG8 -0.979716 -0.519897 0.582031 0.144710
... ... ... ... ...
7FhZTWILQj -2.906357 1.261039 -0.780273 -0.537237
4pUDPM4eGq -2.042512 -0.464382 -0.382080 1.132612
B8dUgUzwTi -1.506637 -0.364435 1.087891 0.297653
hErlVYjVv9 1.477453 -0.495515 -0.713867 1.438427
1BKN3o7YLs 0.127535 -0.349812 -0.881836 0.489827
9S4Ekn7zga 1.445518 -2.095149 0.031982 0.373204
xN1dNn6OV6 1.425017 -0.983995 -0.363281 -0.224502
[30 rows x 4 columns]
"""
df = DataFrame(tm.getSeriesData())
df.A = df.A.astype("float32")
df.B = df.B.astype("float32")
df.C = df.C.astype("float16")
df.D = df.D.astype("float64")
return df
@pytest.fixture
def mixed_int_frame():
"""
Fixture for DataFrame of different int types with index of unique strings
Columns are ['A', 'B', 'C', 'D'].
A B C D
mUrCZ67juP 0 1 2 2
rw99ACYaKS 0 1 0 0
7QsEcpaaVU 0 1 1 1
xkrimI2pcE 0 1 0 0
dz01SuzoS8 0 1 255 255
ccQkqOHX75 -1 1 0 0
DN0iXaoDLd 0 1 0 0
... .. .. ... ...
Dfb141wAaQ 1 1 254 254
IPD8eQOVu5 0 1 0 0
CcaKulsCmv 0 1 0 0
rIBa8gu7E5 0 1 0 0
RP6peZmh5o 0 1 1 1
NMb9pipQWQ 0 1 0 0
PqgbJEzjib 0 1 3 3
[30 rows x 4 columns]
"""
df = DataFrame({k: v.astype(int) for k, v in tm.getSeriesData().items()})
df.A = df.A.astype("int32")
df.B = np.ones(len(df.B), dtype="uint64")
df.C = df.C.astype("uint8")
df.D = df.C.astype("int64")
return df
@pytest.fixture
def mixed_type_frame():
"""
Fixture for DataFrame of float/int/string columns with RangeIndex
Columns are ['a', 'b', 'c', 'float32', 'int32'].
"""
return DataFrame(
{
"a": 1.0,
"b": 2,
"c": "foo",
"float32": np.array([1.0] * 10, dtype="float32"),
"int32": np.array([1] * 10, dtype="int32"),
},
index=np.arange(10),
)
@pytest.fixture
def timezone_frame():
"""
Fixture for DataFrame of date_range Series with different time zones
Columns are ['A', 'B', 'C']; some entries are missing
A B C
0 2013-01-01 2013-01-01 00:00:00-05:00 2013-01-01 00:00:00+01:00
1 2013-01-02 NaT NaT
2 2013-01-03 2013-01-03 00:00:00-05:00 2013-01-03 00:00:00+01:00
"""
df = DataFrame(
{
"A": date_range("20130101", periods=3),
"B": date_range("20130101", periods=3, tz="US/Eastern"),
"C": date_range("20130101", periods=3, tz="CET"),
}
)
df.iloc[1, 1] = NaT
df.iloc[1, 2] = NaT
return df
@pytest.fixture
def uint64_frame():
"""
Fixture for DataFrame with uint64 values
Columns are ['A', 'B']
"""
return DataFrame(
{"A": np.arange(3), "B": [2 ** 63, 2 ** 63 + 5, 2 ** 63 + 10]}, dtype=np.uint64
)
@pytest.fixture
def simple_frame():
"""
Fixture for simple 3x3 DataFrame
Columns are ['one', 'two', 'three'], index is ['a', 'b', 'c'].
one two three
a 1.0 2.0 3.0
b 4.0 5.0 6.0
c 7.0 8.0 9.0
"""
arr = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])
return DataFrame(arr, columns=["one", "two", "three"], index=["a", "b", "c"])
@pytest.fixture
def frame_of_index_cols():
"""
Fixture for DataFrame of columns that can be used for indexing
Columns are ['A', 'B', 'C', 'D', 'E', ('tuple', 'as', 'label')];
'A' & 'B' contain duplicates (but are jointly unique), the rest are unique.
A B C D E (tuple, as, label)
0 foo one a 0.608477 -0.012500 -1.664297
1 foo two b -0.633460 0.249614 -0.364411
2 foo three c 0.615256 2.154968 -0.834666
3 bar one d 0.234246 1.085675 0.718445
4 bar two e 0.533841 -0.005702 -3.533912
"""
df = DataFrame(
{
"A": ["foo", "foo", "foo", "bar", "bar"],
"B": ["one", "two", "three", "one", "two"],
"C": ["a", "b", "c", "d", "e"],
"D": np.random.randn(5),
"E": np.random.randn(5),
("tuple", "as", "label"): np.random.randn(5),
}
)
return df
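# Example (a sketch): any test in this package can request one of the
# fixtures above by name, e.g.:
#
#   def test_simple_frame_sum(simple_frame):
#       assert simple_frame["one"].sum() == 12.0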
|
import numpy as np
from .addon import AddonModel
class EffectiveStressModel(AddonModel):
"""Effective stress model"""
name = '__effstress__'
def __init__(self, porepres):
self.num_sdv = 1
self.sdv_names = ['POREPRES']
self.porepres = np.asarray(porepres)
if len(self.porepres.shape) != 2:
            raise ValueError('porepres must be a 2 dimensional array with '
                             'the first row being time and the second row '
                             'the associated pore pressure.')
def get_porepres_at_time(self, time):
        return np.interp(time, self.porepres[0, :], self.porepres[1, :],
                         left=self.porepres[1, 0], right=self.porepres[1, -1])
def sdvini(self, statev):
statev = np.array([self.get_porepres_at_time(0.)])
return statev
def eval(self, kappa, time, dtime, temp, dtemp,
F0, F, strain, d, stress, statev, **kwds):
"""Evaluate the effective stress model
"""
porepres = self.get_porepres_at_time(time+dtime/2.)
stress[[0,1,2]] -= porepres
statev[0] = porepres
return None
def posteval(self, kappa, time, dtime, temp, dtemp, F0, F, strain, d,
stress, statev, **kwds):
porepres = self.get_porepres_at_time(time+dtime/2.)
stress[[0,1,2]] += porepres
statev[0] = porepres
return None
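# Example usage (a minimal sketch; `porepres` is a 2-row array with times in
# the first row and pore pressures in the second):
#
#   model = EffectiveStressModel([[0.0, 1.0, 2.0],
#                                 [0.0, 10.0, 20.0]])
#   model.get_porepres_at_time(0.5)   # -> 5.0 by linear interpolation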
|
from __future__ import division, unicode_literals, print_function
from django.core.management import BaseCommand
from applications.posts.models import Post, Category
def smart_print(text):
print(text.encode("utf-8"))
class Command(BaseCommand):
def handle(self, *args, **options):
posts = Post.objects.all()
categories = Category.objects.all()
for category in categories:
for post in posts:
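                # crude relevance heuristic: attach the category when its
                # name occurs at least ten times in the post content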
if len(post.content.split(category.name)) > 10:
smart_print("%s 归于 %s" % (post.title, category.name))
post.categories.add(category)
#self.auto_rating(posts)
def auto_rating(self, posts):
for post in posts:
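            # one rating point per 300 characters of content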
rating = int(len(post.content) / 300.0)
post.rating = rating
post.save()
smart_print("%s %d分" % (post.title, post.rating))
|
"""Module Description
Copyright (c) 2008 H. Gene Shin <shin@jimmy.harvard.edu>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD License (see the file COPYING included with
the distribution).
@status: experimental
@version: $Revision$
@author: H. Gene Shin
@contact: shin@jimmy.harvard.edu
"""
import itertools
import sys
from array import *
from bisect import *
from Cistrome.Assoc.inout import *
class Sampler:
"""Abstract class Sampler"""
def __init__(self,name=''):
self.__name__=name
# for C elegans
self.standard_chroms={'I':'chrI','II':'chrII','III':'chrIII','IV':'chrIV','V':'chrV','M':'chrM','X':'chrX'}
def sample(self):
"""Doing sampling"""
pass
def get_name(self):
"""Return the name of the sampler"""
return self.__name__
def set_name(self,name):
"""Set the name of the sampler"""
self.__name__=name
class GenomeSampler(Sampler):
def __init__(self,name=''):
"""Constructor"""
Sampler.__init__(self,name)
def sample(self,wig=None,resolution=100):
"""Doing sampling"""
try:
chroms=wig.get_chroms()
except AttributeError:
raise Exception("Argument 'wig' must be given")
coordinates={}
for chrom in chroms:
try:
standardchrom=self.standard_chroms[chrom]
except KeyError:
standardchrom=chrom
wigcoord=wig[chrom][0]
coordinates[standardchrom]=[]
for wc in wigcoord:
coordinate=(int(round(1.0*wc/resolution)))*resolution+1
if not coordinates[standardchrom] or coordinate!=coordinates[standardchrom][-1]:
coordinates[standardchrom].append(coordinate)
return coordinates
class ChIPSampler(Sampler):
"""Class ChIPSampler"""
def __init__(self,name=''):
"""Constructor"""
        Sampler.__init__(self, name)
# for C elegans
self.standard_chroms={'I':'chrI','II':'chrII','III':'chrIII','IV':'chrIV','V':'chrV','M':'chrM','X':'chrX'}
def sample(self,bed=None,resolution=600):
"""Doing sampling"""
try:
chroms=bed.get_chroms()
except AttributeError:
raise Exception("Argument 'bed' must be given")
coordinates={}
for chrom in chroms:
try:
standardchrom=self.standard_chroms[chrom]
except KeyError:
standardchrom=chrom
coordinates[standardchrom]=[]
            ChIP = zip(bed[chrom]['start'], bed[chrom]['end'],
                       map(lambda x, y: (x + y) // 2,
                           bed[chrom]['start'], bed[chrom]['end']))
howmanyChIPs=len(ChIP)
for i in xrange(0,howmanyChIPs):
                # get the beginning, end, and center of a peak
beg,end,center=ChIP[i]
### When using the real locations of beg
Ns=range(center,max(0,beg-1),-1*resolution)
Ns.reverse()
Ns+=range(center+resolution,end+1,resolution)
if Ns: coordinates[standardchrom].extend(Ns)
coordinates[standardchrom].sort()
return coordinates
class WigSampler(Sampler):
"""Class WigSampler
This class samples a wig file (object)
"""
def __init__(self,name=''):
"""Constructor"""
        Sampler.__init__(self, name)
def sample(self, wig, resolution):
"""Sample a wig file at the given resolution.
Parameters:
1. wig: a wig object (see inout.py)
2. resolution: sampling resolution
Return:
sampWig: the sampled wig object
"""
# parameter checking
try:
chroms=wig.get_chroms()
except AttributeError:
raise Exception("Argument 'wig' must be given")
sampWig=Wig()
for chrom in chroms:
try:
standardchrom=self.standard_chroms[chrom]
except KeyError:
standardchrom=chrom
samp=[array('l',[]),array('d',[])]
for wc,val in itertools.izip(wig[chrom][0],wig[chrom][1]):
coordinate=(int(round(1.0*wc/resolution)))*resolution+1
if len(samp[0])==0:
samp[0].append(coordinate)
samp[1].append(val)
continue
if coordinate!=samp[0][-1]:
samp[0].append(coordinate)
samp[1].append(val)
#added to sampWig only if there are some point(s)
if samp[0]: sampWig.wig[standardchrom]=samp
return sampWig
class WigSamplerFast(Sampler):
"""Class WigSamplerFast
    This class samples a wig file (object) more quickly. However, this sampler does not guarantee exact sampling.
"""
def __init__(self,name=''):
"""Constructor"""
        Sampler.__init__(self, name)
def sample(self, wig, resolution):
"""Sample a wig file at the given resolution.
Parameters:
1. wig: a wig object (see inout.py)
2. resolution: sampling resolution
Return:
sampWig: the sampled wig object
"""
# parameter checking
try:
chroms=wig.get_chroms()
except AttributeError:
raise Exception("Argument 'wig' must be given")
sampWig=Wig()
for chrom in chroms:
try:
standardchrom=self.standard_chroms[chrom]
except KeyError:
standardchrom=chrom
try:
start = wig[chrom][0][0]
end = wig[chrom][0][-1]
except IndexError:
continue
samp=[array('l',[]),array('d',[])]
cor = wig[chrom][0]
val = wig[chrom][1]
init = 0
prev = -1000
for sc in xrange(start, end, resolution):
# get the closest one to the sampled point and save
gotya = bisect_left(cor[init:], sc)
if prev == (init+gotya): continue
else: prev = (init+gotya)
try:
samp[0].append(cor[init+gotya])
samp[1].append(val[init+gotya])
init += gotya
except IndexError:
continue
#added to sampWig only if there are some point(s)
if samp[0]: sampWig.wig[standardchrom]=samp
return sampWig
def fillupwig(wig,resolution,fillupval=0):
"""Fill up wig with a given value at the resolution
Parameters:
1. wig: a Wig object (see inout.py).
This Wig object is expected to be already sampled by WigSampler at a regular interval.
2. resolution: sampling resolution
3. fillupval: fill-up value, by default 0
Return:
1. fillupWig: filled-up Wig object
"""
fillupWig=Wig()
for chrom in wig.get_chroms():
        # skip chromosomes that are empty or have only a single element
        if len(wig[chrom][0]) <= 1:
            fillupWig.wig[chrom] = wig[chrom][:]
            continue
cs=wig[chrom][0]
vs=wig[chrom][1]
ncs=[cs[0]]
nvs=[vs[0]]
pc=cs[0]
pv=vs[0]
for c,v in itertools.izip(cs[1:],vs[1:]):
if c-pc > resolution:
a=range(pc+resolution,c,resolution)
ncs+=a
                nvs += [fillupval] * len(a)
else:
ncs.append(c)
nvs.append(v)
pc=c
pv=v
fillupWig.wig[chrom]=[array('l',ncs), array('d',nvs)]
return fillupWig
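# Example (a sketch; Wig comes from Cistrome.Assoc.inout): sampling a wig at
# resolution 100 and filling the gaps between samples with `fillupval`:
#
#   sampled = WigSampler().sample(wig, resolution=100)
#   filled = fillupwig(sampled, resolution=100)  # inserts 0.0 at 101, 201, ...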
|
from datetime import datetime
import time
from optparse import make_option
import sys
from django.core.management.base import NoArgsCommand
import simplejson
from pillowtop.couchdb import CachedCouchDB
CHUNK_SIZE = 500
POOL_SIZE = 15
MAX_TRIES = 10
RETRY_DELAY = 60
RETRY_TIME_DELAY_FACTOR = 15
class PtopReindexer(NoArgsCommand):
help = "View based elastic reindexer"
option_list = NoArgsCommand.option_list + (
make_option('--resume',
action='store_true',
dest='resume',
default=False,
help='Resume, do not delete existing index data'),
make_option('--bulk',
action='store_true',
dest='bulk',
default=False,
help='Do a bulk load'),
make_option('--sequence',
type="int",
action='store',
dest='seq',
default=0,
help='Sequence id to resume from'),
make_option('--noinput',
action='store_true',
dest='noinput',
default=False,
help='Skip important confirmation warnings?!?!'),
make_option('--runfile',
action='store',
dest='runfile',
help='Previous run input file prefix',
default=None,),
make_option('--chunk',
action='store',
type='int',
dest='chunk_size',
default=CHUNK_SIZE,
                    help='Chunk size to use when iterating the view',),
)
doc_class = None
view_name = None
couch_key = None
pillow_class = None # the pillow where the main indexing logic is
indexing_pillow_class = None # the pillow that points to the index you want to index. By default this == self.pillow_class
file_prefix = "ptop_fast_reindex_"
own_index_exists = True
def __init__(self):
super(PtopReindexer, self).__init__()
if not getattr(self, "indexing_pillow_class", None):
self.indexing_pillow_class = self.pillow_class
def custom_filter(self, view_row):
"""
Custom filter if you want to do additional filtering based on the view
Return true if to index, false if to SKIP
"""
return True
def get_extra_view_kwargs(self):
return {}
def get_seq_prefix(self):
if hasattr(self, '_seq_prefix'):
datestring = self._seq_prefix
else:
datestring = datetime.now().strftime("%Y-%m-%d-%H%M")
self._seq_prefix = datestring
return datestring
def set_seq_prefix(self, prefix):
self._seq_prefix = prefix
def get_seq_filename(self):
#print "Run file prefix: ptop_fast_reindex_%s_%s" % (self.doc_class.__name__, datestring)
seq_filename = "%s%s_%s_seq.txt" % (self.file_prefix, self.pillow_class.__name__, self.get_seq_prefix())
return seq_filename
def get_dump_filename(self):
view_dump_filename = "%s%s_%s_data.json" % (self.file_prefix, self.pillow_class.__name__, self.get_seq_prefix())
return view_dump_filename
def full_couch_view_iter(self):
start_seq = 0
if hasattr(self.pillow, 'include_docs_when_preindexing'):
include_docs = self.pillow.include_docs_when_preindexing
else:
include_docs = self.pillow.include_docs
view_kwargs = {"include_docs": include_docs}
if self.couch_key is not None:
view_kwargs["key"] = self.couch_key
view_kwargs.update(self.get_extra_view_kwargs())
view_chunk = self.db.view(
self.view_name,
reduce=False,
limit=self.chunk_size * self.chunk_size,
skip=start_seq,
**view_kwargs
)
while len(view_chunk) > 0:
for item in view_chunk:
yield item
start_seq += self.chunk_size * self.chunk_size
view_chunk = self.db.view(self.view_name,
reduce=False,
limit=self.chunk_size * self.chunk_size,
skip=start_seq,
**view_kwargs
)
def load_from_view(self):
"""
Loads entire view, saves to file, set pillowtop checkpoint
"""
# Set pillowtop checkpoint for doc_class
# though this might cause some superfluous reindexes of docs,
# we're going to set the checkpoint BEFORE we start our operation so that any changes
# that happen to cases while we're doing our reindexing would not get skipped once we
# finish.
current_db_seq = self.pillow.couch_db.info()['update_seq']
self.pillow.set_checkpoint({'seq': current_db_seq})
#Write sequence file to disk
with open(self.get_seq_filename(), 'w') as fout:
fout.write(str(current_db_seq))
#load entire view to disk
print "Getting full view list: %s" % datetime.utcnow().isoformat()
with open(self.get_dump_filename(), 'w') as fout:
fout.write('\n'.join(simplejson.dumps(row) for row in self.full_couch_view_iter()))
print "View and sequence written to disk: %s" % datetime.utcnow().isoformat()
def load_seq_from_disk(self):
"""
Main load of view data from disk.
"""
print "Loading from disk: %s" % datetime.utcnow().isoformat()
with open(self.get_seq_filename(), 'r') as fin:
current_db_seq = fin.read()
self.pillow.set_checkpoint({'seq': current_db_seq})
def view_data_file_iter(self):
with open(self.get_dump_filename(), 'r') as fin:
for line in fin:
yield simplejson.loads(line)
def _bootstrap(self, options):
self.resume = options['resume']
self.bulk = options['bulk']
self.pillow = self.pillow_class()
self.indexing_pillow = self.indexing_pillow_class()
self.db = self.doc_class.get_db()
self.runfile = options['runfile']
self.chunk_size = options.get('chunk_size', CHUNK_SIZE)
self.start_num = options.get('seq', 0)
def handle(self, *args, **options):
if not options['noinput']:
confirm = raw_input("""
### %s Fast Reindex !!! ###
You have requested to do an elastic index reset via fast track.
This will IRREVERSIBLY REMOVE
ALL index data in the case index and will take a while to reload.
    Are you sure you want to do this? You MUST have run_ptop disabled for this to run.
Type 'yes' to continue, or 'no' to cancel: """ % self.indexing_pillow_class.__name__)
if confirm != 'yes':
print "\tReset cancelled."
return
confirm_ptop = raw_input("""\tAre you sure you disabled run_ptop? """)
if confirm_ptop != "yes":
return
confirm_alias = raw_input("""\tAre you sure you are not blowing away a production index? """)
if confirm_alias != "yes":
return
self._bootstrap(options)
start = datetime.utcnow()
print "using chunk size %s" % self.chunk_size
if not self.resume:
if self.own_index_exists:
#delete the existing index.
print "Deleting index"
self.indexing_pillow.delete_index()
print "Recreating index"
self.indexing_pillow.create_index()
self.indexing_pillow.seen_types = {}
self.load_from_view()
else:
if self.runfile is None:
print "\tNeed a previous runfile prefix to access older snapshot of view. eg. ptop_fast_reindex_%s_yyyy-mm-dd-HHMM" % self.pillow_class.__name__
sys.exit()
print "Starting fast tracked reindexing from view position %d" % self.start_num
runparts = self.runfile.split('_')
print runparts
if len(runparts) != 5 or not self.runfile.startswith('ptop_fast_reindex'):
print "\tError, runpart name must be in format ptop_fast_reindex_%s_yyyy-mm-dd-HHMM"
sys.exit()
self.set_seq_prefix(runparts[-1])
            self.load_seq_from_disk()
#configure index to indexing mode
self.indexing_pillow.set_index_reindex_settings()
if self.bulk:
print "Preparing Bulk Payload"
self.load_bulk()
else:
print "Loading traditional method"
self.load_traditional()
end = datetime.utcnow()
print "setting index settings to normal search configuration and refreshing index"
self.indexing_pillow.set_index_normal_settings()
self.indexing_pillow.refresh_index()
print "done in %s seconds" % (end - start).seconds
def process_row(self, row, count):
if count >= self.start_num:
retries = 0
while retries < MAX_TRIES:
try:
if not self.custom_filter(row):
break
self.pillow.processor(row, do_set_checkpoint=False)
break
except Exception, ex:
retries += 1
print "\tException sending single item %s, %s, retrying..." % (row['id'], ex)
time.sleep(RETRY_DELAY + retries * RETRY_TIME_DELAY_FACTOR)
else:
print "\tskipping... %d < %d" % (count, self.start_num)
def load_traditional(self):
"""
Iterative view indexing - use --bulk for faster reindex.
:return:
"""
for ix, item in enumerate(self.full_couch_view_iter()):
print "\tProcessing item %s (%d)" % (item['id'], ix)
self.process_row(item, ix)
def load_bulk(self):
start = self.start_num
end = start + self.chunk_size
json_iter = self.view_data_file_iter()
bulk_slice = []
self.pillow.couch_db = CachedCouchDB(self.pillow.document_class.get_db().uri,
readonly=True)
for curr_counter, json_doc in enumerate(json_iter):
if curr_counter < start:
continue
else:
bulk_slice.append(json_doc)
if len(bulk_slice) == self.chunk_size:
self.send_bulk(bulk_slice, start, end)
bulk_slice = []
start += self.chunk_size
end += self.chunk_size
self.send_bulk(bulk_slice, start, end)
def send_bulk(self, slice, start, end):
doc_ids = [x['id'] for x in slice]
self.pillow.couch_db.bulk_load(doc_ids, purge_existing=True)
filtered_ids = set([d['_id'] for d in filter(self.custom_filter, self.pillow.couch_db.get_all())])
filtered_slice = filter(lambda change: change['id'] in filtered_ids, slice)
retries = 0
bulk_start = datetime.utcnow()
while retries < MAX_TRIES:
try:
self.pillow.process_bulk(filtered_slice)
break
except Exception, ex:
retries += 1
retry_time = (datetime.utcnow() - bulk_start).seconds + retries * RETRY_TIME_DELAY_FACTOR
print "\t%s: Exception sending slice %d:%d, %s, retrying in %s seconds" % (datetime.now().isoformat(), start, end, ex, retry_time)
time.sleep(retry_time)
print "\t%s: Retrying again %d:%d..." % (datetime.now().isoformat(), start, end)
bulk_start = datetime.utcnow() #reset timestamp when looping again
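# Example subclass (a hypothetical sketch -- the document and pillow classes
# named below are assumptions, not part of this module):
#
#   class Command(PtopReindexer):
#       doc_class = MyDocument          # couch document class to reindex
#       view_name = 'my_app/by_owner'   # couch view to iterate
#       pillow_class = MyPillow         # pillow holding the indexing logic
#
# which is then run as a management command, e.g. with --bulk for speed.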
|
import sys
import os
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from django.db import connections, DEFAULT_DB_ALIAS, migrations
from django.db.migrations.loader import MigrationLoader, AmbiguityError
from django.db.migrations.autodetector import MigrationAutodetector, InteractiveMigrationQuestioner
from django.db.migrations.executor import MigrationExecutor
from django.db.migrations.writer import MigrationWriter
from django.db.models.loading import cache
from django.db.migrations.optimizer import MigrationOptimizer
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--no-optimize', action='store_true', dest='no_optimize', default=False,
help='Do not try to optimize the squashed operations.'),
make_option('--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.'),
)
help = "Squashes an existing set of migrations (from first until specified) into a single new one."
usage_str = "Usage: ./manage.py squashmigrations app migration_name"
def handle(self, app_label=None, migration_name=None, **options):
self.verbosity = int(options.get('verbosity'))
self.interactive = options.get('interactive')
if app_label is None or migration_name is None:
self.stderr.write(self.usage_str)
sys.exit(1)
# Load the current graph state, check the app and migration they asked for exists
executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])
if app_label not in executor.loader.migrated_apps:
raise CommandError("App '%s' does not have migrations (so squashmigrations on it makes no sense)" % app_label)
try:
migration = executor.loader.get_migration_by_prefix(app_label, migration_name)
except AmbiguityError:
raise CommandError("More than one migration matches '%s' in app '%s'. Please be more specific." % (app_label, migration_name))
except KeyError:
raise CommandError("Cannot find a migration matching '%s' from app '%s'." % (app_label, migration_name))
# Work out the list of predecessor migrations
migrations_to_squash = [
executor.loader.get_migration(al, mn)
for al, mn in executor.loader.graph.forwards_plan((migration.app_label, migration.name))
if al == migration.app_label
]
# Tell them what we're doing and optionally ask if we should proceed
if self.verbosity > 0 or self.interactive:
self.stdout.write(self.style.MIGRATE_HEADING("Will squash the following migrations:"))
for migration in migrations_to_squash:
self.stdout.write(" - %s" % migration.name)
if self.interactive:
answer = None
while not answer or answer not in "yn":
answer = six.moves.input("Do you wish to proceed? [yN] ")
if not answer:
answer = "n"
break
else:
answer = answer[0].lower()
if answer != "y":
return
# Load the operations from all those migrations and concat together
operations = []
for smigration in migrations_to_squash:
operations.extend(smigration.operations)
if self.verbosity > 0:
self.stdout.write(self.style.MIGRATE_HEADING("Optimizing..."))
optimizer = MigrationOptimizer()
new_operations = optimizer.optimize(operations, migration.app_label)
if self.verbosity > 0:
if len(new_operations) == len(operations):
self.stdout.write(" No optimizations possible.")
else:
self.stdout.write(" Optimized from %s operations to %s operations." % (len(operations), len(new_operations)))
# Make a new migration with those operations
subclass = type("Migration", (migrations.Migration, ), {
"dependencies": [],
"operations": new_operations,
"replaces": [(m.app_label, m.name) for m in migrations_to_squash],
})
new_migration = subclass("0001_squashed_%s" % migration.name, app_label)
# Write out the new migration file
writer = MigrationWriter(new_migration)
with open(writer.path, "wb") as fh:
fh.write(writer.as_string())
if self.verbosity > 0:
self.stdout.write(self.style.MIGRATE_HEADING("Created new squashed migration %s" % writer.path))
self.stdout.write(" You should commit this migration but leave the old ones in place;")
self.stdout.write(" the new migration will be used for new installs. Once you are sure")
self.stdout.write(" all instances of the codebase have applied the migrations you squashed,")
self.stdout.write(" you can delete them.")
|
from argparse import ArgumentParser
from sys import version_info
_NO_FUNC = object()
__all__ = ['App']
if version_info[0] < 3:
    text_type = basestring  # noqa: F821 -- only defined on Python 2
else:
    text_type = str
class App(object):
"""
Simple command line application.
Constructor arguments are propagated to :py:class:`ArgumentParser`.
"""
def __init__(self, *args, **kwargs):
self._parser = ArgumentParser(*args, **kwargs)
self._global_args = []
self._subparsers = self._parser.add_subparsers(title="Subcommands")
self._pending_args = []
self._defaults = {}
def arg(self, *args, **kwargs):
"""Add a global application argument.
All arguments are passed on to :py:meth:`ArgumentParser.add_argument`.
"""
self._global_args.append((args, kwargs))
return self._parser.add_argument(*args, **kwargs)
def defaults(self, **kwargs):
"""Set global defaults.
All arguments are passed on to :py:meth:`ArgumentParser.set_defaults`.
"""
return self._parser.set_defaults(**kwargs)
def cmd(self, _func=_NO_FUNC, name=None, alias=None, *args, **kwargs):
"""
Decorator to create a command line subcommand for a function.
By default, the name of the decorated function is used as the
name of the subcommand, but this can be overridden by specifying
the `name` argument. Aliases are also possible via the alias
argument, which takes either a string or a tuple/list of
strings.
Additional arguments are passed to the subcommand's
:py:class:`ArgumentParser`.
"""
if _func is not _NO_FUNC:
# Support for using this decorator without calling it, e.g.
# @app.cmd <---- note: no parentheses here!
# def foo(): pass
return self.cmd()(_func)
parser_args = args
parser_kwargs = kwargs
def wrapper(func):
subcommand = name if name is not None else func.__name__
parser_kwargs.setdefault('help', "") # improves --help output
subparser = self._subparsers.add_parser(
subcommand, *parser_args, **parser_kwargs)
# Register aliases, if specified
if alias:
# Accept both a tuple/list or a single string
if isinstance(alias, text_type):
aliases = [alias]
else:
aliases = alias
assert isinstance(aliases, (tuple, list))
            # XXX: poking inside argparse internals is ugly
parser_map = self._subparsers._name_parser_map
for subcommand_alias in aliases:
parser_map[subcommand_alias] = parser_map[subcommand]
# Add any pending arguments
for args, kwargs in self._pending_args:
subparser.add_argument(*args, **kwargs)
self._pending_args = []
# Add any pending default values
try:
pending_defaults = self._defaults.pop(None)
except KeyError:
pass # no pending defaults
else:
self._defaults[func] = pending_defaults
# Store callback function and return the decorated function
# unmodified
subparser.set_defaults(_func=func)
return func
return wrapper
def cmd_arg(self, *args, **kwargs):
"""Decorator to specify a command line argument for a subcommand.
All arguments are passed on to :py:meth:`ArgumentParser.add_argument`.
Note: this function must be used in conjunction with .cmd().
"""
# TODO: perhaps add a 'group' argument to cmd_arg() that
# translates to add_argument_group
if len(args) == 1 and callable(args[0]) and not kwargs:
raise TypeError("cmd_arg() decorator requires arguments, "
"but none were supplied")
# Remember the passed args, since the command is not yet known
# when this decorator is called.
self._pending_args.append((args, kwargs))
# Return a do-nothing decorator
return lambda func: func
def cmd_defaults(self, **kwargs):
"""Decorator to specify defaults for a subcommand.
This can be useful to override global argument defaults for specific
subcommands.
All arguments are passed on to :py:meth:`ArgumentParser.set_defaults`.
Note: this function must be used in conjunction with .cmd().
"""
if len(kwargs) == 1 and callable(list(kwargs.values())[0]):
raise TypeError("defaults() decorator requires arguments, "
"but none were supplied")
# Work-around http://bugs.python.org/issue9351 by storing the
# defaults outside the ArgumentParser. The special key "None" is
# used for the pending defaults for a yet-to-be defined command.
self._defaults[None] = kwargs
return lambda func: func
def run(self, args=None, namespace=None):
"""Run the application.
This method parses the arguments and takes the appropriate actions. If
a valid subcommand was found, it will be executed and its return value
will be returned.
All arguments are passed on to :py:meth:`ArgumentParser.parse_args`.
"""
if self._pending_args:
raise TypeError("cmd_arg() called without matching cmd()")
if None in self._defaults:
raise TypeError("cmd_defaults() called without matching cmd()")
kwargs = vars(self._parser.parse_args(args=args, namespace=namespace))
sentinel = object()
func = kwargs.pop('_func', sentinel)
if func is sentinel:
self._parser.error("too few arguments")
if func in self._defaults:
kwargs.update(self._defaults[func])
return func(**kwargs)
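# Example usage (a minimal sketch; the subcommand, alias, and argument
# names below are illustrative, not part of the library):
#
#     app = App(description="Greeting tool")
#
#     @app.cmd(name="hello", alias="hi")
#     @app.cmd_arg('--name', default='world')
#     def say_hello(name):
#         print("Hello, %s!" % name)
#
#     if __name__ == '__main__':
#         app.run()  # e.g. "python demo.py hello --name Bob"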
|
from .a import App, Other
@App.foo(name="alpha")
def f():
pass
@App.foo(name="beta")
def g():
pass
@App.foo(name="gamma")
def h():
pass
@Other.foo(name="alpha")
def i():
pass
|
import re
import numpy as np
import datetime
from ..base import BaseRaw
from ..meas_info import create_info, _format_dig_points
from ..utils import _mult_cal_one
from ...annotations import Annotations
from ...utils import logger, verbose, fill_doc, warn, _check_fname
from ...utils.check import _require_version
from ..constants import FIFF
from .._digitization import _make_dig_points
from ...transforms import _frame_to_str, apply_trans
from ..nirx.nirx import _convert_fnirs_to_head
from ..._freesurfer import get_mni_fiducials
@fill_doc
def read_raw_snirf(fname, optode_frame="unknown", preload=False, verbose=None):
"""Reader for a continuous wave SNIRF data.
.. note:: This reader supports the .snirf file type only,
not the .jnirs version.
Files with either 3D or 2D locations can be read.
However, we strongly recommend using 3D positions.
If 2D positions are used, the behaviour of MNE functions
cannot be guaranteed.
Parameters
----------
fname : str
Path to the SNIRF data file.
optode_frame : str
Coordinate frame used for the optode positions. The default is unknown,
in which case the positions are not modified. If a known coordinate
frame is provided (head, meg, mri), then the positions are transformed
into the Neuromag head coordinate frame (head).
%(preload)s
%(verbose)s
Returns
-------
raw : instance of RawSNIRF
A Raw object containing fNIRS data.
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
return RawSNIRF(fname, optode_frame, preload, verbose)
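# Example usage (a minimal sketch; "recording.snirf" is a hypothetical file):
#
#     raw = read_raw_snirf("recording.snirf", preload=True)
#     print(raw.info['sfreq'], len(raw.ch_names))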
def _open(fname):
return open(fname, 'r', encoding='latin-1')
@fill_doc
class RawSNIRF(BaseRaw):
"""Raw object from a continuous wave SNIRF file.
Parameters
----------
fname : str
Path to the SNIRF data file.
optode_frame : str
Coordinate frame used for the optode positions. The default is unknown,
in which case the positions are not modified. If a known coordinate
frame is provided (head, meg, mri), then the positions are transformed
into the Neuromag head coordinate frame (head).
%(preload)s
%(verbose)s
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
@verbose
def __init__(self, fname, optode_frame="unknown",
preload=False, verbose=None):
_require_version('h5py', 'read raw SNIRF data')
from ...externals.pymatreader.utils import _import_h5py
# Must be here due to circular import error
from ...preprocessing.nirs import _validate_nirs_info
h5py = _import_h5py()
fname = _check_fname(fname, 'read', True, 'fname')
logger.info('Loading %s' % fname)
with h5py.File(fname, 'r') as dat:
if 'data2' in dat['nirs']:
warn("File contains multiple recordings. "
"MNE does not support this feature. "
"Only the first dataset will be processed.")
if np.array(dat.get('nirs/data1/measurementList1/dataType')) != 1:
raise RuntimeError('File does not contain continuous wave '
'data. MNE only supports reading continuous'
' wave amplitude SNIRF files. Expected type'
' code 1 but received type code %d' %
(np.array(dat.get(
'nirs/data1/measurementList1/dataType'
))))
last_samps = dat.get('/nirs/data1/dataTimeSeries').shape[0] - 1
samplingrate_raw = np.array(dat.get('nirs/data1/time'))
sampling_rate = 0
if samplingrate_raw.shape == (2, 1):
# specified as onset/samplerate
warn("Onset/sample rate SNIRF not yet supported.")
else:
# specified as time points
fs_diff = np.around(np.diff(samplingrate_raw), decimals=4)
if len(np.unique(fs_diff)) == 1:
# Uniformly sampled data
sampling_rate = 1. / np.unique(fs_diff)
else:
# print(np.unique(fs_diff))
warn("Non uniform sampled data not supported.")
if sampling_rate == 0:
warn("Unable to extract sample rate from SNIRF file.")
# Extract wavelengths
fnirs_wavelengths = np.array(dat.get('nirs/probe/wavelengths'))
fnirs_wavelengths = [int(w) for w in fnirs_wavelengths]
if len(fnirs_wavelengths) != 2:
raise RuntimeError(f'The data contains '
f'{len(fnirs_wavelengths)}'
f' wavelengths: {fnirs_wavelengths}. '
f'MNE only supports reading continuous'
' wave amplitude SNIRF files '
'with two wavelengths.')
# Extract channels
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
return [atoi(c) for c in re.split(r'(\d+)', text)]
channels = np.array([name for name in dat['nirs']['data1'].keys()])
channels_idx = np.array(['measurementList' in n for n in channels])
channels = channels[channels_idx]
channels = sorted(channels, key=natural_keys)
# Source and detector labels are optional fields.
# Use S1, S2, S3, etc if not specified.
if 'sourceLabels' in dat['nirs/probe']:
sources = np.array(dat.get('nirs/probe/sourceLabels'))
sources = [s.decode('UTF-8') for s in sources]
else:
sources = np.unique([dat.get('nirs/data1/' + c +
'/sourceIndex')[0]
for c in channels])
sources = [f"S{s}" for s in sources]
if 'detectorLabels' in dat['nirs/probe']:
detectors = np.array(dat.get('nirs/probe/detectorLabels'))
detectors = [d.decode('UTF-8') for d in detectors]
else:
detectors = np.unique([dat.get('nirs/data1/' + c +
'/detectorIndex')[0]
for c in channels])
detectors = [f"D{d}" for d in detectors]
# Extract source and detector locations
# 3D positions are optional in SNIRF,
# but highly recommended in MNE.
if ('detectorPos3D' in dat['nirs/probe']) and \
('sourcePos3D' in dat['nirs/probe']):
# If 3D positions are available they are used even if 2D exists
detPos3D = np.array(dat.get('nirs/probe/detectorPos3D'))
srcPos3D = np.array(dat.get('nirs/probe/sourcePos3D'))
elif ('detectorPos2D' in dat['nirs/probe']) and \
('sourcePos2D' in dat['nirs/probe']):
warn('The data only contains 2D location information for the '
'optode positions. '
'It is highly recommended that data is used '
'which contains 3D location information for the '
'optode positions. With only 2D locations it cannot be '
'guaranteed that MNE functions will behave correctly '
'and produce accurate results. If it is not possible to '
'include 3D positions in your data, please consider '
'using the set_montage() function.')
detPos2D = np.array(dat.get('nirs/probe/detectorPos2D'))
srcPos2D = np.array(dat.get('nirs/probe/sourcePos2D'))
# Set the third dimension to zero. See gh#9308
detPos3D = np.append(detPos2D,
np.zeros((detPos2D.shape[0], 1)), axis=1)
srcPos3D = np.append(srcPos2D,
np.zeros((srcPos2D.shape[0], 1)), axis=1)
else:
raise RuntimeError('No optode location information is '
'provided. MNE requires at least 2D '
'location information')
assert len(sources) == srcPos3D.shape[0]
assert len(detectors) == detPos3D.shape[0]
chnames = []
for chan in channels:
src_idx = int(np.array(dat.get('nirs/data1/' +
chan + '/sourceIndex'))[0])
det_idx = int(np.array(dat.get('nirs/data1/' +
chan + '/detectorIndex'))[0])
wve_idx = int(np.array(dat.get('nirs/data1/' +
chan + '/wavelengthIndex'))[0])
ch_name = sources[src_idx - 1] + '_' +\
detectors[det_idx - 1] + ' ' +\
str(fnirs_wavelengths[wve_idx - 1])
chnames.append(ch_name)
# Create mne structure
info = create_info(chnames,
sampling_rate,
ch_types='fnirs_cw_amplitude')
subject_info = {}
names = np.array(dat.get('nirs/metaDataTags/SubjectID'))
subject_info['first_name'] = names[0].decode('UTF-8')
# Read non standard (but allowed) custom metadata tags
if 'lastName' in dat.get('nirs/metaDataTags/'):
ln = dat.get('/nirs/metaDataTags/lastName')[0].decode('UTF-8')
subject_info['last_name'] = ln
if 'middleName' in dat.get('nirs/metaDataTags/'):
m = dat.get('/nirs/metaDataTags/middleName')[0].decode('UTF-8')
subject_info['middle_name'] = m
if 'sex' in dat.get('nirs/metaDataTags/'):
s = dat.get('/nirs/metaDataTags/sex')[0].decode('UTF-8')
if s in {'M', 'Male', '1', 'm'}:
subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_MALE
elif s in {'F', 'Female', '2', 'f'}:
subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_FEMALE
elif s in {'0', 'u'}:
subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_UNKNOWN
# End non standard name reading
# Update info
info.update(subject_info=subject_info)
LengthUnit = np.array(dat.get('/nirs/metaDataTags/LengthUnit'))
LengthUnit = LengthUnit[0].decode('UTF-8')
scal = 1
if "cm" in LengthUnit:
scal = 100
elif "mm" in LengthUnit:
scal = 1000
srcPos3D /= scal
detPos3D /= scal
if optode_frame in ["mri", "meg"]:
# These are all in MNI or MEG coordinates, so let's transform
# them to the Neuromag head coordinate frame
srcPos3D, detPos3D, _, head_t = _convert_fnirs_to_head(
'fsaverage', optode_frame, 'head', srcPos3D, detPos3D, [])
else:
head_t = np.eye(4)
if optode_frame in ["head", "mri", "meg"]:
# Then the transformation to head was performed above
coord_frame = FIFF.FIFFV_COORD_HEAD
elif 'MNE_coordFrame' in dat.get('nirs/metaDataTags/'):
coord_frame = int(dat.get('/nirs/metaDataTags/MNE_coordFrame')
[0])
else:
coord_frame = FIFF.FIFFV_COORD_UNKNOWN
for idx, chan in enumerate(channels):
src_idx = int(np.array(dat.get('nirs/data1/' +
chan + '/sourceIndex'))[0])
det_idx = int(np.array(dat.get('nirs/data1/' +
chan + '/detectorIndex'))[0])
wve_idx = int(np.array(dat.get('nirs/data1/' +
chan + '/wavelengthIndex'))[0])
info['chs'][idx]['loc'][3:6] = srcPos3D[src_idx - 1, :]
info['chs'][idx]['loc'][6:9] = detPos3D[det_idx - 1, :]
# Store channel as mid point
midpoint = (info['chs'][idx]['loc'][3:6] +
info['chs'][idx]['loc'][6:9]) / 2
info['chs'][idx]['loc'][0:3] = midpoint
info['chs'][idx]['loc'][9] = fnirs_wavelengths[wve_idx - 1]
info['chs'][idx]['coord_frame'] = coord_frame
if 'landmarkPos3D' in dat.get('nirs/probe/'):
diglocs = np.array(dat.get('/nirs/probe/landmarkPos3D'))
digname = np.array(dat.get('/nirs/probe/landmarkLabels'))
nasion, lpa, rpa, hpi = None, None, None, None
extra_ps = dict()
for idx, dign in enumerate(digname):
if dign == b'LPA':
lpa = diglocs[idx, :3]
elif dign == b'NASION':
nasion = diglocs[idx, :3]
elif dign == b'RPA':
rpa = diglocs[idx, :3]
else:
extra_ps[f'EEG{len(extra_ps) + 1:03d}'] = \
diglocs[idx, :3]
dig = _make_dig_points(
nasion=nasion, lpa=lpa, rpa=rpa, hpi=hpi,
dig_ch_pos=extra_ps,
coord_frame=_frame_to_str[coord_frame])
else:
ch_locs = [info['chs'][idx]['loc'][0:3]
for idx in range(len(channels))]
# Set up digitization
dig = get_mni_fiducials('fsaverage', verbose=False)
for fid in dig:
fid['r'] = apply_trans(head_t, fid['r'])
fid['coord_frame'] = FIFF.FIFFV_COORD_HEAD
for ii, ch_loc in enumerate(ch_locs, 1):
dig.append(dict(
kind=FIFF.FIFFV_POINT_EEG, # misnomer prob okay
r=ch_loc,
ident=ii,
coord_frame=FIFF.FIFFV_COORD_HEAD,
))
dig = _format_dig_points(dig)
del head_t
with info._unlock():
info['dig'] = dig
str_date = np.array((dat.get(
'/nirs/metaDataTags/MeasurementDate')))[0].decode('UTF-8')
str_time = np.array((dat.get(
'/nirs/metaDataTags/MeasurementTime')))[0].decode('UTF-8')
str_datetime = str_date + str_time
# Several formats have been observed so we try each in turn
for dt_code in ['%Y-%m-%d%H:%M:%SZ',
'%Y-%m-%d%H:%M:%S']:
try:
meas_date = datetime.datetime.strptime(
str_datetime, dt_code)
except ValueError:
pass
else:
break
else:
warn("Extraction of measurement date from SNIRF file failed. "
"The date is being set to January 1st, 2000, "
f"instead of {str_datetime}")
meas_date = datetime.datetime(2000, 1, 1, 0, 0, 0)
meas_date = meas_date.replace(tzinfo=datetime.timezone.utc)
with info._unlock():
info['meas_date'] = meas_date
if 'DateOfBirth' in dat.get('nirs/metaDataTags/'):
str_birth = np.array((dat.get('/nirs/metaDataTags/'
'DateOfBirth')))[0].decode()
birth_matched = re.fullmatch(r'(\d+)-(\d+)-(\d+)', str_birth)
if birth_matched is not None:
birthday = (int(birth_matched.groups()[0]),
int(birth_matched.groups()[1]),
int(birth_matched.groups()[2]))
with info._unlock():
info["subject_info"]['birthday'] = birthday
super(RawSNIRF, self).__init__(info, preload, filenames=[fname],
last_samps=[last_samps],
verbose=verbose)
# Extract annotations
annot = Annotations([], [], [])
for key in dat['nirs']:
if 'stim' in key:
data = np.atleast_2d(np.array(
dat.get('/nirs/' + key + '/data')))
if data.size > 0:
desc = dat.get('/nirs/' + key + '/name')[0]
annot.append(data[:, 0], 1.0, desc.decode('UTF-8'))
self.set_annotations(annot)
# Reorder channels to match expected ordering in MNE
num_chans = len(self.ch_names)
chans = []
for idx in range(num_chans // 2):
chans.append(idx)
chans.append(idx + num_chans // 2)
self.pick(picks=chans)
# Validate that the fNIRS info is correctly formatted
_validate_nirs_info(self.info)
def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
"""Read a segment of data from a file."""
import h5py
with h5py.File(self._filenames[0], 'r') as dat:
one = dat['/nirs/data1/dataTimeSeries'][start:stop].T
_mult_cal_one(data, one, idx, cals, mult)
|
import os, sys
import math
import urllib
import urllib2
import tempfile
import StringIO
import operator
import base64
import posixpath
import os.path as systempath
import zipfile
import shutil
from hashlib import md5
from datetime import datetime
from time import strftime, localtime
from re import sub, compile, MULTILINE
from urlparse import urlparse, urljoin
from operator import lt, le, eq, ge, gt
def _relpath(path, start=posixpath.curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_list = posixpath.abspath(start).split(posixpath.sep)
path_list = posixpath.abspath(path).split(posixpath.sep)
i = len(posixpath.commonprefix([start_list, path_list]))
rel_list = [posixpath.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return posixpath.curdir
return posixpath.join(*rel_list)
if sys.hexversion >= 0x020600F0:
from httplib import HTTPConnection, HTTPSConnection
else:
posixpath.relpath = _relpath
from httplib import HTTPConnection as _HTTPConnection
from httplib import HTTPSConnection as _HTTPSConnection
import socket
def HTTPConnection(host, port=None, strict=None, timeout=None):
if timeout:
socket.setdefaulttimeout(timeout)
return _HTTPConnection(host, port=port, strict=strict)
def HTTPSConnection(host, port=None, strict=None, timeout=None):
if timeout:
socket.setdefaulttimeout(timeout)
return _HTTPSConnection(host, port=port, strict=strict)
from . import safe64, style, output, sources
from . import MAPNIK_VERSION, MAPNIK_VERSION_STR
from .nonposix import un_posix, to_posix
from .parse import stylesheet_declarations
from .style import uri
try:
from PIL import Image
except ImportError:
try:
import Image
except ImportError:
Image = False
if not Image:
warn = 'Warning: PIL (Python Imaging Library) is required for proper handling of image symbolizers when using JPEG format images or not running Mapnik >=0.7.0\n'
sys.stderr.write(warn)
DEFAULT_ENCODING = 'utf-8'
try:
import xml.etree.ElementTree as ElementTree
from xml.etree.ElementTree import Element
except ImportError:
try:
import lxml.etree as ElementTree
from lxml.etree import Element
except ImportError:
import elementtree.ElementTree as ElementTree
from elementtree.ElementTree import Element
opsort = {lt: 1, le: 2, eq: 3, ge: 4, gt: 5}
opstr = {lt: '<', le: '<=', eq: '==', ge: '>=', gt: '>'}
VERBOSE = False
def msg(msg):
if VERBOSE:
sys.stderr.write('Cascadenik debug: %s\n' % msg)
counter = 0
def next_counter():
global counter
counter += 1
return counter
def url2fs(url):
""" encode a URL to be safe as a filename """
uri, extension = posixpath.splitext(url)
return safe64.dir(uri) + extension
def fs2url(url):
""" decode a filename to the URL it is derived from """
return safe64.decode(url)
def indent(elem, level=0):
""" http://infix.se/2007/02/06/gentlemen-indent-your-xml
"""
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
for e in elem:
indent(e, level+1)
if not e.tail or not e.tail.strip():
e.tail = i + " "
if not e.tail or not e.tail.strip():
e.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
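# Example usage (a minimal sketch): pretty-print a small tree in place.
#
#     root = Element('Map')
#     root.append(Element('Layer'))
#     indent(root)
#     ElementTree.tostring(root)  # roughly '<Map>\n  <Layer />\n</Map>'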
class Directories:
""" Holder for full paths to output and cache dirs.
"""
def __init__(self, output, cache, source):
self.output = posixpath.realpath(to_posix(output))
self.cache = posixpath.realpath(to_posix(cache))
scheme, n, path, p, q, f = urlparse(to_posix(source))
if scheme in ('http','https'):
self.source = source
elif scheme in ('file', ''):
# os.path (systempath) usage here is intentional...
self.source = 'file://' + to_posix(systempath.realpath(path))
assert self.source, "self.source does not exist: source was: %s" % source
def output_path(self, path_name):
""" Modify a path so it fits expectations.
Avoid returning relative paths that start with '../' and possibly
return relative paths when output and cache directories match.
"""
# make sure it is a valid posix format
path = to_posix(path_name)
assert (path == path_name), "path_name passed to output_path must be in posix format"
if posixpath.isabs(path):
if self.output == self.cache:
# worth seeing if an absolute path can be avoided
path = posixpath.relpath(path, self.output)
else:
return posixpath.realpath(path)
if path.startswith('../'):
joined = posixpath.join(self.output, path)
return posixpath.realpath(joined)
return path
class Range:
""" Represents a range for use in min/max scale denominator.
Ranges can have a left side, a right side, neither, or both,
with sides specified as inclusive or exclusive.
"""
def __init__(self, leftop=None, leftedge=None, rightop=None, rightedge=None):
assert leftop in (lt, le, eq, ge, gt, None)
assert rightop in (lt, le, eq, ge, gt, None)
self.leftop = leftop
self.rightop = rightop
self.leftedge = leftedge
self.rightedge = rightedge
def midpoint(self):
""" Return a point guranteed to fall within this range, hopefully near the middle.
"""
minpoint = self.leftedge
if self.leftop is gt:
minpoint += 1
maxpoint = self.rightedge
if self.rightop is lt:
maxpoint -= 1
if minpoint is None:
return maxpoint
elif maxpoint is None:
return minpoint
else:
return (minpoint + maxpoint) / 2
def isOpen(self):
""" Return true if this range has any room in it.
"""
if self.leftedge and self.rightedge and self.leftedge > self.rightedge:
return False
if self.leftedge == self.rightedge:
if self.leftop is gt or self.rightop is lt:
return False
return True
def toFilter(self, property):
""" Convert this range to a Filter with a tests having a given property.
"""
if self.leftedge == self.rightedge and self.leftop is ge and self.rightop is le:
# equivalent to ==
return Filter(style.SelectorAttributeTest(property, '=', self.leftedge))
try:
return Filter(style.SelectorAttributeTest(property, opstr[self.leftop], self.leftedge),
style.SelectorAttributeTest(property, opstr[self.rightop], self.rightedge))
except KeyError:
try:
return Filter(style.SelectorAttributeTest(property, opstr[self.rightop], self.rightedge))
except KeyError:
try:
return Filter(style.SelectorAttributeTest(property, opstr[self.leftop], self.leftedge))
except KeyError:
return Filter()
def __repr__(self):
"""
"""
if self.leftedge == self.rightedge and self.leftop is ge and self.rightop is le:
# equivalent to ==
return '(=%s)' % self.leftedge
try:
return '(%s%s ... %s%s)' % (self.leftedge, opstr[self.leftop], opstr[self.rightop], self.rightedge)
except KeyError:
try:
return '(... %s%s)' % (opstr[self.rightop], self.rightedge)
except KeyError:
try:
return '(%s%s ...)' % (self.leftedge, opstr[self.leftop])
except KeyError:
return '(...)'
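# Examples, traced from the methods above:
#
#     Range(gt, 100, le, 1000).midpoint()  # -> 550 (Python 2 integer division of 1101)
#     Range(ge, 5, le, 5).isOpen()         # -> True  (the single point 5)
#     Range(gt, 5, lt, 5).isOpen()         # -> False (no room between the edges)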
class Filter:
""" Represents a filter of some sort for use in stylesheet rules.
Composed of a list of tests.
"""
def __init__(self, *tests):
self.tests = list(tests)
def isOpen(self):
""" Return true if this filter is not trivially false, i.e. self-contradictory.
"""
equals = {}
nequals = {}
for test in self.tests:
if test.op == '=':
if equals.has_key(test.property) and test.value != equals[test.property]:
# we've already stated that this arg must equal something else
return False
if nequals.has_key(test.property) and test.value in nequals[test.property]:
# we've already stated that this arg must not equal its current value
return False
equals[test.property] = test.value
if test.op == '!=':
if equals.has_key(test.property) and test.value == equals[test.property]:
# we've already stated that this arg must equal its current value
return False
if not nequals.has_key(test.property):
nequals[test.property] = set()
nequals[test.property].add(test.value)
return True
def clone(self):
"""
"""
return Filter(*self.tests[:])
def minusExtras(self):
""" Return a new Filter that's equal to this one,
without extra terms that don't add meaning.
"""
assert self.isOpen()
trimmed = self.clone()
equals = {}
for test in trimmed.tests:
if test.op == '=':
equals[test.property] = test.value
extras = []
for (i, test) in enumerate(trimmed.tests):
if test.op == '!=' and equals.has_key(test.property) and equals[test.property] != test.value:
extras.append(i)
while extras:
trimmed.tests.pop(extras.pop())
return trimmed
def __repr__(self):
"""
"""
return ''.join(map(repr, sorted(self.tests)))
def __cmp__(self, other):
"""
"""
# get the scale tests to the front of the line, followed by regular alphabetical
key_func = lambda t: (not t.isMapScaled(), t.property, t.op, t.value)
# extract tests into cleanly-sortable tuples
self_tuples = [(t.property, t.op, t.value) for t in sorted(self.tests, key=key_func)]
other_tuples = [(t.property, t.op, t.value) for t in sorted(other.tests, key=key_func)]
return cmp(self_tuples, other_tuples)
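# Example (a minimal sketch; SelectorAttributeTest takes (property, op, value)
# arguments, as used in Range.toFilter above):
#
#     park = style.SelectorAttributeTest('landuse', '=', 'park')
#     not_park = style.SelectorAttributeTest('landuse', '!=', 'park')
#     Filter(park).isOpen()            # -> True
#     Filter(park, not_park).isOpen()  # -> False (self-contradictory)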
def test_ranges(tests):
""" Given a list of tests, return a list of Ranges that fully describes
all possible unique ranged slices within those tests.
This function was hard to write, it should be hard to read.
TODO: make this work for <= followed by >= in breaks
"""
if len(tests) == 0:
return [Range()]
assert 1 == len(set(test.property for test in tests)), 'All tests must share the same property'
assert True in [test.isRanged() for test in tests], 'At least one test must be ranged'
assert False not in [test.isNumeric() for test in tests], 'All tests must be numeric'
repeated_breaks = []
# start by getting all the range edges from the selectors into a list of break points
for test in tests:
repeated_breaks.append(test.rangeOpEdge())
# from here on out, *order will matter*
# it's expected that the breaks will be sorted from minimum to maximum,
# with greater/lesser/equal operators accounted for.
repeated_breaks.sort(key=lambda (o, e): (e, opsort[o]))
breaks = []
# next remove repetitions from the list
for (i, (op, edge)) in enumerate(repeated_breaks):
if i > 0:
if op is repeated_breaks[i - 1][0] and edge == repeated_breaks[i - 1][1]:
continue
breaks.append(repeated_breaks[i])
ranges = []
# now turn those breakpoints into a list of ranges
for (i, (op, edge)) in enumerate(breaks):
if i == 0:
# get a right-boundary for the first range
if op in (lt, le):
ranges.append(Range(None, None, op, edge))
elif op is ge:
ranges.append(Range(None, None, lt, edge))
elif op is gt:
ranges.append(Range(None, None, le, edge))
elif op is eq:
# edge case
ranges.append(Range(None, None, lt, edge))
ranges.append(Range(ge, edge, le, edge))
elif i > 0:
# get a left-boundary based on the previous right-boundary
if ranges[-1].rightop is lt:
ranges.append(Range(ge, ranges[-1].rightedge))
else:
ranges.append(Range(gt, ranges[-1].rightedge))
# get a right-boundary for the current range
if op in (lt, le):
ranges[-1].rightop, ranges[-1].rightedge = op, edge
elif op in (eq, ge):
ranges[-1].rightop, ranges[-1].rightedge = lt, edge
elif op is gt:
ranges[-1].rightop, ranges[-1].rightedge = le, edge
# equals is a special case, sometimes
# an extra element may need to sneak in.
if op is eq:
if ranges[-1].leftedge == edge:
# the previous range also covered just this one slice.
ranges.pop()
# equals is expressed as greater-than-equals and less-than-equals.
ranges.append(Range(ge, edge, le, edge))
if i == len(breaks) - 1:
# get a left-boundary for the final range
if op in (lt, ge):
ranges.append(Range(ge, edge))
else:
ranges.append(Range(gt, edge))
ranges = [range for range in ranges if range.isOpen()]
# print breaks
# print ranges
if ranges:
return ranges
else:
# if all else fails, return a Range that covers everything
return [Range()]
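# Example: two ranged tests on the same numeric property, say scale > 100
# and scale < 1000 (assuming rangeOpEdge() maps them to (gt, 100) and
# (lt, 1000)), slice the number line into three open ranges:
#
#     [Range(None, None, le, 100), Range(gt, 100, lt, 1000), Range(ge, 1000)]
#
# whose reprs read (... <=100), (100< ... <1000), (1000>= ...).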
def test_combinations(tests, filter=None):
""" Given a list of simple =/!= tests, return a list of possible combinations.
The filter argument is used to call test_combinations() recursively;
this cuts down on the potential tests^2 number of combinations by
identifying closed filters early and culling them from consideration.
"""
# is the first one simple? it should be
if len(tests) >= 1:
assert tests[0].isSimple(), 'All tests must be simple, i.e. = or !='
# does it share a property with the next one? it should
if len(tests) >= 2:
assert tests[0].property == tests[1].property, 'All tests must share the same property'
# -------- remaining tests will be checked in subsequent calls --------
# bail early
if len(tests) == 0:
return []
# base case where no filter has been passed
if filter is None:
filter = Filter()
# knock one off the front
first_test, remaining_tests = tests[0], tests[1:]
# one filter with the front test on it
this_filter = filter.clone()
this_filter.tests.append(first_test)
# another filter with the inverse of the front test on it
that_filter = filter.clone()
that_filter.tests.append(first_test.inverse())
# return value
test_sets = []
for new_filter in (this_filter, that_filter):
if new_filter.isOpen():
if len(remaining_tests) > 0:
# keep diving deeper
test_sets += test_combinations(remaining_tests, new_filter)
else:
# only append once the list has been exhausted
new_set = []
for test in new_filter.minusExtras().tests:
if test not in new_set:
new_set.append(test)
test_sets.append(new_set)
return test_sets
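# Example: a single test yields the two complementary filters (assuming
# SelectorAttributeTest as above, with .inverse() flipping = and !=):
#
#     park = style.SelectorAttributeTest('landuse', '=', 'park')
#     test_combinations([park])
#     # -> two test lists: one asserting landuse = 'park',
#     #    the other asserting landuse != 'park'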
def xindexes(slots):
""" Generate list of possible indexes into a list of slots.
Best way to think of this is as a number where each digit might have a different radix.
E.g.: (10, 10, 10) would return 10 x 10 x 10 = 1000 responses from (0, 0, 0) to (9, 9, 9),
(2, 2, 2, 2) would return 2 x 2 x 2 x 2 = 16 responses from (0, 0, 0, 0) to (1, 1, 1, 1).
"""
# the first response...
slot = [0] * len(slots)
for i in range(reduce(operator.mul, slots)):
yield slot
carry = 1
# iterate from the least to the most significant digit
for j in range(len(slots), 0, -1):
k = j - 1
slot[k] += carry
if slot[k] >= slots[k]:
carry = 1 + slot[k] - slots[k]
slot[k] = 0
else:
carry = 0
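# Example: note that the same list object is yielded and mutated in place,
# so copy it if you need to keep the values:
#
#     [tuple(ix) for ix in xindexes([2, 2])]
#     # -> [(0, 0), (0, 1), (1, 0), (1, 1)]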
def selectors_tests(selectors, property=None):
""" Given a list of selectors, return a list of unique tests.
Optionally limit to those with a given property.
"""
tests = {}
for selector in selectors:
for test in selector.allTests():
if property is None or test.property == property:
tests[unicode(test)] = test
return tests.values()
def tests_filter_combinations(tests):
""" Return a complete list of filter combinations for given list of tests
"""
if len(tests) == 0:
return [Filter()]
# unique properties
properties = sorted(list(set([test.property for test in tests])))
property_tests = {}
# divide up the tests by their first argument, e.g. "landuse" vs. "tourism",
# into lists of all possible legal combinations of those tests.
for property in properties:
# limit tests to those with the current property
current_tests = [test for test in tests if test.property == property]
has_ranged_tests = True in [test.isRanged() for test in current_tests]
has_nonnumeric_tests = False in [test.isNumeric() for test in current_tests]
if has_ranged_tests and has_nonnumeric_tests:
raise Exception('Mixed ranged/non-numeric tests in %s' % str(current_tests))
elif has_ranged_tests:
property_tests[property] = [range.toFilter(property).tests for range in test_ranges(current_tests)]
else:
property_tests[property] = test_combinations(current_tests)
# get a list of the number of combinations for each group of tests from above.
property_counts = [len(property_tests[property]) for property in properties]
filters = []
# now iterate over each combination - for large numbers of tests, this can get big really, really fast
for property_indexes in xindexes(property_counts):
# list of lists of tests
testslist = [property_tests[properties[i]][j] for (i, j) in enumerate(property_indexes)]
# corresponding filter
filter = Filter(*reduce(operator.add, testslist))
filters.append(filter)
if len(filters):
return sorted(filters)
# if no filters have been defined, return a blank one that matches anything
return [Filter()]
def is_merc_projection(srs):
""" Return true if the map projection matches that used by VEarth, Google, OSM, etc.
Is currently necessary for zoom-level shorthand for scale-denominator.
"""
if srs.lower() == '+init=epsg:900913':
return True
# observed
srs = dict([p.split('=') for p in srs.split() if '=' in p])
# expected
# note, common optional modifiers like +no_defs, +over, and +wkt
# are not pairs and should not prevent matching
gym = '+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null'
gym = dict([p.split('=') for p in gym.split() if '=' in p])
for p in gym:
if srs.get(p, None) != gym.get(p, None):
return False
return True
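# Examples:
#
#     is_merc_projection('+init=epsg:900913')          # -> True (special-cased)
#     is_merc_projection('+proj=latlong +datum=WGS84') # -> False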
def extract_declarations(map_el, dirs, scale=1, user_styles=[]):
""" Given a Map element and directories object, remove and return a complete
list of style declarations from any Stylesheet elements found within.
"""
styles = []
#
# First, look at all the stylesheets defined in the map itself.
#
for stylesheet in map_el.findall('Stylesheet'):
map_el.remove(stylesheet)
content, mss_href = fetch_embedded_or_remote_src(stylesheet, dirs)
if content:
styles.append((content, mss_href))
#
# Second, look through the user-supplied styles for override rules.
#
for stylesheet in user_styles:
mss_href = urljoin(dirs.source.rstrip('/')+'/', stylesheet)
content = urllib.urlopen(mss_href).read().decode(DEFAULT_ENCODING)
styles.append((content, mss_href))
declarations = []
for (content, mss_href) in styles:
is_merc = is_merc_projection(map_el.get('srs',''))
for declaration in stylesheet_declarations(content, is_merc, scale):
#
# Change the value of each URI relative to the location
# of the containing stylesheet. We generally just have
# the one instance of "dirs" around for a full parse cycle,
# so it's necessary to perform this normalization here
# instead of later, while mss_href is still available.
#
uri_value = declaration.value.value
if uri_value.__class__ is uri:
uri_value.address = urljoin(mss_href, uri_value.address)
declarations.append(declaration)
return declarations
def fetch_embedded_or_remote_src(elem, dirs):
"""
"""
if 'src' in elem.attrib:
scheme, host, remote_path, p, q, f = urlparse(dirs.source)
src_href = urljoin(dirs.source.rstrip('/')+'/', elem.attrib['src'])
return urllib.urlopen(src_href).read().decode(DEFAULT_ENCODING), src_href
elif elem.text:
return elem.text, dirs.source.rstrip('/')+'/'
return None, None
def expand_source_declarations(map_el, dirs, local_conf):
""" This provides mechanism for externalizing and sharing data sources. The datasource configs are
python files, and layers reference sections within that config:
<DataSourcesConfig src="datasources.cfg" />
<Layer class="road major" source_name="planet_osm_major_roads" />
<Layer class="road minor" source_name="planet_osm_minor_roads" />
See example_dscfg.mml and example.cfg at the root of the cascadenik directory for an example.
"""
ds = sources.DataSources(dirs.source, local_conf)
# build up the configuration
for spec in map_el.findall('DataSourcesConfig'):
map_el.remove(spec)
src_text, local_base = fetch_embedded_or_remote_src(spec, dirs)
if not src_text:
continue
ds.add_config(src_text, local_base)
# now transform the xml
# add in base datasources
for base_name in ds.templates:
b = Element("Datasource", name=base_name)
for pname, pvalue in ds.sources[base_name]['parameters'].items():
p = Element("Parameter", name=pname)
p.text = str(pvalue)
b.append(p)
map_el.insert(0, b)
# expand layer data sources
for layer in map_el.findall('Layer'):
if 'source_name' not in layer.attrib:
continue
if layer.attrib['source_name'] not in ds.sources:
raise Exception("Datasource '%s' referenced, but not defined in layer:\n%s" % (layer.attrib['source_name'], ElementTree.tostring(layer)))
# create the nested datasource object
b = Element("Datasource")
dsrc = ds.sources[layer.attrib['source_name']]
if 'template' in dsrc:
b.attrib['base'] = dsrc['template']
# set the SRS if present
if 'layer_srs' in dsrc:
layer.attrib['srs'] = dsrc['layer_srs']
for pname, pvalue in dsrc['parameters'].items():
p = Element("Parameter", name=pname)
p.text = pvalue
b.append(p)
layer.append(b)
del layer.attrib['source_name']
def test2str(test):
""" Return a mapnik-happy Filter expression atom for a single test
"""
if type(test.value) in (int, float):
value = str(test.value)
elif type(test.value) in (str, unicode):
value = "'%s'" % test.value
else:
raise Exception("test2str doesn't know what to do with a %s" % type(test.value))
if test.op == '!=':
return "not [%s] = %s" % (test.property, value)
elif test.op in ('<', '<=', '=', '>=', '>'):
return "[%s] %s %s" % (test.property, test.op, value)
else:
raise Exception('"%s" is not a valid filter operation' % test.op)
def make_rule(filter, *symbolizers):
""" Given a Filter and some symbolizers, return a Rule prepopulated
with applicable min/max scale denominator and filter.
"""
scale_tests = [test for test in filter.tests if test.isMapScaled()]
other_tests = [test for test in filter.tests if not test.isMapScaled()]
# these will be replaced with values as necessary
minscale, maxscale, filter = None, None, None
for scale_test in scale_tests:
if scale_test.op in ('>', '>='):
if scale_test.op == '>=':
value = scale_test.value
elif scale_test.op == '>':
value = scale_test.value + 1
minscale = output.MinScaleDenominator(value)
if scale_test.op in ('<', '<='):
if scale_test.op == '<=':
value = scale_test.value
elif scale_test.op == '<':
value = scale_test.value - 1
maxscale = output.MaxScaleDenominator(value)
filter_text = ' and '.join(test2str(test) for test in other_tests)
if filter_text:
filter = output.Filter(filter_text)
rule = output.Rule(minscale, maxscale, filter, [s for s in symbolizers if s])
return rule
def is_applicable_selector(selector, filter):
""" Given a Selector and Filter, return True if the Selector is
compatible with the given Filter, and False if they contradict.
"""
for test in selector.allTests():
if not test.isCompatible(filter.tests):
return False
return True
def get_map_attributes(declarations):
"""
"""
property_map = {'map-bgcolor': 'background'}
return dict([(property_map[dec.property.name], dec.value.value)
for dec in declarations
if dec.property.name in property_map])
def filtered_property_declarations(declarations, property_names):
"""
"""
property_names += ['display']
# just the ones we care about here
declarations = [dec for dec in declarations if dec.property.name in property_names]
selectors = [dec.selector for dec in declarations]
# a place to put rules
rules = []
for filter in tests_filter_combinations(selectors_tests(selectors)):
rule = {}
# collect all the applicable declarations into a list of parameters and values
for dec in declarations:
if is_applicable_selector(dec.selector, filter):
rule[dec.property.name] = dec.value
# Presence of display: none means don't add this rule at all.
if (dec.property.name, dec.value.value) == ('display', 'none'):
rule = {}
break
# Presence of display here probably just means display: map,
# which is boring and can be discarded.
if rule and 'display' in rule:
del rule['display']
# If the rule is empty by this point, skip it.
if not rule:
continue
rules.append((filter, rule))
return rules
def get_polygon_rules(declarations):
""" Given a Map element, a Layer element, and a list of declarations,
create a new Style element with a PolygonSymbolizer, add it to Map
and refer to it in Layer.
"""
property_map = {'polygon-fill': 'fill', 'polygon-opacity': 'fill-opacity',
'polygon-gamma': 'gamma',
'polygon-meta-output': 'meta-output', 'polygon-meta-writer': 'meta-writer'}
property_names = property_map.keys()
# a place to put rules
rules = []
for (filter, values) in filtered_property_declarations(declarations, property_names):
color = values.has_key('polygon-fill') and values['polygon-fill'].value
opacity = values.has_key('polygon-opacity') and values['polygon-opacity'].value or None
gamma = values.has_key('polygon-gamma') and values['polygon-gamma'].value or None
symbolizer = color and output.PolygonSymbolizer(color, opacity, gamma)
if symbolizer:
rules.append(make_rule(filter, symbolizer))
return rules
def get_raster_rules(declarations):
""" Given a Map element, a Layer element, and a list of declarations,
create a new Style element with a RasterSymbolizer, add it to Map
and refer to it in Layer.
The RasterSymbolizer will always created, even if there are
no applicable declarations.
"""
property_map = {'raster-opacity': 'opacity',
'raster-mode': 'mode',
'raster-scaling': 'scaling'
}
property_names = property_map.keys()
# a place to put rules
rules = []
for (filter, values) in filtered_property_declarations(declarations, property_names):
sym_params = {}
for prop,attr in property_map.items():
sym_params[attr] = values.has_key(prop) and values[prop].value or None
symbolizer = output.RasterSymbolizer(**sym_params)
rules.append(make_rule(filter, symbolizer))
if not rules:
# No raster-* rules were created, but we're here so we must need a symbolizer.
rules.append(make_rule(Filter(), output.RasterSymbolizer()))
return rules
def get_line_rules(declarations):
""" Given a list of declarations, return a list of output.Rule objects.
This function is wise to line-<foo>, inline-<foo>, and outline-<foo> properties,
and will generate multiple LineSymbolizers if necessary.
"""
property_map = {'line-color': 'stroke', 'line-width': 'stroke-width', 'line-offset': 'offset',
'line-opacity': 'stroke-opacity', 'line-join': 'stroke-linejoin',
'line-cap': 'stroke-linecap', 'line-dasharray': 'stroke-dasharray',
'line-meta-output': 'meta-output', 'line-meta-writer': 'meta-writer'}
property_names = property_map.keys()
# prepend parameter names with 'in' and 'out'
for i in range(len(property_names)):
property_names.append('in' + property_names[i])
property_names.append('out' + property_names[i])
# a place to put rules
rules = []
for (filter, values) in filtered_property_declarations(declarations, property_names):
width = values.has_key('line-width') and values['line-width'].value
color = values.has_key('line-color') and values['line-color'].value
offset = values.has_key('line-offset') and values['line-offset'].value or None
opacity = values.has_key('line-opacity') and values['line-opacity'].value or None
join = values.has_key('line-join') and values['line-join'].value or None
cap = values.has_key('line-cap') and values['line-cap'].value or None
dashes = values.has_key('line-dasharray') and values['line-dasharray'].value or None
line_symbolizer = color and width and output.LineSymbolizer(color, width, opacity, join, cap, dashes, offset) or False
width = values.has_key('inline-width') and values['inline-width'].value
color = values.has_key('inline-color') and values['inline-color'].value
offset = values.has_key('line-offset') and values['line-offset'].value or None
opacity = values.has_key('inline-opacity') and values['inline-opacity'].value or None
join = values.has_key('inline-join') and values['inline-join'].value or None
cap = values.has_key('inline-cap') and values['inline-cap'].value or None
dashes = values.has_key('inline-dasharray') and values['inline-dasharray'].value or None
inline_symbolizer = color and width and output.LineSymbolizer(color, width, opacity, join, cap, dashes, offset) or False
# outline requires regular line to have a meaningful width
width = values.has_key('outline-width') and values.has_key('line-width') \
and values['line-width'].value + values['outline-width'].value * 2
color = values.has_key('outline-color') and values['outline-color'].value
offset = values.has_key('line-offset') and values['line-offset'].value or None
opacity = values.has_key('outline-opacity') and values['outline-opacity'].value or None
join = values.has_key('outline-join') and values['outline-join'].value or None
cap = values.has_key('outline-cap') and values['outline-cap'].value or None
dashes = values.has_key('outline-dasharray') and values['outline-dasharray'].value or None
outline_symbolizer = color and width and output.LineSymbolizer(color, width, opacity, join, cap, dashes, offset) or False
if outline_symbolizer or line_symbolizer or inline_symbolizer:
rules.append(make_rule(filter, outline_symbolizer, line_symbolizer, inline_symbolizer))
return rules
def get_text_rule_groups(declarations):
""" Given a list of declarations, return a list of output.Rule objects.
"""
property_map = {'text-anchor-dx': 'anchor_dx', # does nothing
'text-anchor-dy': 'anchor_dy', # does nothing
'text-align': 'horizontal_alignment',
'text-allow-overlap': 'allow_overlap',
'text-avoid-edges': 'avoid_edges',
'text-character-spacing': 'character_spacing',
'text-dx': 'dx',
'text-dy': 'dy',
'text-face-name': 'face_name',
'text-fill': 'fill',
'text-fontset': 'fontset',
'text-force-odd-labels': 'force_odd_labels',
'text-halo-fill': 'halo_fill',
'text-halo-radius': 'halo_radius',
'text-justify-align': 'justify_alignment',
'text-label-position-tolerance': 'label_position_tolerance',
'text-line-spacing': 'line_spacing',
'text-max-char-angle-delta': 'max_char_angle_delta',
'text-min-distance': 'minimum_distance',
'text-placement': 'label_placement',
'text-ratio': 'text_ratio',
'text-size': 'size',
'text-spacing': 'spacing',
'text-transform': 'text_convert',
'text-vertical-align': 'vertical_alignment',
'text-wrap-width': 'wrap_width',
'text-meta-output': 'meta-output',
'text-meta-writer': 'meta-writer'
}
property_names = property_map.keys()
# pull out all the names
text_names = [dec.selector.elements[1].names[0]
for dec in declarations
if len(dec.selector.elements) == 2 and len(dec.selector.elements[1].names) == 1]
# a place to put groups
groups = []
# a separate style element for each text name
for text_name in set(text_names):
# just the ones we care about here.
# the complicated conditional means: get all declarations that
# apply to this text_name specifically, or text in general.
name_declarations = [dec for dec in declarations
if dec.property.name in property_map
and (len(dec.selector.elements) == 1
or (len(dec.selector.elements) == 2
and dec.selector.elements[1].names[0] in (text_name, '*')))]
# a place to put rules
rules = []
for (filter, values) in filtered_property_declarations(name_declarations, property_names):
face_name = values.has_key('text-face-name') and values['text-face-name'].value or None
fontset = values.has_key('text-fontset') and values['text-fontset'].value or None
size = values.has_key('text-size') and values['text-size'].value
color = values.has_key('text-fill') and values['text-fill'].value
ratio = values.has_key('text-ratio') and values['text-ratio'].value or None
wrap_width = values.has_key('text-wrap-width') and values['text-wrap-width'].value or None
label_spacing = values.has_key('text-spacing') and values['text-spacing'].value or None
label_position_tolerance = values.has_key('text-label-position-tolerance') and values['text-label-position-tolerance'].value or None
max_char_angle_delta = values.has_key('text-max-char-angle-delta') and values['text-max-char-angle-delta'].value or None
halo_color = values.has_key('text-halo-fill') and values['text-halo-fill'].value or None
halo_radius = values.has_key('text-halo-radius') and values['text-halo-radius'].value or None
dx = values.has_key('text-dx') and values['text-dx'].value or None
dy = values.has_key('text-dy') and values['text-dy'].value or None
avoid_edges = values.has_key('text-avoid-edges') and values['text-avoid-edges'].value or None
minimum_distance = values.has_key('text-min-distance') and values['text-min-distance'].value or None
allow_overlap = values.has_key('text-allow-overlap') and values['text-allow-overlap'].value or None
label_placement = values.has_key('text-placement') and values['text-placement'].value or None
text_transform = values.has_key('text-transform') and values['text-transform'].value or None
anchor_dx = values.has_key('text-anchor-dx') and values['text-anchor-dx'].value or None
anchor_dy = values.has_key('text-anchor-dy') and values['text-anchor-dy'].value or None
horizontal_alignment = values.has_key('text-horizontal-align') and values['text-horizontal-align'].value or None
vertical_alignment = values.has_key('text-vertical-align') and values['text-vertical-align'].value or None
justify_alignment = values.has_key('text-justify-align') and values['text-justify-align'].value or None
force_odd_labels = values.has_key('text-force-odd-labels') and values['text-force-odd-labels'].value or None
line_spacing = values.has_key('text-line-spacing') and values['text-line-spacing'].value or None
character_spacing = values.has_key('text-character-spacing') and values['text-character-spacing'].value or None
if (face_name or fontset) and size and color:
symbolizer = output.TextSymbolizer(text_name, face_name, size, color, \
wrap_width, label_spacing, label_position_tolerance, \
max_char_angle_delta, halo_color, halo_radius, dx, dy, \
avoid_edges, minimum_distance, allow_overlap, label_placement, \
line_spacing, character_spacing, text_transform, fontset,
anchor_dx, anchor_dy,horizontal_alignment, \
vertical_alignment, justify_alignment, force_odd_labels)
rules.append(make_rule(filter, symbolizer))
groups.append((text_name, rules))
return dict(groups)
def locally_cache_remote_file(href, dir):
""" Locally cache a remote resource using a predictable file name
and awareness of modification date. Assume that files are "normal",
which is to say they have filenames with extensions.
"""
scheme, host, remote_path, params, query, fragment = urlparse(href)
assert scheme in ('http','https'), 'Scheme must be either http or https, not "%s" (for %s)' % (scheme,href)
head, ext = posixpath.splitext(posixpath.basename(remote_path))
head = sub(r'[^\w\-_]', '', head)
hash = md5(href).hexdigest()[:8]
local_path = '%(dir)s/%(host)s-%(hash)s-%(head)s%(ext)s' % locals()
headers = {}
if posixpath.exists(local_path):
msg('Found local file: %s' % local_path )
t = localtime(os.stat(local_path).st_mtime)
headers['If-Modified-Since'] = strftime('%a, %d %b %Y %H:%M:%S %Z', t)
if scheme == 'https':
conn = HTTPSConnection(host, timeout=5)
else:
conn = HTTPConnection(host, timeout=5)
if query:
remote_path += '?%s' % query
conn.request('GET', remote_path, headers=headers)
resp = conn.getresponse()
if resp.status in range(200, 210):
# hurrah, it worked
f = open(un_posix(local_path), 'wb')
msg('Reading from remote: %s' % remote_path)
f.write(resp.read())
f.close()
elif resp.status in (301, 302, 303) and resp.getheader('location', False):
# follow a redirect, totally untested.
redirected_href = urljoin(href, resp.getheader('location'))
redirected_path = locally_cache_remote_file(redirected_href, dir)
os.rename(redirected_path, local_path)
elif resp.status == 304:
# hurrah, it's cached
msg('Reading directly from local cache')
pass
else:
raise Exception("Failed to get remote resource %s: %s" % (href, resp.status))
return local_path
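# Example (a minimal sketch; the URL and cache dir are hypothetical):
#
#     locally_cache_remote_file('http://example.com/img/marker.png', '/tmp/cache')
#     # -> '/tmp/cache/example.com-<md5 prefix>-marker.png', fetched once
#     #    and revalidated via If-Modified-Since on later calls.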
def post_process_symbolizer_image_file(file_href, dirs):
""" Given an image file href and a set of directories, modify the image file
name so it's correct with respect to the output and cache directories.
"""
# support latest mapnik features of auto-detection
# of image sizes and jpeg reading support...
# http://trac.mapnik.org/ticket/508
mapnik_auto_image_support = (MAPNIK_VERSION >= 701)
mapnik_requires_absolute_paths = (MAPNIK_VERSION < 601)
file_href = urljoin(dirs.source.rstrip('/')+'/', file_href)
scheme, n, path, p, q, f = urlparse(file_href)
if scheme in ('http','https'):
scheme, path = '', locally_cache_remote_file(file_href, dirs.cache)
if scheme not in ('file', '') or not systempath.exists(un_posix(path)):
raise Exception("Image file needs to be a working, fetchable resource, not %s" % file_href)
if not mapnik_auto_image_support and not Image:
raise SystemExit('PIL (Python Imaging Library) is required for handling image data unless you are using PNG inputs and running Mapnik >=0.7.0')
img = Image.open(un_posix(path))
if mapnik_requires_absolute_paths:
path = posixpath.realpath(path)
else:
path = dirs.output_path(path)
msg('reading symbol: %s' % path)
image_name, ext = posixpath.splitext(path)
if ext in ('.png', '.tif', '.tiff'):
output_ext = ext
else:
output_ext = '.png'
# new local file name
dest_file = un_posix('%s%s' % (image_name, output_ext))
if not posixpath.exists(dest_file):
img.save(dest_file,'PNG')
msg('Destination file: %s' % dest_file)
return dest_file, output_ext[1:], img.size[0], img.size[1]
def get_shield_rule_groups(declarations, dirs):
""" Given a list of declarations, return a list of output.Rule objects.
Optionally provide an output directory for local copies of image files.
"""
property_map = {'shield-face-name': 'face_name',
'shield-fontset': 'fontset',
'shield-size': 'size',
'shield-fill': 'fill', 'shield-character-spacing': 'character_spacing',
'shield-line-spacing': 'line_spacing',
'shield-spacing': 'spacing', 'shield-min-distance': 'minimum_distance',
'shield-file': 'file', 'shield-width': 'width', 'shield-height': 'height',
'shield-meta-output': 'meta-output', 'shield-meta-writer': 'meta-writer',
'shield-text-dx': 'dx', 'shield-text-dy': 'dy'}
property_names = property_map.keys()
# pull out all the names
text_names = [dec.selector.elements[1].names[0]
for dec in declarations
if len(dec.selector.elements) == 2 and len(dec.selector.elements[1].names) == 1]
# a place to put groups
groups = []
# a separate style element for each text name
for text_name in set(text_names):
# just the ones we care about here.
# the complicated conditional means: get all declarations that
# apply to this text_name specifically, or text in general.
name_declarations = [dec for dec in declarations
if dec.property.name in property_map
and (len(dec.selector.elements) == 1
or (len(dec.selector.elements) == 2
and dec.selector.elements[1].names[0] in (text_name, '*')))]
# a place to put rules
rules = []
for (filter, values) in filtered_property_declarations(name_declarations, property_names):
face_name = values.has_key('shield-face-name') and values['shield-face-name'].value or None
fontset = values.has_key('shield-fontset') and values['shield-fontset'].value or None
size = values.has_key('shield-size') and values['shield-size'].value or None
file, filetype, width, height \
= values.has_key('shield-file') \
and post_process_symbolizer_image_file(str(values['shield-file'].value), dirs) \
or (None, None, None, None)
width = values.has_key('shield-width') and values['shield-width'].value or width
height = values.has_key('shield-height') and values['shield-height'].value or height
color = values.has_key('shield-fill') and values['shield-fill'].value or None
minimum_distance = values.has_key('shield-min-distance') and values['shield-min-distance'].value or None
character_spacing = values.has_key('shield-character-spacing') and values['shield-character-spacing'].value or None
line_spacing = values.has_key('shield-line-spacing') and values['shield-line-spacing'].value or None
label_spacing = values.has_key('shield-spacing') and values['shield-spacing'].value or None
text_dx = values.has_key('shield-text-dx') and values['shield-text-dx'].value or 0
text_dy = values.has_key('shield-text-dy') and values['shield-text-dy'].value or 0
if file and (face_name or fontset):
symbolizer = output.ShieldSymbolizer(text_name, face_name, size, file, filetype,
width, height, color, minimum_distance, character_spacing,
line_spacing, label_spacing, text_dx=text_dx, text_dy=text_dy,
fontset=fontset)
rules.append(make_rule(filter, symbolizer))
groups.append((text_name, rules))
return dict(groups)
def get_point_rules(declarations, dirs):
""" Given a list of declarations, return a list of output.Rule objects.
Optionally provide an output directory for local copies of image files.
"""
property_map = {'point-file': 'file', 'point-width': 'width',
'point-height': 'height', 'point-type': 'type',
'point-allow-overlap': 'allow_overlap',
'point-meta-output': 'meta-output', 'point-meta-writer': 'meta-writer'}
property_names = property_map.keys()
# a place to put rules
rules = []
for (filter, values) in filtered_property_declarations(declarations, property_names):
point_file, point_type, point_width, point_height \
= values.has_key('point-file') \
and post_process_symbolizer_image_file(str(values['point-file'].value), dirs) \
or (None, None, None, None)
point_width = values.has_key('point-width') and values['point-width'].value or point_width
point_height = values.has_key('point-height') and values['point-height'].value or point_height
point_allow_overlap = values.has_key('point-allow-overlap') and values['point-allow-overlap'].value or None
symbolizer = point_file and output.PointSymbolizer(point_file, point_type, point_width, point_height, point_allow_overlap)
if symbolizer:
rules.append(make_rule(filter, symbolizer))
return rules
def get_polygon_pattern_rules(declarations, dirs):
""" Given a list of declarations, return a list of output.Rule objects.
Optionally provide an output directory for local copies of image files.
"""
property_map = {'polygon-pattern-file': 'file', 'polygon-pattern-width': 'width',
'polygon-pattern-height': 'height', 'polygon-pattern-type': 'type',
'polygon-meta-output': 'meta-output', 'polygon-meta-writer': 'meta-writer'}
property_names = property_map.keys()
# a place to put rules
rules = []
for (filter, values) in filtered_property_declarations(declarations, property_names):
poly_pattern_file, poly_pattern_type, poly_pattern_width, poly_pattern_height \
= values.has_key('polygon-pattern-file') \
and post_process_symbolizer_image_file(str(values['polygon-pattern-file'].value), dirs) \
or (None, None, None, None)
poly_pattern_width = values.has_key('polygon-pattern-width') and values['polygon-pattern-width'].value or poly_pattern_width
poly_pattern_height = values.has_key('polygon-pattern-height') and values['polygon-pattern-height'].value or poly_pattern_height
symbolizer = poly_pattern_file and output.PolygonPatternSymbolizer(poly_pattern_file, poly_pattern_type, poly_pattern_width, poly_pattern_height)
if symbolizer:
rules.append(make_rule(filter, symbolizer))
return rules
def get_line_pattern_rules(declarations, dirs):
""" Given a list of declarations, return a list of output.Rule objects.
Optionally provide an output directory for local copies of image files.
"""
property_map = {'line-pattern-file': 'file', 'line-pattern-width': 'width',
'line-pattern-height': 'height', 'line-pattern-type': 'type',
'line-pattern-meta-output': 'meta-output', 'line-pattern-meta-writer': 'meta-writer'}
property_names = property_map.keys()
# a place to put rules
rules = []
for (filter, values) in filtered_property_declarations(declarations, property_names):
line_pattern_file, line_pattern_type, line_pattern_width, line_pattern_height \
= values.has_key('line-pattern-file') \
and post_process_symbolizer_image_file(str(values['line-pattern-file'].value), dirs) \
or (None, None, None, None)
line_pattern_width = values.has_key('line-pattern-width') and values['line-pattern-width'].value or line_pattern_width
line_pattern_height = values.has_key('line-pattern-height') and values['line-pattern-height'].value or line_pattern_height
symbolizer = line_pattern_file and output.LinePatternSymbolizer(line_pattern_file, line_pattern_type, line_pattern_width, line_pattern_height)
if symbolizer:
rules.append(make_rule(filter, symbolizer))
return rules
def get_applicable_declarations(element, declarations):
""" Given an XML element and a list of declarations, return the ones
that match as a list of (property, value, selector) tuples.
"""
element_tag = element.tag
element_id = element.get('id', None)
element_classes = element.get('class', '').split()
return [dec for dec in declarations
if dec.selector.matches(element_tag, element_id, element_classes)]
def unzip_shapefile_into(zip_path, dir, host=None):
"""
"""
hash = md5(zip_path).hexdigest()[:8]
zip_file = zipfile.ZipFile(un_posix(zip_path))
zip_ctime = os.stat(un_posix(zip_path)).st_ctime
infos = zip_file.infolist()
extensions = [posixpath.splitext(info.filename)[1] for info in infos]
host_prefix = host and ('%(host)s-' % locals()) or ''
shape_parts = ('.shp', True), ('.shx', True), ('.dbf', True), ('.prj', False), ('.index', False)
for (expected, required) in shape_parts:
if required and expected not in extensions:
raise Exception('Zip file %(zip_path)s missing extension "%(expected)s"' % locals())
for info in infos:
head, ext = posixpath.splitext(posixpath.basename(info.filename))
head = sub(r'[^\w\-_]', '', head)
if ext == expected:
file_data = zip_file.read(info.filename)
file_name = '%(dir)s/%(host_prefix)s%(hash)s-%(head)s%(ext)s' % locals()
if not systempath.exists(un_posix(file_name)) or os.stat(un_posix(file_name)).st_ctime < zip_ctime:
file_ = open(un_posix(file_name), 'wb')
file_.write(file_data)
file_.close()
if ext == '.shp':
local = file_name[:-4]
break
return local
def localize_shapefile(shp_href, dirs):
""" Given a shapefile href and a set of directories, modify the shapefile
name so it's correct with respect to the output and cache directories.
"""
# support latest mapnik features of auto-detection
# of image sizes and jpeg reading support...
# http://trac.mapnik.org/ticket/508
mapnik_requires_absolute_paths = (MAPNIK_VERSION < 601)
shp_href = urljoin(dirs.source.rstrip('/')+'/', shp_href)
scheme, host, path, p, q, f = urlparse(shp_href)
if scheme in ('http','https'):
msg('%s | %s' % (shp_href, dirs.cache))
scheme, path = '', locally_cache_remote_file(shp_href, dirs.cache)
else:
host = None
# collect drive for windows
to_posix(systempath.realpath(path))
if scheme not in ('file', ''):
raise Exception("Shapefile needs to be local, not %s" % shp_href)
if mapnik_requires_absolute_paths:
path = posixpath.realpath(path)
original = path
path = dirs.output_path(path)
if path.endswith('.zip'):
# unzip_shapefile_into needs a path it can find
path = posixpath.join(dirs.output, path)
path = unzip_shapefile_into(path, dirs.cache, host)
return dirs.output_path(path)
def localize_file_datasource(file_href, dirs):
""" Handle localizing file-based datasources other than shapefiles.
This will only work for single-file based types.
"""
# support latest mapnik features of auto-detection
# of image sizes and jpeg reading support...
# http://trac.mapnik.org/ticket/508
mapnik_requires_absolute_paths = (MAPNIK_VERSION < 601)
file_href = urljoin(dirs.source.rstrip('/')+'/', file_href)
scheme, n, path, p, q, f = urlparse(file_href)
if scheme in ('http','https'):
scheme, path = '', locally_cache_remote_file(file_href, dirs.cache)
if scheme not in ('file', ''):
raise Exception("Datasource file needs to be a working, fetchable resource, not %s" % file_href)
if mapnik_requires_absolute_paths:
return posixpath.realpath(path)
else:
return dirs.output_path(path)
def compile(src, dirs, verbose=False, srs=None, datasources_cfg=None, user_styles=[], scale=1):
""" Compile a Cascadenik MML file, returning a cascadenik.output.Map object.
Parameters:
src:
Path to .mml file, or raw .mml file content.
dirs:
Object with directory names in 'cache', 'output', and 'source' attributes.
dirs.source is expected to be fully-qualified, e.g. "http://example.com"
or "file:///home/example".
Keyword Parameters:
verbose:
If True, debugging information will be printed to stderr.
srs:
Target spatial reference system for the compiled stylesheet.
If provided, overrides default map srs in the .mml file.
datasources_cfg:
If a file or URL, uses the config to override datasources or parameters
(i.e. postgis_dbname) defined in the map's canonical <DataSourcesConfig>
entities. This is most useful in development, whereby one redefines
individual datasources, connection parameters, and/or local paths.
user_styles:
A optional list of files or URLs, that override styles defined in
the map source. These are evaluated in order, with declarations from
later styles overriding those from earlier styles.
scale:
Scale value for output map, 2 doubles the size for high-res displays.
"""
global VERBOSE
if verbose:
VERBOSE = True
sys.stderr.write('\n')
msg('Targeting mapnik version: %s | %s' % (MAPNIK_VERSION, MAPNIK_VERSION_STR))
if posixpath.exists(src):
doc = ElementTree.parse(src)
map_el = doc.getroot()
else:
try:
# guessing src is a literal XML string?
map_el = ElementTree.fromstring(src)
except:
if not (src[:7] in ('http://', 'https:/', 'file://')):
src = "file://" + src
try:
doc = ElementTree.parse(urllib.urlopen(src))
except IOError, e:
raise IOError('%s: %s' % (e,src))
map_el = doc.getroot()
expand_source_declarations(map_el, dirs, datasources_cfg)
declarations = extract_declarations(map_el, dirs, scale, user_styles)
# a list of layers and a sequential ID generator
layers, ids = [], (i for i in xrange(1, 999999))
# Handle base datasources
# http://trac.mapnik.org/changeset/574
datasource_templates = {}
for base_el in map_el:
if base_el.tag != 'Datasource':
continue
datasource_templates[base_el.get('name')] = dict(((p.get('name'),p.text) for p in base_el.findall('Parameter')))
for layer_el in map_el.findall('Layer'):
# nevermind with this one
if layer_el.get('status', None) in ('off', '0', 0):
continue
# build up a map of Parameters for this Layer
datasource_params = dict((p.get('name'),p.text) for p in layer_el.find('Datasource').findall('Parameter'))
base = layer_el.find('Datasource').get('base')
if base:
datasource_params.update(datasource_templates[base])
if datasource_params.get('table'):
# remove line breaks from possible SQL, using a possibly-unsafe regexp
# that simply blows away anything that looks like it might be a SQL comment.
# http://trac.mapnik.org/ticket/173
            if MAPNIK_VERSION < 601:
                sql = datasource_params.get('table')
                # call re.compile explicitly: the bare name "compile" would
                # resolve to the compile() function defined above (this
                # assumes the module imports re).
                sql = re.compile(r'--.*$', re.MULTILINE).sub('', sql)
sql = sql.replace('\r', ' ').replace('\n', ' ')
datasource_params['table'] = sql
elif datasource_params.get('file') is not None:
# make sure we localize any remote files
file_param = datasource_params.get('file')
if datasource_params.get('type') == 'shape':
# handle a local shapefile or fetch a remote, zipped shapefile
msg('Handling shapefile datasource...')
file_param = localize_shapefile(file_param, dirs)
# TODO - support datasource reprojection to make map srs
# TODO - support automatically indexing shapefiles
else: # ogr,raster, gdal, sqlite
# attempt to generically handle other file based datasources
msg('Handling generic datasource...')
file_param = localize_file_datasource(file_param, dirs)
msg("Localized path = %s" % un_posix(file_param))
datasource_params['file'] = un_posix(file_param)
# TODO - consider custom support for other mapnik datasources:
# sqlite, oracle, osm, kismet, gdal, raster, rasterlite
layer_declarations = get_applicable_declarations(layer_el, declarations)
# a list of styles
styles = []
if datasource_params.get('type', None) == 'gdal':
styles.append(output.Style('raster style %d' % ids.next(),
get_raster_rules(layer_declarations)))
else:
styles.append(output.Style('polygon style %d' % ids.next(),
get_polygon_rules(layer_declarations)))
styles.append(output.Style('polygon pattern style %d' % ids.next(),
get_polygon_pattern_rules(layer_declarations, dirs)))
styles.append(output.Style('line style %d' % ids.next(),
get_line_rules(layer_declarations)))
styles.append(output.Style('line pattern style %d' % ids.next(),
get_line_pattern_rules(layer_declarations, dirs)))
for (shield_name, shield_rules) in get_shield_rule_groups(layer_declarations, dirs).items():
styles.append(output.Style('shield style %d (%s)' % (ids.next(), shield_name), shield_rules))
for (text_name, text_rules) in get_text_rule_groups(layer_declarations).items():
styles.append(output.Style('text style %d (%s)' % (ids.next(), text_name), text_rules))
styles.append(output.Style('point style %d' % ids.next(),
get_point_rules(layer_declarations, dirs)))
styles = [s for s in styles if s.rules]
if styles:
datasource = output.Datasource(**datasource_params)
layer = output.Layer('layer %d' % ids.next(),
datasource, styles,
layer_el.get('srs', None),
layer_el.get('min_zoom', None) and int(layer_el.get('min_zoom')) or None,
layer_el.get('max_zoom', None) and int(layer_el.get('max_zoom')) or None)
layers.append(layer)
map_attrs = get_map_attributes(get_applicable_declarations(map_el, declarations))
    # if a target srs is provided, override whatever is in the mml
if srs is not None:
map_el.set('srs', srs)
return output.Map(map_el.attrib.get('srs', None), layers, **map_attrs)
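# Usage sketch (added, illustrative): compile() expects a dirs object exposing
# 'cache', 'output' and 'source' attributes plus an output_path() method. The
# Directories name below is hypothetical, standing in for whatever the
# surrounding project provides.
#
#   dirs = Directories(output='out', cache='cache',
#                      source='file:///home/example')
#   mmap = compile('style.mml', dirs, verbose=True)
#   # mmap is a cascadenik.output.Map, ready to be serialized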
|
from distutils.core import setup
setup(
name='SocksiPy',
version='1.00',
py_modules=["socks", ],
)
|
from . import domainresource
class StructureMap(domainresource.DomainResource):
""" A Map of relationships between 2 structures that can be used to transform
data.
"""
resource_type = "StructureMap"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.contact = None
""" Contact details for the publisher.
List of `ContactDetail` items (represented as `dict` in JSON). """
self.copyright = None
""" Use and/or publishing restrictions.
Type `str`. """
self.date = None
""" Date this was last changed.
Type `FHIRDate` (represented as `str` in JSON). """
self.description = None
""" Natural language description of the structure map.
Type `str`. """
self.experimental = None
""" For testing purposes, not real usage.
Type `bool`. """
self.group = None
""" Named sections for reader convenience.
List of `StructureMapGroup` items (represented as `dict` in JSON). """
self.identifier = None
""" Additional identifier for the structure map.
List of `Identifier` items (represented as `dict` in JSON). """
self.import_fhir = None
""" Other maps used by this map (canonical URLs).
List of `str` items. """
self.jurisdiction = None
""" Intended jurisdiction for structure map (if applicable).
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.name = None
""" Name for this structure map (computer friendly).
Type `str`. """
self.publisher = None
""" Name of the publisher (organization or individual).
Type `str`. """
self.purpose = None
""" Why this structure map is defined.
Type `str`. """
self.status = None
""" draft | active | retired | unknown.
Type `str`. """
self.structure = None
""" Structure Definition used by this map.
List of `StructureMapStructure` items (represented as `dict` in JSON). """
self.title = None
""" Name for this structure map (human friendly).
Type `str`. """
self.url = None
""" Logical URI to reference this structure map (globally unique).
Type `str`. """
self.useContext = None
""" Context the content is intended to support.
List of `UsageContext` items (represented as `dict` in JSON). """
self.version = None
""" Business version of the structure map.
Type `str`. """
super(StructureMap, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(StructureMap, self).elementProperties()
js.extend([
("contact", "contact", contactdetail.ContactDetail, True, None, False),
("copyright", "copyright", str, False, None, False),
("date", "date", fhirdate.FHIRDate, False, None, False),
("description", "description", str, False, None, False),
("experimental", "experimental", bool, False, None, False),
("group", "group", StructureMapGroup, True, None, True),
("identifier", "identifier", identifier.Identifier, True, None, False),
("import_fhir", "import", str, True, None, False),
("jurisdiction", "jurisdiction", codeableconcept.CodeableConcept, True, None, False),
("name", "name", str, False, None, True),
("publisher", "publisher", str, False, None, False),
("purpose", "purpose", str, False, None, False),
("status", "status", str, False, None, True),
("structure", "structure", StructureMapStructure, True, None, False),
("title", "title", str, False, None, False),
("url", "url", str, False, None, True),
("useContext", "useContext", usagecontext.UsageContext, True, None, False),
("version", "version", str, False, None, False),
])
return js
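# Note (added for clarity): each elementProperties() tuple appears to follow
# the fhirclient convention ("name", "json name", type, is_list, "of_many"
# choice group, required) -- e.g. ("group", "group", StructureMapGroup, True,
# None, True) declares group as a mandatory list property.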
from . import backboneelement
class StructureMapGroup(backboneelement.BackboneElement):
""" Named sections for reader convenience.
    Organizes the mapping into manageable chunks for human review/ease of
maintenance.
"""
resource_type = "StructureMapGroup"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.documentation = None
""" Additional description/explaination for group.
Type `str`. """
self.extends = None
""" Another group that this group adds rules to.
Type `str`. """
self.input = None
""" Named instance provided when invoking the map.
List of `StructureMapGroupInput` items (represented as `dict` in JSON). """
self.name = None
""" Human-readable label.
Type `str`. """
self.rule = None
""" Transform Rule from source to target.
List of `StructureMapGroupRule` items (represented as `dict` in JSON). """
self.typeMode = None
""" none | types | type-and-types.
Type `str`. """
super(StructureMapGroup, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(StructureMapGroup, self).elementProperties()
js.extend([
("documentation", "documentation", str, False, None, False),
("extends", "extends", str, False, None, False),
("input", "input", StructureMapGroupInput, True, None, True),
("name", "name", str, False, None, True),
("rule", "rule", StructureMapGroupRule, True, None, True),
("typeMode", "typeMode", str, False, None, True),
])
return js
class StructureMapGroupInput(backboneelement.BackboneElement):
""" Named instance provided when invoking the map.
A name assigned to an instance of data. The instance must be provided when
the mapping is invoked.
"""
resource_type = "StructureMapGroupInput"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.documentation = None
""" Documentation for this instance of data.
Type `str`. """
self.mode = None
""" source | target.
Type `str`. """
self.name = None
""" Name for this instance of data.
Type `str`. """
self.type = None
""" Type for this instance of data.
Type `str`. """
super(StructureMapGroupInput, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(StructureMapGroupInput, self).elementProperties()
js.extend([
("documentation", "documentation", str, False, None, False),
("mode", "mode", str, False, None, True),
("name", "name", str, False, None, True),
("type", "type", str, False, None, False),
])
return js
class StructureMapGroupRule(backboneelement.BackboneElement):
""" Transform Rule from source to target.
"""
resource_type = "StructureMapGroupRule"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.dependent = None
""" Which other rules to apply in the context of this rule.
List of `StructureMapGroupRuleDependent` items (represented as `dict` in JSON). """
self.documentation = None
""" Documentation for this instance of data.
Type `str`. """
self.name = None
""" Name of the rule for internal references.
Type `str`. """
self.rule = None
""" Rules contained in this rule.
List of `StructureMapGroupRule` items (represented as `dict` in JSON). """
self.source = None
""" Source inputs to the mapping.
List of `StructureMapGroupRuleSource` items (represented as `dict` in JSON). """
self.target = None
""" Content to create because of this mapping rule.
List of `StructureMapGroupRuleTarget` items (represented as `dict` in JSON). """
super(StructureMapGroupRule, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(StructureMapGroupRule, self).elementProperties()
js.extend([
("dependent", "dependent", StructureMapGroupRuleDependent, True, None, False),
("documentation", "documentation", str, False, None, False),
("name", "name", str, False, None, True),
("rule", "rule", StructureMapGroupRule, True, None, False),
("source", "source", StructureMapGroupRuleSource, True, None, True),
("target", "target", StructureMapGroupRuleTarget, True, None, False),
])
return js
class StructureMapGroupRuleDependent(backboneelement.BackboneElement):
""" Which other rules to apply in the context of this rule.
"""
resource_type = "StructureMapGroupRuleDependent"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.name = None
""" Name of a rule or group to apply.
Type `str`. """
self.variable = None
""" Variable to pass to the rule or group.
List of `str` items. """
super(StructureMapGroupRuleDependent, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(StructureMapGroupRuleDependent, self).elementProperties()
js.extend([
("name", "name", str, False, None, True),
("variable", "variable", str, True, None, True),
])
return js
class StructureMapGroupRuleSource(backboneelement.BackboneElement):
""" Source inputs to the mapping.
"""
resource_type = "StructureMapGroupRuleSource"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.check = None
""" FHIRPath expression - must be true or the mapping engine throws an
error instead of completing.
Type `str`. """
self.condition = None
""" FHIRPath expression - must be true or the rule does not apply.
Type `str`. """
self.context = None
""" Type or variable this rule applies to.
Type `str`. """
self.defaultValueAddress = None
""" Default value if no value exists.
Type `Address` (represented as `dict` in JSON). """
self.defaultValueAge = None
""" Default value if no value exists.
Type `Age` (represented as `dict` in JSON). """
self.defaultValueAnnotation = None
""" Default value if no value exists.
Type `Annotation` (represented as `dict` in JSON). """
self.defaultValueAttachment = None
""" Default value if no value exists.
Type `Attachment` (represented as `dict` in JSON). """
self.defaultValueBase64Binary = None
""" Default value if no value exists.
Type `str`. """
self.defaultValueBoolean = None
""" Default value if no value exists.
Type `bool`. """
self.defaultValueCode = None
""" Default value if no value exists.
Type `str`. """
self.defaultValueCodeableConcept = None
""" Default value if no value exists.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.defaultValueCoding = None
""" Default value if no value exists.
Type `Coding` (represented as `dict` in JSON). """
self.defaultValueContactPoint = None
""" Default value if no value exists.
Type `ContactPoint` (represented as `dict` in JSON). """
self.defaultValueCount = None
""" Default value if no value exists.
Type `Count` (represented as `dict` in JSON). """
self.defaultValueDate = None
""" Default value if no value exists.
Type `FHIRDate` (represented as `str` in JSON). """
self.defaultValueDateTime = None
""" Default value if no value exists.
Type `FHIRDate` (represented as `str` in JSON). """
self.defaultValueDecimal = None
""" Default value if no value exists.
Type `float`. """
self.defaultValueDistance = None
""" Default value if no value exists.
Type `Distance` (represented as `dict` in JSON). """
self.defaultValueDuration = None
""" Default value if no value exists.
Type `Duration` (represented as `dict` in JSON). """
self.defaultValueHumanName = None
""" Default value if no value exists.
Type `HumanName` (represented as `dict` in JSON). """
self.defaultValueId = None
""" Default value if no value exists.
Type `str`. """
self.defaultValueIdentifier = None
""" Default value if no value exists.
Type `Identifier` (represented as `dict` in JSON). """
self.defaultValueInstant = None
""" Default value if no value exists.
Type `FHIRDate` (represented as `str` in JSON). """
self.defaultValueInteger = None
""" Default value if no value exists.
Type `int`. """
self.defaultValueMarkdown = None
""" Default value if no value exists.
Type `str`. """
self.defaultValueMeta = None
""" Default value if no value exists.
Type `Meta` (represented as `dict` in JSON). """
self.defaultValueMoney = None
""" Default value if no value exists.
Type `Money` (represented as `dict` in JSON). """
self.defaultValueOid = None
""" Default value if no value exists.
Type `str`. """
self.defaultValuePeriod = None
""" Default value if no value exists.
Type `Period` (represented as `dict` in JSON). """
self.defaultValuePositiveInt = None
""" Default value if no value exists.
Type `int`. """
self.defaultValueQuantity = None
""" Default value if no value exists.
Type `Quantity` (represented as `dict` in JSON). """
self.defaultValueRange = None
""" Default value if no value exists.
Type `Range` (represented as `dict` in JSON). """
self.defaultValueRatio = None
""" Default value if no value exists.
Type `Ratio` (represented as `dict` in JSON). """
self.defaultValueReference = None
""" Default value if no value exists.
Type `FHIRReference` (represented as `dict` in JSON). """
self.defaultValueSampledData = None
""" Default value if no value exists.
Type `SampledData` (represented as `dict` in JSON). """
self.defaultValueSignature = None
""" Default value if no value exists.
Type `Signature` (represented as `dict` in JSON). """
self.defaultValueString = None
""" Default value if no value exists.
Type `str`. """
self.defaultValueTime = None
""" Default value if no value exists.
Type `FHIRDate` (represented as `str` in JSON). """
self.defaultValueTiming = None
""" Default value if no value exists.
Type `Timing` (represented as `dict` in JSON). """
self.defaultValueUnsignedInt = None
""" Default value if no value exists.
Type `int`. """
self.defaultValueUri = None
""" Default value if no value exists.
Type `str`. """
self.element = None
""" Optional field for this source.
Type `str`. """
self.listMode = None
""" first | not_first | last | not_last | only_one.
Type `str`. """
self.max = None
""" Specified maximum cardinality (number or *).
Type `str`. """
self.min = None
""" Specified minimum cardinality.
Type `int`. """
self.type = None
""" Rule only applies if source has this type.
Type `str`. """
self.variable = None
""" Named context for field, if a field is specified.
Type `str`. """
super(StructureMapGroupRuleSource, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(StructureMapGroupRuleSource, self).elementProperties()
js.extend([
("check", "check", str, False, None, False),
("condition", "condition", str, False, None, False),
("context", "context", str, False, None, True),
("defaultValueAddress", "defaultValueAddress", address.Address, False, "defaultValue", False),
("defaultValueAge", "defaultValueAge", age.Age, False, "defaultValue", False),
("defaultValueAnnotation", "defaultValueAnnotation", annotation.Annotation, False, "defaultValue", False),
("defaultValueAttachment", "defaultValueAttachment", attachment.Attachment, False, "defaultValue", False),
("defaultValueBase64Binary", "defaultValueBase64Binary", str, False, "defaultValue", False),
("defaultValueBoolean", "defaultValueBoolean", bool, False, "defaultValue", False),
("defaultValueCode", "defaultValueCode", str, False, "defaultValue", False),
("defaultValueCodeableConcept", "defaultValueCodeableConcept", codeableconcept.CodeableConcept, False, "defaultValue", False),
("defaultValueCoding", "defaultValueCoding", coding.Coding, False, "defaultValue", False),
("defaultValueContactPoint", "defaultValueContactPoint", contactpoint.ContactPoint, False, "defaultValue", False),
("defaultValueCount", "defaultValueCount", count.Count, False, "defaultValue", False),
("defaultValueDate", "defaultValueDate", fhirdate.FHIRDate, False, "defaultValue", False),
("defaultValueDateTime", "defaultValueDateTime", fhirdate.FHIRDate, False, "defaultValue", False),
("defaultValueDecimal", "defaultValueDecimal", float, False, "defaultValue", False),
("defaultValueDistance", "defaultValueDistance", distance.Distance, False, "defaultValue", False),
("defaultValueDuration", "defaultValueDuration", duration.Duration, False, "defaultValue", False),
("defaultValueHumanName", "defaultValueHumanName", humanname.HumanName, False, "defaultValue", False),
("defaultValueId", "defaultValueId", str, False, "defaultValue", False),
("defaultValueIdentifier", "defaultValueIdentifier", identifier.Identifier, False, "defaultValue", False),
("defaultValueInstant", "defaultValueInstant", fhirdate.FHIRDate, False, "defaultValue", False),
("defaultValueInteger", "defaultValueInteger", int, False, "defaultValue", False),
("defaultValueMarkdown", "defaultValueMarkdown", str, False, "defaultValue", False),
("defaultValueMeta", "defaultValueMeta", meta.Meta, False, "defaultValue", False),
("defaultValueMoney", "defaultValueMoney", money.Money, False, "defaultValue", False),
("defaultValueOid", "defaultValueOid", str, False, "defaultValue", False),
("defaultValuePeriod", "defaultValuePeriod", period.Period, False, "defaultValue", False),
("defaultValuePositiveInt", "defaultValuePositiveInt", int, False, "defaultValue", False),
("defaultValueQuantity", "defaultValueQuantity", quantity.Quantity, False, "defaultValue", False),
("defaultValueRange", "defaultValueRange", range.Range, False, "defaultValue", False),
("defaultValueRatio", "defaultValueRatio", ratio.Ratio, False, "defaultValue", False),
("defaultValueReference", "defaultValueReference", fhirreference.FHIRReference, False, "defaultValue", False),
("defaultValueSampledData", "defaultValueSampledData", sampleddata.SampledData, False, "defaultValue", False),
("defaultValueSignature", "defaultValueSignature", signature.Signature, False, "defaultValue", False),
("defaultValueString", "defaultValueString", str, False, "defaultValue", False),
("defaultValueTime", "defaultValueTime", fhirdate.FHIRDate, False, "defaultValue", False),
("defaultValueTiming", "defaultValueTiming", timing.Timing, False, "defaultValue", False),
("defaultValueUnsignedInt", "defaultValueUnsignedInt", int, False, "defaultValue", False),
("defaultValueUri", "defaultValueUri", str, False, "defaultValue", False),
("element", "element", str, False, None, False),
("listMode", "listMode", str, False, None, False),
("max", "max", str, False, None, False),
("min", "min", int, False, None, False),
("type", "type", str, False, None, False),
("variable", "variable", str, False, None, False),
])
return js
class StructureMapGroupRuleTarget(backboneelement.BackboneElement):
""" Content to create because of this mapping rule.
"""
resource_type = "StructureMapGroupRuleTarget"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.context = None
""" Type or variable this rule applies to.
Type `str`. """
self.contextType = None
""" type | variable.
Type `str`. """
self.element = None
""" Field to create in the context.
Type `str`. """
self.listMode = None
""" first | share | last | collate.
List of `str` items. """
self.listRuleId = None
""" Internal rule reference for shared list items.
Type `str`. """
self.parameter = None
""" Parameters to the transform.
List of `StructureMapGroupRuleTargetParameter` items (represented as `dict` in JSON). """
self.transform = None
""" create | copy +.
Type `str`. """
self.variable = None
""" Named context for field, if desired, and a field is specified.
Type `str`. """
super(StructureMapGroupRuleTarget, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(StructureMapGroupRuleTarget, self).elementProperties()
js.extend([
("context", "context", str, False, None, False),
("contextType", "contextType", str, False, None, False),
("element", "element", str, False, None, False),
("listMode", "listMode", str, True, None, False),
("listRuleId", "listRuleId", str, False, None, False),
("parameter", "parameter", StructureMapGroupRuleTargetParameter, True, None, False),
("transform", "transform", str, False, None, False),
("variable", "variable", str, False, None, False),
])
return js
class StructureMapGroupRuleTargetParameter(backboneelement.BackboneElement):
""" Parameters to the transform.
"""
resource_type = "StructureMapGroupRuleTargetParameter"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.valueBoolean = None
""" Parameter value - variable or literal.
Type `bool`. """
self.valueDecimal = None
""" Parameter value - variable or literal.
Type `float`. """
self.valueId = None
""" Parameter value - variable or literal.
Type `str`. """
self.valueInteger = None
""" Parameter value - variable or literal.
Type `int`. """
self.valueString = None
""" Parameter value - variable or literal.
Type `str`. """
super(StructureMapGroupRuleTargetParameter, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(StructureMapGroupRuleTargetParameter, self).elementProperties()
js.extend([
("valueBoolean", "valueBoolean", bool, False, "value", True),
("valueDecimal", "valueDecimal", float, False, "value", True),
("valueId", "valueId", str, False, "value", True),
("valueInteger", "valueInteger", int, False, "value", True),
("valueString", "valueString", str, False, "value", True),
])
return js
class StructureMapStructure(backboneelement.BackboneElement):
""" Structure Definition used by this map.
A structure definition used by this map. The structure definition may
describe instances that are converted, or the instances that are produced.
"""
resource_type = "StructureMapStructure"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.alias = None
""" Name for type in this map.
Type `str`. """
self.documentation = None
""" Documentation on use of structure.
Type `str`. """
self.mode = None
""" source | queried | target | produced.
Type `str`. """
self.url = None
""" Canonical URL for structure definition.
Type `str`. """
super(StructureMapStructure, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(StructureMapStructure, self).elementProperties()
js.extend([
("alias", "alias", str, False, None, False),
("documentation", "documentation", str, False, None, False),
("mode", "mode", str, False, None, True),
("url", "url", str, False, None, True),
])
return js
import sys
try:
from . import address
except ImportError:
address = sys.modules[__package__ + '.address']
try:
from . import age
except ImportError:
age = sys.modules[__package__ + '.age']
try:
from . import annotation
except ImportError:
annotation = sys.modules[__package__ + '.annotation']
try:
from . import attachment
except ImportError:
attachment = sys.modules[__package__ + '.attachment']
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import coding
except ImportError:
coding = sys.modules[__package__ + '.coding']
try:
from . import contactdetail
except ImportError:
contactdetail = sys.modules[__package__ + '.contactdetail']
try:
from . import contactpoint
except ImportError:
contactpoint = sys.modules[__package__ + '.contactpoint']
try:
from . import count
except ImportError:
count = sys.modules[__package__ + '.count']
try:
from . import distance
except ImportError:
distance = sys.modules[__package__ + '.distance']
try:
from . import duration
except ImportError:
duration = sys.modules[__package__ + '.duration']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import humanname
except ImportError:
humanname = sys.modules[__package__ + '.humanname']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import meta
except ImportError:
meta = sys.modules[__package__ + '.meta']
try:
from . import money
except ImportError:
money = sys.modules[__package__ + '.money']
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period']
try:
from . import quantity
except ImportError:
quantity = sys.modules[__package__ + '.quantity']
try:
from . import range
except ImportError:
range = sys.modules[__package__ + '.range']
try:
from . import ratio
except ImportError:
ratio = sys.modules[__package__ + '.ratio']
try:
from . import sampleddata
except ImportError:
sampleddata = sys.modules[__package__ + '.sampleddata']
try:
from . import signature
except ImportError:
signature = sys.modules[__package__ + '.signature']
try:
from . import timing
except ImportError:
timing = sys.modules[__package__ + '.timing']
try:
from . import usagecontext
except ImportError:
usagecontext = sys.modules[__package__ + '.usagecontext']
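# Construction sketch (added, illustrative): resources in this module follow
# the fhirclient pattern of initializing from a JSON dictionary. A minimal,
# deliberately incomplete example, using strict=False so omitted mandatory
# elements do not raise:
#
#   smap = StructureMap(jsondict={
#       "resourceType": "StructureMap",
#       "url": "http://example.org/StructureMap/demo",
#       "name": "Demo",
#       "status": "draft",
#   }, strict=False)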
|
""" Generator for C++ style thunks """
import glob
import os
import re
import sys
from idl_log import ErrOut, InfoOut, WarnOut
from idl_node import IDLAttribute, IDLNode
from idl_ast import IDLAst
from idl_option import GetOption, Option, ParseOptions
from idl_outfile import IDLOutFile
from idl_parser import ParseFiles
from idl_c_proto import CGen, GetNodeComments, CommentLines, Comment
from idl_generator import Generator, GeneratorByFile
Option('thunkroot', 'Base directory of output',
default=os.path.join('..', 'thunk'))
class TGenError(Exception):
def __init__(self, msg):
self.value = msg
def __str__(self):
return repr(self.value)
class ThunkBodyMetadata(object):
"""Metadata about thunk body. Used for selecting which headers to emit."""
def __init__(self):
self._apis = set()
def AddApi(self, api):
self._apis.add(api)
def Apis(self):
return self._apis
def _GetBaseFileName(filenode):
"""Returns the base name for output files, given the filenode.
Examples:
'dev/ppb_find_dev.h' -> 'ppb_find'
'trusted/ppb_buffer_trusted.h' -> 'ppb_buffer_trusted'
"""
path, name = os.path.split(filenode.GetProperty('NAME'))
name = os.path.splitext(name)[0]
if name.endswith('_dev'):
# Clip off _dev suffix.
name = name[:-len('_dev')]
return name
def _GetHeaderFileName(filenode):
"""Returns the name for the header for this file."""
path, name = os.path.split(filenode.GetProperty('NAME'))
name = os.path.splitext(name)[0]
if path:
header = "ppapi/c/%s/%s.h" % (path, name)
else:
header = "ppapi/c/%s.h" % name
return header
def _GetThunkFileName(filenode, relpath):
"""Returns the thunk file name."""
path = os.path.split(filenode.GetProperty('NAME'))[0]
name = _GetBaseFileName(filenode)
# We don't reattach the path for thunk.
if relpath: name = os.path.join(relpath, name)
name = '%s%s' % (name, '_thunk.cc')
return name
def _MakeEnterLine(filenode, interface, arg, handle_errors, callback, meta):
"""Returns an EnterInstance/EnterResource string for a function."""
if arg[0] == 'PP_Instance':
if callback is None:
return 'EnterInstance enter(%s);' % arg[1]
else:
return 'EnterInstance enter(%s, %s);' % (arg[1], callback)
elif arg[0] == 'PP_Resource':
api_name = interface.GetName()
if api_name.endswith('_Dev'):
api_name = api_name[:-len('_Dev')]
api_name += '_API'
enter_type = 'EnterResource<%s>' % api_name
# The API header matches the file name, not the interface name.
meta.AddApi(_GetBaseFileName(filenode) + '_api')
if callback is None:
return '%s enter(%s, %s);' % (enter_type, arg[1],
str(handle_errors).lower())
else:
return '%s enter(%s, %s, %s);' % (enter_type, arg[1],
callback,
str(handle_errors).lower())
else:
raise TGenError("Unknown type for _MakeEnterLine: %s" % arg[0])
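# Output illustration (added): for a file 'ppb_audio.idl', interface
# 'PPB_Audio', arg ('PP_Resource', 'audio_resource'), handle_errors=True and
# callback=None, _MakeEnterLine() emits
#   EnterResource<PPB_Audio_API> enter(audio_resource, true);
# and records 'ppb_audio_api' in meta so the matching header is included.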
def _GetShortName(interface, filter_suffixes):
"""Return a shorter interface name that matches Is* and Create* functions."""
parts = interface.GetName().split('_')[1:]
  tail = parts[-1]
if tail in filter_suffixes:
parts = parts[:-1]
return ''.join(parts)
def _IsTypeCheck(interface, node):
"""Returns true if node represents a type-checking function."""
return node.GetName() == 'Is%s' % _GetShortName(interface, ['Dev', 'Private'])
def _GetCreateFuncName(interface):
"""Returns the creation function name for an interface."""
return 'Create%s' % _GetShortName(interface, ['Dev'])
def _GetDefaultFailureValue(t):
"""Returns the default failure value for a given type.
Returns None if no default failure value exists for the type.
"""
values = {
'PP_Bool': 'PP_FALSE',
'PP_Resource': '0',
'struct PP_Var': 'PP_MakeUndefined()',
'int32_t': 'enter.retval()',
'uint16_t': '0',
'uint32_t': '0',
'uint64_t': '0',
}
if t in values:
return values[t]
return None
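# For illustration (added): _GetDefaultFailureValue('PP_Bool') returns
# 'PP_FALSE', while an unlisted type such as 'double' returns None; callers
# treat None as "no safe default" and raise TGenError.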
def _MakeCreateMemberBody(interface, member, args):
"""Returns the body of a Create() function.
Args:
interface - IDLNode for the interface
member - IDLNode for member function
args - List of arguments for the Create() function
"""
if args[0][0] == 'PP_Resource':
body = ' Resource* object =\n'
body += ' PpapiGlobals::Get()->GetResourceTracker()->'
body += 'GetResource(%s);\n' % args[0][1]
body += ' if (!object)\n'
body += ' return 0;\n'
body += ' EnterResourceCreation enter(object->pp_instance());\n'
elif args[0][0] == 'PP_Instance':
body = ' EnterResourceCreation enter(%s);\n' % args[0][1]
else:
raise TGenError('Unknown arg type for Create(): %s' % args[0][0])
body += ' if (enter.failed())\n'
body += ' return 0;\n'
arg_list = ', '.join([a[1] for a in args])
if member.GetProperty('create_func'):
create_func = member.GetProperty('create_func')
else:
create_func = _GetCreateFuncName(interface)
body += ' return enter.functions()->%s(%s);' % (create_func,
arg_list)
return body
def _MakeNormalMemberBody(filenode, node, member, rtype, args, meta):
"""Returns the body of a typical function.
Args:
filenode - IDLNode for the file
node - IDLNode for the interface
member - IDLNode for the member function
rtype - Return type for the member function
args - List of 4-tuple arguments for the member function
meta - ThunkBodyMetadata for header hints
"""
  is_callback_func = args[-1][0] == 'struct PP_CompletionCallback'
if is_callback_func:
call_args = args[:-1] + [('', 'enter.callback()', '', '')]
else:
call_args = args
if args[0][0] == 'PP_Instance':
call_arglist = ', '.join(a[1] for a in call_args)
function_container = 'functions'
else:
call_arglist = ', '.join(a[1] for a in call_args[1:])
function_container = 'object'
invocation = 'enter.%s()->%s(%s)' % (function_container,
member.GetName(),
call_arglist)
handle_errors = not (member.GetProperty('report_errors') == 'False')
if is_callback_func:
    body = ' %s\n' % _MakeEnterLine(filenode, node, args[0], handle_errors,
                                    args[-1][1], meta)
body += ' if (enter.failed())\n'
value = member.GetProperty('on_failure')
if value is None:
value = 'enter.retval()'
body += ' return %s;\n' % value
body += ' return enter.SetResult(%s);\n' % invocation
elif rtype == 'void':
body = ' %s\n' % _MakeEnterLine(filenode, node, args[0], handle_errors,
None, meta)
body += ' if (enter.succeeded())\n'
body += ' %s;' % invocation
else:
value = member.GetProperty('on_failure')
if value is None:
value = _GetDefaultFailureValue(rtype)
if value is None:
raise TGenError('No default value for rtype %s' % rtype)
body = ' %s\n' % _MakeEnterLine(filenode, node, args[0], handle_errors,
None, meta)
body += ' if (enter.failed())\n'
body += ' return %s;\n' % value
body += ' return %s;' % invocation
return body
def DefineMember(filenode, node, member, release, include_version, meta):
"""Returns a definition for a member function of an interface.
Args:
filenode - IDLNode for the file
node - IDLNode for the interface
member - IDLNode for the member function
release - release to generate
include_version - include the version in emitted function name.
    meta - ThunkBodyMetadata for header hints
Returns:
A string with the member definition.
"""
cgen = CGen()
rtype, name, arrays, args = cgen.GetComponents(member, release, 'return')
if _IsTypeCheck(node, member):
body = ' %s\n' % _MakeEnterLine(filenode, node, args[0], False, None, meta)
body += ' return PP_FromBool(enter.succeeded());'
elif member.GetName() == 'Create':
body = _MakeCreateMemberBody(node, member, args)
else:
body = _MakeNormalMemberBody(filenode, node, member, rtype, args, meta)
signature = cgen.GetSignature(member, release, 'return', func_as_ptr=False,
include_version=include_version)
member_code = '%s {\n%s\n}' % (signature, body)
return cgen.Indent(member_code, tabs=0)
class TGen(GeneratorByFile):
def __init__(self):
Generator.__init__(self, 'Thunk', 'tgen', 'Generate the C++ thunk.')
def GenerateFile(self, filenode, releases, options):
savename = _GetThunkFileName(filenode, GetOption('thunkroot'))
my_min, my_max = filenode.GetMinMax(releases)
if my_min > releases[-1] or my_max < releases[0]:
if os.path.isfile(savename):
print "Removing stale %s for this range." % filenode.GetName()
os.remove(os.path.realpath(savename))
return False
do_generate = filenode.GetProperty('generate_thunk')
if not do_generate:
return False
thunk_out = IDLOutFile(savename)
body, meta = self.GenerateBody(thunk_out, filenode, releases, options)
self.WriteHead(thunk_out, filenode, releases, options, meta)
thunk_out.Write('\n\n'.join(body))
self.WriteTail(thunk_out, filenode, releases, options)
return thunk_out.Close()
def WriteHead(self, out, filenode, releases, options, meta):
__pychecker__ = 'unusednames=options'
cgen = CGen()
cright_node = filenode.GetChildren()[0]
assert(cright_node.IsA('Copyright'))
out.Write('%s\n' % cgen.Copyright(cright_node, cpp_style=True))
# Wrap the From ... modified ... comment if it would be >80 characters.
from_text = 'From %s' % (
filenode.GetProperty('NAME').replace(os.sep,'/'))
modified_text = 'modified %s.' % (
filenode.GetProperty('DATETIME'))
if len(from_text) + len(modified_text) < 74:
out.Write('// %s %s\n\n' % (from_text, modified_text))
else:
out.Write('// %s,\n// %s\n\n' % (from_text, modified_text))
# TODO(teravest): Don't emit includes we don't need.
includes = ['ppapi/c/pp_errors.h',
'ppapi/shared_impl/tracked_callback.h',
'ppapi/thunk/enter.h',
'ppapi/thunk/ppb_instance_api.h',
'ppapi/thunk/resource_creation_api.h',
'ppapi/thunk/thunk.h']
includes.append(_GetHeaderFileName(filenode))
for api in meta.Apis():
includes.append('ppapi/thunk/%s.h' % api.lower())
for include in sorted(includes):
out.Write('#include "%s"\n' % include)
out.Write('\n')
out.Write('namespace ppapi {\n')
out.Write('namespace thunk {\n')
out.Write('\n')
out.Write('namespace {\n')
out.Write('\n')
def GenerateBody(self, out, filenode, releases, options):
"""Generates a member function lines to be written and metadata.
Returns a tuple of (body, meta) where:
body - a list of lines with member function bodies
meta - a ThunkMetadata instance for hinting which headers are needed.
"""
__pychecker__ = 'unusednames=options'
members = []
meta = ThunkBodyMetadata()
for node in filenode.GetListOf('Interface'):
# Skip if this node is not in this release
if not node.InReleases(releases):
print "Skipping %s" % node
continue
# Generate Member functions
if node.IsA('Interface'):
for child in node.GetListOf('Member'):
build_list = child.GetUniqueReleases(releases)
# We have to filter out releases this node isn't in.
build_list = filter(lambda r: child.InReleases([r]), build_list)
if len(build_list) == 0:
continue
release = build_list[-1] # Pick the newest release.
member = DefineMember(filenode, node, child, release, False, meta)
if not member:
continue
members.append(member)
for build in build_list[:-1]:
member = DefineMember(filenode, node, child, build, True, meta)
if not member:
continue
members.append(member)
return (members, meta)
def WriteTail(self, out, filenode, releases, options):
__pychecker__ = 'unusednames=options'
cgen = CGen()
version_list = []
out.Write('\n\n')
for node in filenode.GetListOf('Interface'):
build_list = node.GetUniqueReleases(releases)
for build in build_list:
version = node.GetVersion(build).replace('.', '_')
thunk_name = 'g_' + node.GetName().lower() + '_thunk_' + \
version
thunk_type = '_'.join((node.GetName(), version))
version_list.append((thunk_type, thunk_name))
out.Write('const %s %s = {\n' % (thunk_type, thunk_name))
for child in node.GetListOf('Member'):
rtype, name, arrays, args = cgen.GetComponents(
child, build, 'return')
if child.InReleases([build]): # TEST
out.Write(' &%s,\n' % name)
out.Write('};\n\n')
out.Write('} // namespace\n')
out.Write('\n')
for thunk_type, thunk_name in version_list:
thunk_decl = 'const %s* Get%s_Thunk() {\n' % (thunk_type, thunk_type)
if len(thunk_decl) > 80:
thunk_decl = 'const %s*\n Get%s_Thunk() {\n' % (thunk_type,
thunk_type)
out.Write(thunk_decl)
out.Write(' return &%s;\n' % thunk_name)
out.Write('}\n')
out.Write('\n')
out.Write('} // namespace thunk\n')
out.Write('} // namespace ppapi\n')
tgen = TGen()
def Main(args):
# Default invocation will verify the golden files are unchanged.
failed = 0
if not args:
args = ['--wnone', '--diff', '--test', '--thunkroot=.']
ParseOptions(args)
idldir = os.path.split(sys.argv[0])[0]
idldir = os.path.join(idldir, 'test_thunk', '*.idl')
filenames = glob.glob(idldir)
ast = ParseFiles(filenames)
if tgen.GenerateRange(ast, ['M13', 'M14'], {}):
print "Golden file for M13-M14 failed."
failed = 1
else:
print "Golden file for M13-M14 passed."
return failed
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
|
from __future__ import annotations
from dials.array_family import flex
class ValidatedMultiExpProfileModeller:
"""
A class to wrap profile modeller for validation
"""
def __init__(self, modellers):
"""
Init the list of MultiExpProfileModeller modellers
"""
self.modellers = modellers
self.finalized_modeller = None
def __getitem__(self, index):
"""
Get a modeller
"""
return self.modellers[index]
def model(self, reflections):
"""
Do the modelling for all modellers
"""
if "profile.index" not in reflections:
assert len(self.modellers) == 1
self.modellers[0].model(reflections)
else:
for i, modeller in enumerate(self.modellers):
mask = reflections["profile.index"] != i
indices = flex.size_t(range(len(mask))).select(mask)
if len(indices) > 0:
subsample = reflections.select(indices)
modeller.model(subsample)
reflections.set_selected(indices, subsample)
def validate(self, reflections):
"""
Do the validation.
"""
results = []
for i, modeller in enumerate(self.modellers):
            # score this modeller on its held-out fold: model() trains on the
            # complementary set, so validation must select == i here
            mask = reflections["profile.index"] == i
indices = flex.size_t(range(len(mask))).select(mask)
if len(indices) > 0:
subsample = reflections.select(indices)
modeller.validate(subsample)
reflections.set_selected(indices, subsample)
corr = subsample["profile.correlation"]
mean_corr = flex.mean(corr)
else:
mean_corr = None
results.append(mean_corr)
return results
def accumulate(self, other):
"""
Accumulate the modellers
"""
assert len(self) == len(other)
for ms, mo in zip(self, other):
ms.accumulate(mo)
def finalize(self):
"""
Finalize the model
"""
assert not self.finalized()
for m in self:
if self.finalized_modeller is None:
self.finalized_modeller = m.copy()
else:
self.finalized_modeller.accumulate(m)
m.finalize()
self.finalized_modeller.finalize()
def finalized(self):
"""
Check if the model has been finalized.
"""
return self.finalized_modeller is not None
def finalized_model(self):
"""
Get the finalized model
"""
        assert self.finalized()
return self.finalized_modeller
def __iter__(self):
"""
Iterate through the modellers
"""
yield from self.modellers
def __len__(self):
"""
Return the number of modellers
"""
return len(self.modellers)
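# Workflow sketch (added, illustrative): with k folds assigned through a
# 'profile.index' column, model() trains modeller i on every fold except i
# and validate() scores it on the held-out fold i. make_modeller() is a
# hypothetical factory for whatever single-experiment modeller is in use.
#
#   vmodeller = ValidatedMultiExpProfileModeller(
#       [make_modeller() for _ in range(k)])
#   vmodeller.model(reflections)
#   fold_scores = vmodeller.validate(reflections)  # mean correlation per fold
#   vmodeller.finalize()
#   final = vmodeller.finalized_model()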
|
def extractEnlnGenerasiNet(item):
'''
Parser for 'enLN.generasi.net'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
chp_prefixes = [
('imouto sae ireba ii', 'Imouto sae Ireba ii.', 'translated'),
('Cat ', 'Me and My Beloved Cat (Girlfriend)', 'translated'),
]
for prefix, series, tl_type in chp_prefixes:
if item['title'].lower().startswith(prefix.lower()):
return buildReleaseMessageWithType(item, series, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
"""Semi-automated code-level dependency tracking for python."""
from codedep.decorators import codeHash, codeDeps, ForwardRef, codedepEvalThunk
from codedep.compute import getHash
__version__ = '0.4.dev1'
|
from kapteyn import maputils
import numpy
from service import *
fignum = 34
fig = plt.figure(figsize=figsize)
frame = fig.add_axes(plotbox)
title = r"""Hammer Aitoff projection (AIT) oblique with:
$(\alpha_p,\delta_p) = (0^\circ,30^\circ)$, $\phi_p = 75^\circ$ also:
$(\phi_0,\theta_0) = (0^\circ,90^\circ)$. (Cal. fig.34d)"""
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---AIT',
'CRVAL1' : 0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -4.0,
'CTYPE2' : 'DEC--AIT',
'CRVAL2' : 30.0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 4.0,
          'LONPOLE' : 75.0,
'PV1_1' : 0.0, 'PV1_2' : 90.0, # IMPORTANT. This is a setting from Cal.section 7.1, p 1103
}
X = numpy.arange(0,390.0,15.0)
Y = numpy.arange(-75,90,15.0)
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(0,360),
startx=X, starty=Y)
grat.setp_lineswcs0(0, lw=2)
grat.setp_lineswcs1(0, lw=2)
header['CRVAL1'] = 0.0
header['CRVAL2'] = 0.0
del header['PV1_1']
del header['PV1_2']
header['LONPOLE'] = 0.0
header['LATPOLE'] = 0.0
border = annim.Graticule(header, axnum= (1,2), wylim=(-90,90.0), wxlim=(-180,180),
startx=(180-epsilon, -180+epsilon), skipy=True)
border.setp_lineswcs0(color='g') # Show borders in different color
border.setp_lineswcs1(color='g')
lon_world = list(range(0,360,30))
lat_world = [-60, -30, 30, 60]
labkwargs0 = {'color':'r', 'va':'center', 'ha':'center'}
labkwargs1 = {'color':'b', 'va':'center', 'ha':'center'}
doplot(frame, fignum, annim, grat, title,
lon_world=lon_world, lat_world=lat_world,
labkwargs0=labkwargs0, labkwargs1=labkwargs1,
markerpos=markerpos)
|
from __future__ import division
import numpy as np
from ..gloo import set_state, Texture2D
from ..color import get_colormap
from .shaders import ModularProgram, Function, FunctionChain
from .transforms import NullTransform
from .visual import Visual
from ..ext.six import string_types
VERT_SHADER = """
attribute vec2 a_position;
attribute vec2 a_texcoord;
varying vec2 v_texcoord;
void main() {
v_texcoord = a_texcoord;
gl_Position = $transform(vec4(a_position, 0., 1.));
}
"""
FRAG_SHADER = """
uniform sampler2D u_texture;
varying vec2 v_texcoord;
void main()
{
vec2 texcoord = $map_uv_to_tex(vec4(v_texcoord, 0, 1)).xy;
if(texcoord.x < 0.0 || texcoord.x > 1.0 ||
texcoord.y < 0.0 || texcoord.y > 1.0) {
discard;
}
gl_FragColor = $color_transform(texture2D(u_texture, texcoord));
}
""" # noqa
_null_color_transform = 'vec4 pass(vec4 color) { return color; }'
_c2l = 'float cmap(vec4 color) { return (color.r + color.g + color.b) / 3.; }'
class ImageVisual(Visual):
"""Visual subclass displaying an image.
Parameters
----------
data : ndarray
ImageVisual data. Can be shape (M, N), (M, N, 3), or (M, N, 4).
method : str
Selects method of rendering image in case of non-linear transforms.
Each method produces similar results, but may trade efficiency
and accuracy. If the transform is linear, this parameter is ignored
and a single quad is drawn around the area of the image.
* 'auto': Automatically select 'impostor' if the image is drawn
with a nonlinear transform; otherwise select 'subdivide'.
* 'subdivide': ImageVisual is represented as a grid of triangles
with texture coordinates linearly mapped.
* 'impostor': ImageVisual is represented as a quad covering the
entire view, with texture coordinates determined by the
transform. This produces the best transformation results, but may
be slow.
grid: tuple (rows, cols)
If method='subdivide', this tuple determines the number of rows and
columns in the image grid.
cmap : str | ColorMap
Colormap to use for luminance images.
clim : str | tuple
Limits to use for the colormap. Can be 'auto' to auto-set bounds to
the min and max of the data.
**kwargs : dict
Keyword arguments to pass to `Visual`.
Notes
-----
The colormap functionality through ``cmap`` and ``clim`` are only used
if the data are 2D.
"""
def __init__(self, data=None, method='auto', grid=(10, 10),
cmap='cubehelix', clim='auto', **kwargs):
super(ImageVisual, self).__init__(**kwargs)
self._program = ModularProgram(VERT_SHADER, FRAG_SHADER)
self.clim = clim
self.cmap = cmap
self._data = None
self._texture = None
self._interpolation = 'nearest'
if data is not None:
self.set_data(data)
self._method = method
self._method_used = None
self._grid = grid
self._need_vertex_update = True
def set_data(self, image):
"""Set the data
Parameters
----------
image : array-like
The image data.
"""
data = np.asarray(image)
if self._data is None or self._data.shape != data.shape:
self._need_vertex_update = True
self._data = data
self._texture = None
@property
def clim(self):
return (self._clim if isinstance(self._clim, string_types) else
tuple(self._clim))
@clim.setter
def clim(self, clim):
if isinstance(clim, string_types):
if clim != 'auto':
raise ValueError('clim must be "auto" if a string')
else:
clim = np.array(clim, float)
if clim.shape != (2,):
raise ValueError('clim must have two elements')
self._clim = clim
self._need_vertex_update = True
self.update()
@property
def cmap(self):
return self._cmap
@cmap.setter
def cmap(self, cmap):
self._cmap = get_colormap(cmap)
self.update()
@property
def method(self):
return self._method
@method.setter
def method(self, m):
if self._method != m:
self._method = m
self._need_vertex_update = True
self.update()
@property
def size(self):
return self._data.shape[:2][::-1]
def _build_vertex_data(self, transforms):
method = self._method
grid = self._grid
if method == 'auto':
if transforms.get_full_transform().Linear:
method = 'subdivide'
grid = (1, 1)
else:
method = 'impostor'
self._method_used = method
# TODO: subdivision and impostor modes should be handled by new
# components?
if method == 'subdivide':
# quads cover area of image as closely as possible
w = 1.0 / grid[1]
h = 1.0 / grid[0]
quad = np.array([[0, 0, 0], [w, 0, 0], [w, h, 0],
[0, 0, 0], [w, h, 0], [0, h, 0]],
dtype=np.float32)
quads = np.empty((grid[1], grid[0], 6, 3), dtype=np.float32)
quads[:] = quad
mgrid = np.mgrid[0.:grid[1], 0.:grid[0]].transpose(1, 2, 0)
mgrid = mgrid[:, :, np.newaxis, :]
mgrid[..., 0] *= w
mgrid[..., 1] *= h
quads[..., :2] += mgrid
tex_coords = quads.reshape(grid[1]*grid[0]*6, 3)
tex_coords = np.ascontiguousarray(tex_coords[:, :2])
vertices = tex_coords * self.size
# vertex shader provides correct texture coordinates
self._program.frag['map_uv_to_tex'] = NullTransform()
elif method == 'impostor':
# quad covers entire view; frag. shader will deal with image shape
vertices = np.array([[-1, -1], [1, -1], [1, 1],
[-1, -1], [1, 1], [-1, 1]],
dtype=np.float32)
tex_coords = vertices
# vertex shader provides ND coordinates;
# fragment shader maps to texture coordinates
self._program.vert['transform'] = NullTransform()
self._raycast_func = Function('''
vec4 map_local_to_tex(vec4 x) {
// Cast ray from 3D viewport to surface of image
// (if $transform does not affect z values, then this
// can be optimized as simply $transform.map(x) )
vec4 p1 = $transform(x);
vec4 p2 = $transform(x + vec4(0, 0, 0.5, 0));
p1 /= p1.w;
p2 /= p2.w;
vec4 d = p2 - p1;
float f = p2.z / d.z;
vec4 p3 = p2 - d * f;
// finally map local to texture coords
return vec4(p3.xy / $image_size, 0, 1);
}
''')
self._raycast_func['image_size'] = self.size
self._program.frag['map_uv_to_tex'] = self._raycast_func
else:
raise ValueError("Unknown image draw method '%s'" % method)
self._program['a_position'] = vertices.astype(np.float32)
self._program['a_texcoord'] = tex_coords.astype(np.float32)
self._need_vertex_update = False
def _build_texture(self):
data = self._data
if data.dtype == np.float64:
data = data.astype(np.float32)
if data.ndim == 2 or data.shape[2] == 1:
# deal with clim on CPU b/c of texture depth limits :(
# can eventually do this by simulating 32-bit float... maybe
clim = self._clim
if isinstance(clim, string_types) and clim == 'auto':
clim = np.min(data), np.max(data)
clim = np.asarray(clim, dtype=np.float32)
data = data - clim[0] # not inplace so we don't modify orig data
if clim[1] - clim[0] > 0:
data /= clim[1] - clim[0]
else:
data[:] = 1 if data[0, 0] != 0 else 0
fun = FunctionChain(None, [Function(_c2l),
Function(self.cmap.glsl_map)])
self._clim = np.array(clim)
else:
fun = Function(_null_color_transform)
self._program.frag['color_transform'] = fun
self._texture = Texture2D(data, interpolation=self._interpolation)
self._program['u_texture'] = self._texture
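    # For illustration: with clim=(10.0, 20.0), a luminance value of 15.0 is
    # normalized above to (15.0 - 10.0) / (20.0 - 10.0) = 0.5 before the
    # colormap lookup is applied.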
def bounds(self, mode, axis):
"""Get the bounds
Parameters
----------
mode : str
Describes the type of boundary requested. Can be "visual", "data",
or "mouse".
axis : 0, 1, 2
The axis along which to measure the bounding values, in
x-y-z order.
"""
if axis > 1:
return (0, 0)
else:
return (0, self.size[axis])
def draw(self, transforms):
"""Draw the visual
Parameters
----------
transforms : instance of TransformSystem
The transforms to use.
"""
if self._data is None:
return
set_state(cull_face=False)
        # upload texture if needed
if self._texture is None:
self._build_texture()
# rebuild vertex buffers if needed
if self._need_vertex_update:
self._build_vertex_data(transforms)
# update transform
method = self._method_used
if method == 'subdivide':
self._program.vert['transform'] = transforms.get_full_transform()
else:
self._raycast_func['transform'] = \
transforms.get_full_transform().inverse
self._program.draw('triangles')
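# --- illustrative sketch, not part of ImageVisual ---------------------------
# The 'subdivide' branch of _build_vertex_data tiles the unit square with
# grid[0] x grid[1] quads (two triangles each) whose texture coordinates map
# linearly onto the image. A numpy-only reproduction of that construction,
# assuming a 2x2 grid:
if __name__ == '__main__':
    import numpy as np
    grid = (2, 2)
    w, h = 1.0 / grid[1], 1.0 / grid[0]
    quad = np.array([[0, 0, 0], [w, 0, 0], [w, h, 0],
                     [0, 0, 0], [w, h, 0], [0, h, 0]], dtype=np.float32)
    quads = np.empty((grid[1], grid[0], 6, 3), dtype=np.float32)
    quads[:] = quad
    mgrid = np.mgrid[0.:grid[1], 0.:grid[0]].transpose(1, 2, 0)[:, :, np.newaxis, :]
    mgrid[..., 0] *= w
    mgrid[..., 1] *= h
    quads[..., :2] += mgrid
    tex_coords = quads.reshape(grid[1] * grid[0] * 6, 3)[:, :2]
    print(tex_coords.shape)  # (24, 2): six (u, v) pairs per quad, four quads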
|
import socket
addr = '169.254.38.91'
port = 7890
sock = socket.socket()
def r():
    # receive up to 1 KB from the socket and print it
    print "recv'd: " + sock.recv(1024)
def s(m):
    # send the message terminated with CRLF
    sock.sendall(m + "\r\n")
def g(m):
    # send a command, then print the two response lines
    s(m)
    r()
    r()
if __name__ == '__main__':
    sock.connect((addr, port))
r()
r()
print "setup done"
#msg = "dll?lib=msvcrt.dll&func=_wfopen&arg0=README.txt&type0=str&arg1=r&type1=str"
msg = "dll?lib=msvcrt.dll&func=_waccess&arg0=READMYE.txt&type0=str&arg1=0&type1=int"
#msg = "ls\r\n"
g(msg)
# g(msg)
sock.close()
|
import csv
from cStringIO import StringIO
from django.conf import settings
from django.core import mail
from django.core.cache import cache
from django.core.urlresolvers import reverse
from nose.tools import eq_
import mkt
import mkt.site.tests
from mkt.access.models import Group, GroupUser
from mkt.reviewers.models import RereviewQueue
from mkt.site.fixtures import fixture
from mkt.users.models import UserProfile
from mkt.webapps.models import AddonDeviceType, Webapp
from ..forms import DevMailerForm
from ..models import EmailPreviewTopic
class TestEmailPreview(mkt.site.tests.TestCase):
fixtures = fixture('user_admin', 'group_admin', 'user_admin_group',
'webapp_337141')
def setUp(self):
self.login('admin@mozilla.com')
addon = Webapp.objects.get(pk=337141)
self.topic = EmailPreviewTopic(addon)
def test_csv(self):
self.topic.send_mail('the subject', u'Hello Ivan Krsti\u0107',
from_email='admin@mozilla.org',
recipient_list=['funnyguy@mozilla.org'])
r = self.client.get(reverse('zadmin.email_preview_csv',
args=[self.topic.topic]))
eq_(r.status_code, 200)
rdr = csv.reader(StringIO(r.content))
eq_(rdr.next(), ['from_email', 'recipient_list', 'subject', 'body'])
eq_(rdr.next(), ['admin@mozilla.org', 'funnyguy@mozilla.org',
'the subject', 'Hello Ivan Krsti\xc4\x87'])
class TestMemcache(mkt.site.tests.TestCase):
fixtures = fixture('user_admin', 'group_admin', 'user_admin_group')
def setUp(self):
self.url = reverse('zadmin.memcache')
cache.set('foo', 'bar')
self.login('admin@mozilla.com')
def test_login(self):
self.client.logout()
eq_(self.client.get(self.url).status_code, 302)
def test_can_clear(self):
self.client.post(self.url, {'yes': 'True'})
eq_(cache.get('foo'), None)
def test_cant_clear(self):
self.client.post(self.url, {'yes': 'False'})
eq_(cache.get('foo'), 'bar')
class TestElastic(mkt.site.tests.ESTestCase):
fixtures = fixture('user_admin', 'group_admin', 'user_admin_group')
def setUp(self):
self.url = reverse('zadmin.elastic')
self.login('admin@mozilla.com')
def test_login(self):
self.client.logout()
self.assert3xx(
self.client.get(self.url),
reverse('users.login') + '?to=/admin/elastic')
class TestEmailDevs(mkt.site.tests.TestCase):
fixtures = fixture('user_admin', 'group_admin', 'user_admin_group',
'webapp_337141')
def setUp(self):
self.login('admin')
self.addon = Webapp.objects.get(pk=337141)
def post(self, recipients=None, subject='subject', message='msg',
preview_only=False):
return self.client.post(reverse('zadmin.email_devs'),
dict(recipients=recipients, subject=subject,
message=message,
preview_only=preview_only))
def test_preview(self):
self.addon.update(premium_type=mkt.ADDON_PREMIUM)
res = self.post(recipients='payments', preview_only=True)
self.assertNoFormErrors(res)
preview = EmailPreviewTopic(topic='email-devs')
eq_([e.recipient_list for e in preview.filter()],
['steamcube@mozilla.com'])
eq_(len(mail.outbox), 0)
def test_only_apps_with_payments(self):
self.addon.update(premium_type=mkt.ADDON_PREMIUM)
res = self.post(recipients='payments')
self.assertNoFormErrors(res)
eq_(len(mail.outbox), 1)
mail.outbox = []
self.addon.update(status=mkt.STATUS_PENDING)
res = self.post(recipients='payments')
self.assertNoFormErrors(res)
eq_(len(mail.outbox), 1)
mail.outbox = []
self.addon.update(status=mkt.STATUS_DELETED)
res = self.post(recipients='payments')
self.assertNoFormErrors(res)
eq_(len(mail.outbox), 0)
def test_only_free_apps_with_new_regions(self):
self.addon.update(enable_new_regions=False)
res = self.post(recipients='free_apps_region_enabled')
self.assertNoFormErrors(res)
eq_(len(mail.outbox), 0)
mail.outbox = []
res = self.post(recipients='free_apps_region_disabled')
self.assertNoFormErrors(res)
eq_(len(mail.outbox), 1)
mail.outbox = []
self.addon.update(enable_new_regions=True)
res = self.post(recipients='free_apps_region_enabled')
self.assertNoFormErrors(res)
eq_(len(mail.outbox), 1)
mail.outbox = []
res = self.post(recipients='free_apps_region_disabled')
self.assertNoFormErrors(res)
eq_(len(mail.outbox), 0)
def test_only_apps_with_payments_and_new_regions(self):
self.addon.update(enable_new_regions=False)
self.addon.update(premium_type=mkt.ADDON_PREMIUM)
res = self.post(recipients='payments_region_enabled')
self.assertNoFormErrors(res)
eq_(len(mail.outbox), 0)
mail.outbox = []
res = self.post(recipients='payments_region_disabled')
self.assertNoFormErrors(res)
eq_(len(mail.outbox), 1)
mail.outbox = []
self.addon.update(enable_new_regions=True)
res = self.post(recipients='payments_region_enabled')
self.assertNoFormErrors(res)
eq_(len(mail.outbox), 1)
mail.outbox = []
res = self.post(recipients='payments_region_disabled')
self.assertNoFormErrors(res)
eq_(len(mail.outbox), 0)
def test_only_desktop_apps(self):
AddonDeviceType.objects.create(addon=self.addon,
device_type=mkt.DEVICE_MOBILE.id)
res = self.post(recipients='desktop_apps')
self.assertNoFormErrors(res)
eq_(len(mail.outbox), 0)
mail.outbox = []
AddonDeviceType.objects.create(addon=self.addon,
device_type=mkt.DEVICE_DESKTOP.id)
res = self.post(recipients='desktop_apps')
self.assertNoFormErrors(res)
eq_(len(mail.outbox), 1)
mail.outbox = []
self.addon.update(status=mkt.STATUS_PENDING)
res = self.post(recipients='desktop_apps')
self.assertNoFormErrors(res)
eq_(len(mail.outbox), 1)
mail.outbox = []
self.addon.update(status=mkt.STATUS_DELETED)
res = self.post(recipients='desktop_apps')
self.assertNoFormErrors(res)
eq_(len(mail.outbox), 0)
def test_only_apps(self):
res = self.post(recipients='apps')
self.assertNoFormErrors(res)
eq_(len(mail.outbox), 1)
def test_ignore_deleted_always(self):
self.addon.update(status=mkt.STATUS_DELETED)
for name, label in DevMailerForm._choices:
res = self.post(recipients=name)
self.assertNoFormErrors(res)
eq_(len(mail.outbox), 0)
def test_exclude_pending_for_addons(self):
self.addon.update(status=mkt.STATUS_PENDING)
for name, label in DevMailerForm._choices:
if name in ('payments', 'desktop_apps'):
continue
res = self.post(recipients=name)
self.assertNoFormErrors(res)
eq_(len(mail.outbox), 0)
class TestPerms(mkt.site.tests.TestCase):
fixtures = fixture('user_admin', 'group_admin', 'user_admin_group',
'user_999')
def test_admin_user(self):
# Admin should see views with Django's perm decorator and our own.
self.login('admin@mozilla.com')
eq_(self.client.get(reverse('zadmin.index')).status_code, 200)
eq_(self.client.get(reverse('zadmin.settings')).status_code, 200)
def test_staff_user(self):
# Staff users have some privileges.
user = UserProfile.objects.get(email='regular@mozilla.com')
group = Group.objects.create(name='Staff', rules='AdminTools:View')
GroupUser.objects.create(group=group, user=user)
self.login('regular@mozilla.com')
eq_(self.client.get(reverse('zadmin.index')).status_code, 200)
eq_(self.client.get(reverse('zadmin.settings')).status_code, 200)
def test_sr_reviewers_user(self):
# Sr Reviewers users have only a few privileges.
user = UserProfile.objects.get(email='regular@mozilla.com')
group = Group.objects.create(name='Sr Reviewer',
rules='ReviewerAdminTools:View')
GroupUser.objects.create(group=group, user=user)
self.login('regular@mozilla.com')
eq_(self.client.get(reverse('zadmin.index')).status_code, 200)
eq_(self.client.get(reverse('zadmin.settings')).status_code, 403)
def test_unprivileged_user(self):
# Unprivileged user.
self.login('regular@mozilla.com')
eq_(self.client.get(reverse('zadmin.index')).status_code, 403)
eq_(self.client.get(reverse('zadmin.settings')).status_code, 403)
        # Anonymous users should be redirected to the login page.
self.client.logout()
self.assert3xx(
self.client.get(reverse('zadmin.index')),
reverse('users.login') + '?to=/admin/')
class TestHome(mkt.site.tests.TestCase):
fixtures = fixture('user_admin', 'group_admin', 'user_admin_group')
def setUp(self):
self.login('admin@mozilla.com')
def test_home(self):
# Test that the admin home page (which is AMO) can still be loaded
# from Marketplace without exceptions.
res = self.client.get(reverse('zadmin.index'))
eq_(res.status_code, 200)
class TestGenerateError(mkt.site.tests.TestCase):
fixtures = fixture('user_admin', 'group_admin', 'user_admin_group')
def setUp(self):
self.login('admin@mozilla.com')
heka = settings.HEKA
HEKA_CONF = {
'logger': 'zamboni',
'plugins': {'cef': ('heka_cef.cef_plugin:config_plugin',
{'override': True})},
'stream': {'class': 'heka.streams.DebugCaptureStream'},
'encoder': 'heka.encoders.NullEncoder',
}
from heka.config import client_from_dict_config
self.heka = client_from_dict_config(HEKA_CONF, heka)
self.heka.stream.msgs.clear()
def test_heka_statsd(self):
self.url = reverse('zadmin.generate-error')
self.client.post(self.url,
{'error': 'heka_statsd'})
eq_(len(self.heka.stream.msgs), 1)
msg = self.heka.stream.msgs[0]
eq_(msg.severity, 6)
eq_(msg.logger, 'zamboni')
eq_(msg.payload, '1')
eq_(msg.type, 'counter')
rate = [f for f in msg.fields if f.name == 'rate'][0]
name = [f for f in msg.fields if f.name == 'name'][0]
eq_(rate.value_double, [1.0])
eq_(name.value_string, ['z.zadmin'])
def test_heka_json(self):
self.url = reverse('zadmin.generate-error')
self.client.post(self.url,
{'error': 'heka_json'})
eq_(len(self.heka.stream.msgs), 1)
msg = self.heka.stream.msgs[0]
eq_(msg.type, 'heka_json')
eq_(msg.logger, 'zamboni')
foo = [f for f in msg.fields if f.name == 'foo'][0]
secret = [f for f in msg.fields if f.name == 'secret'][0]
eq_(foo.value_string, ['bar'])
eq_(secret.value_integer, [42])
def test_heka_cef(self):
self.url = reverse('zadmin.generate-error')
self.client.post(self.url,
{'error': 'heka_cef'})
eq_(len(self.heka.stream.msgs), 1)
msg = self.heka.stream.msgs[0]
eq_(msg.type, 'cef')
eq_(msg.logger, 'zamboni')
class TestManifestRevalidation(mkt.site.tests.TestCase):
fixtures = fixture('user_admin', 'group_admin', 'user_admin_group',
'webapp_337141', 'user_999')
def setUp(self):
self.url = reverse('zadmin.manifest_revalidation')
def _test_revalidation(self):
current_count = RereviewQueue.objects.count()
response = self.client.post(self.url)
eq_(response.status_code, 200)
self.assertTrue('Manifest revalidation queued' in response.content)
        eq_(RereviewQueue.objects.count(), current_count + 1)
def test_revalidation_by_reviewers(self):
# Sr Reviewers users should be able to use the feature.
user = UserProfile.objects.get(email='regular@mozilla.com')
self.grant_permission(user, 'ReviewerAdminTools:View')
self.login('regular@mozilla.com')
self._test_revalidation()
def test_revalidation_by_admin(self):
# Admin users should be able to use the feature.
self.login('admin@mozilla.com')
self._test_revalidation()
    def test_unprivileged_user(self):
# Unprivileged user should not be able to reach the feature.
self.login('regular@mozilla.com')
eq_(self.client.post(self.url).status_code, 403)
|
"""
amnonscript
heatsequer
supercooldb.py
the sql cooldb implementation
"""
__version__ = "0.1"
from ..utils.amnonutils import Debug,dictupper,listupper,delete
from ..utils.oboparse import Parser
from ..utils.ontologygraph import ontologysubtreeids,ontologytotree,getnodeparents,ontotreetonames
from ..experiment.expclass import getheatsequerdir
import numpy as np
import matplotlib.pyplot as plt
import csv
import sqlite3
from collections import defaultdict
from pdb import set_trace as XXX
import datetime
import pickle
import requests
import os
class scdbstruct:
def __init__(self):
# the filename used for the database
self.dbfile=''
# the ontology dict (key is name and value is id)
self.ontology={}
# the ontology from id dict (key is id, and value is first name with this id) - used for save
self.ontologyfromid={}
# the dict of ontology graphs (load with loadontotree)
self.ontodict={}
# the names of the ontology files used:
        self.ontologyfiles=['/Users/amnon/Databases/ontologies/doid.obo',
                            '/Users/amnon/Databases/ontologies/envo.obo',
                            '/Users/amnon/Databases/ontologies/uberon.obo',
                            '/Users/amnon/Databases/ontologies/efo.obo',
                            '/Users/amnon/Databases/ontologies/po.obo',
                            '/Users/amnon/Databases/ontologies/gaz.obo']
# the database server url
# self.dburl='http://localhost:5000'
self.dburl='http://amnonim.webfactional.com/scdb_main'
def addontology(scdb,ontology,ontoprefix='',namelist={}):
"""
add an obo ontology file to scdb
input:
scdb : scdbstruct
from scdbstart()
ontology : str
name of the obo ontology file to add
ontoprefix : str
the ontology prefix (i.e. ENVO) to show at end of each string, or '' for autodetect (default)
namelist : dict of ids
if non-empty, keep only items with ids from namelist (get namelist from ontologysubtreeids() )
output:
scdb :scdbstruct
with the new ontology items added to scdb.ontology dict
"""
parser=Parser(open(ontology))
for citem in parser:
cid=citem.tags["id"][0]
if len(ontoprefix)==0:
tt=cid.split(':')
if len(tt)>1:
ontoprefix=tt[0]
Debug(2,'Found ontology prefix %s' % ontoprefix)
if namelist:
if cid not in namelist:
continue
names=[]
if "name" in citem.tags:
names.extend(citem.tags["name"])
origname=citem.tags["name"][0]
scdb.ontologyfromid[cid]=origname
else:
Debug(6,"ontology item id %s does not have a name" % cid)
origname="NA"
if "synonym" in citem.tags:
names.extend(citem.tags["synonym"])
for cname in names:
Debug(1,"%s %s" % (cname,cid))
oname=cname+' :'+ontoprefix
if cname!=origname:
oname+='('+origname+')'
if oname in scdb.ontology:
Debug(1,"name %s id %s already in ontology list for id %s" % (oname,cid,scdb.ontology[oname]))
scdb.ontology[oname]=cid
return scdb
def loaddbonto(db,ontofile=None,ontofromidfile=None):
"""
load the pickled ontologies to the scdb structure
input:
db : from scdbstart
ontofile : str
name of the ontologies term file (from saveontologies) or None for default location
ontofromidfile : str
name of the ontologies reverse dict file (from saveontologies) or None for default location
output:
db : scdbstruct
with the loaded ontologies fields
"""
if ontofile is None:
ontofile=os.path.join(getheatsequerdir(),'db/ontology.pickle')
if ontofromidfile is None:
ontofromidfile=os.path.join(getheatsequerdir(),'db/ontologyfromid.pickle')
Debug(6,'loading ontology pickles')
Debug(6,'Files %s and %s' % (ontofile,ontofromidfile))
db.ontology=pickle.load(open(ontofile,'rb'))
db.ontologyfromid=pickle.load(open(ontofromidfile,'rb'))
Debug(6,'ontologies loaded')
return db
def loadontologies(scdb,pickleit=True,ontologies=[]):
"""
load the ontologies into the scdb class
input:
scdb : scdbstruct
from scdbstart()
pickleit : bool
True (default) to pickle the loaded ontologies to default location, False to not pickle
    ontologies : list of str
names of the obo ontology files to load or empty to use the default files (from scdb.ontologyfiles)
"""
if not ontologies:
ontologies=scdb.ontologyfiles
scdb.ontology={}
scdb.ontologyfromid={}
for contology in ontologies:
addontology(scdb,contology)
if pickleit:
saveontologies(scdb)
def saveontologies(scdb,ontofile=None,ontofromidfile=None):
"""
save the ontologies to pickle files.
use after loadontologies()
"""
if ontofile is None:
ontofile=os.path.join(getheatsequerdir(),'db/ontology.pickle')
if ontofromidfile is None:
ontofromidfile=os.path.join(getheatsequerdir(),'db/ontologyfromid.pickle')
fl=open(ontofile,'wb')
pickle.dump(scdb.ontology,fl,protocol=2)
fl.close()
fl=open(ontofromidfile,'wb')
pickle.dump(scdb.ontologyfromid,fl,protocol=2)
fl.close()
def dbstart(dbname="db/supercooldb.db"):
'''
    start the database structure and set up the server connection info
    input:
    dbname : string
        the name of the local database file (currently unused; the server
        url comes from scdbstruct.dburl)
output:
db : dbstruct
the database variable
'''
scdb=scdbstruct()
return scdb
def addexpdata(db,data,studyid=None):
"""
add new data entries (for a new study)
input:
db : scdbstruct
from dbstart()
data : list of tuples (Type:Value)
a list of tuples of (Type,Value) to add to Data table (i.e. ("PUBMEDID","322455") etc)
    studyid : int or None
        the id of an existing study to add the details to (from finddataid),
        or None to create a new study
output:
suid : int
the value of DataID for the new study (from Data table)
"""
# we need to get a new identifier for all entries in the study
# there should be a more elegant way to do it
Debug(2,"addexpdata for %d enteries" % len(data))
if studyid is None:
# add new study
Debug(2,"addexpdata for a new study")
else:
Debug(2,'addexpdata for existing study %d' % studyid)
rdata={}
rdata['expId']=studyid
rdata['details']=data
res=requests.post(db.dburl+'/experiments/add_details',json=rdata)
if res.status_code==200:
newid=res.json()['expId']
Debug(2,'experiment added. id is %d' % newid)
return newid
else:
Debug(8,'error adding experiment. msg: %s' % res.content)
return None
def addannotations(db,expid,sequences,annotationtype,annotations,submittername='NA',description='',method='',primerid=0,agenttype='HeatSequer',private='n'):
"""
Add a new manual curation to the database
input:
db : scdbstruct
from dbstart()
expid : int or dict of (Type:Value)
if int - the value of DataID from Data table, otherwise a list of (Type,Value) tuples to add to Data table
sequences : list of ACGT
the sequences to curate
annotationtype : str
the curation type (COMMON,DIFFEXP,CONTAM,HIGHFREQ,PATHOGEN)
annotations : list of Type,Value
The curations to add to the CurationList table (Type,Value)
submittername : str
Name of the submitter (first,last) or NA
description : str
text description of the curation entry (i.e. "lower in whole wheat pita bread")
method : str
text description of how the curation was detected - only if needed
primerid : int
the PrimerID from Primers table of the sequences (usually 1 - the V4 515F,806R)
agenttype : str
the program submitting the curation
private : str (optional)
'n' (default) or 'y'
output:
curationid : int
the CurationID (in Curations table) of the new curation, or 0 if not added
data : int
the DataID from Data table
"""
Debug(2,"addannotation - %d sequences" % len(sequences))
if len(sequences)==0:
Debug(6,"No sequences to annotate!")
return 0,0
if len(annotations)==0:
Debug(6,"No annotations to add. still adding...")
if not type(expid) is int:
Debug(6,"looking for studyid %s in data" % expid)
expid=addexpdata(db,expid)
if expid is None:
Debug(8,'problem adding new experiment data')
return 0,0
# add the curation
rdata={}
rdata['expId']=expid
rdata['sequences']=sequences
rdata['region']=primerid
rdata['annotationType']=annotationtype
rdata['method']=method
rdata['agentType']=agenttype
rdata['description']=description
rdata['private']=private
rdata['annotationList']=annotations
res=requests.post(db.dburl+'/annotations/add',json=rdata)
if res.status_code==200:
newid=res.json()['annotationId']
Debug(1,"Finished adding experiment id %d annotationid %d" % (expid,newid))
return res,newid
Debug(8,'problem adding annotations for experiment id %d' % expid)
Debug(8,res.content)
return 0,0
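# Illustrative call of addannotations (a sketch, not executed here; assumes a
# reachable scdb server at db.dburl, and all literal values below are made up):
#
#   db = dbstart()
#   res, annotationid = addannotations(
#       db, expid=[('DataMD5', 'abc123')],
#       sequences=['TACGGAGGATGCGAGCGTTA'],
#       annotationtype='DIFFEXP',
#       annotations=[('HIGH', 'feces'), ('LOW', 'saliva')],
#       description='higher in feces compared to saliva')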
def finddataid(db,datamd5='',mapmd5='',getall=False):
"""
find the data id for the data/map md5 (which are calculated on load)
note the md5s don't change following filtering/normalization/etc... - only the original data
input:
scdb : from startdb()
datamd5 : str
from Experiment.datamd5
mapmd5 : str
from Experiment.mapmd5
getall : bool (optional)
False (default) to get only 1st id, True to get a list of all
output:
expids: int (if getall=False - default) or list of int (if getall=True)
an id or a list of ids of matching dataID indices (or None if no match)
"""
Debug(1,'findexpid for datamd5 %s mapmd5 %s' % (datamd5,mapmd5))
details=[]
if datamd5:
details.append(['DataMD5',datamd5])
if mapmd5:
details.append(['MapMD5',mapmd5])
if len(details)==0:
Debug(6,'Error. MapMD5 and DataMD5 both missing from finddataid')
return None
rdata={}
rdata['details']=details
res=requests.get(db.dburl+'/experiments/get_id',json=rdata)
if res.status_code==200:
expids=res.json()['expId']
if not getall:
if len(expids)>1:
Debug(6,'Problem. Found %d matches for data' % len(expids))
Debug(2,'Found study id %d' % expids[0])
return expids[0]
Debug(2,"Found %d matches to data" % len(expids))
return expids
Debug(8,'Error getting expid from details')
return None
def getexperimentinfo(db,expid):
"""
get the information about a given study dataid
input:
db : from dbstart()
dataid : int
The dataid on the study (DataID field)
output:
info : list of (str,str,str)
list of tuples for each entry in the study:
type,value,descstring about dataid
empty if dataid not found
"""
Debug(1,'get experiment details for expid %d' % expid)
rdata={}
rdata['expId']=expid
res=requests.get(db.dburl+'/experiments/get_details',json=rdata)
if res.status_code==200:
details=res.json()['details']
Debug(2,'Found %d details for experiment %d' % (len(details),expid))
return details
return []
def getexpannotations(db,expid):
"""
get the list of annotations for study studyid
input:
db : from dbstart()
expid : int
The dataid of the study
output:
info: list of str
the list of curations for this study (1 item per curation)
"""
Debug(1,'get experiment annotations for expid %d' % expid)
rdata={}
rdata['expId']=expid
res=requests.get(db.dburl+'/experiments/get_annotations',json=rdata)
if res.status_code!=200:
Debug(6,'error getting annotations for experiment %d' % expid)
return []
annotations=res.json()['annotations']
Debug(2,'Found %d annotations for experiment %d' % (len(annotations),expid))
# make it into a nice list of str
info=[]
for cann in annotations:
cstr='date:%s description:%s user:%s private:%s' % (cann['date'],cann['description'],cann['userid'],cann['private'])
info.append(cstr)
return info
def getannotationseqs(db,annotationid):
"""
get the list of sequences to which an annotation relates
input:
db : from dbstart()
annotationid : int
        the unique id of the annotation (annotationid in the annotation details)
output:
seqids: list of int
list of sequences to which the annotation is attached
"""
Debug(1,'get annotationseqs for annotationid %d' % annotationid)
rdata={}
rdata['annotationid']=annotationid
res=requests.get(db.dburl+'/annotations/get_sequences',json=rdata)
if res.status_code!=200:
Debug(6,'error getting sequences for annotation %d' % annotationid)
Debug(6,res.content)
return []
seqids=res.json()['seqids']
Debug(2,'Found %d sequences for annotationid %d' % (len(seqids),annotationid))
return seqids
def getseqannotations(db,sequence):
"""
Get the manual curations for a sequence
input:
db : from scdbstart()
sequence : str (ACGT)
output:
curs : list of list of (curation dict,list of [Type,Value] of curation details)
"""
Debug(1,'get sequence annotations for sequence %s' % sequence)
rdata={}
rdata['sequence']=sequence
    Debug(1,'querying %s' % (db.dburl+'/sequences/get_annotations'))
res=requests.get(db.dburl+'/sequences/get_annotations',json=rdata)
if res.status_code!=200:
Debug(6,'error getting annotations for sequence %s' % sequence)
return []
    Debug(1,res.json())
annotations=res.json()['annotations']
Debug(2,'Found %d annotations for sequence %s' % (len(annotations),sequence))
return annotations
def getannotationstrings(db,sequence):
"""
get a nice string summary of a curation
input:
db : from scdbstart()
sequence : str (ACGT)
output:
shortdesc : list of (dict,str) (annotationdetails,annotationsummary)
a list of:
annotationdetails : dict
'annotationid' : int, the annotation id in the database
'annotationtype : str
...
annotationsummary : str
a short summary of the annotation
"""
shortdesc=[]
annotations=getseqannotations(db,sequence)
for cann in annotations:
annotationdetails=cann
cdesc=''
if cann['description']:
cdesc+=cann['description']+' ('
if cann['annotationtype']=='diffexp':
chigh=[]
clow=[]
call=[]
for cdet in cann['details']:
if cdet[0]=='all':
call.append(cdet[1])
continue
if cdet[0]=='low':
clow.append(cdet[1])
continue
if cdet[0]=='high':
chigh.append(cdet[1])
continue
cdesc+=' high in '
for cval in chigh:
cdesc+=cval+' '
cdesc+=' compared to '
for cval in clow:
cdesc+=cval+' '
cdesc+=' in '
for cval in call:
cdesc+=cval+' '
elif cann['annotationtype']=='isa':
cdesc+=' is a '
for cdet in cann['details']:
                cdesc+=cdet[1]+','
elif cann['annotationtype']=='contamination':
cdesc+='contamination'
else:
cdesc+=cann['annotationtype']+' '
for cdet in cann['details']:
cdesc=cdesc+' '+cdet[1]+','
shortdesc.append( (annotationdetails,cdesc) )
return shortdesc
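# For illustration: a 'diffexp' annotation whose details are
# [['high', 'feces'], ['low', 'saliva']] is summarized by the loop above as
# "... high in feces  compared to saliva  in " (appended to the free-text
# description, if any).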
def getseqcurations(db,sequence):
"""
    Get the manual curations for a sequence (legacy local-sqlite path; unlike
    getseqannotations() above, it expects an sqlite cursor in db.cur)
input:
db : from scdbstart()
sequence : str (ACGT)
output:
curs : list of list of (curation dict,list of [Type,Value] of curation details)
"""
curs=[]
seqid=getseq(db,sequence,insert=False)
if seqid==0:
Debug(2,'Sequence not found')
return curs
curids=getseqcurationids(db,seqid)
for cid in curids:
cdat=select_column_and_value(db.cur,"SELECT * FROM Curations WHERE CurationID = ?",[cid])
if cdat=={}:
Debug(8,'no curation found for curationid %d' % cid)
continue
ccur=cdat
Debug(2,cdat)
ccurdetails=[]
curation=db.cur.execute('SELECT Type,Value from CurationList WHERE CurationID = ?',[cid])
for ccuration in curation.fetchall():
Debug(2,ccuration)
ccurdetails.append([ccuration[0],ccuration[1]])
curs.append([ccur,ccurdetails])
return curs
def getcurationstrings(db,sequence):
"""
get a nice string summary of a curation
input:
db : from scdbstart()
sequence : str (ACGT)
output:
shortdesc : list of str
a short summary of the curations (1 item per curation)
"""
shortdesc=[]
curs=getseqcurations(db,sequence)
for ccur in curs:
cdesc=''
curinfo=ccur[0]
curdetails=ccur[1]
if curinfo['Description']:
cdesc+=curinfo['Description']+' ('
if curinfo['CurType']=='COMMON':
cdesc+='common in '
for cdet in curdetails:
cdesc+=cdet[1]+' '
elif curinfo['CurType']=='HIGHFREQ':
cdesc+='high freq in '
for cdet in curdetails:
cdesc+=cdet[1]+' '
elif curinfo['CurType']=='DIFFEXP':
chigh=[]
clow=[]
call=[]
for cdet in curdetails:
if cdet[0]=='ALL':
call.append(cdet[1])
continue
if cdet[0]=='LOW':
clow.append(cdet[1])
continue
if cdet[0]=='HIGH':
chigh.append(cdet[1])
continue
cdesc+=' high in '
for cval in chigh:
cdesc+=cval+' '
cdesc+=' compared to '
for cval in clow:
cdesc+=cval+' '
cdesc+=' in '
for cval in call:
cdesc+=cval+' '
else:
Debug(2,'unknown curation %s.' % curinfo['CurType'])
shortdesc.append(cdesc)
return shortdesc
def select_column_and_value(cur, sql, parameters=()):
"""
get a dict with field as key, value as values for an sql query
"""
execute = cur.execute(sql, parameters)
fetch = execute.fetchone()
if fetch is None:
return {}
return {k[0]: v for k, v in list(zip(execute.description, fetch))}
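# Minimal self-contained demonstration of select_column_and_value, using an
# in-memory sqlite3 database (a sketch; the table and values are made up):
if __name__ == '__main__':
    _con = sqlite3.connect(':memory:')
    _cur = _con.cursor()
    _cur.execute('CREATE TABLE Curations (CurationID INTEGER, Description TEXT)')
    _cur.execute("INSERT INTO Curations VALUES (1, 'test')")
    print(select_column_and_value(_cur, 'SELECT * FROM Curations WHERE CurationID = ?', [1]))
    # -> {'CurationID': 1, 'Description': 'test'}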
def createontologytree(db,ontologies=[],outname=None):
"""
load the ontology tree graphs into the scdb and store them in a pickle dict
input:
db : from scdbstart()
ontologies : list of str
list of obo ontologies to use or empty to use the default files (db.ontologyfiles)
outname : str
name of the output pickle file or None for default location
output:
db - scdbstruct
with the ontodict field added
"""
if outname is None:
outname=os.path.join(getheatsequerdir(),'db/ontologygraph.pickle')
if not ontologies:
ontologies=db.ontologyfiles
if not db.ontologyfromid:
db=loaddbonto(db)
ontodict={}
for conto in ontologies:
Debug(6,'Processing ontology %s' % conto)
g=ontologytotree(conto)
g=ontotreetonames(g,db.ontologyfromid)
ontodict[conto]=g
    Debug(6,'ontologies loaded. saving to pickle %s' % outname)
fl=open(outname,'wb')
pickle.dump(ontodict,fl,protocol=2)
Debug(6,'ontologies pickled')
db.ontodict=ontodict
return db
def loadontotrees(db,ontopickle=None):
"""
load the ontology dict pickle file
input:
db : from scdbstart()
ontopickle : str
the pickled ontology dict filename (from createontologytree()) or None for default location
output:
db : scdbstruct
with the ontology dict in ontodict
"""
if ontopickle is None:
ontopickle=os.path.join(getheatsequerdir(),'db/ontologygraph.pickle')
    Debug(6,'loading ontology trees')
fl=open(ontopickle,'rb')
db.ontodict=pickle.load(fl)
Debug(6,'loaded %d trees' % len(db.ontodict))
return db
def getontoparents(db,term):
"""
get all the parent terms (including original term) for a given ontology term.
look in all ontology trees in db.ontodict
input:
db : from scdbstart()
term : str
the ontology term (lower case)
output:
terms : list of str
all the parent terms of this item
"""
terms=[]
for conto in db.ontodict.values():
parents=getnodeparents(conto,term)
terms+=parents
terms=list(set(terms))
return terms
def getcurationontologies(db,sequence):
"""
get the curation for items and all upstream (ontology-wise) items for a given sequence
input:
db : from scdbstart()
sequence : str (ACGT)
output:
onto : dict of key=str and value dict of key=str and value=int
        a dictionary of ['up','down','other'] of dict of ontology item and number of times total we see it
"""
if not db.ontodict:
loadontotrees(db)
onto={}
curs=getseqcurations(db,sequence)
for ccur in curs:
curinfo=ccur[0]
curdetails=ccur[1]
ctype='other'
lookseparate=False
if curinfo['CurType']=='COMMON':
ctype='up'
elif curinfo['CurType']=='HIGHFREQ':
ctype='up'
else:
lookseparate=True
for cdet in curdetails:
if lookseparate:
if cdet[0]=='ALL':
ctype='up'
elif cdet[0]=='LOW':
ctype='down'
elif cdet[0]=='HIGH':
ctype='up'
else:
ctype='other'
if ctype not in onto:
onto[ctype]={}
ontoparents=getontoparents(db,cdet[1])
for conto in ontoparents:
if conto not in onto[ctype]:
onto[ctype][conto]=1
else:
onto[ctype][conto]+=1
return onto
def delete_annotation(db,annotationid):
'''
delete an annotation from the database.
input:
db :
annotationid : int
the annotationid to delete
output:
'''
    Debug(1,'delete annotation for annotationid %d' % annotationid)
rdata={}
rdata['annotationid']=annotationid
res=requests.post(db.dburl+'/annotations/delete',json=rdata)
if res.status_code!=200:
Debug(6,'error deleting annotationid %d' % annotationid)
Debug(6,res.content)
return []
Debug(2,'Annotation %d deleted' % annotationid)
def get_experiment_annotations(db,exp):
'''
get annotations on all sequences in the experiment
input:
db:
exp : Experiment
output:
'''
Debug(1,'get experiment sequence annotations for %d sequences' % len(exp.seqs))
rdata={}
rdata['sequences']=exp.seqs
res=requests.get(db.dburl+'/sequences/get_list_annotations',json=rdata)
if res.status_code!=200:
Debug(6,'error getting list annotations')
Debug(6,res.content)
return []
return res.json()['seqannotations']
def convert_olddb_to_server(db,olddbfilename='db/supercooldb.db'):
'''
load the old local sqlite3 database to the server
input:
db
olddbfilename : str
the sqlite3 database filename
'''
con=sqlite3.connect(olddbfilename)
Debug(7,"Connected to database")
# and the cursor
cur=con.cursor()
# add all studies
# get studyids
Debug(7,'getting study ids')
cur.execute("SELECT DataID FROM Data")
allvals=cur.fetchall()
Debug(7,'found %d details' % len(allvals))
studyids=set()
for cres in allvals:
studyids.add(cres[0])
Debug(7,'found %d unique ids' % len(studyids))
Debug(7,'processing per study data')
studyoldnew={}
for cid in studyids:
cur.execute("SELECT Type,Value FROM Data WHERE DataID = ?",[cid])
allvals=cur.fetchall()
Debug(7,'found %d details for studyid %d' % (len(allvals),cid))
Debug(7,'adding study data')
        Debug(7,'looking for md5 in previous studies')
        datamd5=''
        mapmd5=''
        for cres in allvals:
            if cres[0]=='DataMD5':
                datamd5=cres[1]
            if cres[0]=='MapMD5':
                mapmd5=cres[1]
        newid=finddataid(db,datamd5=datamd5,mapmd5=mapmd5,getall=False)
        if newid is not None:
            Debug(7,'already exists. id=%d' % newid)
# newid = addexpdata(db,list(allvals))
newid=0
Debug(7,'added data new studyid %d' % newid)
studyoldnew[cid]=newid
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url(r'^geoprisma/', include("geoprisma.urls", namespace="geoprisma")),
url(r'^map/(?P<wsName>[\w-]+)/(?P<viewId>[\w-]+)$', 'example_project.views.maprender'),
)
|
import datetime
from django.contrib.auth.models import User
from django.contrib.comments.managers import CommentManager
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.db import models
from django.core import urlresolvers
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
COMMENT_MAX_LENGTH = getattr(settings,'COMMENT_MAX_LENGTH',3000)
class BaseCommentAbstractModel(models.Model):
"""
An abstract base class that any custom comment models probably should
subclass.
"""
# Content-object field
content_type = models.ForeignKey(ContentType,
related_name="content_type_set_for_%(class)s")
object_pk = models.TextField(_('object ID'))
content_object = generic.GenericForeignKey(ct_field="content_type", fk_field="object_pk")
# Metadata about the comment
site = models.ForeignKey(Site)
class Meta:
abstract = True
def get_content_object_url(self):
"""
Get a URL suitable for redirecting to the content object.
"""
return urlresolvers.reverse(
"comments-url-redirect",
args=(self.content_type_id, self.object_pk)
)
class Comment(BaseCommentAbstractModel):
"""
A user comment about some object.
"""
# Who posted this comment? If ``user`` is set then it was an authenticated
# user; otherwise at least user_name should have been set and the comment
# was posted by a non-authenticated user.
user = models.ForeignKey(User, blank=True, null=True, related_name="%(class)s_comments")
user_name = models.CharField(_("user's name"), max_length=50, blank=True)
user_email = models.EmailField(_("user's email address"), blank=True)
user_url = models.URLField(_("user's URL"), blank=True)
comment = models.TextField(_('comment'), max_length=COMMENT_MAX_LENGTH)
# Metadata about the comment
submit_date = models.DateTimeField(_('date/time submitted'), default=None)
ip_address = models.IPAddressField(_('IP address'), blank=True, null=True)
is_public = models.BooleanField(_('is public'), default=True,
help_text=_('Uncheck this box to make the comment effectively ' \
'disappear from the site.'))
is_removed = models.BooleanField(_('is removed'), default=False,
help_text=_('Check this box if the comment is inappropriate. ' \
'A "This comment has been removed" message will ' \
'be displayed instead.'))
# Manager
objects = CommentManager()
class Meta:
db_table = "django_comments"
ordering = ('submit_date',)
permissions = [("can_moderate", "Can moderate comments")]
def __unicode__(self):
return "%s: %s..." % (self.name, self.comment[:50])
def save(self, force_insert=False, force_update=False):
if self.submit_date is None:
self.submit_date = datetime.datetime.now()
super(Comment, self).save(force_insert, force_update)
def _get_userinfo(self):
"""
Get a dictionary that pulls together information about the poster
safely for both authenticated and non-authenticated comments.
This dict will have ``name``, ``email``, and ``url`` fields.
"""
if not hasattr(self, "_userinfo"):
self._userinfo = {
"name" : self.user_name,
"email" : self.user_email,
"url" : self.user_url
}
if self.user_id:
u = self.user
if u.email:
self._userinfo["email"] = u.email
# If the user has a full name, use that for the user name.
# However, a given user_name overrides the raw user.username,
# so only use that if this comment has no associated name.
if u.get_full_name():
self._userinfo["name"] = self.user.get_full_name()
elif not self.user_name:
self._userinfo["name"] = u.username
return self._userinfo
userinfo = property(_get_userinfo, doc=_get_userinfo.__doc__)
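    # Illustrative behaviour (a sketch; assumes a configured Django project):
    # for an unauthenticated comment,
    #     Comment(user_name='Ann', user_email='ann@example.org').userinfo
    # returns {'name': 'Ann', 'email': 'ann@example.org', 'url': ''}, while
    # for an authenticated comment the linked User's email and full name take
    # precedence, as described in _get_userinfo above.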
def _get_name(self):
return self.userinfo["name"]
def _set_name(self, val):
if self.user_id:
raise AttributeError(_("This comment was posted by an authenticated "\
"user and thus the name is read-only."))
self.user_name = val
name = property(_get_name, _set_name, doc="The name of the user who posted this comment")
def _get_email(self):
return self.userinfo["email"]
def _set_email(self, val):
if self.user_id:
raise AttributeError(_("This comment was posted by an authenticated "\
"user and thus the email is read-only."))
self.user_email = val
email = property(_get_email, _set_email, doc="The email of the user who posted this comment")
def _get_url(self):
return self.userinfo["url"]
def _set_url(self, val):
self.user_url = val
url = property(_get_url, _set_url, doc="The URL given by the user who posted this comment")
def get_absolute_url(self, anchor_pattern="#c%(id)s"):
return self.get_content_object_url() + (anchor_pattern % self.__dict__)
def get_as_text(self):
"""
Return this comment as plain text. Useful for emails.
"""
d = {
'user': self.user or self.name,
'date': self.submit_date,
'comment': self.comment,
'domain': self.site.domain,
'url': self.get_absolute_url()
}
return _('Posted by %(user)s at %(date)s\n\n%(comment)s\n\nhttp://%(domain)s%(url)s') % d
class CommentFlag(models.Model):
"""
Records a flag on a comment. This is intentionally flexible; right now, a
flag could be:
* A "removal suggestion" -- where a user suggests a comment for (potential) removal.
* A "moderator deletion" -- used when a moderator deletes a comment.
You can (ab)use this model to add other flags, if needed. However, by
design users are only allowed to flag a comment with a given flag once;
if you want rating look elsewhere.
"""
user = models.ForeignKey(User, related_name="comment_flags")
comment = models.ForeignKey(Comment, related_name="flags")
flag = models.CharField(max_length=30, db_index=True)
flag_date = models.DateTimeField(default=None)
# Constants for flag types
SUGGEST_REMOVAL = "removal suggestion"
MODERATOR_DELETION = "moderator deletion"
MODERATOR_APPROVAL = "moderator approval"
class Meta:
db_table = 'django_comment_flags'
unique_together = [('user', 'comment', 'flag')]
def __unicode__(self):
return "%s flag of comment ID %s by %s" % \
(self.flag, self.comment_id, self.user.username)
def save(self, force_insert=False, force_update=False):
if self.flag_date is None:
self.flag_date = datetime.datetime.now()
super(CommentFlag, self).save(force_insert, force_update)
|
try:
from tn.plonehtmlpage import html_page
HAS_HTML_PAGE = True
except ImportError:
HAS_HTML_PAGE = False
if HAS_HTML_PAGE:
from five import grok
from plone import api
from Products.statusmessages.interfaces import IStatusMessage
from tn.plonebehavior.template import _
from tn.plonebehavior.template import interfaces
from tn.plonebehavior.template.html import ContextlessHTML
from zope.lifecycleevent import ObjectModifiedEvent
from zope.event import notify
import tn.plonebehavior.template as main
import zope.interface
IPossibleTemplate = interfaces.IPossibleTemplate
class HTML(grok.Adapter):
grok.context(html_page.IHTMLPageSchema)
grok.implements(interfaces.IHTML)
contextless_factory = ContextlessHTML
def __unicode__(self):
base_url = self.context.absolute_url()
return unicode(self.contextless_factory(base_url,
self.context.html))
class View(html_page.View):
grok.context(html_page.IHTMLPageSchema)
grok.layer(interfaces.IBrowserLayer)
grok.require('zope2.View')
def update(self):
super(View, self).update()
if api.user.is_anonymous():
# Anonymous users are unlikely to be interested in knowing if
# the page being viewed can work as a template.
return
if self._should_be_template():
if self._can_be_template():
                    # This should have been done by the
# ITemplateConfiguration adapter, but when its fields are
# not modified when editing the item, that adapter is not
# used at all. Thus, in order to keep correctness and to
# workaround this fact (that the adapter is not used), we
# mark the item here and issue a modified event. This is
# not optimal, since GETs should be safe, and we're
# probably responding to a GET request here...
if not IPossibleTemplate.providedBy(self.context):
zope.interface.alsoProvides(self.context,
IPossibleTemplate)
notify(ObjectModifiedEvent(self.context))
else:
# In this situation, we expect that a grok.subscriber
# already registered for IPossibleTemplate to already have
# removed that marker interface. We just warn the user.
IStatusMessage(self.request).add(
_("This item has a selector set for usage as a "
"template, but the selector doesn't match a "
"single HTML element. If you really wish to use "
"this item as a template, edit it and modify "
"the selector in the Template configuration "
"tab or the HTML code to make them match. "
"If instead you wish this item to be a regular "
"HTML page, edit it, go to the Template "
"configuration tab, and leave the selector field "
"in blank."),
type=u'warning',
)
if IPossibleTemplate.providedBy(self.context):
IStatusMessage(self.request).add(
_(u'This item can be used as a template.'),
type=u'info',
)
def render(self):
return super(View, self).render()
def _should_be_template(self):
config = main.ITemplateConfiguration(self.context)
            return bool(config.xpath)
def _can_be_template(self):
config = main.ITemplateConfiguration(self.context)
return config.xpath and main.html_contains_xpath_single(
config.html, config.xpath
)
|
from __future__ import absolute_import
import re
import six
import uuid
from datetime import datetime
from pytz import utc
from sentry.models import ProjectKey, OrganizationOption
def _generate_pii_config(project, org_options):
scrub_ip_address = (org_options.get('sentry:require_scrub_ip_address', False) or
project.get_option('sentry:scrub_ip_address', False))
scrub_data = (org_options.get('sentry:require_scrub_data', False) or
project.get_option('sentry:scrub_data', True))
fields = project.get_option('sentry:sensitive_fields')
if not scrub_data and not scrub_ip_address:
return None
custom_rules = {}
default_rules = []
ip_rules = []
databag_rules = []
if scrub_data:
default_rules.extend((
'@email',
'@mac',
'@creditcard',
'@userpath',
))
databag_rules.append('@password')
    if fields:
        custom_rules['strip-fields'] = {
            'type': 'redactPair',
            'redaction': 'remove',
            'keyPattern': r'\b%s\b' % '|'.join(re.escape(x) for x in fields),
        }
        databag_rules.append('strip-fields')
    if scrub_ip_address:
        ip_rules.append('@ip')
return {
'rules': custom_rules,
'applications': {
'freeform': default_rules,
'databag': default_rules + databag_rules,
'username': scrub_data and ['@userpath'] or [],
'email': scrub_data and ['@email'] or [],
'ip': ip_rules,
}
}
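# For illustration, with scrub_data and scrub_ip_address enabled and
# sensitive_fields=['password'] (made-up option values), the function above
# returns roughly:
#
#   {
#       'rules': {'strip-fields': {'type': 'redactPair', 'redaction': 'remove',
#                                  'keyPattern': r'\bpassword\b'}},
#       'applications': {
#           'freeform': ['@email', '@mac', '@creditcard', '@userpath'],
#           'databag': ['@email', '@mac', '@creditcard', '@userpath',
#                       '@password', 'strip-fields'],
#           'username': ['@userpath'],
#           'email': ['@email'],
#           'ip': ['@ip'],
#       },
#   }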
def get_project_options(project):
"""Returns a dict containing the config for a project for the sentry relay"""
project_keys = ProjectKey.objects.filter(
project=project,
).all()
public_keys = {}
for project_key in list(project_keys):
public_keys[project_key.public_key] = project_key.status == 0
now = datetime.utcnow().replace(tzinfo=utc)
org_options = OrganizationOption.objects.get_all_values(
project.organization_id)
rv = {
'disabled': project.status > 0,
'slug': project.slug,
'lastFetch': now,
'lastChange': project.get_option('sentry:relay-rev-lastchange', now),
'rev': project.get_option('sentry:relay-rev', uuid.uuid4().hex),
'publicKeys': public_keys,
'config': {
'allowedDomains': project.get_option('sentry:origins', ['*']),
'trustedRelays': org_options.get('sentry:trusted-relays', []),
'piiConfig': _generate_pii_config(project, org_options),
},
}
return rv
def relay_has_org_access(relay, org):
# Internal relays always have access
if relay.is_internal:
return True
# Use the normalized form of the public key for the check
return six.text_type(relay.public_key_object) \
in org.get_option('sentry:trusted-relays', [])
def get_project_key_config(project_key):
"""Returns a dict containing the information for a specific project key"""
return {
'dsn': project_key.dsn_public,
}
|
"""
tests for magic_gui
"""
import wx
import unittest
import os
from pmagpy import pmag
from pmagpy import new_builder as nb
from pmagpy import data_model3 as data_model
from pmagpy import controlled_vocabularies3 as cv3
DMODEL = data_model.DataModel()
WD = pmag.get_test_WD()
PROJECT_WD = os.path.join(WD, "data_files", "magic_gui", "3_0")
class TestVocabularies(unittest.TestCase):
def setUp(self):
self.vocab = cv3.Vocabulary()
def tearDown(self):
pass
def test_vocabularies(self):
self.assertIn('timescale_era', self.vocab.vocabularies.index)
self.assertIn('Neoproterozoic', self.vocab.vocabularies.ix['timescale_era'])
def test_suggested(self):
self.assertIn('fossil_class', self.vocab.suggested.index)
self.assertIn('Anthozoa', self.vocab.suggested.ix['fossil_class'])
def test_methods(self):
self.assertIn('sample_preparation', list(self.vocab.methods.keys()))
for item in self.vocab.methods['sample_preparation']:
self.assertTrue(item.startswith('SP-'))
def test_all_codes(self):
self.assertIn('SM-TTEST', self.vocab.all_codes.index)
self.assertEqual('statistical_method',
self.vocab.all_codes.ix['SM-TTEST']['dtype'])
|
import os
import re
from setuptools import setup, find_packages
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
def read_file(filename):
"""Read a file into a string"""
path = os.path.abspath(os.path.dirname(__file__))
filepath = os.path.join(path, filename)
try:
return open(filepath).read()
except IOError:
return ''
def parse_requirements(file_name):
requirements = []
for line in open(file_name, 'r').read().split('\n'):
if re.match(r'(\s*#)|(\s*$)', line):
continue
if re.match(r'\s*-e\s+', line):
requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1', line))
elif re.match(r'\s*-f\s+', line):
pass
else:
requirements.append(line)
return requirements
def parse_dependency_links(file_name):
dependency_links = []
for line in open(file_name, 'r').read().split('\n'):
if re.match(r'\s*-[ef]\s+', line):
dependency_links.append(re.sub(r'\s*-[ef]\s+', '', line))
return dependency_links
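# For illustration, given a requirements.txt containing (made-up entries):
#
#   # a comment line (skipped)
#   Django>=1.4
#   -e git+https://github.com/example/pkg.git#egg=pkg
#
# parse_requirements() returns ['Django>=1.4', 'pkg'] and
# parse_dependency_links() returns ['git+https://github.com/example/pkg.git#egg=pkg'].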
setup(
name='django-safe-browsing',
version=__import__('safebrowsing').__version__,
    description=' '.join(__import__('safebrowsing').__doc__.splitlines()).strip(),
author='Roberto Barreda',
author_email='roberto.barreda@gmail.com',
url='https://github.com/robertobarreda/django-safe-browsing',
packages=find_packages(exclude=['example']),
include_package_data=True,
license="MIT license, see LICENSE file",
install_requires=parse_requirements('requirements.txt'),
dependency_links=parse_dependency_links('requirements.txt'),
long_description=read_file('README.rst'),
test_suite="runtests.runtests",
)
|
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.views import View
from django.utils.decorators import method_decorator
@method_decorator(login_required, name='dispatch')
class DashboardView(View):
template_name = "index.html"
def get(self, request):
return render(request, self.template_name)
|
import os
from flask import Blueprint, request, render_template, g
from flask.ext.login import login_required
from config import config
from werkzeug import secure_filename
from admin import perimeter_check
cms_page = Blueprint('cms_page', __name__, template_folder='templates')
@cms_page.route("/cms/")
@login_required
def show_cms():
perimeter_check("CMSINDEX")
return render_template("cms.html", editname="Content Management")
@cms_page.route("/cms/<editname>")
@login_required
def show_cms_editor(editname):
perimeter_check("CMSINDEX")
if editname == "Browse Orders":
perimeter_check("CMSCATEGORY")
order_row = read_orders()
order_status = read_order_status()
return render_template("cms.html", editname=editname, order_row=order_row, order_status=order_status)
elif editname == "Add Category":
perimeter_check("CMSCATEGORY")
return render_template("cms.html", editname=editname)
elif editname == "Add Product":
perimeter_check("CMSPRODUCT")
cat_info = read_categories()
return render_template("cms.html", editname=editname, cat_info = cat_info)
elif editname == "Edit Categories":
perimeter_check("CMSCATEGORY")
cat_info = read_categories()
return render_template("cms.html", editname=editname, cat_info = cat_info)
elif editname == "Edit Products":
perimeter_check("CMSSTOCK")
info = read_stock()
others = read_not_stock()
cat_info = read_categories()
return render_template("cms.html", editname=editname, info=info, others=others, cat_info=cat_info)
elif editname == "Remove Category":
perimeter_check("CMSCATEGORY")
cat_info = read_categories()
return render_template("cms.html", editname=editname, cat_info=cat_info)
elif editname == "Remove Product":
perimeter_check("CMSPRODUCT")
info = read_products_and_categories()
return render_template("cms.html", editname=editname, info=info)
else:
return render_template("cms.html", editname="Content Management")
def read_order_status():
db = getattr(g, 'db', None)
query = "select * from tbl_order_status;"
with db as cursor:
cursor.execute(query)
res = []
for s in cursor.fetchall():
res.append(s[0])
return res
def read_order_detail(orderid):
db = getattr(g, 'db', None)
query = "select tbl_order.id, tbl_order.date, \
sum(tbl_orderlines.amount), sum(tbl_orderlines.price * tbl_orderlines.amount), tbl_order.order_status from " \
"tbl_orderlines inner join\
tbl_order on tbl_orderlines.order_id=tbl_order.id\
where tbl_order.id = %s;"
with db as cursor:
cursor.execute(query, (orderid,))
return cursor.fetchone()
def read_orders():
db = getattr(g, 'db', None)
query = "select tbl_order.id, tbl_order.date, tbl_order.order_status from tbl_order order by tbl_order.id desc;"
with db as cursor:
cursor.execute(query)
return cursor.fetchall()
def read_product_rows(orderid):
db = getattr(g, 'db', None)
query ="SELECT tbl_category.name, tbl_product.id, tbl_product.name, tbl_orderlines.amount, " \
"tbl_orderlines.price " \
"from tbl_orderlines " \
"left join tbl_product on tbl_orderlines.prod_id = tbl_product.id " \
"join tbl_order on tbl_orderlines.order_id = tbl_order.id " \
"join tbl_category on tbl_product.cat_id = tbl_category.id " \
"where tbl_order.id = %s;"
with db as cursor:
cursor.execute(query, (orderid, ))
return cursor.fetchall()
def read_user_details(orderid):
db = getattr(g, 'db', None)
query ="SELECT tbl_user.name, tbl_user.address, tbl_user.postcode, tbl_user.city, tbl_user.country from tbl_user " \
"where tbl_user.id = (select tbl_order.customer_id from tbl_order where tbl_order.id = %s);"
with db as cursor:
cursor.execute(query, (orderid, ))
return cursor.fetchone()
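# Illustrative result shapes for the read_* helpers above (a sketch; actual
# values depend on the schema and data):
#   read_order_detail(7)  -> (order_id, date, total_amount, total_price, status)
#   read_product_rows(7)  -> [(category, product_id, product_name, amount, price), ...]
#   read_user_details(7)  -> (name, address, postcode, city, country)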
@cms_page.route("/cms/Browse Orders/<orderid>")
@login_required
def show_order(orderid):
order_detail = read_order_detail(orderid)
product_rows = read_product_rows(orderid)
user_details = read_user_details(orderid)
order_status = read_order_status()
perimeter_check("CMSCATEGORY")
return render_template("cms.html", editname="Browse Orders", order_detail=order_detail,
product_rows=product_rows, user_details=user_details, order_status=order_status)
@cms_page.route("/cms/Browse Orders/<orderid>", methods=['POST'])
@login_required
def update_status(orderid):
perimeter_check("CMSCATEGORY")
status = request.form['status']
db = getattr(g, 'db', None)
query = "update tbl_order set tbl_order.order_status = %s where tbl_order.id = %s;"
with db as cursor:
cursor.execute(query, (status,orderid))
db.commit()
return show_order(orderid)
@cms_page.route("/cms/Browse Orders", methods=["POST"])
@login_required
def update_status_2():
perimeter_check("CMSCATEGORY")
orderid = request.form['orderid']
status = request.form['status']
db = getattr(g, 'db', None)
query = "update tbl_order set tbl_order.order_status = %s where tbl_order.id = %s;"
with db as cursor:
cursor.execute(query, (status,orderid))
db.commit()
order_row = read_orders()
order_status = read_order_status()
return render_template("cms.html", editname="Browse Orders", order_row=order_row, order_status=order_status)
@cms_page.route("/cms/Add Category", methods=['POST'])
@login_required
def add_category():
perimeter_check("CMSCATEGORY")
catname = request.form['catname']
categories = read_categories()
if catname in categories:
cat_info = read_categories()
return render_template("cms.html", editname="Add Category", ins="error")
db = getattr(g, 'db', None)
query = "insert into tbl_category (name) VALUES (%s);"
with db as cursor:
cursor.execute(query, (catname,))
db.commit()
cat_info = read_categories()
return render_template("cms.html", editname="Add Category", ins="success")
@cms_page.route("/cms/Add Product", methods=['POST'])
@login_required
def add_product():
perimeter_check("CMSPRODUCT")
prodname = request.form['prodname']
prodprice = request.form['prodprice']
proddesc = request.form['proddesc']
prodcat = request.form['prodcat']
prodstock = request.form['prodstock']
prodfile = request.files['prodfile']
produrl = ""
existing_products = read_products()
cat_info = read_categories()
if prodname in existing_products:
return render_template("cms.html", editname = "Add Product", cat_info=cat_info, ins = "error")
db = getattr(g, 'db', None)
query = "INSERT INTO tbl_product (name, description,price, cat_id) VALUES (%s, %s, %s, (SELECT id from tbl_category WHERE name=%s));"
with db as cursor:
data = (prodname, proddesc, prodprice, prodcat)
cursor.execute(query, data)
db.commit()
query = "INSERT INTO tbl_stock (product_id, amount) VALUES ((SELECT id FROM tbl_product WHERE name = %s), %s);"
with db as cursor:
data = (prodname, prodstock)
cursor.execute(query, data)
db.commit()
query = "SELECT id FROM tbl_product WHERE name = %s;"
id = None
with db as cursor:
data = (prodname,)
cursor.execute(query, data)
db.commit()
id = cursor.fetchone()[0]
#attempt fileupload
if id:
filename = str(id) + "_" + secure_filename(prodfile.filename)
add_file(prodfile, filename)
produrl = filename
query = "UPDATE tbl_product SET image_url=%s WHERE id=%s;"
with db as cursor:
data = (produrl, id)
cursor.execute(query, data)
db.commit()
return render_template("cms.html", editname = "Add Product", cat_info=cat_info, ins = "success")
@cms_page.route("/cms/Edit Categories", methods=['POST'])
@login_required
def edit_categories():
perimeter_check("CMSCATEGORY")
newname = request.form['rename_cat']
oldname = request.form['old_name']
cat_info = read_categories()
if newname in cat_info:
return render_template("cms.html", editname="Edit Categories", cat_info = cat_info, ins="error")
db = getattr(g, 'db', None)
query = "UPDATE tbl_category SET name=%s WHERE name=%s;"
with db as cursor:
cursor.execute(query, (newname, oldname))
db.commit()
cat_info = read_categories()
return render_template("cms.html", editname="Edit Categories", cat_info = cat_info, ins="success")
def edit_specific_product(oldname):
perimeter_check("CMSPRODUCT")
prodname = request.form['prodname']
prodprice = request.form['prodprice']
proddesc = request.form['proddesc']
prodcat = request.form['prodcat']
prodstock = request.form['prodstock']
prodfile = request.files['prodfile']
produrl = ""
existing_products = read_products()
cat_info = read_categories()
db = getattr(g, 'db', None)
query = "UPDATE tbl_product SET name=%s, description=%s, price=%s, cat_id=(SELECT id FROM " \
"tbl_category WHERE tbl_category.name=%s) WHERE name=%s;"
with db as cursor:
data = (prodname, proddesc, prodprice,prodcat, oldname)
cursor.execute(query, data)
db.commit()
query = "UPDATE tbl_stock SET amount=%s WHERE product_id=(SELECT id FROM tbl_product WHERE name=%s);"
with db as cursor:
data = (prodstock,prodname)
cursor.execute(query, data)
db.commit()
query = "SELECT id, image_url FROM tbl_product WHERE name = %s;"
id = None
old_url = ""
with db as cursor:
data = (prodname,)
cursor.execute(query, data)
db.commit()
temp = cursor.fetchone()
id = temp[0]
old_url = temp[1]
#attempt fileupload
if id:
filename = str(id) + "_" + secure_filename(prodfile.filename)
        if prodfile:
            add_file(prodfile, filename)
            remove_file(old_url)
            produrl = filename
        else:
            # No new upload: keep the existing image URL instead of
            # overwriting it with a filename that was never saved.
            produrl = old_url
query = "UPDATE tbl_product SET image_url=%s WHERE id=%s;"
with db as cursor:
data = (produrl, id)
cursor.execute(query, data)
db.commit()
return render_template("cms.html", editname = "Add Product", cat_info=cat_info, ins = "success")
@cms_page.route("/cms/Edit Products", methods=['POST'])
@login_required
def edit_products():
perimeter_check("CMSPRODUCT")
alt = request.form['edit']
if alt == "edit_prod":
oldname = request.form['old_name']
edit_specific_product(oldname)
info = read_stock()
others = read_not_stock()
return render_template("cms.html", editname="Edit Products", info=info, others=others, ins = "success")
unchecked = read_products()
checked = []
for p in unchecked:
try:
temp = request.form["check_" + p]
checked.append(temp)
except Exception:
pass
for p in checked:
if p in unchecked:
unchecked.remove(p)
if alt == "set_unavaliable":
for p in unchecked:
remove_from_stock(p)
for p in checked:
stock_value = request.form["stock_" + p]
            add_to_stock(p, stock_value)
elif alt == "set_avaliable":
for p in checked:
stock_value = request.form["stock_" + p]
            add_to_stock(p, stock_value)
info = read_stock()
others = read_not_stock()
return render_template("cms.html", editname="Edit Products", info=info, others=others, ins = "success")
@cms_page.route("/cms/Remove Category", methods=['POST'])
@login_required
def remove_category():
perimeter_check("CMSCATEGORY")
cats = read_categories()
to_remove = []
status = "error"
for c in cats:
try:
temp = request.form[c]
to_remove.append(temp)
except Exception:
pass
for c in to_remove:
if category_remover(c):
status = "success"
else:
status = "error"
cat_info = read_categories()
return render_template("cms.html", editname = "Remove Category", ins = status, cat_info=cat_info)
def category_remover(catname):
db = getattr(g, 'db', None)
query = "DELETE FROM tbl_category WHERE name = %s;";
try:
with db as cursor:
            cursor.execute(query, (catname,))
db.commit()
return True
except Exception:
return False
@cms_page.route("/cms/Remove Product", methods=['POST'])
@login_required
def remove_product():
perimeter_check("CMSPRODUCT")
prods = read_products()
to_remove = []
for p in prods:
try:
temp = request.form[p]
to_remove.append(temp)
except Exception:
pass
for p in to_remove:
product_remover(p)
info = read_products_and_categories()
return render_template("cms.html", editname = "Remove Product", info=info, ins = "success")
def product_remover(prodname):
db = getattr(g, 'db', None)
query = "DELETE FROM tbl_stock WHERE product_id = (SELECT id FROM tbl_product WHERE name = %s);"
with db as cursor:
cursor.execute(query, (prodname,))
db.commit()
url = ""
query = "SELECT image_url from tbl_product WHERE name = %s;"
with db as cursor:
cursor.execute(query, (prodname,))
url = cursor.fetchone()[0]
db.commit()
if url != config['DEFAULT_IMAGE']:
remove_file(url)
query = "DELETE FROM tbl_product WHERE name = %s;"
with db as cursor:
cursor.execute(query, (prodname,))
db.commit()
def search_product(name):
info = []
db = getattr(g, 'db', None)
cursor = db.cursor()
query = "select tbl_category.name, tbl_product.name, tbl_product.price, " \
"tbl_product.description, tbl_product.image_url " \
"from tbl_product " \
"inner join tbl_category on tbl_category.id = tbl_product.cat_id " \
"where tbl_product.name = %s;"
cursor.execute(query, (name,))
for x in cursor.fetchall():
info.append(x)
return info
def read_stock():
info = []
db = getattr(g, 'db', None)
cursor = db.cursor()
query = "select tbl_category.name, tbl_product.name, tbl_stock.amount, tbl_product.price, " \
"tbl_product.description, tbl_product.image_url " \
"from tbl_product " \
"inner join tbl_stock on tbl_product.id = tbl_stock.product_id " \
"inner join tbl_category on tbl_category.id = tbl_product.cat_id;"
cursor.execute(query)
for x in cursor.fetchall():
info.append(x)
return info
def read_not_stock():
info = []
db = getattr(g, 'db', None)
cursor = db.cursor()
query = "SELECT tbl_category.name, tbl_product.name " \
"FROM tbl_product " \
"inner join tbl_category on tbl_category.id = tbl_product.cat_id " \
"WHERE tbl_product.id NOT IN (SELECT tbl_stock.product_id " \
"FROM tbl_stock);"
cursor.execute(query)
for x in cursor.fetchall():
info.append(x)
return info
def read_categories():
db = getattr(g, 'db', None)
cursor = db.cursor()
cursor.execute("select name from tbl_category ORDER BY name;")
categories = []
for c in cursor:
categories.append(c[0])
return categories
def read_products_and_categories():
db = getattr(g, 'db', None)
cursor = db.cursor()
query = "select tbl_category.name, tbl_product.name " \
"from tbl_product " \
"inner join tbl_category on tbl_category.id = tbl_product.cat_id;"
cursor.execute(query)
prod = []
for x in cursor.fetchall():
prod.append(x)
return prod
def read_products():
db = getattr(g, 'db', None)
cursor = db.cursor()
query = "select name from tbl_product;"
cursor.execute(query)
prod = []
for x in cursor.fetchall():
prod.append(x[0])
return prod
def remove_from_stock(product):
db = getattr(g, 'db', None)
query = "DELETE FROM tbl_stock WHERE product_id = (SELECT id FROM tbl_product WHERE name = %s);"
with db as cursor:
cursor.execute(query, (product,))
db.commit()
def add_to_stock(product, stock_value):
db = getattr(g, 'db', None)
query = "INSERT INTO tbl_stock (product_id, amount) VALUES " \
"((SELECT id FROM tbl_product WHERE name = %s), %s) " \
"ON DUPLICATE KEY UPDATE amount = %s ;"
with db as cursor:
data = (product, stock_value, stock_value)
cursor.execute(query, data)
db.commit()
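# NOTE (hedged): the ON DUPLICATE KEY UPDATE upsert in add_to_stock above only
# behaves as an update when tbl_stock has a UNIQUE or PRIMARY key on
# product_id; without one, MySQL would insert duplicate stock rows instead.
# A minimal sketch of the assumed constraint:
#
#   ALTER TABLE tbl_stock ADD UNIQUE KEY uq_stock_product (product_id);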
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1] in config['ALLOWED_EXTENSIONS']
def remove_file(filename):
url = get_os_string(filename)
if filename and os.path.isfile(url):
os.remove(url)
def add_file(prodfile, filename):
url = get_os_string(filename)
if prodfile and allowed_file(prodfile.filename) and not os.path.isfile(url):
prodfile.save(url)
return True
else:
return False
def get_os_string(filename):
return os.path.dirname(os.path.realpath(__file__)) + config['UPLOAD_FOLDER'] + filename
|
import m5
from m5.objects import *
system = System(cpu = AtomicSimpleCPU(cpu_id=0),
physmem = PhysicalMemory(),
membus = Bus())
system.physmem.port = system.membus.port
system.cpu.connectMemPorts(system.membus)
system.cpu.clock = '2GHz'
root = Root(system = system)
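# A minimal sketch of how this configuration would typically be driven
# (hedged: the exact entry points depend on the m5/gem5 version in use;
# older trees passed the root object to instantiate, newer ones discover it):
#
#   m5.instantiate(root)
#   exit_event = m5.simulate()
#   print 'Exiting @ tick', m5.curTick(), 'because', exit_event.getCause()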
|
from __future__ import unicode_literals
from ..denoising import GaussianBlurImageFilter
def test_GaussianBlurImageFilter_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
inputVolume=dict(argstr='%s',
position=-2,
),
outputVolume=dict(argstr='%s',
hash_files=False,
position=-1,
),
sigma=dict(argstr='--sigma %f',
),
terminal_output=dict(deprecated='1.0.0',
nohash=True,
),
)
inputs = GaussianBlurImageFilter.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_GaussianBlurImageFilter_outputs():
output_map = dict(outputVolume=dict(position=-1,
),
)
outputs = GaussianBlurImageFilter.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
from __future__ import absolute_import, division, print_function
from cryptography import utils
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.backends.openssl.utils import _truncate_digest
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import (
AsymmetricSignatureContext, AsymmetricVerificationContext, dsa
)
def _truncate_digest_for_dsa(dsa_cdata, digest, backend):
"""
    This function truncates digests that are longer than a given DSA
key's length so they can be signed. OpenSSL does this for us in
1.0.0c+, leaving us with three releases (1.0.0, 1.0.0a, and 1.0.0b) where
this is a problem.
"""
q = backend._ffi.new("BIGNUM **")
backend._lib.DSA_get0_pqg(
dsa_cdata, backend._ffi.NULL, q, backend._ffi.NULL
)
backend.openssl_assert(q[0] != backend._ffi.NULL)
order_bits = backend._lib.BN_num_bits(q[0])
return _truncate_digest(digest, order_bits)
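# A pure-Python sketch of the truncation rule applied above (a hedged
# illustration of FIPS 186-3, not the backend's actual _truncate_digest):
# when the digest is longer than q, only its leftmost order_bits bits are
# used as the integer that gets signed.
#
#   import binascii
#
#   def truncated_digest_as_int(digest, order_bits):
#       as_int = int(binascii.hexlify(digest), 16)  # big-endian digest
#       digest_bits = len(digest) * 8
#       if digest_bits > order_bits:
#           as_int >>= (digest_bits - order_bits)   # keep the leftmost bits
#       return as_int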
@utils.register_interface(AsymmetricVerificationContext)
class _DSAVerificationContext(object):
def __init__(self, backend, public_key, signature, algorithm):
self._backend = backend
self._public_key = public_key
self._signature = signature
self._algorithm = algorithm
self._hash_ctx = hashes.Hash(self._algorithm, self._backend)
def update(self, data):
self._hash_ctx.update(data)
def verify(self):
data_to_verify = self._hash_ctx.finalize()
data_to_verify = _truncate_digest_for_dsa(
self._public_key._dsa_cdata, data_to_verify, self._backend
)
# The first parameter passed to DSA_verify is unused by OpenSSL but
# must be an integer.
res = self._backend._lib.DSA_verify(
0, data_to_verify, len(data_to_verify), self._signature,
len(self._signature), self._public_key._dsa_cdata)
if res != 1:
self._backend._consume_errors()
raise InvalidSignature
@utils.register_interface(AsymmetricSignatureContext)
class _DSASignatureContext(object):
def __init__(self, backend, private_key, algorithm):
self._backend = backend
self._private_key = private_key
self._algorithm = algorithm
self._hash_ctx = hashes.Hash(self._algorithm, self._backend)
def update(self, data):
self._hash_ctx.update(data)
def finalize(self):
data_to_sign = self._hash_ctx.finalize()
data_to_sign = _truncate_digest_for_dsa(
self._private_key._dsa_cdata, data_to_sign, self._backend
)
sig_buf_len = self._backend._lib.DSA_size(self._private_key._dsa_cdata)
sig_buf = self._backend._ffi.new("unsigned char[]", sig_buf_len)
buflen = self._backend._ffi.new("unsigned int *")
# The first parameter passed to DSA_sign is unused by OpenSSL but
# must be an integer.
res = self._backend._lib.DSA_sign(
0, data_to_sign, len(data_to_sign), sig_buf,
buflen, self._private_key._dsa_cdata)
self._backend.openssl_assert(res == 1)
self._backend.openssl_assert(buflen[0])
return self._backend._ffi.buffer(sig_buf)[:buflen[0]]
@utils.register_interface(dsa.DSAParametersWithNumbers)
class _DSAParameters(object):
def __init__(self, backend, dsa_cdata):
self._backend = backend
self._dsa_cdata = dsa_cdata
def parameter_numbers(self):
p = self._backend._ffi.new("BIGNUM **")
q = self._backend._ffi.new("BIGNUM **")
g = self._backend._ffi.new("BIGNUM **")
self._backend._lib.DSA_get0_pqg(self._dsa_cdata, p, q, g)
self._backend.openssl_assert(p[0] != self._backend._ffi.NULL)
self._backend.openssl_assert(q[0] != self._backend._ffi.NULL)
self._backend.openssl_assert(g[0] != self._backend._ffi.NULL)
return dsa.DSAParameterNumbers(
p=self._backend._bn_to_int(p[0]),
q=self._backend._bn_to_int(q[0]),
g=self._backend._bn_to_int(g[0])
)
def generate_private_key(self):
return self._backend.generate_dsa_private_key(self)
@utils.register_interface(dsa.DSAPrivateKeyWithSerialization)
class _DSAPrivateKey(object):
def __init__(self, backend, dsa_cdata, evp_pkey):
self._backend = backend
self._dsa_cdata = dsa_cdata
self._evp_pkey = evp_pkey
p = self._backend._ffi.new("BIGNUM **")
self._backend._lib.DSA_get0_pqg(
dsa_cdata, p, self._backend._ffi.NULL, self._backend._ffi.NULL
)
self._backend.openssl_assert(p[0] != backend._ffi.NULL)
self._key_size = self._backend._lib.BN_num_bits(p[0])
key_size = utils.read_only_property("_key_size")
def signer(self, signature_algorithm):
return _DSASignatureContext(self._backend, self, signature_algorithm)
def private_numbers(self):
p = self._backend._ffi.new("BIGNUM **")
q = self._backend._ffi.new("BIGNUM **")
g = self._backend._ffi.new("BIGNUM **")
pub_key = self._backend._ffi.new("BIGNUM **")
priv_key = self._backend._ffi.new("BIGNUM **")
self._backend._lib.DSA_get0_pqg(self._dsa_cdata, p, q, g)
self._backend.openssl_assert(p[0] != self._backend._ffi.NULL)
self._backend.openssl_assert(q[0] != self._backend._ffi.NULL)
self._backend.openssl_assert(g[0] != self._backend._ffi.NULL)
self._backend._lib.DSA_get0_key(self._dsa_cdata, pub_key, priv_key)
self._backend.openssl_assert(pub_key[0] != self._backend._ffi.NULL)
self._backend.openssl_assert(priv_key[0] != self._backend._ffi.NULL)
return dsa.DSAPrivateNumbers(
public_numbers=dsa.DSAPublicNumbers(
parameter_numbers=dsa.DSAParameterNumbers(
p=self._backend._bn_to_int(p[0]),
q=self._backend._bn_to_int(q[0]),
g=self._backend._bn_to_int(g[0])
),
y=self._backend._bn_to_int(pub_key[0])
),
x=self._backend._bn_to_int(priv_key[0])
)
def public_key(self):
dsa_cdata = self._backend._lib.DSAparams_dup(self._dsa_cdata)
self._backend.openssl_assert(dsa_cdata != self._backend._ffi.NULL)
dsa_cdata = self._backend._ffi.gc(
dsa_cdata, self._backend._lib.DSA_free
)
pub_key = self._backend._ffi.new("BIGNUM **")
self._backend._lib.DSA_get0_key(
self._dsa_cdata, pub_key, self._backend._ffi.NULL
)
self._backend.openssl_assert(pub_key[0] != self._backend._ffi.NULL)
pub_key_dup = self._backend._lib.BN_dup(pub_key[0])
res = self._backend._lib.DSA_set0_key(
dsa_cdata, pub_key_dup, self._backend._ffi.NULL
)
self._backend.openssl_assert(res == 1)
evp_pkey = self._backend._dsa_cdata_to_evp_pkey(dsa_cdata)
return _DSAPublicKey(self._backend, dsa_cdata, evp_pkey)
def parameters(self):
dsa_cdata = self._backend._lib.DSAparams_dup(self._dsa_cdata)
self._backend.openssl_assert(dsa_cdata != self._backend._ffi.NULL)
dsa_cdata = self._backend._ffi.gc(
dsa_cdata, self._backend._lib.DSA_free
)
return _DSAParameters(self._backend, dsa_cdata)
def private_bytes(self, encoding, format, encryption_algorithm):
return self._backend._private_key_bytes(
encoding,
format,
encryption_algorithm,
self._evp_pkey,
self._dsa_cdata
)
@utils.register_interface(dsa.DSAPublicKeyWithSerialization)
class _DSAPublicKey(object):
def __init__(self, backend, dsa_cdata, evp_pkey):
self._backend = backend
self._dsa_cdata = dsa_cdata
self._evp_pkey = evp_pkey
p = self._backend._ffi.new("BIGNUM **")
self._backend._lib.DSA_get0_pqg(
dsa_cdata, p, self._backend._ffi.NULL, self._backend._ffi.NULL
)
self._backend.openssl_assert(p[0] != backend._ffi.NULL)
self._key_size = self._backend._lib.BN_num_bits(p[0])
key_size = utils.read_only_property("_key_size")
def verifier(self, signature, signature_algorithm):
if not isinstance(signature, bytes):
raise TypeError("signature must be bytes.")
return _DSAVerificationContext(
self._backend, self, signature, signature_algorithm
)
def public_numbers(self):
p = self._backend._ffi.new("BIGNUM **")
q = self._backend._ffi.new("BIGNUM **")
g = self._backend._ffi.new("BIGNUM **")
pub_key = self._backend._ffi.new("BIGNUM **")
self._backend._lib.DSA_get0_pqg(self._dsa_cdata, p, q, g)
self._backend.openssl_assert(p[0] != self._backend._ffi.NULL)
self._backend.openssl_assert(q[0] != self._backend._ffi.NULL)
self._backend.openssl_assert(g[0] != self._backend._ffi.NULL)
self._backend._lib.DSA_get0_key(
self._dsa_cdata, pub_key, self._backend._ffi.NULL
)
self._backend.openssl_assert(pub_key[0] != self._backend._ffi.NULL)
return dsa.DSAPublicNumbers(
parameter_numbers=dsa.DSAParameterNumbers(
p=self._backend._bn_to_int(p[0]),
q=self._backend._bn_to_int(q[0]),
g=self._backend._bn_to_int(g[0])
),
y=self._backend._bn_to_int(pub_key[0])
)
def parameters(self):
dsa_cdata = self._backend._lib.DSAparams_dup(self._dsa_cdata)
dsa_cdata = self._backend._ffi.gc(
dsa_cdata, self._backend._lib.DSA_free
)
return _DSAParameters(self._backend, dsa_cdata)
def public_bytes(self, encoding, format):
if format is serialization.PublicFormat.PKCS1:
raise ValueError(
"DSA public keys do not support PKCS1 serialization"
)
return self._backend._public_key_bytes(
encoding,
format,
self,
self._evp_pkey,
None
)
|
"""
Copyright (c) 2013, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the University of California nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
Various utility functions.
"""
from copy import deepcopy
import urllib
import urlparse
from functools import wraps
from pyramid.httpexceptions import exception_response
from webhelpers.feedgenerator import Atom1Feed
import logging
logger = logging.getLogger(__name__)
def require_post(fn):
"""Requires that a function receives a POST request,
otherwise returning a 405 Method Not Allowed.
Requires that a function recieves a Content-type
of application/x-www-form-urlencoded otherwise returning
a 406 Not Acceptable.
"""
@wraps(fn)
def wrapper(*args, **kwargs):
# We could be called with (context, request) or just (request,)
request = args[0]
if len(args) > 1:
request = args[1]
if request.method != "POST":
response = exception_response(405)
response.headers.extend([('Allow', 'POST')])
return response
content_type = request.headers.get('Content-Type', None)
        if content_type != "application/x-www-form-urlencoded":
response = exception_response(406)
response.headers.extend(
[('Accept', 'application/x-www-form-urlencoded')]
)
return response
return fn(*args, **kwargs)
return wrapper
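# A hedged usage sketch (the view function and its response are
# hypothetical, not part of this module): applied to a Pyramid view,
# the decorator rejects non-POST requests with a 405 and non-form-encoded
# bodies with a 406 before the view body ever runs.
#
#   from pyramid.response import Response
#
#   @require_post
#   def submit_feed(request):
#       return Response('ok')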
def normalize_iri(url):
"""Converts a URL (possibly containing unicode characters) to an IRI.
Args:
url: String (normal or unicode) containing a URL, presumably having
already been percent-decoded by a web framework receiving request
parameters in a POST body or GET request's URL.
Returns:
A properly encoded IRI (see RFC 3987).
"""
def chr_or_escape(unicode_char):
if ord(unicode_char) > 0x7f:
return urllib.quote(unicode_char.encode('utf-8'))
else:
return unicode_char
return ''.join(chr_or_escape(c) for c in unicode(url))
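# A hedged usage example: non-ASCII characters are percent-encoded as
# UTF-8 while plain ASCII passes through untouched, e.g.
#
#   >>> normalize_iri(u'http://example.com/caf\xe9')
#   u'http://example.com/caf%C3%A9'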
def is_valid_url(url):
"""Returns True if the URL is valid, False otherwise."""
split = urlparse.urlparse(url)
    if split.scheme not in ('http', 'https'):
return False
netloc, port = (split.netloc.split(':', 1) + [''])[:2]
if not netloc:
return False
if split.fragment:
return False
return True
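# Illustrative examples of the rules above:
#
#   >>> is_valid_url('https://example.com/feed')
#   True
#   >>> is_valid_url('ftp://example.com/feed')    # scheme not http(s)
#   False
#   >>> is_valid_url('http://example.com/#frag')  # fragment present
#   False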
class FeedComparator(object):
def __init__(self, new_feed, past_feed):
"""
Provides methods for comparing 2 Atom/RSS feeds.
Arguments:
* new_feed: The parsed feed of the newer content
* past_feed: The parsed feed of an older version of the content.
"""
self.new_feed = new_feed
self.past_feed = past_feed
def new_entries(self):
"""
Finds new entries in the feed and returns them.
New entries are determined by comparing the set of IDs
found in each feed.
"""
        new = []
        past_entry_ids = [e.id for e in self.past_feed.entries]
        for entry in self.new_feed.entries:
            # entry.id is trivially among the new feed's IDs, so only the
            # membership test against the past feed matters.
            if entry.id not in past_entry_ids:
                new.append(entry)
return new
def updated_entries(self):
"""
Finds existing updated entries and returns them.
Entries are differentiated by their ID, and are considered updated
if the parsed date/time of the new entry is more recent than the
old entry's.
"""
updated = []
past_ids = [e.id for e in self.past_feed.entries]
for entry in self.new_feed.entries:
if entry.id not in past_ids:
continue
idx = past_ids.index(entry.id)
past_entry = self.past_feed.entries[idx]
            if entry.updated_parsed > past_entry.updated_parsed:
                updated.append(entry)
            elif entry.link != past_entry.link:
                # elif prevents appending the same entry twice when both
                # the timestamp and the link have changed.
                updated.append(entry)
return updated
def removed_entries(self):
removed = []
new_ids = [e.id for e in self.new_feed.entries]
for entry in self.past_feed.entries:
if entry.id in new_ids:
continue
removed.append(entry)
return removed
def changed_metadata(self):
"""
Detects changes to the feed metadata.
If *any* of the attributes have changed, we use them all.
"""
changed = False
past_feed = self.past_feed['feed']
new_feed = self.new_feed['feed']
if past_feed['title'] != new_feed['title']:
changed = True
if past_feed.get('author', None) != new_feed.get('author', None):
changed = True
if len(new_feed.keys()) > len(past_feed.keys()):
changed = True
if changed:
metadata = deepcopy(self.new_feed)
else:
metadata = deepcopy(self.past_feed)
del metadata['entries']
return metadata
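# A hedged usage sketch (assumes feedparser-style parsed feeds, which is
# what the attribute access above -- .entries, .id, .updated_parsed --
# implies; 'new.xml' and 'old.xml' are hypothetical files):
#
#   import feedparser
#
#   comparator = FeedComparator(feedparser.parse('new.xml'),
#                               feedparser.parse('old.xml'))
#   added = comparator.new_entries()
#   updated = comparator.updated_entries()
#   removed = comparator.removed_entries()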
class Atom1FeedKwargs(Atom1Feed):
"""An Atom1Feed that can handle the kwargs passed in for new feed
items.
XXX: This is far from optimal. Need to find a better solution for
this problem.
"""
# List of fields that have already been handled, or are generated
default_fields = [
'author_email',
'author_link',
'author_name',
'categories',
'description',
'enclosure',
'guidislink',
'item_copyright',
'link',
'pubdate',
'published',
'published_parsed',
'summary',
'title',
'ttl',
'unique_id',
'updated',
'updated_parsed',
]
def _handle_kwarg(self, handler, key, value):
"""Handle each item and recursively handle lists
"""
if key in self.default_fields or value is None:
            logger.debug('ignoring: %s, %s', key, value)
return
if isinstance(value, dict):
# Handle a dictionary and assume the "value" is what
# will be the text of the element.
value = deepcopy(value)
el_content = value.pop('value', '')
# The xml parser can't handle a None value
for k, v in value.items():
if v is None:
value.pop(k)
if value.get("type") == "application/xhtml+xml": # XXX: fragile
self.add_xml_element(handler, key, el_content, value)
else:
handler.addQuickElement(key, el_content, value)
elif isinstance(value, (list, tuple)):
# Loop over a list and add each item
for item in value:
self._handle_kwarg(handler, key, item)
else:
# Assume everything else is just a simple string
handler.addQuickElement(key, value)
def add_item_elements(self, handler, item):
"""Process all the default items, then try and add the elements
from the keyword arguments.
"""
super(Atom1FeedKwargs, self).add_item_elements(handler, item)
for k, v in item.items():
self._handle_kwarg(handler, k, v)
def add_xml_element(self, handler, name, value, attrs):
"""Add element with XML content"""
if attrs is None:
attrs = {}
handler.startElement(name, attrs)
if value is not None:
handler._write(value) # XXX: private access
handler.endElement(name)
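# A hedged illustration of how extra item kwargs flow through
# _handle_kwarg: a dict becomes an element whose text is the popped
# 'value' key (written as raw XML when its type is
# application/xhtml+xml), a list or tuple is handled item by item, and a
# bare string becomes a simple quick element. For example, a
# hypothetical kwarg
#
#   source={'value': 'Example', 'href': 'http://example.com/'}
#
# would be serialized as <source href="http://example.com/">Example</source>.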
|
import os
from .lib import StringIO
from .processors import ProcessorPipeline
from .utils import (img_to_fobj, open_image, IKContentFile, extension_to_format,
UnknownExtensionError)
class SpecFileGenerator(object):
def __init__(self, processors=None, format=None, options=None,
autoconvert=True, storage=None):
self.processors = processors
self.format = format
self.options = options or {}
self.autoconvert = autoconvert
self.storage = storage
def process_content(self, content, filename=None, source_file=None):
img = open_image(content)
original_format = img.format
# Run the processors
processors = self.processors
if callable(processors):
processors = processors(source_file)
img = ProcessorPipeline(processors or []).process(img)
options = dict(self.options or {})
# Determine the format.
format = self.format
if filename and not format:
# Try to guess the format from the extension.
extension = os.path.splitext(filename)[1].lower()
if extension:
try:
format = extension_to_format(extension)
except UnknownExtensionError:
pass
format = format or img.format or original_format or 'JPEG'
imgfile = img_to_fobj(img, format, **options)
content = IKContentFile(filename, imgfile.read(), format=format)
return img, content
def generate_file(self, filename, source_file, save=True):
"""
Generates a new image file by processing the source file and returns
the content of the result, ready for saving.
"""
if source_file: # TODO: Should we error here or something if the source_file doesn't exist?
# Process the original image file.
try:
fp = source_file.storage.open(source_file.name)
except IOError:
return
fp.seek(0)
fp = StringIO(fp.read())
img, content = self.process_content(fp, filename, source_file)
if save:
storage = self.storage or source_file.storage
storage.save(filename, content)
return content
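# A hedged usage sketch ('photo.jpg' is a hypothetical file; with no
# processors configured, the image is simply re-encoded):
#
#   generator = SpecFileGenerator(format='JPEG', options={'quality': 80})
#   with open('photo.jpg', 'rb') as f:
#       img, content = generator.process_content(f, filename='thumb.jpg')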
|
"""Function for generating the SkUserConfig file, customized for Android."""
import os
AUTOGEN_WARNING = (
"""
///////////////////////////////////////////////////////////////////////////////
//
// THIS FILE IS AUTOGENERATED BY GYP_TO_ANDROID.PY. DO NOT EDIT.
//
// This file contains Skia's upstream include/config/SkUserConfig.h as a
// reference, followed by the actual defines set for Android.
//
///////////////////////////////////////////////////////////////////////////////
"""
)
BUILD_GUARD = 'SkUserConfig_Android_DEFINED'
def generate_user_config(original_sk_user_config, target_dir, ordered_set):
"""Generate the SkUserConfig file specific to the Android framework.
Android needs its #defines in its skia/include/core directory, so that other
libraries which use Skia's headers get the right definitions. This function
takes the existing sample version of SkUserConfig, checked into Skia, and
appends the defines from ordered_set, which is expected to be a
vars_dict_lib.OrderedSet containing the defines. The result is written to
target_dir/SkUserConfig.h
Args:
original_sk_user_config: Path to original SkUserConfig.h
target_dir: Directory within which the modified SkUserConfig.h will be
written. Its name will be the same basename as
original_sk_user_config. If None, the new file will be written to the
working directory.
ordered_set: A vars_dict_lib.OrderedSet, containing a list of defines to
be appended to SkUserConfig.
Raises:
AssertionError: If original_sk_user_config does not exist.
"""
assert os.path.exists(original_sk_user_config)
dst_filename = os.path.basename(original_sk_user_config)
if target_dir:
dst_filename = os.path.join(target_dir, dst_filename)
with open(dst_filename, 'w') as dst:
dst.write(AUTOGEN_WARNING)
# Copy the original exactly. This is merely for reference. Many of the
# defines written to the file below, either manually or generated from the
# gyp files, have explanations in the original SkUserConfig.h
with open(original_sk_user_config, 'r') as original:
for line in original:
dst.write(line)
# Now add the defines specific to Android. Write a custom build guard to
# ensure they don't get defined more than once.
dst.write('\n// Android defines:\n')
dst.write('#ifndef ' + BUILD_GUARD + '\n')
dst.write('#define ' + BUILD_GUARD + '\n')
# Add conditional defines manually:
# do this build check for other tools that still read this header
dst.write('#ifdef ANDROID\n')
dst.write(' #include <utils/misc.h>\n')
dst.write('#endif\n\n')
dst.write('#if __BYTE_ORDER == __BIG_ENDIAN\n')
dst.write(' #define SK_CPU_BENDIAN\n')
dst.write(' #undef SK_CPU_LENDIAN\n')
dst.write('#else\n')
dst.write(' #define SK_CPU_LENDIAN\n')
dst.write(' #undef SK_CPU_BENDIAN\n')
dst.write('#endif\n\n')
# Now add the defines from the gyp files.
for item in ordered_set:
# Although our defines may have '=' in them, when written to the header
# there should be a space between the macro and what it replaces.
dst.write('#define ' + item.replace('=', ' ') + '\n')
dst.write('\n#endif // ' + BUILD_GUARD + '\n')
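# A hedged usage sketch (paths and defines are hypothetical; as the loop
# above shows, ordered_set only needs to be iterable):
#
#   generate_user_config('include/config/SkUserConfig.h',
#                        'out/android/include/core',
#                        ['SK_BUILD_FOR_ANDROID', 'SK_GAMMA_APPLY_TO_A8=1'])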
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Assignment'
db.create_table(u'assignment_assignment', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=50)),
('sequence', self.gf('django.db.models.fields.IntegerField')()),
('description', self.gf('django.db.models.fields.TextField')()),
('deadline', self.gf('django.db.models.fields.DateTimeField')()),
('score_weight', self.gf('django.db.models.fields.DecimalField')(max_digits=5, decimal_places=1)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal(u'assignment', ['Assignment'])
def backwards(self, orm):
# Deleting model 'Assignment'
db.delete_table(u'assignment_assignment')
models = {
u'assignment.assignment': {
'Meta': {'object_name': 'Assignment'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deadline': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'score_weight': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '1'}),
'sequence': ('django.db.models.fields.IntegerField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['assignment']
|
from __future__ import unicode_literals
from django.db import models, migrations
import django_mobile_app_distribution.models
import django.core.files.storage
class Migration(migrations.Migration):
dependencies = [
('django_mobile_app_distribution', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='androidapp',
options={'verbose_name': 'Android App', 'verbose_name_plural': 'Android Apps', 'ordering': ('name', 'operating_system', '-version', '-updatedAt')},
),
migrations.AlterField(
model_name='androidapp',
name='app_binary',
field=models.FileField(verbose_name='APK file', upload_to=django_mobile_app_distribution.models.normalize_android_filename, storage=django.core.files.storage.FileSystemStorage(location='/Users/moritz/Alp-Phone/Projects/mobile_app_distribution/ota_ad_hoc_management/ota_ad_hoc_management/android')),
),
migrations.AlterField(
model_name='androidapp',
name='operating_system',
field=models.CharField(verbose_name='Operating system', default='Android', max_length=50, choices=[('iOS', 'iOS'), ('Android', 'Android')], editable=False),
),
migrations.AlterField(
model_name='app',
name='groups',
field=models.ManyToManyField(verbose_name='Groups', default=None, blank=True, to='auth.Group', related_name='apps'),
),
migrations.AlterField(
model_name='app',
name='updatedAt',
field=models.DateTimeField(auto_now=True),
),
migrations.AlterField(
model_name='iosapp',
name='bundle_identifier',
field=models.CharField(verbose_name='Bundle identifier', default='', max_length=200, help_text='e.g. org.example.app'),
),
migrations.AlterField(
model_name='iosapp',
name='operating_system',
field=models.CharField(verbose_name='Operating system', default='iOS', max_length=50, choices=[('iOS', 'iOS'), ('Android', 'Android')], editable=False),
),
migrations.AlterField(
model_name='userinfo',
name='language',
field=models.CharField(default='en', max_length=20, choices=[('en', 'English'), ('de', 'Deutsch')]),
),
]
|
import numpy as np
import pylab as pl
from matplotlib.colors import ListedColormap
from sklearn import svm
from sklearn.metrics import classification_report
N = 100
data = np.genfromtxt("classification.txt")
X = data[:, 0:2]
t = data[:, 2]
pl.figure(figsize=(18, 5))
no = 1
for kernel in ('linear', 'poly', 'rbf'):
    # Train the classifier
clf = svm.SVC(kernel=kernel, C=10000)
clf.fit(X, t)
pl.subplot(1, 3, no)
cmap1 = ListedColormap(['red', 'blue'])
cmap2 = ListedColormap(['#FFAAAA', '#AAAAFF'])
    # Plot the training data
pl.scatter(X[:, 0], X[:, 1], c=t, zorder=10, cmap=cmap1)
pl.scatter(clf.support_vectors_[:, 0],
clf.support_vectors_[:, 1],
s=80, facecolors='none', zorder=10)
    # Plot the decision boundary
xmin = ymin = -2
xmax = ymax = 2
XX, YY = np.mgrid[xmin:xmax:200j, ymin:ymax:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
Z = Z.reshape(XX.shape)
pl.pcolormesh(XX, YY, Z > 0, cmap=cmap2)
pl.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-0.5, 0, 0.5])
predict_svm = clf.predict(X)
    print(classification_report(t, predict_svm))
pl.title("kernel: %s" % kernel)
pl.xlim(xmin, xmax)
pl.ylim(ymin, ymax)
no += 1
pl.show()
|
'''It provides the common SQL statements.'''
from .util import Statement
from .clause import returning, where
from .clause import insert, columns, values, on_duplicate_key_update, replace
from .clause import select, from_, joins, group_by, having, order_by, limit, offset
from .clause import for_, of, nowait
from .clause import for_update, lock_in_share_mode
from .clause import update, set_
from .clause import delete
from .clause import type_, join, on, using
def insert_preprocessor(clause_args):
if 'values' not in clause_args and 'set' in clause_args:
if hasattr(clause_args['set'], 'items'):
pairs = clause_args['set'].items()
else:
pairs = clause_args['set']
del clause_args['set']
if pairs:
clause_args['columns'], clause_args['values'] = zip(*pairs)
else:
clause_args['columns'] = clause_args['values'] = tuple()
insert = Statement([insert, columns, values, returning, on_duplicate_key_update], preprocessor=insert_preprocessor)
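# A minimal illustration of what insert_preprocessor does to its
# clause_args dict (derived directly from the function above):
#
#   >>> args = {'set': {'name': 'mosky'}}
#   >>> insert_preprocessor(args)
#   >>> args
#   {'columns': ('name',), 'values': ('mosky',)}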
def select_preprocessor(clause_args):
if 'from_' in clause_args:
clause_args['from'] = clause_args['from_']
del clause_args['from_']
if 'for_' in clause_args:
clause_args['for'] = clause_args['for_']
del clause_args['for_']
if 'for' in clause_args:
clause_args['for'] = clause_args['for'].upper()
select = Statement([
select, from_, joins, where, group_by, having, order_by, limit, offset,
for_, of, nowait,
for_update, lock_in_share_mode
], preprocessor=select_preprocessor)
update = Statement([update, set_, where, returning])
delete = Statement([delete, where, returning])
def join_preprocessor(clause_args):
if 'type' not in clause_args:
if 'using' in clause_args or 'on' in clause_args:
clause_args['type'] = 'INNER'
else:
clause_args['type'] = 'NATURAL'
else:
clause_args['type'] = clause_args['type'].upper()
join = Statement([type_, join, on, using], preprocessor=join_preprocessor)
replace = Statement([replace, columns, values], preprocessor=insert_preprocessor)
|
import gtk
import gtk.glade
import gtksourceview
import gtkmozembed
PLUGIN_NAME='Browser'
PLUGIN_VERSION='0.1'
PLUGIN_EVOEDITOR_VERSION='0.1'
PLUGIN_DESCRIPTION='Browsing plugin'
PLUGIN_CONFIGURABLE=True
PLUGIN_IMAGE_PATH='plugins/browser/pixmaps/'
PLUGIN_ICON=PLUGIN_IMAGE_PATH + 'browser.png'
PLUGIN_GLADE_FILE='plugins/browser/browser.glade'
class PluginLoader:
"""
Descripción del plugin
"""
def __init__(self,gui):
"""
Constructor del Plugin
"""
# Guardar instancia del GUI
self.gui=gui
# Guardar ventana principal
self.mainWindow=gui.mainWindow
# Diccionario de widgets contenedores disponibles para del plugin
self.container=gui.containers['editor']
# Carga de fichero glade del plugin
self.gladetree=gtk.glade.XML(PLUGIN_GLADE_FILE)
# Carga de los widgets necesarios
self.__getwidgets()
# Conectar las señales del widget de tareas
self.__connectSignals()
# Inicialización de parámetros del GUI y demás widgets
self.__initGui()
# Insertar widgets en los contenedores
self.__dockWidgets()
def disable(self):
"""
        Plugin deactivation.
"""
#self.widgets['sepEditFind'].destroy()
#self.findDialog.destroy()
#self.toolbar.destroy()
pass
########################################################################
    # Methods for initializing the different parts of the plugin
########################################################################
def __initGui(self):
"""
Inicializar los widgets necesarios para el plugin
"""
# Carga de la ventana de configuración del plugin
if PLUGIN_CONFIGURABLE:
self.configDialog=self.gladetree.get_widget('winConfig')
else:
self.configDialog=None
self.configDialog.set_version(PLUGIN_VERSION)
# Ventana navegador
self.browserDialog=self.gladetree.get_widget('winBrowser')
# Barra de herramientas
self.toolbar=self.gladetree.get_widget('hndFindReplace')
# Instancia del editor
self.editor=self.gui.editor
# Lista de páginas con editor:
self.browsers=[]
def __getwidgets(self):
self.widgets={}
widgetlist=[
]
for widgetname in widgetlist:
self.widgets[widgetname]=self.gladetree.get_widget(widgetname)
def __connectSignals(self):
"""
        Connect the signals of the plugin's widgets
"""
        # Definition of the plugin's signal dictionary
signals = {
'showconfig': self.showConfig,
}
self.gladetree.signal_autoconnect(signals)
def __dockWidgets(self):
"""
        Insert the plugin's widgets into the selected container
"""
moz=gtkmozembed.MozEmbed()
moz.show()
label=gtk.Label('HOLAAA')
label.show()
page=self.container.append_page(moz,label)
self.editor.dummyfile()
self.browsers.append(page)
moz.load_url('http://www.google.com')
        # Add entries to the edit menu
#editmenu=self.gui.gladetree.get_widget('menuEdit_menu')
#accelgroup=gtk.AccelGroup()
#self.gui.mainWindow.add_accel_group(accelgroup)
#self.widgets['mnuEditFind'].unparent()
#self.widgets['mnuEditReplace'].unparent()
#self.widgets['sepEditFind']=gtk.SeparatorMenuItem()
#self.widgets['sepEditFind'].show()
#editmenu.insert(self.widgets['sepEditFind'],14)
#editmenu.insert(self.widgets['mnuEditReplace'],14)
#editmenu.insert(self.widgets['mnuEditFind'],14)
#self.widgets['mnuEditFind'].add_accelerator('activate', accelgroup,ord('F'),gtk.gdk.CONTROL_MASK,gtk.ACCEL_VISIBLE)
#self.widgets['mnuEditReplace'].add_accelerator('activate', accelgroup,ord('R'),gtk.gdk.CONTROL_MASK,gtk.ACCEL_VISIBLE)
        ## Add the search bar
#self.toolbar.unparent()
#self.toolbar.show()
#self.gui.widgets['hbxToolbars'].add(self.toolbar)
        # Hook up keyboard shortcuts
# self.mainWindow.add_accel_group(self.shortcuts)
########################################################################
    # Signals common to all plugins
########################################################################
def showConfig(self):
"""
        Show the plugin's configuration dialog
"""
if self.configDialog:
self.gui.openDialog(self.configDialog,close=True)
########################################################################
    # Methods for interfacing with EVOEditor
########################################################################
def newProject(self):
"""
        Runs when a new project is created
"""
pass
def loadProject(self):
"""
        Runs when a project is loaded
"""
pass
def saveProject(self):
"""
        Runs when a project is saved
"""
pass
def closeProject(self):
"""
        Runs when a project is closed
"""
pass
def newFile(self,args):
"""
        Runs when a new file is created for editing
"""
pass
def loadFile(self,args):
"""
        Runs when a file is loaded for editing
"""
pass
def saveFile(self,args):
"""
        Runs when a file being edited is saved
"""
pass
def closeFile(self,args):
"""
        Runs when the closing of a file being edited begins
"""
pass
########################################################################
    # Plugin-specific methods
########################################################################
|
"""The tests for the manual Alarm Control Panel component."""
from datetime import timedelta
import unittest
from unittest.mock import patch
from homeassistant.setup import setup_component
from homeassistant.const import (
STATE_ALARM_DISARMED, STATE_ALARM_ARMED_HOME, STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_NIGHT, STATE_ALARM_PENDING, STATE_ALARM_TRIGGERED)
from homeassistant.components import alarm_control_panel
import homeassistant.util.dt as dt_util
from tests.common import fire_time_changed, get_test_home_assistant
CODE = 'HELLO_CODE'
class TestAlarmControlPanelManual(unittest.TestCase):
"""Test the manual alarm module."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self): # pylint: disable=invalid-name
"""Stop down everything that was started."""
self.hass.stop()
def test_arm_home_no_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'code': CODE,
'pending_time': 0,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_home(self.hass, CODE)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_HOME,
self.hass.states.get(entity_id).state)
def test_arm_home_with_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'code': CODE,
'pending_time': 1,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_home(self.hass, CODE, entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
state = self.hass.states.get(entity_id)
assert state.attributes['post_pending_state'] == STATE_ALARM_ARMED_HOME
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
assert state.state == STATE_ALARM_ARMED_HOME
def test_arm_home_with_invalid_code(self):
"""Attempt to arm home without a valid code."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'code': CODE,
'pending_time': 1,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_home(self.hass, CODE + '2')
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
def test_arm_away_no_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'code': CODE,
'pending_time': 0,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_away(self.hass, CODE, entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_AWAY,
self.hass.states.get(entity_id).state)
def test_arm_away_with_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'code': CODE,
'pending_time': 1,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_away(self.hass, CODE)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
state = self.hass.states.get(entity_id)
assert state.attributes['post_pending_state'] == STATE_ALARM_ARMED_AWAY
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
assert state.state == STATE_ALARM_ARMED_AWAY
def test_arm_away_with_invalid_code(self):
"""Attempt to arm away without a valid code."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'code': CODE,
'pending_time': 1,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_away(self.hass, CODE + '2')
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
def test_arm_night_no_pending(self):
"""Test arm night method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'code': CODE,
'pending_time': 0,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_night(self.hass, CODE)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_NIGHT,
self.hass.states.get(entity_id).state)
def test_arm_night_with_pending(self):
"""Test arm night method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'code': CODE,
'pending_time': 1,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_night(self.hass, CODE, entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
state = self.hass.states.get(entity_id)
assert state.attributes['post_pending_state'] == \
STATE_ALARM_ARMED_NIGHT
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
assert state.state == STATE_ALARM_ARMED_NIGHT
def test_arm_night_with_invalid_code(self):
"""Attempt to night home without a valid code."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'code': CODE,
'pending_time': 1,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_night(self.hass, CODE + '2')
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
def test_trigger_no_pending(self):
"""Test triggering when no pending submitted method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'trigger_time': 1,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=60)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_TRIGGERED,
self.hass.states.get(entity_id).state)
def test_trigger_with_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'pending_time': 2,
'trigger_time': 3,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
state = self.hass.states.get(entity_id)
assert state.attributes['post_pending_state'] == STATE_ALARM_TRIGGERED
future = dt_util.utcnow() + timedelta(seconds=2)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
assert state.state == STATE_ALARM_TRIGGERED
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
assert state.state == STATE_ALARM_DISARMED
def test_armed_home_with_specific_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'pending_time': 10,
'armed_home': {
'pending_time': 2
}
}}))
entity_id = 'alarm_control_panel.test'
alarm_control_panel.alarm_arm_home(self.hass)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=2)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_HOME,
self.hass.states.get(entity_id).state)
def test_armed_away_with_specific_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'pending_time': 10,
'armed_away': {
'pending_time': 2
}
}}))
entity_id = 'alarm_control_panel.test'
alarm_control_panel.alarm_arm_away(self.hass)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=2)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_AWAY,
self.hass.states.get(entity_id).state)
def test_armed_night_with_specific_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'pending_time': 10,
'armed_night': {
'pending_time': 2
}
}}))
entity_id = 'alarm_control_panel.test'
alarm_control_panel.alarm_arm_night(self.hass)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=2)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_NIGHT,
self.hass.states.get(entity_id).state)
def test_trigger_with_specific_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'pending_time': 10,
'triggered': {
'pending_time': 2
},
'trigger_time': 3,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
alarm_control_panel.alarm_trigger(self.hass)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=2)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_TRIGGERED,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
def test_trigger_with_disarm_after_trigger(self):
"""Test disarm after trigger."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'trigger_time': 5,
'pending_time': 0,
'disarm_after_trigger': True
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_TRIGGERED,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
def test_trigger_with_no_disarm_after_trigger(self):
"""Test disarm after trigger."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'trigger_time': 5,
'pending_time': 0,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_away(self.hass, CODE, entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_AWAY,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_TRIGGERED,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_AWAY,
self.hass.states.get(entity_id).state)
def test_back_to_back_trigger_with_no_disarm_after_trigger(self):
"""Test disarm after trigger."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'trigger_time': 5,
'pending_time': 0,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_away(self.hass, CODE, entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_AWAY,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_TRIGGERED,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_AWAY,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_TRIGGERED,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_AWAY,
self.hass.states.get(entity_id).state)
def test_disarm_while_pending_trigger(self):
"""Test disarming while pending state."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'trigger_time': 5,
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_disarm(self.hass, entity_id=entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
def test_disarm_during_trigger_with_invalid_code(self):
"""Test disarming while code is invalid."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual',
'name': 'test',
'pending_time': 5,
'code': CODE + '2',
'disarm_after_trigger': False
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_disarm(self.hass, entity_id=entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(('homeassistant.components.alarm_control_panel.manual.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_TRIGGERED,
self.hass.states.get(entity_id).state)
|
import datetime
import hashlib
import json
from bson.objectid import ObjectId
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from mongoengine.base import ValidationError
from crits.core.class_mapper import class_from_id, class_from_value
from crits.core.crits_mongoengine import EmbeddedSource
from crits.core.crits_mongoengine import create_embedded_source, json_handler
from crits.core.handlers import build_jtable, jtable_ajax_list, jtable_ajax_delete
from crits.core.handlers import csv_export
from crits.core.user_tools import is_admin, user_sources
from crits.core.user_tools import is_user_subscribed
from crits.certificates.certificate import Certificate
from crits.notifications.handlers import remove_user_from_notification
from crits.services.analysis_result import AnalysisResult
from crits.services.handlers import run_triage, get_supported_services
def generate_cert_csv(request):
"""
Generate a CSV file of the Certificate information
:param request: The request for this CSV.
:type request: :class:`django.http.HttpRequest`
:returns: :class:`django.http.HttpResponse`
"""
    response = csv_export(request, Certificate)
return response
def get_certificate_details(md5, analyst):
"""
Generate the data to render the Certificate details template.
:param md5: The MD5 of the Certificate to get details for.
:type md5: str
:param analyst: The user requesting this information.
:type analyst: str
:returns: template (str), arguments (dict)
"""
template = None
sources = user_sources(analyst)
cert = Certificate.objects(md5=md5, source__name__in=sources).first()
if not cert:
template = "error.html"
args = {'error': 'Certificate not yet available or you do not have access to view it.'}
else:
cert.sanitize("%s" % analyst)
# remove pending notifications for user
remove_user_from_notification("%s" % analyst, cert.id, 'Certificate')
# subscription
subscription = {
'type': 'Certificate',
'id': cert.id,
'subscribed': is_user_subscribed("%s" % analyst,
'Certificate', cert.id),
}
#objects
objects = cert.sort_objects()
#relationships
relationships = cert.sort_relationships("%s" % analyst, meta=True)
# relationship
relationship = {
'type': 'Certificate',
'value': cert.id
}
#comments
comments = {'comments': cert.get_comments(),
'url_key': md5}
#screenshots
screenshots = cert.get_screenshots(analyst)
# services
service_list = get_supported_services('Certificate')
# analysis results
service_results = cert.get_analysis_results()
args = {'service_list': service_list,
'objects': objects,
'relationships': relationships,
'comments': comments,
'relationship': relationship,
"subscription": subscription,
"screenshots": screenshots,
'service_results': service_results,
"cert": cert}
return template, args
def generate_cert_jtable(request, option):
"""
Generate the jtable data for rendering in the list template.
:param request: The request for this jtable.
:type request: :class:`django.http.HttpRequest`
:param option: Action to take.
:type option: str of either 'jtlist', 'jtdelete', or 'inline'.
:returns: :class:`django.http.HttpResponse`
"""
obj_type = Certificate
type_ = "certificate"
mapper = obj_type._meta['jtable_opts']
if option == "jtlist":
# Sets display url
details_url = mapper['details_url']
details_url_key = mapper['details_url_key']
fields = mapper['fields']
response = jtable_ajax_list(obj_type,
details_url,
details_url_key,
request,
includes=fields)
return HttpResponse(json.dumps(response,
default=json_handler),
content_type="application/json")
if option == "jtdelete":
response = {"Result": "ERROR"}
        if jtable_ajax_delete(obj_type, request):
response = {"Result": "OK"}
return HttpResponse(json.dumps(response,
default=json_handler),
content_type="application/json")
jtopts = {
'title': "Certificates",
'default_sort': mapper['default_sort'],
'listurl': reverse('crits.%ss.views.%ss_listing' % (type_,
type_),
args=('jtlist',)),
'deleteurl': reverse('crits.%ss.views.%ss_listing' % (type_,
type_),
args=('jtdelete',)),
'searchurl': reverse(mapper['searchurl']),
'fields': mapper['jtopts_fields'],
'hidden_fields': mapper['hidden_fields'],
'linked_fields': mapper['linked_fields'],
'details_link': mapper['details_link'],
'no_sort': mapper['no_sort']
}
    jtable = build_jtable(jtopts, request)
jtable['toolbar'] = [
{
'tooltip': "'All Certificates'",
'text': "'All'",
'click': "function () {$('#certificate_listing').jtable('load', {'refresh': 'yes'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'New Certificates'",
'text': "'New'",
'click': "function () {$('#certificate_listing').jtable('load', {'refresh': 'yes', 'status': 'New'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'In Progress Certificates'",
'text': "'In Progress'",
'click': "function () {$('#certificate_listing').jtable('load', {'refresh': 'yes', 'status': 'In Progress'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'Analyzed Certificates'",
'text': "'Analyzed'",
'click': "function () {$('#certificate_listing').jtable('load', {'refresh': 'yes', 'status': 'Analyzed'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'Deprecated Certificates'",
'text': "'Deprecated'",
'click': "function () {$('#certificate_listing').jtable('load', {'refresh': 'yes', 'status': 'Deprecated'});}",
'cssClass': "'jtable-toolbar-center'",
},
{
'tooltip': "'Add Certificate'",
'text': "'Add Certificate'",
'click': "function () {$('#new-certificate').click()}",
},
]
if option == "inline":
return render_to_response("jtable.html",
{'jtable': jtable,
'jtid': '%s_listing' % type_,
'button' : '%ss_tab' % type_},
RequestContext(request))
else:
return render_to_response("%s_listing.html" % type_,
{'jtable': jtable,
'jtid': '%s_listing' % type_},
RequestContext(request))
def handle_cert_file(filename, data, source_name, user=None,
description=None, related_id=None, related_md5=None,
related_type=None, method='', reference='',
relationship=None, bucket_list=None, ticket=None):
"""
Add a Certificate.
:param filename: The filename of the Certificate.
:type filename: str
:param data: The filedata of the Certificate.
:type data: str
:param source_name: The source which provided this Certificate.
:type source_name: str,
:class:`crits.core.crits_mongoengine.EmbeddedSource`,
list of :class:`crits.core.crits_mongoengine.EmbeddedSource`
:param user: The user adding the Certificate.
:type user: str
:param description: Description of the Certificate.
:type description: str
:param related_id: ObjectId of a top-level object related to this Certificate.
:type related_id: str
:param related_md5: MD5 of a top-level object related to this Certificate.
:type related_md5: str
:param related_type: The CRITs type of the related top-level object.
:type related_type: str
:param method: The method of acquiring this Certificate.
:type method: str
:param reference: A reference to the source of this Certificate.
:type reference: str
:param relationship: The relationship between the parent and the Certificate.
:type relationship: str
:param bucket_list: Bucket(s) to add to this Certificate
:type bucket_list: str(comma separated) or list.
:param ticket: Ticket(s) to add to this Certificate
:type ticket: str(comma separated) or list.
:returns: dict with keys:
'success' (boolean),
'message' (str),
'md5' (str) if successful.
"""
if not data:
status = {
'success': False,
'message': 'No data object passed in'
}
return status
if len(data) <= 0:
status = {
'success': False,
'message': 'Data length <= 0'
}
return status
if ((related_type and not (related_id or related_md5)) or
(not related_type and (related_id or related_md5))):
status = {
'success': False,
'message': 'Must specify both related_type and related_id or related_md5.'
}
return status
related_obj = None
if related_id or related_md5:
if related_id:
related_obj = class_from_id(related_type, related_id)
else:
related_obj = class_from_value(related_type, related_md5)
if not related_obj:
status = {
'success': False,
'message': 'Related object not found.'
}
return status
# generate md5 and timestamp
md5 = hashlib.md5(data).hexdigest()
timestamp = datetime.datetime.now()
# generate Certificate
cert = Certificate.objects(md5=md5).first()
if not cert:
cert = Certificate()
cert.filename = filename
cert.created = timestamp
cert.size = len(data)
cert.description = description
cert.md5 = md5
# generate source information and add to certificate
if isinstance(source_name, basestring) and len(source_name) > 0:
s = create_embedded_source(source_name,
method=method,
reference=reference,
analyst=user)
cert.add_source(s)
elif isinstance(source_name, EmbeddedSource):
cert.add_source(source_name, method=method, reference=reference)
elif isinstance(source_name, list) and len(source_name) > 0:
for s in source_name:
if isinstance(s, EmbeddedSource):
cert.add_source(s, method=method, reference=reference)
if bucket_list:
cert.add_bucket_list(bucket_list, user)
if ticket:
cert.add_ticket(ticket, user)
# add file to GridFS
if not isinstance(cert.filedata.grid_id, ObjectId):
cert.add_file_data(data)
# save cert
cert.save(username=user)
cert.reload()
# run certificate triage
if len(AnalysisResult.objects(object_id=str(cert.id))) < 1 and data:
run_triage(cert, user)
# update relationship if a related top-level object is supplied
if related_obj and cert:
if not relationship:
relationship = "Related_To"
cert.add_relationship(rel_item=related_obj,
rel_type=relationship,
analyst=user,
get_rels=False)
related_obj.save(username=user)
cert.save(username=user)
status = {
'success': True,
'message': 'Uploaded certificate',
'md5': md5,
'id': str(cert.id),
'object': cert
}
return status
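# Hedged usage sketch (hypothetical filename, source name, user and byte string;
# assumes a configured CRITs deployment with MongoDB available):
#
#     status = handle_cert_file('ca.pem', cert_bytes, 'Example Source',
#                               user='analyst1',
#                               description='Root CA certificate',
#                               bucket_list='certs,pki')
#     if status['success']:
#         print status['md5']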
def delete_cert(md5, username=None):
"""
Delete a Certificate.
:param md5: The MD5 of the Certificate to delete.
:type md5: str
:param username: The user deleting the certificate.
:type username: str
:returns: True, False
"""
if is_admin(username):
cert = Certificate.objects(md5=md5).first()
if cert:
cert.delete(username=username)
return True
else:
return False
else:
return False
|
import numpy as np
import random
import milk.supervised.svm
import milk.supervised.multi
from milk.supervised.classifier import ctransforms
from .fast_classifier import fast_classifier
import milksets.wine
features,labels = milksets.wine.load()
A = np.arange(len(features))
random.seed(9876543210)
random.shuffle(A)
features = features[A]
labels = labels[A]
labelset = set(labels)
base = ctransforms(
    milk.supervised.svm.svm_raw(C=2., kernel=milk.supervised.svm.rbf_kernel(2.**-3)),
    milk.supervised.svm.svm_binary())
def test_one_against_rest():
M = milk.supervised.multi.one_against_rest(base)
M = M.train(features[:100,:],labels[:100])
tlabels = [M.apply(f) for f in features[100:]]
for tl in tlabels:
assert tl in labelset
def test_one_against_one():
M = milk.supervised.multi.one_against_one(base)
M = M.train(features[:100,:],labels[:100])
tlabels = [M.apply(f) for f in features[100:]]
for tl in tlabels:
assert tl in labelset
tlabels_many = M.apply_many(features[100:])
assert np.all(tlabels == tlabels_many)
def test_two_thirds():
np.random.seed(2345)
C = milk.supervised.defaultclassifier('fast')
X = np.random.rand(120,4)
X[:40] += np.random.rand(40,4)
X[:40] += np.random.rand(40,4)
X[40:80] -= np.random.rand(40,4)
X[40:80] -= np.random.rand(40,4)
Y = np.repeat(np.arange(3), 40)
model = C.train(X,Y)
Y_ = np.array([model.apply(x) for x in X])
assert (Y_ == Y).mean() * 3 > 2
def test_multi_labels():
clabels = [[lab, lab+7] for lab in labels]
multi_label = milk.supervised.multi.one_against_rest_multi(base)
model = multi_label.train(features[::2], clabels[::2])
test_vals = [model.apply(f) for f in features[1::2]]
for ts in test_vals:
if 0.0 in ts: assert 7.0 in ts
if 1.0 in ts: assert 8.0 in ts
if 2.0 in ts: assert 9.0 in ts
def test_classifier_no_set_options():
# Basically these should not raise an exception
milk.supervised.multi.one_against_rest_multi(fast_classifier())
milk.supervised.multi.one_against_rest(fast_classifier())
milk.supervised.multi.one_against_one(fast_classifier())
def test_tree():
mtree = milk.supervised.multi.multi_tree_learner(fast_classifier())
labels = [0,1,2,2,3,3,3,3]
features = np.random.random_sample((len(labels), 8))
model = mtree.train(features, labels)
counts = np.zeros(4)
for ell in labels:
counts[ell] += 1
g0,g1 = milk.supervised.multi.split(counts)
assert np.all(g0 == [3]) or np.all(g1 == [3])
def list_to_zero(v):
if isinstance(v, list):
return 1000
return v
def r(m):
if len(m) == 1: return int(m[0])
else: return sorted([r(m[1]), r(m[2])], key=list_to_zero)
assert r(model.model) == [3,[2,[0,1]]]
|
from nose.tools import assert_true, nottest
CUSHION_PERCENT = 0.01
LOG_ALL_RESULTS = False
BENCHMARK_TO_DESIRED_KEY_MAP = {
"index": "Index splitting",
"random": "Random splitting",
"scaffold": "Scaffold splitting",
"logreg": "logistic regression",
"tf": "Multitask network",
"tf_robust": "robust MT-NN",
"tf_regression": "NN regression",
"graphconv": "graph convolution",
"graphconvreg": "graphconv regression",
}
DESIRED_RESULTS_CSV = "devtools/jenkins/desired_results.csv"
TEST_RESULTS_CSV = "examples/results.csv"
def parse_desired_results(desired_results):
retval = []
for line in desired_results:
        fields = line.split(',')
        retval.append({
            "split": fields[0],
            "data_set": fields[1],
            "model": fields[2],
            "train_score": float(fields[3]),
            "test_score": float(fields[4])
        })
return retval
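# Illustration of the CSV layout parse_desired_results() assumes (the header row
# is stripped by the caller via readlines()[1:]; the score values below are made
# up for this example):
#
#     split,data_set,model,train_score,test_score
#     Random splitting,tox21,Multitask network,0.856,0.792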
@nottest
def parse_test_results(test_results):
retval = []
for line in test_results:
        fields = line.split(',')
        retval.append({
            "split": BENCHMARK_TO_DESIRED_KEY_MAP[fields[2]],
            "data_set": fields[1],
            "model": BENCHMARK_TO_DESIRED_KEY_MAP[fields[5]],
            "train_score": float(fields[6]),
            "test_score": float(fields[9])
        })
return retval
def find_desired_result(result, desired_results):
for desired_result in desired_results:
if result['data_set'] == desired_result['data_set'] and \
result['split'] == desired_result['split'] and \
result['model'] == desired_result['model']:
return desired_result
raise Exception("Unable to find desired result \n%s" % result)
def is_good_result(my_result, desired_result):
retval = True
message = []
for key in ['train_score', 'test_score']:
# Higher is Better
desired_value = desired_result[key] - CUSHION_PERCENT
if my_result[key] < desired_value or LOG_ALL_RESULTS:
message_part = "%s,%s,%s,%s,%s,%s" % (
my_result['data_set'], my_result['model'], my_result['split'], key,
my_result[key], desired_result[key])
message.append(message_part)
retval = False
return retval, message
def test_compare_results():
desired_results = open(DESIRED_RESULTS_CSV).readlines()[1:]
desired_results = parse_desired_results(desired_results)
test_results = open(TEST_RESULTS_CSV).readlines()
test_results = parse_test_results(test_results)
failures = []
exceptions = []
for test_result in test_results:
try:
desired_result = find_desired_result(test_result, desired_results)
passes, message = is_good_result(test_result, desired_result)
if not passes:
failures.extend(message)
        except Exception as e:
            exceptions.append("Unable to find desired result for %s: %s"
                              % (test_result, e))
for exception in exceptions:
print(exception)
for failure in failures:
print(failure)
assert_true(len(exceptions) == 0, "Error parsing performance results")
assert_true(len(failures) == 0, "Some performance benchmarks not passed")
if __name__ == "__main__":
test_compare_results()
|
from collections import OrderedDict
from typing import Optional
from qrl.core import config
from qrl.core.Message import Message
from qrl.core.MessageRequest import MessageRequest
from qrl.core.txs.CoinBase import CoinBase
from qrl.generated import qrllegacy_pb2
from qrl.generated.qrllegacy_pb2 import LegacyMessage
class MessageReceipt(object):
"""
1> dict Hash to peer
2> dict peer to Hash
Remove hash
1. check peers for that particular hash
2. remove hash from each peer in peer to hash
3. Finally remove hash from hash to peer
Remove peer
1. Check hash for that particular peer
2. remove peer from each hash in hash to peer
3. remove peer from peer to hash
    If a peer that was asked for a particular hash message fails to
    provide it, that peer is assumed not to have the message for that
    hash, so the peer is removed from that hash and the hash is removed
    from that peer.
    The next peer is then asked for the same hash message.
    A hash has to be removed once it has no peers left.
    TODO:
    1. If a peer fails to provide a particular message X times within
    the last Y hours, forcefully disconnect that peer. Its IP could be
    added to a block list for a couple of hours.
"""
# TODO: Use enumerations instead of strings to reduce data size
allowed_types = [LegacyMessage.TX,
LegacyMessage.LT,
LegacyMessage.EPH,
LegacyMessage.BK,
LegacyMessage.MT,
LegacyMessage.TK,
LegacyMessage.TT,
LegacyMessage.SL]
services_arg = {
######################
qrllegacy_pb2.LegacyMessage.VE: 'veData',
qrllegacy_pb2.LegacyMessage.PL: 'plData',
qrllegacy_pb2.LegacyMessage.PONG: 'pongData',
######################
qrllegacy_pb2.LegacyMessage.MR: 'mrData',
qrllegacy_pb2.LegacyMessage.SFM: 'mrData',
qrllegacy_pb2.LegacyMessage.BK: 'block',
qrllegacy_pb2.LegacyMessage.FB: 'fbData',
qrllegacy_pb2.LegacyMessage.PB: 'pbData',
############################
qrllegacy_pb2.LegacyMessage.TX: 'txData',
qrllegacy_pb2.LegacyMessage.MT: 'mtData',
qrllegacy_pb2.LegacyMessage.TK: 'tkData',
qrllegacy_pb2.LegacyMessage.TT: 'ttData',
qrllegacy_pb2.LegacyMessage.LT: 'ltData',
qrllegacy_pb2.LegacyMessage.SL: 'slData',
qrllegacy_pb2.LegacyMessage.EPH: 'ephData',
qrllegacy_pb2.LegacyMessage.SYNC: 'syncData',
}
def __init__(self):
self._hash_msg = OrderedDict()
self.requested_hash = OrderedDict()
def register_duplicate(self, msg_hash: bytes):
self.requested_hash[msg_hash].is_duplicate = True
def register(self, msg_type, msg_hash: bytes, pbdata):
"""
        Registers an object and type with msg_hash as the key.
        There is a limit on the number of items (config.dev.message_q_size);
        containers operate in a FIFO fashion.
        :param msg_type: the message type (note: there is no check on msg_type)
        :param msg_hash: hash to use as the key
        :param pbdata: message payload to cache
"""
# FIXME: Hash is converted to string
# FIXME: No check on the validity of the message type
if len(self._hash_msg) >= config.dev.message_q_size:
self.__remove__(self._hash_msg)
message = Message(pbdata, msg_type)
self._hash_msg[msg_hash] = message
def get(self, msg_type, msg_hash: bytes) -> Optional[qrllegacy_pb2.LegacyMessage]:
if not self.contains(msg_hash, msg_type):
return None
msg = self._hash_msg[msg_hash].msg
data = qrllegacy_pb2.LegacyMessage(**{'func_name': msg_type,
self.services_arg[msg_type]: msg})
return data
def add_peer(self, msg_hash: bytes, msg_type, peer, data=None):
# Filter
if msg_type not in self.allowed_types:
return
# Limit amount
if len(self.requested_hash) >= config.dev.message_q_size:
self.__remove__(self.requested_hash)
if msg_hash not in self.requested_hash:
self.requested_hash[msg_hash] = MessageRequest()
self.requested_hash[msg_hash].add_peer(msg_type, peer, data)
def isRequested(self, msg_hash: bytes, peer, block=None):
if msg_hash in self.requested_hash:
if peer in self.requested_hash[msg_hash].peers_connection_list:
return True
if block:
if self.block_params(msg_hash, block):
return True
self.remove_hash(msg_hash, peer)
return False
def block_params(self, msg_hash: bytes, block):
if msg_hash not in self.requested_hash:
return False
params = self.requested_hash[msg_hash].params
coinbase_tx = CoinBase.from_pbdata(block.transactions[0])
if coinbase_tx.addr_from != params.stake_selector:
return False
if block.block_number != params.block_number:
return False
if block.prev_headerhash != params.prev_headerhash:
return False
if block.reveal_hash != params.reveal_hash:
return False
return True
def deregister(self, msg_hash: bytes, msg_type):
if msg_hash in self._hash_msg:
del self._hash_msg[msg_hash]
def __remove__(self, myObj):
myObj.popitem(last=False)
def remove_hash(self, msg_hash: bytes, peer):
if msg_hash in self.requested_hash:
message_request = self.requested_hash[msg_hash]
if peer in message_request.peers_connection_list:
message_request.peers_connection_list.remove(peer)
if not message_request.peers_connection_list:
del self.requested_hash[msg_hash]
def contains(self, msg_hash: bytes, msg_type):
"""
Indicates if a msg_obj has been registered with that
msg_hash and matches the msg_type
:param msg_hash: Hash to use as a key
:param msg_type: The type of msg to match
        :return: True if the msg_obj is known and matches the msg_type
"""
if msg_hash in self._hash_msg:
message = self._hash_msg[msg_hash]
if message.msg_type == msg_type:
return True
return False
def is_callLater_active(self, msg_hash):
if msg_hash in self.requested_hash:
if self.requested_hash[msg_hash].callLater:
return True
return False
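# Hedged usage sketch of the bookkeeping described in the class docstring
# (msg_hash, peer and tx_pbdata are placeholders for real values):
#
#     mr = MessageReceipt()
#     mr.add_peer(msg_hash, LegacyMessage.TX, peer)  # remember who announced the hash
#     if mr.get(LegacyMessage.TX, msg_hash) is None:
#         pass  # not cached yet: request the full message from one of the peers
#     mr.register(LegacyMessage.TX, msg_hash, tx_pbdata)  # cache the received payload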
|
import os
import tornado.autoreload
import tornado.ioloop
import tornado.web
from tornado_project_skeleton.tools import config
pwd = os.path.dirname(os.path.abspath(__file__))
config.add_config_ini('%s/main.ini' % pwd)
class MainHandler(tornado.web.RequestHandler):
def get(self):
name = config.NAME
items = ['smart', 'clever', 'intuitive']
kwargs = {'title': 'My title',
'name': name,
'items': items}
self.render('template.html', **kwargs)
def data_received(self, chunk):
pass
application = tornado.web.Application(
handlers=[
(r'/', MainHandler),
(r'/static/(.*)', tornado.web.StaticFileHandler, {'path': '%s/static/' % pwd}),
])
if __name__ == "__main__":
application.listen(8888)
tornado.autoreload.start()
tornado.autoreload.watch('%s/template.html' % pwd)
tornado.autoreload.watch('%s/static/style.css' % pwd)
tornado.ioloop.IOLoop.instance().start()
|
from __future__ import with_statement
import warnings
from unittest import TestCase
class DeprecationTest(TestCase):
# python >= 2.6 is required to make deprecation warning tests useful
# this DeprecationTest is always successful for python < 2.6
def assertDeprecated(self, cls, *args, **kwargs):
if hasattr(warnings, 'catch_warnings'):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
obj = cls(*args, **kwargs)
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "deprecated" in str(w[-1].message)
def assertNotDeprecated(self, cls, *args, **kwargs):
if hasattr(warnings, 'catch_warnings'):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
obj = cls(*args, **kwargs)
assert len(w) == 0
def test_dashboard(self):
from admin_tools.dashboard import models
self.assertDeprecated(models.Dashboard)
self.assertDeprecated(models.DefaultIndexDashboard)
self.assertDeprecated(models.DefaultAppIndexDashboard, '', [])
self.assertDeprecated(models.AppIndexDashboard, '', [])
self.assertDeprecated(models.DashboardModule)
self.assertDeprecated(models.AppListDashboardModule)
self.assertDeprecated(models.ModelListDashboardModule)
self.assertDeprecated(models.LinkListDashboardModule)
self.assertDeprecated(models.FeedDashboardModule)
def test_dashboard_new(self):
from admin_tools import dashboard
self.assertNotDeprecated(dashboard.Dashboard)
self.assertNotDeprecated(dashboard.DefaultIndexDashboard)
self.assertNotDeprecated(dashboard.DefaultAppIndexDashboard, '', [])
self.assertNotDeprecated(dashboard.AppIndexDashboard, '', [])
from admin_tools.dashboard import modules
self.assertNotDeprecated(modules.DashboardModule)
self.assertNotDeprecated(modules.AppList)
self.assertNotDeprecated(modules.ModelList)
self.assertNotDeprecated(modules.LinkList)
self.assertNotDeprecated(modules.Feed)
def test_menu(self):
from admin_tools.menu import models
self.assertDeprecated(models.Menu)
self.assertDeprecated(models.DefaultMenu)
self.assertDeprecated(models.MenuItem)
self.assertDeprecated(models.AppListMenuItem)
self.assertDeprecated(models.BookmarkMenuItem)
def test_menu_new(self):
from admin_tools import menu
self.assertNotDeprecated(menu.Menu)
self.assertNotDeprecated(menu.DefaultMenu)
from admin_tools.menu import items
self.assertNotDeprecated(items.MenuItem)
self.assertNotDeprecated(items.AppList)
self.assertNotDeprecated(items.Bookmarks)
|
"""
A component which allows you to send data to StatsD.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/statsd/
"""
import logging
import homeassistant.util as util
from homeassistant.const import EVENT_STATE_CHANGED
from homeassistant.helpers import state as state_helper
_LOGGER = logging.getLogger(__name__)
DOMAIN = "statsd"
DEPENDENCIES = []
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 8125
DEFAULT_PREFIX = 'hass'
DEFAULT_RATE = 1
REQUIREMENTS = ['statsd==3.2.1']
CONF_HOST = 'host'
CONF_PORT = 'port'
CONF_PREFIX = 'prefix'
CONF_RATE = 'rate'
CONF_ATTR = 'log_attributes'
def setup(hass, config):
"""Setup the StatsD component."""
import statsd
conf = config[DOMAIN]
host = conf[CONF_HOST]
port = util.convert(conf.get(CONF_PORT), int, DEFAULT_PORT)
sample_rate = util.convert(conf.get(CONF_RATE), int, DEFAULT_RATE)
prefix = util.convert(conf.get(CONF_PREFIX), str, DEFAULT_PREFIX)
show_attribute_flag = conf.get(CONF_ATTR, False)
statsd_client = statsd.StatsClient(
host=host,
port=port,
prefix=prefix
)
def statsd_event_listener(event):
"""Listen for new messages on the bus and sends them to StatsD."""
state = event.data.get('new_state')
if state is None:
return
try:
_state = state_helper.state_as_number(state)
except ValueError:
return
states = dict(state.attributes)
_LOGGER.debug('Sending %s.%s', state.entity_id, _state)
if show_attribute_flag is True:
statsd_client.gauge(
"%s.state" % state.entity_id,
_state,
sample_rate
)
# Send attribute values
for key, value in states.items():
if isinstance(value, (float, int)):
stat = "%s.%s" % (state.entity_id, key.replace(' ', '_'))
statsd_client.gauge(stat, value, sample_rate)
else:
statsd_client.gauge(state.entity_id, _state, sample_rate)
# Increment the count
statsd_client.incr(state.entity_id, rate=sample_rate)
hass.bus.listen(EVENT_STATE_CHANGED, statsd_event_listener)
return True
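# Hedged example configuration.yaml entry for this component, using the CONF_*
# keys defined above (host is the only required key; the values are illustrative):
#
#     statsd:
#       host: 127.0.0.1
#       port: 8125
#       prefix: hass
#       rate: 1
#       log_attributes: true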
|
"""
WSGI config for superlists project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "superlists.settings")
application = get_wsgi_application()
|
from MOAIRuntime import MOAIRuntime
from LuaPrint import tracebackFunc, luaBeforePrint, luaAfterPrint, printSeparator
|
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/intangible/pet/shared_bordok_hue.iff"
result.attribute_template_id = -1
result.stfName("","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
from remote_helper.job import Job, NullJob
class JobContainer:
"""
A container for a single Job instance.
"""
def __init__(self):
self._job = NullJob()
def create_new_job(self, events_url):
self._job = Job(events_url)
def get(self):
return self._job
|
import time
import datetime
import logging
import urllib2
from shapely.wkt import loads
from owslib.util import http_post
from pycsw.core.etree import etree
LOGGER = logging.getLogger(__name__)
ranking_enabled = False
ranking_pass = False
ranking_query_geometry = ''
def get_today_and_now():
"""Get the date, right now, in ISO8601"""
return time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime())
def datetime2iso8601(value):
"""Return a datetime value as ISO8601"""
if value is None:
return None
if isinstance(value, datetime.date):
return value.strftime('%Y-%m-%d')
if value.hour == 0 and value.minute == 0 and value.second == 0:
# YYYY-MM-DD only
return value.strftime('%Y-%m-%d')
else:
return value.strftime('%Y-%m-%dT%H:%M:%SZ')
def get_time_iso2unix(isotime):
"""Convert ISO8601 to UNIX timestamp"""
return int(time.mktime(time.strptime(
isotime, '%Y-%m-%dT%H:%M:%SZ'))) - time.timezone
def get_version_integer(version):
"""Get an integer of the OGC version value x.y.z"""
if version is not None: # split and make integer
xyz = version.split('.')
if len(xyz) != 3:
return -1
try:
return int(xyz[0]) * 10000 + int(xyz[1]) * 100 + int(xyz[2])
except Exception as err:
raise RuntimeError('%s' % str(err))
else: # not a valid version string
return -1
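# For example, get_version_integer('2.0.2') returns 2 * 10000 + 0 * 100 + 2 = 20002,
# while any malformed value (e.g. '2.0' or None) returns -1.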
def find_exml(val, attrib=False):
"""Test that the XML value exists, return value, else return None"""
if val is not None:
if attrib: # it's an XML attribute
return val
else: # it's an XML value
return val.text
else:
return None
def nspath_eval(xpath, nsmap):
"""Return an etree friendly xpath"""
out = []
for chunks in xpath.split('/'):
namespace, element = chunks.split(':')
out.append('{%s}%s' % (nsmap[namespace], element))
return '/'.join(out)
def xmltag_split(tag):
"""Return XML element bare tag name (without prefix)"""
try:
return tag.split('}')[1]
    except Exception:
return tag
def xmltag_split2(tag, namespaces, colon=False):
"""Return XML namespace prefix of element"""
try:
nsuri = tag.split('}')[0].split('{')[1]
nsprefix = [key for key, value in namespaces.iteritems()
if value == nsuri]
value = nsprefix[0]
if colon:
return '%s:' % nsprefix[0]
else:
return nsprefix[0]
    except Exception:
return ''
def wkt2geom(wkt, bounds=True):
"""return Shapely geometry object based on WKT/EWKT"""
geometry = None
if wkt.find('SRID') != -1:
wkt = wkt.split(';')[-1]
geometry = loads(wkt)
if bounds:
return geometry.envelope.bounds
else:
return geometry
def bbox2wktpolygon(bbox):
"""Return OGC WKT Polygon of a simple bbox string"""
tmp = bbox.split(',')
minx = float(tmp[0])
miny = float(tmp[1])
maxx = float(tmp[2])
maxy = float(tmp[3])
return 'POLYGON((%.2f %.2f, %.2f %.2f, %.2f %.2f, %.2f %.2f, %.2f %.2f))' \
% (minx, miny, minx, maxy, maxx, maxy, maxx, miny, minx, miny)
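# For example, bbox2wktpolygon('-180,-90,180,90') returns
# 'POLYGON((-180.00 -90.00, -180.00 90.00, 180.00 90.00, 180.00 -90.00, -180.00 -90.00))'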
def query_spatial(bbox_data_wkt, bbox_input_wkt, predicate, distance):
"""perform spatial query"""
if bbox_data_wkt is None or bbox_input_wkt is None:
return 'false'
if predicate in ['beyond', 'dwithin'] and distance == 'false':
return 'false'
if bbox_data_wkt.find('SRID') != -1: # it's EWKT; chop off 'SRID=\d+;'
bbox1 = loads(bbox_data_wkt.split(';')[-1])
else:
bbox1 = loads(bbox_data_wkt)
bbox2 = loads(bbox_input_wkt)
# map query to Shapely Binary Predicates:
if predicate == 'bbox':
result = bbox1.intersects(bbox2)
elif predicate == 'beyond':
result = bbox1.distance(bbox2) > float(distance)
elif predicate == 'contains':
result = bbox1.contains(bbox2)
elif predicate == 'crosses':
result = bbox1.crosses(bbox2)
elif predicate == 'disjoint':
result = bbox1.disjoint(bbox2)
elif predicate == 'dwithin':
result = bbox1.distance(bbox2) <= float(distance)
elif predicate == 'equals':
result = bbox1.equals(bbox2)
elif predicate == 'intersects':
result = bbox1.intersects(bbox2)
elif predicate == 'overlaps':
if bbox1.intersects(bbox2) and not bbox1.touches(bbox2):
result = True
else:
result = False
elif predicate == 'touches':
result = bbox1.touches(bbox2)
elif predicate == 'within':
result = bbox1.within(bbox2)
else:
raise RuntimeError('Invalid spatial query predicate: %s' % predicate)
if result:
return 'true'
else:
return 'false'
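# Worked example: two unit squares sharing the edge x=1 intersect (touching
# counts as intersecting in Shapely) but neither lies within the other:
#
#     a = 'POLYGON((0 0, 0 1, 1 1, 1 0, 0 0))'
#     b = 'POLYGON((1 0, 1 1, 2 1, 2 0, 1 0))'
#     query_spatial(a, b, 'intersects', 'false')  # -> 'true'
#     query_spatial(a, b, 'within', 'false')      # -> 'false'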
def get_geometry_area(geometry):
"""Derive area of a given geometry"""
try:
if geometry is not None:
return str(loads(geometry).area)
return '0'
    except Exception:
return '0'
def get_spatial_overlay_rank(target_geometry, query_geometry):
"""Derive spatial overlay rank for geospatial search as per Lanfear (2006)
http://pubs.usgs.gov/of/2006/1279/2006-1279.pdf"""
from shapely.geometry.base import BaseGeometry
#TODO: Add those parameters to config file
kt = 1.0
kq = 1.0
if target_geometry is not None and query_geometry is not None:
try:
q_geom = loads(query_geometry)
t_geom = loads(target_geometry)
Q = q_geom.area
T = t_geom.area
if any(item == 0.0 for item in [Q, T]):
LOGGER.warn('Geometry has no area')
return '0'
X = t_geom.intersection(q_geom).area
if kt == 1.0 and kq == 1.0:
LOGGER.debug('Spatial Rank: %s', str((X/Q)*(X/T)))
return str((X/Q)*(X/T))
else:
LOGGER.debug('Spatial Rank: %s', str(((X/Q)**kq)*((X/T)**kt)))
return str(((X/Q)**kq)*((X/T)**kt))
except Exception as err:
LOGGER.warn('Cannot derive spatial overlay ranking %s', err)
return '0'
return '0'
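# Worked example of the Lanfear rank with kt = kq = 1: a 2x2 target and a 2x2
# query that overlap in a 1x2 strip give X = 2 and Q = T = 4, so the rank is
# (X/Q) * (X/T) = (2/4) * (2/4) = 0.25:
#
#     t = 'POLYGON((0 0, 0 2, 2 2, 2 0, 0 0))'
#     q = 'POLYGON((1 0, 1 2, 3 2, 3 0, 1 0))'
#     get_spatial_overlay_rank(t, q)  # -> '0.25'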
def bbox_from_polygons(bboxs):
"""Derive an aggregated bbox from n polygons"""
from shapely.geometry import MultiPolygon
polys = []
for bbx in bboxs:
polys.append(loads(bbx))
try:
bbx = MultiPolygon(polys).bounds
bstr = '%.2f,%.2f,%.2f,%.2f' % (bbx[0], bbx[1], bbx[2], bbx[3])
return bbox2wktpolygon(bstr)
except Exception as err:
raise RuntimeError('Cannot aggregate polygons: %s' % str(err))
def update_xpath(nsmap, xml, recprop):
"""Update XML document XPath values"""
if isinstance(xml, unicode): # not lxml serialized yet
xml = etree.fromstring(xml)
recprop = eval(recprop)
nsmap = eval(nsmap)
try:
nodes = xml.xpath(recprop['rp']['xpath'], namespaces=nsmap)
if len(nodes) > 0: # matches
for node1 in nodes:
if node1.text != recprop['value']: # values differ, update
node1.text = recprop['value']
except Exception as err:
raise RuntimeError('ERROR: %s' % str(err))
return etree.tostring(xml)
def transform_mappings(queryables, typename, reverse=False):
"""transform metadata model mappings"""
if reverse: # from csw:Record
for qbl in queryables.keys():
if qbl in typename.values():
tmp = [k for k, v in typename.iteritems() if v == qbl][0]
val = queryables[tmp]
queryables[qbl] = {}
queryables[qbl]['xpath'] = val['xpath']
queryables[qbl]['dbcol'] = val['dbcol']
else: # to csw:Record
for qbl in queryables.keys():
if qbl in typename.keys():
                queryables[qbl] = queryables[typename[qbl]]
def get_anytext(bag):
"""
generate bag of text for free text searches
accepts list of words, string of XML, or etree.Element
"""
if isinstance(bag, list): # list of words
return ' '.join(filter(None, bag)).strip()
else: # xml
if isinstance(bag, unicode) or isinstance(bag, str): # not serialized yet
bag = etree.fromstring(bag)
# get all XML element content
return ' '.join([value.strip() for value in bag.xpath('//text()')])
def exml2dict(element, namespaces):
"""Convert an lxml object to JSON
From:
http://bitbucket.org/smulloni/pesterfish/src/1578db946d74/pesterfish.py
"""
jdict = dict(tag='%s%s' % (xmltag_split2(element.tag, namespaces, True),
xmltag_split(element.tag)))
if element.text:
if element.text.find('\n') == -1:
jdict['text'] = element.text
    if element.attrib:
        # lxml attribute values are plain strings, so they are stored as-is
        jdict['attributes'] = dict(('%s%s' % (xmltag_split2(k, namespaces, True),
                                              xmltag_split(k)), v)
                                   for k, v in element.attrib.items())
children = element.getchildren()
if children:
jdict['children'] = map(lambda x: exml2dict(x, namespaces), children)
return jdict
def getqattr(obj, name):
"""get value of an object, safely"""
try:
value = getattr(obj, name)
if hasattr(value, '__call__'): # function generated value
LOGGER.debug('attribute is a function')
if name.find('link') != -1: # list of link tuple quadruplets
LOGGER.debug('attribute is a link')
return _linkify(value())
return value()
elif (isinstance(value, datetime.datetime)
or isinstance(value, datetime.date)): # datetime object
LOGGER.debug('attribute is a date')
return datetime2iso8601(value)
return value
    except Exception:
return None
def _linkify(value):
"""create link format"""
out = []
for link in value:
out.append(','.join(list(link)))
return '^'.join(out)
def http_request(method, url, request=None, timeout=30):
"""Perform HTTP request"""
if method == 'POST':
return http_post(url, request, timeout=timeout)
else: # GET
request = urllib2.Request(url)
request.add_header('User-Agent', 'pycsw (http://pycsw.org/)')
return urllib2.urlopen(request, timeout=timeout).read()
def bind_url(url):
"""binds an HTTP GET query string endpiont"""
if url.find('?') == -1: # like http://host/wms
binder = '?'
# if like http://host/wms?foo=bar& or http://host/wms?foo=bar
if url.find('=') != -1:
if url.find('&', -1) != -1: # like http://host/wms?foo=bar&
binder = ''
else: # like http://host/wms?foo=bar
binder = '&'
# if like http://host/wms?foo
if url.find('?') != -1:
if url.find('?', -1) != -1: # like http://host/wms?
binder = ''
elif url.find('&', -1) == -1: # like http://host/wms?foo=bar
binder = '&'
return '%s%s' % (url, binder)
def ip_in_network_cidr(ip, net):
"""decipher whether IP is within CIDR range"""
ipaddr = int(''.join([ '%02x' % int(x) for x in ip.split('.') ]), 16)
netstr, bits = net.split('/')
netaddr = int(''.join([ '%02x' % int(x) for x in netstr.split('.') ]), 16)
mask = (0xffffffff << (32 - int(bits))) & 0xffffffff
return (ipaddr & mask) == (netaddr & mask)
def ipaddress_in_whitelist(ipaddress, whitelist):
"""
decipher whether IP is in IP whitelist
IP whitelist is a list supporting:
- single IP address (e.g. 192.168.0.1)
    - IP range using CIDR (e.g. 192.168.0.0/22)
- IP range using subnet wildcard (e.g. 192.168.0.*, 192.168.*)
"""
if ipaddress in whitelist:
return True
else:
for white in whitelist:
if white.find('/') != -1: # CIDR
if ip_in_network_cidr(ipaddress, white):
return True
elif white.find('*') != -1: # subnet wildcard
if ipaddress.startswith(white.split('*')[0]):
return True
return False
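# For example, with whitelist = ['127.0.0.1', '10.0.0.0/8', '192.168.*']:
#
#     ipaddress_in_whitelist('127.0.0.1', whitelist)     # exact match -> True
#     ipaddress_in_whitelist('10.3.4.5', whitelist)      # CIDR match -> True
#     ipaddress_in_whitelist('192.168.22.9', whitelist)  # wildcard match -> True
#     ipaddress_in_whitelist('172.16.0.1', whitelist)    # -> False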
def sniff_table(table):
"""Checks whether repository.table is a schema namespaced"""
schema = None
table = table
if table.find('.') != - 1:
schema, table = table.split('.')
return [schema, table]
def validate_4326(bbox_list):
    '''Helper function to validate an EPSG:4326 bounding box'''
is_valid = False
if ((-180.0 <= float(bbox_list[0]) <= 180.0) and
(-90.0 <= float(bbox_list[1]) <= 90.0) and
(-180.0 <= float(bbox_list[2]) <= 180.0) and
(-90.0 <= float(bbox_list[3]) <= 90.0)):
is_valid = True
return is_valid
|
from django.apps import AppConfig
class WwwConfig(AppConfig):
name = "www"
|
from __future__ import division
import os
import numpy as np
import tensorflow as tf
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _float32_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def convert_write(X, Y, writer, tf_stuff):
X = np.round(X * 255)
#if len(X.shape) < 3:
# X = np.expand_dims(X, axis=2)
x_serialised = tf_stuff['tf_sess'].run(tf_stuff['img_serialised'], feed_dict={
tf_stuff['tf_input'] : X.astype(np.uint8),
})
#x_serialised = X.astype(np.uint8).tostring()
y_serialised = Y.astype(np.int64).tostring()
#currently, we require a 3d shape for both X and Y
#so we add singleton dimensions as necessary
x_shape = []
y_shape = []
for i in xrange(3):
if len(X.shape) <= i:
x_shape.append(1)
else:
x_shape.append(X.shape[i])
if len(Y.shape) <= i:
y_shape.append(1)
else:
y_shape.append(Y.shape[i])
x_shape_serialised = np.asarray(x_shape).astype(np.int64).tostring()
y_shape_serialised = np.asarray(y_shape).astype(np.int64).tostring()
example = tf.train.Example(features=tf.train.Features(feature={
'x_raw': _bytes_feature(x_serialised),
'y_raw': _bytes_feature(y_serialised),
'x_shape': _bytes_feature(x_shape_serialised),
'y_shape': _bytes_feature(y_shape_serialised),}))
writer.write(example.SerializeToString())
def write_all_to_record(X, Y, writer, tf_stuff):
    num_examples = X.shape[0]
    for i in xrange(num_examples):
        convert_write(X[i, :], Y[i], writer, tf_stuff)
    writer.close()
def write_all_to_records(X, Y, writer_base_name, max_items_in_record, shape):
tf_stuff = {}
tf_stuff['tf_sess'] = tf.Session()
tf_stuff['tf_input'] = tf.placeholder(tf.uint8)
tf_stuff['img_serialised'] = tf.image.encode_jpeg(tf_stuff['tf_input'], optimize_size=True, quality=100)
num_examples = X.shape[0]
current_writer_idx = 0
num_examples_in_current_writer = 0
current_writer = tf.python_io.TFRecordWriter(writer_base_name + '_' + str(current_writer_idx) + '.tfrecords')
perm = np.random.permutation(num_examples)
for i in xrange(num_examples):
if num_examples_in_current_writer >= max_items_in_record:
print('Num examples written in file: ' + str(num_examples_in_current_writer))
current_writer.close()
current_writer_idx += 1
current_writer = tf.python_io.TFRecordWriter(writer_base_name + '_' + str(current_writer_idx) + '.tfrecords')
num_examples_in_current_writer = 0
print('Started Writer: ' + str(current_writer_idx))
        # otherwise, read the image and write it to the dataset
idx = perm[i]
convert_write(X[idx, :].reshape(shape), Y[idx], current_writer, tf_stuff)
num_examples_in_current_writer += 1
current_writer.close()
print('Created ' + str(current_writer_idx + 1) + ' tfrecord files.')
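# Hedged read-back sketch for the records written above (TF 1.x API; the file
# name is illustrative). x_raw holds a JPEG byte string, so it must be decoded
# with tf.image.decode_jpeg before use:
#
#     for record in tf.python_io.tf_record_iterator('train_0.tfrecords'):
#         example = tf.train.Example.FromString(record)
#         feats = example.features.feature
#         x_shape = np.fromstring(feats['x_shape'].bytes_list.value[0], np.int64)
#         y_shape = np.fromstring(feats['y_shape'].bytes_list.value[0], np.int64)
#         y = np.fromstring(feats['y_raw'].bytes_list.value[0], np.int64).reshape(y_shape)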
def load_dataset(dir_name, subdir_name=''):
if subdir_name != '':
data_dir = dir_name + '/' + subdir_name
else:
data_dir = dir_name
print('Loading data from directory: [ ' + data_dir + ' ]...')
data = {}
data['train_x'] = np.load(data_dir + 'trainX.npy')
data['train_y'] = np.load(data_dir + 'trainY.npy')
data['valid_x'] = np.load(data_dir + 'validX.npy')
data['valid_y'] = np.load(data_dir + 'validY.npy')
data['test_x'] = np.load(data_dir + 'testX.npy')
if os.path.exists(data_dir + 'testY.npy'):
data['test_y'] = np.load(data_dir + 'testY.npy')
return data
def load_mnist_dataset(dir_name, subdir_name=''):
if subdir_name != '':
data_dir = dir_name + '/' + subdir_name
else:
data_dir = dir_name
print('Loading data from directory: [ ' + data_dir + ' ]...')
train = np.load(data_dir + '/rotated_train.npz')
valid = np.load(data_dir + '/rotated_valid.npz')
test = np.load(data_dir + '/rotated_test.npz')
data = {}
data['train_x'] = train['x']
data['train_y'] = train['y']
data['valid_x'] = valid['x']
data['valid_y'] = valid['y']
data['test_x'] = test['x']
data['test_y'] = test['y']
return data
print('Processing CIFAR10')
data = load_dataset('/home/sgarbin/TFR_CONVERSION/cifar_numpy/')
writer_train = '/home/sgarbin/TFR_CONVERSION/cifar_numpy/tfrecords/train'
writer_valid = '/home/sgarbin/TFR_CONVERSION/cifar_numpy/tfrecords/valid'
writer_test = '/home/sgarbin/TFR_CONVERSION/cifar_numpy/tfrecords/test'
write_all_to_records(data['train_x'], data['train_y'], writer_train, 2000, (32, 32, 3))
write_all_to_records(data['valid_x'], data['valid_y'], writer_valid, 2000, (32, 32, 3))
write_all_to_records(data['test_x'], data['test_y'], writer_test, 2000, (32, 32, 3))
print('Processing rotated MNIST')
data = load_mnist_dataset('/home/sgarbin/TFR_CONVERSION/mnist_rotation_new/')
writer_train = '/home/sgarbin/TFR_CONVERSION/mnist_rotation_new/tfrecords/train'
writer_valid = '/home/sgarbin/TFR_CONVERSION/mnist_rotation_new/tfrecords/valid'
writer_test = '/home/sgarbin/TFR_CONVERSION/mnist_rotation_new/tfrecords/test'
write_all_to_records(data['train_x'], data['train_y'], writer_train, 2000,(28, 28, 1))
write_all_to_records(data['valid_x'], data['valid_y'], writer_valid, 2000, (28, 28, 1))
write_all_to_records(data['test_x'], data['test_y'], writer_test, 2000, (28, 28, 1))
print(data['valid_y'][0:200])
|
from __future__ import print_function, absolute_import, division
import pytest
import numpy as np
import numpy.testing as npt
import astropy.units as u
from ..io.sim_tools import create_cube_header, create_image_header, create_fits_hdu
def test_create_cube_header():
pixel_scale = 0.001 * u.deg
spec_pixel_scale = 1000. * u.m / u.s
beamfwhm = 0.003 * u.deg
imshape = (2, 3, 4)
restfreq = 1.4 * u.GHz
bunit = u.K
hdr = create_cube_header(pixel_scale, spec_pixel_scale, beamfwhm, imshape,
restfreq, bunit)
assert hdr['CDELT1'] == -pixel_scale.value
assert hdr['CDELT2'] == pixel_scale.value
assert hdr['CDELT3'] == spec_pixel_scale.value
assert hdr['CUNIT1'] == 'deg'
assert hdr['CUNIT2'] == 'deg'
assert u.Unit(hdr['CUNIT3']).is_equivalent(spec_pixel_scale.unit)
assert hdr["RESTFRQ"] == restfreq.to(u.Hz).value
def test_create_image_header():
pixel_scale = 0.001 * u.deg
beamfwhm = 0.003 * u.deg
imshape = (3, 4)
restfreq = 1.4 * u.GHz
bunit = u.K
hdr = create_image_header(pixel_scale, beamfwhm, imshape,
restfreq, bunit)
assert hdr['CDELT1'] == -pixel_scale.value
assert hdr['CDELT2'] == pixel_scale.value
assert hdr['CUNIT1'] == 'deg'
assert hdr['CUNIT2'] == 'deg'
assert hdr["RESTFRQ"] == restfreq.to(u.Hz).value
def test_create_cube_hdu():
cube = np.zeros((2, 3, 4))
pixel_scale = 0.001 * u.deg
spec_pixel_scale = 1000. * u.m / u.s
beamfwhm = 0.003 * u.deg
imshape = (2, 3, 4)
restfreq = 1.4 * u.GHz
bunit = u.K
hdu = create_fits_hdu(cube, pixel_scale, spec_pixel_scale, beamfwhm, imshape, restfreq, bunit)
assert hdu.header['NAXIS'] == 3
assert hdu.header['NAXIS1'] == 4
assert hdu.header['NAXIS2'] == 3
assert hdu.header['NAXIS3'] == 2
def test_create_image_hdu():
img = np.zeros((3, 4))
pixel_scale = 0.001 * u.deg
beamfwhm = 0.003 * u.deg
imshape = (3, 4)
restfreq = 1.4 * u.GHz
bunit = u.K
hdu = create_fits_hdu(img, pixel_scale, beamfwhm, imshape, restfreq, bunit)
assert hdu.header['NAXIS'] == 2
assert hdu.header['NAXIS1'] == 4
assert hdu.header['NAXIS2'] == 3
|
'''cgat_logfiles2tsv.py - create summary from logfiles
===================================================
Purpose
-------
This script takes a list of logfiles and collates summary information
about execution times. This can be useful for post-mortem
benchmark analysis.
This script uses the ``# job finished`` tag that is added by scripts
using the module :mod:`CGAT.Experiment`.
Usage
-----
To collect logfile information from all files matching the pattern
``bwa.dir/mC-juvenile-stressed-R[12]*.log``, type::
python cgat_logfiles2tsv.py --glob="bwa.dir/mC-juvenile-stressed-R[12]*.log"
to receive output such as this::
file chunks wall user sys cuser csys
bwa.dir/mC-juvenile-stressed-R2.bwa.bam.log 2 2552.00 1563.91 13.73 0.00 0.04
bwa.dir/mC-juvenile-stressed-R2.bwa.bw.log 1 2068.00 170.66 4.50 237.51 1194.92
bwa.dir/mC-juvenile-stressed-R1.bwa.bam.log 2 1378.00 762.52 9.90 0.00 0.04
bwa.dir/mC-juvenile-stressed-R1.bwa.contextstats.log 1 948.00 150.21 2.13 726.00 7.92
bwa.dir/mC-juvenile-stressed-R2.bwa.contextstats.log 1 935.00 137.00 2.26 775.07 8.35
bwa.dir/mC-juvenile-stressed-R1.bwa.bw.log 1 2150.00 159.64 4.12 214.59 1566.41
total 8 10031.00 2943.94 36.64 1953.17 2777.68
The output lists for each file how often it was executed (``chunks``) and
the total execution time in terms of wall clock time, user time, system
time, child process user time and child process system time.
The last line contains the sum total.
Type::
python cgat_logfiles2tsv.py --help
for command line help.
Command line options
--------------------
'''
import sys
import re
import gzip
import glob
import CGAT.Experiment as E
import CGAT.Logfile as Logfile
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
parser = E.OptionParser(
version="%prog version: $Id$",
usage=globals()["__doc__"])
parser.add_option(
"-g", "--glob", dest="glob_pattern", type="string",
help="glob pattern to use for collecting files [%default].")
parser.add_option(
"-f", "--file-pattern", dest="file_pattern", type="string",
help="only check files matching this pattern [%default].")
parser.add_option("-m", "--mode", dest="mode", type="choice",
choices=("file", "node"),
help="analysis mode [%default].")
parser.add_option(
"-r", "--recursive", action="store_true",
help="recursively look for logfiles from current directory "
"[%default].")
parser.set_defaults(
truncate_sites_list=0,
glob_pattern="*.log",
mode="file",
recursive=False,
)
(options, args) = E.Start(parser)
if args:
filenames = args
elif options.glob_pattern:
filenames = glob.glob(options.glob_pattern)
if len(filenames) == 0:
raise ValueError("no files to analyse")
if options.mode == "file":
totals = Logfile.LogFileData()
options.stdout.write("file\t%s\n" % totals.getHeader())
for filename in filenames:
if filename == "-":
infile = sys.stdin
elif filename[-3:] == ".gz":
infile = gzip.open(filename, "r")
else:
infile = open(filename, "r")
subtotals = Logfile.LogFileData()
for line in infile:
subtotals.add(line)
infile.close()
options.stdout.write("%s\t%s\n" % (filename, str(subtotals)))
totals += subtotals
options.stdout.write("%s\t%s\n" % ("total", str(totals)))
elif options.mode == "node":
chunks_per_node = {}
rx_node = re.compile("# job started at .* \d+ on (\S+)")
for filename in filenames:
if filename == "-":
infile = sys.stdin
elif filename[-3:] == ".gz":
infile = gzip.open(filename, "r")
else:
infile = open(filename, "r")
data = Logfile.LogFileDataLines()
for line in infile:
if rx_node.match(line):
node_id = rx_node.match(line).groups()[0]
data = Logfile.LogFileDataLines()
if node_id not in chunks_per_node:
chunks_per_node[node_id] = []
chunks_per_node[node_id].append(data)
continue
data.add(line)
options.stdout.write("node\t%s\n" % data.getHeader())
total = Logfile.LogFileDataLines()
for node, data in sorted(chunks_per_node.items()):
subtotal = Logfile.LogFileDataLines()
for d in data:
# options.stdout.write( "%s\t%s\n" % (node, str(d) ) )
subtotal += d
options.stdout.write("%s\t%s\n" % (node, str(subtotal)))
total += subtotal
options.stdout.write("%s\t%s\n" % ("total", str(total)))
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/droid/shared_droid_damage_repair_kit_c.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|